While looking through the data converter and data class functions, I realized I should make the raw data match the expected format as closely as possible.
Let's get the model running first and do the refactoring later.
Rename file
Unlike KITTI, the file names here carry an _rgb suffix, so I'll strip it off.
import argparse
import os
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='input folder path')
args = parser.parse_args()
return args
def convert_folder(folder):
filenames = os.listdir(folder)
for idx, filename in enumerate(tqdm(filenames)) :
output_path = os.path.join(folder,filename)
new_path = output_path.replace('_rgb','')
os.rename(output_path,new_path)
def main():
args = parse_args()
folder = args.path
convert_folder(folder)
if __name__ == '__main__':
main()
python ./data/carla/rename_img.py --path ./data/carla/Image_RGB/
The renaming works fine, though it takes a little while for the changes to show up.
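Just to be safe, a quick check like the sketch below (folder path assumed from the command above) confirms that no file with the _rgb suffix is left behind:

import os

folder = './data/carla/Image_RGB/'  # assumed: same folder as in the rename command above
leftover = [f for f in os.listdir(folder) if '_rgb' in f]
print(f'files still containing "_rgb": {len(leftover)}')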
ImageSets
- We need txt files listing the ids that belong to test, train, trainval, and val, so I wrote the code for that.
- The train : val : test split is 8 : 1 : 1, and trainval is the combined list of train and val.
import argparse
import os
import errno
import random
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', help='input carla folder path')
parser.add_argument('-o', '--output_path', help='ImageSets folder path')
args = parser.parse_args()
return args
def split(input_folder):
# split train to train, val, test, trainval (8:1:1:9)
filenames = os.listdir(input_folder)
random.shuffle(filenames)
trainval = filenames[:len(filenames)*9//10]
test = filenames[len(filenames)*9//10:]
val = trainval[:len(trainval)//9]
train = trainval[len(trainval)//9:]
return train, val, test, trainval
def write_split_set(filenames, output_path):
with open(output_path, "w") as txtfile :
for idx, filename in enumerate(filenames) :
filename = filename.replace('.png','')
txtfile.write(f'{filename}\n')
def check_dir(output_folder):
try :
os.makedirs(output_folder)
except OSError as exc :
if exc.errno == errno.EEXIST and os.path.isdir(output_folder) :
pass
def main():
args = parse_args()
carla_folder = args.input_path
output_folder = args.output_path
# make ImageSets dir
check_dir(output_folder)
# split
Image_folder = os.path.join(carla_folder,'Image_RGB')
train, val, test, trainval = split(Image_folder)
# save txt
train_output_path = os.path.join(output_folder,'train.txt')
write_split_set(train, train_output_path)
val_output_path = os.path.join(output_folder,'val.txt')
write_split_set(val, val_output_path)
trainval_output_path = os.path.join(output_folder,'trainval.txt')
write_split_set(trainval, trainval_output_path)
test_output_path = os.path.join(output_folder,'test.txt')
write_split_set(test, test_output_path)
if __name__ == '__main__':
main()
python ./data/carla/imagesettxt.py -i ./data/carla/ -o ./data/carla/ImageSets
Everything seems to have converted correctly.
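To double check, counting the lines of each txt file should give roughly 8 : 1 : 1, with trainval equal to train + val (paths assumed from the command above):

import os

imagesets = './data/carla/ImageSets'  # assumed output folder
for name in ['train', 'val', 'test', 'trainval']:
    with open(os.path.join(imagesets, name + '.txt')) as f:
        print(name, len(f.read().splitlines()))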
Calib
- P0 ( 3x4 )
- P1 ( 3x4 )
- P2 ( 3x4 ) : left color camera
- P3 ( 3x4 )
- R0_rect ( 3x3 )
- Tr_velo_to_cam ( 3x4 )
- Tr_imu_to_velo ( 3x4 )
- We only have a single camera, yet there are quite a few entries here.
- These values don't go into training anyway, so for now I just copy-pasted them to match the format.
- Thinking about the refactoring ahead is daunting.
import argparse
import os
import errno
calib = '''P0: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 \
0.000000000000e+00 0.000000000000e+00 7.215377000000e+02 \
1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 \
0.000000000000e+00 1.000000000000e+00 0.000000000000e+00\n\
P1: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 \
-3.875744000000e+02 0.000000000000e+00 7.215377000000e+02 \
1.728540000000e+02 0.000000000000e+00 0.000000000000e+00 \
0.000000000000e+00 1.000000000000e+00 0.000000000000e+00\n\
P2: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 \
4.485728000000e+01 0.000000000000e+00 7.215377000000e+02 \
1.728540000000e+02 2.163791000000e-01 0.000000000000e+00 \
0.000000000000e+00 1.000000000000e+00 2.745884000000e-03\n\
P3: 7.215377000000e+02 0.000000000000e+00 6.095593000000e+02 \
-3.395242000000e+02 0.000000000000e+00 7.215377000000e+02 \
1.728540000000e+02 2.199936000000e+00 0.000000000000e+00 \
0.000000000000e+00 1.000000000000e+00 2.729905000000e-03\n\
R0_rect: 9.999239000000e-01 9.837760000000e-03 -7.445048000000e-03 \
-9.869795000000e-03 9.999421000000e-01 -4.278459000000e-03 \
7.402527000000e-03 4.351614000000e-03 9.999631000000e-01\n\
Tr_velo_to_cam: 7.533745000000e-03 -9.999714000000e-01 -6.166020000000e-04 -4.069766000000e-03 \
1.480249000000e-02 7.280733000000e-04 -9.998902000000e-01 -7.631618000000e-02 \
9.998621000000e-01 7.523790000000e-03 1.480755000000e-02 -2.717806000000e-01\n\
Tr_imu_to_velo: 9.999976000000e-01 7.553071000000e-04 -2.035826000000e-03 -8.086759000000e-01 \
-7.854027000000e-04 9.998898000000e-01 -1.482298000000e-02 3.195559000000e-01 \
2.024406000000e-03 1.482454000000e-02 9.998881000000e-01 -7.997231000000e-01'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', help='input Image_RGB path')
parser.add_argument('-o', '--output_path', help='output folder path')
args = parser.parse_args()
return args
def convert_folder(input_folder, output_folder):
filenames = os.listdir(input_folder)
for filename in filenames :
output_path = os.path.join(output_folder,filename[:-3]+'txt')
write_void_calib(output_path)
def write_void_calib(output_path):
with open(output_path, "w") as txtfile :
txtfile.write(calib)
def check_dir(output_folder):
try :
os.makedirs(output_folder)
except OSError as exc :
if exc.errno == errno.EEXIST and os.path.isdir(output_folder) :
pass
def main():
args = parse_args()
input_folder = args.input_path
output_folder = args.output_path
check_dir(output_folder)
convert_folder(input_folder, output_folder)
if __name__ == '__main__':
main()
python ./data/carla/calibtxt.py -i ./data/carla/Image_RGB/ -o ./data/carla/calib
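As a quick sanity check, one of the generated calib files can be parsed back to confirm that every key carries the expected number of values (12 for the P and Tr matrices, 9 for R0_rect); the folder path below is assumed from the command above:

import os

calib_dir = './data/carla/calib'  # assumed output folder
sample = os.path.join(calib_dir, sorted(os.listdir(calib_dir))[0])
with open(sample) as f:
    for line in f:
        key, values = line.split(':', 1)
        print(key, len(values.split()))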
label_2
- txt files with the label values (15 values per object, plus a score)
- type : class name
- truncated : not available -> 0
- occluded : not available -> 0
- alpha : not available -> 0
- bbox : not available -> 0, 0, 0, 0
- dimensions : available
- location : available
- rotation_y : available
- score : not available -> 1
For the fields we don't have, I'll just proceed with these placeholder values for now.
import json
import numpy as np
import argparse
import os
import errno
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jsonfolder_path', help='input json folder path',)
parser.add_argument('-p','--objpcdfolder_path',\
help='pcd folder path with OBJTag')
parser.add_argument('-o', '--output_path', help='output folder path')
args = parser.parse_args()
return args
def cuboid_world_list(json_path):
'''
input : output json file path
return : lidar_bboxes_points(n,8,3), yaws(n,), category names(n,)
^ z x 6 ------ 5
| / / | / |
| / 2 -|---- 1 | h
y | / | | | |
<------|o | 7 -----| 4
|/ o |/ l
3 ------ 0
w
x: front, y: left, z: top (lidar's location x=1, y=0, z=2 )
point0 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['bottom'].values()]
point1 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['top'].values()]
point2 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['top'].values()]
point3 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['bottom'].values()]
point4 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['bottom'].values()]
point5 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['top'].values()]
point6 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['top'].values()]
point7 = [x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['bottom'].values()]
'''
with open(json_path, "r") as json_file :
data_info = json.load(json_file)
names = []
yaws = []
for i in range(len(data_info['gt_data']['label_data']['cuboid_world'])):
yaws.append(data_info['gt_data']['label_data']['cuboid_world'][i]['rotation']["z"])
names.append(data_info['gt_data']['label_data']['cuboid_world'][i]['category']['level1'])
if i == 0 :
lidar_bboxes_points = np.array([[[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['bottom'].values()],
]], dtype=np.float32)
else :
lidar_bboxes_points = np.append(lidar_bboxes_points,
np.array([[
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['right']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['back']['left']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['bottom'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['right']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['top'].values()],
[x for x in data_info['gt_data']['label_data']['cuboid_world'][i]['front']['left']['bottom'].values()],
]])
, axis = 0)
return lidar_bboxes_points, yaws, names
def filter_cuboid_list(pcd_path, json_path):
'''
input : pcd path (which has objTag), output json path
output : filtered cuboid list(n,8,3), filtered yaws(n), filtered names(n)
'''
pcd_array = np.loadtxt(pcd_path, dtype=np.float32, skiprows=10, delimiter=' ')
# OBJTag (pcd_array[:,-1]) : 1 if obj else 0
instance_pcd = pcd_array[np.where(pcd_array[:,-1]>0)]
    # cuboids before filtering
    cuboid_list, yaws, names = cuboid_world_list(json_path)
# filtering the cuboid by the counts of points
filter_cuboid_list = []
filter_yaws = []
filter_names = []
ego = 0
for idx, obj in enumerate(cuboid_list):
        # filter out the ego vehicle:
        # if both the x range and the y range of the box contain 0, the box sits on the lidar origin, i.e. it is the ego vehicle
        if (not ego) and (min(obj[:,0]) < 0) and (max(obj[:,0]) > 0) \
                and (min(obj[:,1]) < 0) and (max(obj[:,1]) > 0):
ego += 1
continue
        # points_count is the number of OBJTag points that fall inside the cuboid's axis-aligned bounds
points_count = sum ( (min(obj[:,0]) <= instance_pcd[:,0]) & (instance_pcd[:,0] <= max(obj[:,0]))
& (min(obj[:,1]) <= instance_pcd[:,1]) & (instance_pcd[:,1] <= max(obj[:,1]))
& (min(obj[:,2]) <= instance_pcd[:,2]) & (instance_pcd[:,2] <= max(obj[:,2]))
)
        # drop boxes that contain fewer than 10 points
if points_count < 10 :
continue
else :
filter_cuboid_list.append(idx)
filter_yaws.append(yaws[idx])
filter_names.append(names[idx])
return cuboid_list[filter_cuboid_list], filter_yaws, filter_names
def get_parameters(cuboids):
'''
input : cuboids(n,8,3)
return : dimensions(n,3), locations(n,3)
'''
for idx, box in enumerate(cuboids) :
point0 = box[0]
point1 = box[1]
point3 = box[3]
point4 = box[4]
point7 = box[7]
h = max([abs(round(x-y,2)) for (x,y) in zip(point0,point1)])
l = max([abs(round(x-y,2)) for (x,y) in zip(point0,point4)])
w = max([abs(round(x-y,2)) for (x,y) in zip(point0,point3)])
x,y,z = [(x+y)/2 for (x,y) in zip(point0,point7)]
if idx == 0 :
dimensions = np.array([[l,w,h]], dtype=np.float32)
locations = np.array([[x,y,z]], dtype=np.float32)
else :
dimensions = np.append(dimensions,
np.array([[l,w,h]])
, axis = 0)
locations = np.append(locations,
np.array([[x,y,z]])
, axis = 0)
return dimensions, locations
def convert_folder(json_folder_path, pcd_folder_path, output_folder_path):
# file names
filenames = os.listdir(pcd_folder_path)
assert filenames[0][-3:] == 'pcd', 'not pcd'
# each file
for filename in filenames :
pcd_path = os.path.join(pcd_folder_path,filename)
json_path = os.path.join(json_folder_path,filename[:-3]+'json')
output_path = os.path.join(output_folder_path,filename[:-3]+'txt')
cuboids, yaws, names =\
filter_cuboid_list(pcd_path, json_path)
dimensions, locations =\
get_parameters(cuboids)
write_void_label(names, dimensions, locations, yaws, output_path)
def write_void_label(names, dimension, location, yaws, output_path):
with open(output_path, "w") as txtfile :
for idx, name in enumerate(names):
            l, w, h = dimension[idx]  # dimensions were stored as (l, w, h) in get_parameters
x, y, z = location[idx]
yaw = yaws[idx]
txtfile.write(f'{name}') # type
txtfile.write(' 0') # void truncated
txtfile.write(' 0') # void occluded
txtfile.write(' 0') # void alpha
txtfile.write(' 0 0 0 0') # void bounding box
            txtfile.write(f' {h} {w} {l}') # dimensions, KITTI order: height, width, length
txtfile.write(f' {x} {y} {z}') # location
txtfile.write(f' {yaw}') # yaw
txtfile.write(' 1\n') # void score
def check_dir(output_folder_path):
try :
os.makedirs(output_folder_path)
except OSError as exc :
if exc.errno == errno.EEXIST and os.path.isdir(output_folder_path) :
pass
def main():
args = parse_args()
json_folder_path = args.jsonfolder_path
pcd_folder_path = args.objpcdfolder_path
output_folder_path = args.output_path
check_dir(output_folder_path)
convert_folder(json_folder_path, pcd_folder_path, output_folder_path)
if __name__ == '__main__':
main()
python ./data/carla/labeltxt.py -j ./data/carla/outputJson/ -p ./data/carla/PCD/ -o ./data/carla/label_2
Be careful here: even a single extra or missing space breaks the format.
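Since a single wrong space breaks the parser, a small check (path assumed from the command above) can verify that every line in the generated label files splits into exactly 16 whitespace-separated tokens, i.e. the 15 KITTI fields plus the score:

import os

label_dir = './data/carla/label_2'  # assumed output folder
for filename in os.listdir(label_dir):
    with open(os.path.join(label_dir, filename)) as f:
        for i, line in enumerate(f):
            tokens = line.split()
            assert len(tokens) == 16, f'{filename} line {i}: {len(tokens)} tokens'
print('all label lines have 16 tokens')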
plytobin
PCD files contain x, y, z, CosAngle, ObjIdx, ObjTag,
while PCD2 files contain x, y, z, I.
The KITTI dataset needs data in the PCD2 form.
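Before converting, it's worth printing the property names of one file to confirm it really has the x, y, z, I layout; the folder path is an assumption based on the command further below:

import os
from plyfile import PlyData

ply_dir = './data/carla/PCD2'  # assumed input folder
sample = os.path.join(ply_dir, sorted(os.listdir(ply_dir))[0])
plydata = PlyData.read(sample)
print(plydata.elements[0].data.dtype.names)  # expect four properties: x, y, z and an intensity field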
import numpy as np
import pandas as pd
import argparse
import os
import errno
from plyfile import PlyData
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', help='input folder path')
parser.add_argument('-o', '--output_path', help='output folder path')
args = parser.parse_args()
return args
def convert_ply(input_path, output_path):
plydata = PlyData.read(input_path) # read file
data = plydata.elements[0].data # read data
data_pd = pd.DataFrame(data) # convert to DataFrame
data_np = np.zeros(data_pd.shape, dtype=np.float32) # initialize array to store data
property_names = data[0].dtype.names # read names of properties
for i, name in enumerate(
property_names): # read data by property
data_np[:, i] = data_pd[name]
data_np.astype(np.float32).tofile(output_path)
def convert_folder(input_folder, output_folder):
filenames = os.listdir(input_folder)
for filename in filenames :
input_path = os.path.join(input_folder,filename)
output_path = os.path.join(output_folder,filename[:-3]+'bin')
convert_ply(input_path,output_path)
def check_dir(output_folder):
try :
os.makedirs(output_folder)
except OSError as exc :
if exc.errno == errno.EEXIST and os.path.isdir(output_folder) :
pass
def main():
args = parse_args()
input_folder = args.input_path
output_folder = args.output_path
check_dir(output_folder)
convert_folder(input_folder, output_folder)
if __name__ == '__main__':
main()
python ./data/carla/plytobin.py -i ./data/carla/PCD2/ -o ./data/carla/velodyne/
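The converted .bin files can be loaded back to check that they reshape into (N, 4) the way the KITTI loaders expect (output path assumed from the command above):

import os
import numpy as np

bin_dir = './data/carla/velodyne'  # assumed output folder
sample = os.path.join(bin_dir, sorted(os.listdir(bin_dir))[0])
points = np.fromfile(sample, dtype=np.float32).reshape(-1, 4)
print(points.shape, points[:3])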
Move file
- Now I'll move the generated data into the training and testing folders.
- Files are moved according to the splits created in ImageSets.
import argparse
import os
import errno
import shutil
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', help='Carla folder path')
args = parser.parse_args()
return args
def check_dir(output_folder):
try :
os.makedirs(output_folder)
except OSError as exc :
if exc.errno == errno.EEXIST and os.path.isdir(output_folder) :
pass
def move_files(input_path, old_folder_name,\
new_folder_name, trainval_list, test_list):
# old to new
old_folder = os.path.join(input_path,old_folder_name)
if old_folder_name == 'Image_RGB':
not_number = '.png'
elif old_folder_name == 'calib' or old_folder_name == 'label_2':
not_number = '.txt'
else : # old_folder_name == 'velodyne':
not_number = '.bin'
# training folder
new_folder_path = os.path.join(input_path,'training',new_folder_name)
for num in trainval_list:
file_name = num + not_number
new_file_path = os.path.join(new_folder_path,file_name)
old_file_path = os.path.join(old_folder,file_name)
shutil.move(old_file_path, new_file_path)
# testing folder
new_folder_path = os.path.join(input_path,'testing',new_folder_name)
for num in test_list:
file_name = num + not_number
new_file_path = os.path.join(new_folder_path,file_name)
old_file_path = os.path.join(old_folder,file_name)
shutil.move(old_file_path, new_file_path)
def make_folder(output_folder):
testing_folder = os.path.join(output_folder,'testing')
check_dir(testing_folder)
calib_folder = os.path.join(testing_folder,'calib')
check_dir(calib_folder)
label_2_folder = os.path.join(testing_folder,'label_2')
check_dir(label_2_folder)
image_2_folder = os.path.join(testing_folder,'image_2')
check_dir(image_2_folder)
velodyne_folder = os.path.join(testing_folder,'velodyne')
check_dir(velodyne_folder)
training_folder = os.path.join(output_folder,'training')
check_dir(training_folder)
calib_folder = os.path.join(training_folder,'calib')
check_dir(calib_folder)
label_2_folder = os.path.join(training_folder,'label_2')
check_dir(label_2_folder)
image_2_folder = os.path.join(training_folder,'image_2')
check_dir(image_2_folder)
velodyne_folder = os.path.join(training_folder,'velodyne')
check_dir(velodyne_folder)
def files(txt_file):
with open(txt_file, 'r') as f:
file_list = f.readlines()
file_list = [file.rstrip() for file in file_list]
return file_list
def remove_folder(input_path):
    # the original folders are empty after the move, so rmdir is enough
    old_calib = os.path.join(input_path,'calib')
    old_label_2 = os.path.join(input_path,'label_2')
    old_velodyne = os.path.join(input_path,'velodyne')
    old_Img = os.path.join(input_path,'Image_RGB')
    os.rmdir(old_calib)
    os.rmdir(old_label_2)
    os.rmdir(old_velodyne)
    os.rmdir(old_Img)
def main():
args = parse_args()
input_path = args.input_path
trainval_txt = os.path.join(input_path,'ImageSets', 'trainval.txt')
test_txt = os.path.join(input_path,'ImageSets', 'test.txt')
# test_list & trainval_list
trainval_list = files(trainval_txt)
test_list = files(test_txt)
# mkdir
make_folder(input_path)
# move file
move_files(input_path, 'calib',\
'calib', trainval_list, test_list)
move_files(input_path, 'label_2',\
'label_2', trainval_list, test_list)
move_files(input_path, 'Image_RGB',\
'image_2', trainval_list, test_list)
move_files(input_path, 'velodyne',\
'velodyne', trainval_list, test_list)
# rmdir
remove_folder(input_path)
if __name__ == '__main__':
main()
python ./data/carla/move_files.py -i ./data/carla/
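After the move, a quick count per split (root path assumed from the command above) confirms that the KITTI-style folder layout is complete and nothing was left behind:

import os

root = './data/carla'  # assumed root folder
for split in ['training', 'testing']:
    for sub in ['calib', 'image_2', 'label_2', 'velodyne']:
        folder = os.path.join(root, split, sub)
        print(split, sub, len(os.listdir(folder)))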
Initial data format setup
- Now let's try out the KITTI-specific code.
Code source: https://mmdetection3d.readthedocs.io/en/latest/data_preparation.html#
python tools/create_data.py kitti --root-path ./data/kitti --out-dir ./data/kitti --extra-tag kitti
Our data only contains cars, by the way.
For now, at least, the KITTI-specific script ran without any errors.
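I haven't listed the outputs here, but if the script behaves as it does for KITTI it should write several info .pkl files into the out-dir; loading them back is an easy way to see whether the annotations made it in. The location and naming below are assumptions, so adjust them to whatever create_data actually produced:

import glob
import pickle

for pkl_path in sorted(glob.glob('./data/carla/*.pkl')):  # assumed out-dir
    with open(pkl_path, 'rb') as f:
        infos = pickle.load(f)
    print(pkl_path, type(infos), len(infos))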
Modifying the config
- Time to adjust the config.
- I copied the config file from the mmdetection3d > configs > pointpillars folder into a separate folder of my own so it's easier to work with.
The changes themselves are minimal: since our data currently contains nothing but cars, I kept the class list as is and only swapped in our folder paths.
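For reference, the change amounts to something like the sketch below; the field names follow the standard KITTI PointPillars config in mmdetection3d, and the exact names in the copied config may differ:

# Illustrative fragment only, not the full config.
data_root = 'data/carla/'        # was 'data/kitti/'
class_names = ['Car']            # unchanged: our data also only contains cars
# the ann_file entries inside the dataset dicts build on data_root as well,
# e.g. data_root + 'kitti_infos_train.pkl'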
Train
python tools/train.py configs/nia_config/hv_pointpillars_secfpn_6x8_160e_carla-3d-car.py
ValueError: need at least one array to stack
The data just isn't making it into the model.
Let's give it another try next week.
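"need at least one array to stack" is what np.stack raises when it is handed an empty list, so one thing worth checking before next week is whether any ground-truth objects actually survived the filtering. A rough check over the moved label files (path assumed) would be:

import os

label_dir = './data/carla/training/label_2'  # assumed path after the move step
empty = [f for f in os.listdir(label_dir)
         if os.path.getsize(os.path.join(label_dir, f)) == 0]
print(f'{len(empty)} of {len(os.listdir(label_dir))} label files are empty')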