

nuScenes devkit: a collection of useful commands

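All snippets below assume self.nusc is an initialized NuScenes instance. A minimal setup sketch (the version and dataroot path are placeholders for your local copy of the dataset):

from nuscenes.nuscenes import NuScenes

# Point dataroot at your local nuScenes download.
nusc = NuScenes(version='v1.0-trainval', dataroot='/data/sets/nuscenes', verbose=True)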

1. How to get scene records

# self.nusc.scene is the list of all scene records in the loaded split.
for scene_record in self.nusc.scene:
    yield scene_record['name'], scene_record

 

2. How to get sample records from a scene record

# Samples form a linked list; walk it from the scene's first sample.
sample_token = scene_record['first_sample_token']
while sample_token:  # the last sample's 'next' is an empty string
    sample_record = self.nusc.get('sample', sample_token)
    sample_token = sample_record['next']
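Section 6 below iterates over seq_sample_recs, a list of all samples in a scene. A small helper built from the loop above (the function name is illustrative):

def get_scene_samples(nusc, scene_record):
    # Collect the sample records of a scene in temporal order.
    samples = []
    sample_token = scene_record['first_sample_token']
    while sample_token:
        samples.append(nusc.get('sample', sample_token))
        sample_token = samples[-1]['next']
    return samples

seq_sample_recs = get_scene_samples(self.nusc, scene_record)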

 

3. How to get the ego-lidar pose from a sample record

import numpy as np
from pyquaternion import Quaternion

lidar_record = self.nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
egolidar = self.nusc.get('ego_pose', lidar_record['ego_pose_token'])
rotation, translation = egolidar['rotation'], egolidar['translation']

flat, inv = True, True
if flat:
    # Keep only the yaw component so the frame stays gravity-aligned.
    yaw = Quaternion(rotation).yaw_pitch_roll[0]
    R = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).rotation_matrix
else:
    R = Quaternion(rotation).rotation_matrix
t = np.array(translation, dtype=np.float32)

pose = np.eye(4, dtype=np.float32)
pose[:3, :3] = R if not inv else R.T
pose[:3, -1] = t if not inv else R.T @ -t

# With inv=True this is the world -> flat ego-lidar transform.
egolidarflat_from_world = pose
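Sections 6 and 9 call a get_pose helper; it is simply the logic above wrapped into a reusable function:

import numpy as np
from pyquaternion import Quaternion

def get_pose(rotation, translation, flat=False, inv=False):
    # Build a 4x4 pose matrix from a nuScenes quaternion + translation.
    # flat=True keeps only yaw (gravity-aligned frame); inv=True returns
    # the inverse transform (world -> sensor/ego).
    if flat:
        yaw = Quaternion(rotation).yaw_pitch_roll[0]
        R = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).rotation_matrix
    else:
        R = Quaternion(rotation).rotation_matrix
    t = np.array(translation, dtype=np.float32)

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R if not inv else R.T
    pose[:3, -1] = t if not inv else R.T @ -t
    return pose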

 

4. How to get annotation records from a sample record

# The sample record already carries its annotation tokens in 'anns'.
for ann_token in sample_record['anns']:
    ann = self.nusc.get('sample_annotation', ann_token)
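For example, to keep only vehicle annotations (the same filter section 6 uses; category names are dot-separated, e.g. 'vehicle.car'):

vehicle_anns = []
for ann_token in sample_record['anns']:
    ann = self.nusc.get('sample_annotation', ann_token)
    if 'vehicle' in ann['category_name']:
        vehicle_anns.append(ann)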

 

5. How to get the bounding box of an annotation

import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils import data_classes

ann = self.nusc.get('sample_annotation', ann_token)
box = data_classes.Box(ann['translation'], ann['size'], Quaternion(ann['rotation']))
corners_world_coord = box.bottom_corners()  # 3 x 4, in world coordinates

# Move the box into the flat ego frame (egopose is an ego_pose record,
# e.g. the egolidar record from section 3).
trans = -np.array(egopose['translation'])
yaw = Quaternion(egopose['rotation']).yaw_pitch_roll[0]
rot = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse
box.translate(trans)
box.rotate(rot)
corners_egolidar_coord = box.bottom_corners()  # 3 x 4
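Note the order: translate by the negated ego translation first, then rotate by the inverse yaw-only rotation. Box.translate and Box.rotate modify the box in place, so this applies the inverse of the flat ego pose, matching the inv=True branch of the pose code in section 3 (p' = R p + t inverts to p = R^-1 (p' - t)).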

 

6. How to draw top-view (BEV) images

import cv2
import numpy as np
import matplotlib.pyplot as plt
from pyquaternion import Quaternion
from nuscenes.utils import data_classes

# BEV grid: h x w pixels covering h_meters x w_meters, centered on the ego.
h, w = self.cfg['bev']['h'], self.cfg['bev']['w']
h_meters, w_meters = self.cfg['bev']['h_meters'], self.cfg['bev']['w_meters']
h_scale, w_scale = float(h) / h_meters, float(w) / w_meters
h_shift, w_shift = int(h / 2), int(w / 2)

topview = np.zeros((h, w), dtype=np.uint8)

# Reference frame: the flat ego-lidar pose of the first sample in the sequence.
lidar_record = self.nusc.get('sample_data', seq_sample_recs[0]['data']['LIDAR_TOP'])
egolidar = self.nusc.get('ego_pose', lidar_record['ego_pose_token'])
w2e_ref = get_pose(egolidar['rotation'], egolidar['translation'], inv=True, flat=True)

ins_map = {}  # instance_token -> integer instance id
for t in range(len(seq_sample_recs)):

    for ann_token in seq_sample_recs[t]['anns']:
        ann = self.nusc.get('sample_annotation', ann_token)
        if 'vehicle' in ann['category_name']:

            if ann['instance_token'] not in ins_map:
                ins_map[ann['instance_token']] = len(ins_map) + 1
            ins_id = ins_map[ann['instance_token']]

            box = data_classes.Box(ann['translation'], ann['size'], Quaternion(ann['rotation']))
            corners_w = box.bottom_corners()  # 3 x 4, world coordinates
            corners_w = np.concatenate([corners_w, np.ones((1, 4))], axis=0)  # 4 x 4, homogeneous
            corners_e_ref = (w2e_ref @ corners_w)[:2]  # 2 x 4, reference ego frame

            # Meters -> pixels; negate so forward is up and left is left.
            col_pels = -(corners_e_ref[1, :] * w_scale).astype(np.int32) + w_shift
            row_pels = -(corners_e_ref[0, :] * h_scale).astype(np.int32) + h_shift

            corners_tv = np.stack([col_pels, row_pels], axis=1)  # 4 x 2
            topview = cv2.fillConvexPoly(topview, corners_tv, ins_id)

plt.imshow(topview.astype('float'))
plt.show()
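The snippet assumes a BEV config under self.cfg['bev']; illustrative values only (e.g. a 200 x 200 px grid covering 100 x 100 m, i.e. 0.5 m per pixel):

cfg = {'bev': {'h': 200, 'w': 200, 'h_meters': 100.0, 'w_meters': 100.0}}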

 

7. How to get scene location info from a sample record

scene_token = sample_record['scene_token']
scene_record = self.nusc.get('scene', scene_token)
location = self.nusc.get('log', scene_record['log_token'])['location']
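The returned location is one of the four nuScenes map names: 'singapore-onenorth', 'singapore-hollandvillage', 'singapore-queenstown', or 'boston-seaport'.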

 

8. How to get an ego pose from a sensor

# lidar sensor
lidar_token = sample_record['data']['LIDAR_TOP']
lidar_record = self.nusc.get('sample_data', lidar_token)
egolidar = self.nusc.get('ego_pose', lidar_record['ego_pose_token'])
# camera
cam_token = sample_record['data']['CAM_FRONT_LEFT']
cam_record = self.nusc.get('sample_data', cam_token)
egocam = self.nusc.get('ego_pose', cam_record['ego_pose_token'])
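Each sample_data record points to its own ego_pose, recorded at that sensor's timestamp, so egolidar and egocam generally differ slightly even within the same sample.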

 

9. How to get a camera pose

# The calibrated_sensor record holds the camera extrinsics relative to the ego frame.
cam_token = sample_record['data']['CAM_FRONT_LEFT']
cam_record = self.nusc.get('sample_data', cam_token)
cam = self.nusc.get('calibrated_sensor', cam_record['calibrated_sensor_token'])
egocam_to_cam = get_pose(cam['rotation'], cam['translation'], flat=False, inv=True)  # ego -> camera
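To go all the way from world coordinates to the camera, compose this with the ego pose from section 8; a sketch assuming egocam is the ego_pose record of the same camera sample_data and get_pose is the helper from section 3:

import numpy as np

world_to_egocam = get_pose(egocam['rotation'], egocam['translation'], flat=False, inv=True)
world_to_cam = egocam_to_cam @ world_to_egocam  # world -> camera

# Intrinsics are stored on the same calibrated_sensor record.
K = np.array(cam['camera_intrinsic'])  # 3 x 3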