import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from pyquaternion import Quaternion
from NuscenesDataset.nuscenes import NuScenes
from NuscenesDataset.nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.data_classes import LidarPointCloud
nusc = NuScenes(version='v1.0-trainval', dataroot="/home/dooseop/DATASET/nuscenes/", verbose=False)
my_sample = nusc.sample[10]
# The devkit's built-in helper produces the same visualization in one call:
# nusc.render_pointcloud_in_image(my_sample['token'], pointsensor_channel='LIDAR_TOP')
# Look up the LiDAR sweep and the front-camera image recorded for this sample.
sample_record = nusc.get('sample', my_sample['token'])
pointsensor_token = sample_record['data']['LIDAR_TOP']
camera_token = sample_record['data']['CAM_FRONT']
cam = nusc.get('sample_data', camera_token)
pointsensor = nusc.get('sample_data', pointsensor_token)
# Load the raw LiDAR point cloud and the camera image from disk.
pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
pc = LidarPointCloud.from_file(pcl_path)
im = Image.open(os.path.join(nusc.dataroot, cam['filename']))
# Points live in the point sensor frame, so they need to be transformed via the global frame onto the image plane.
# (A composed single-matrix version of this chain is sketched after the plotting code below.)
# First step: transform the pointcloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
pc.translate(np.array(cs_record['translation']))
# Second step: transform from ego to the global frame.
poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
pc.translate(np.array(poserecord['translation']))
# Third step: transform from global into the ego vehicle frame for the timestamp of the image.
poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
pc.translate(-np.array(poserecord['translation']))
pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
pc.translate(-np.array(cs_record['translation']))
pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
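# The points are now in the camera frame, so each point's z-coordinate (row 2 of pc.points)
# is its depth in front of the camera; it is grabbed next and reused as the scatter color.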
depths = pc.points[2, :]
coloring = depths
# Project into the image plane with the camera intrinsics; normalize=True divides by depth,
# i.e. (u, v) = (fx * x / z + cx, fy * y / z + cy).
points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)
# Keep only points at least 1 m in front of the camera and projecting inside the image
# (with a one-pixel margin); this removes points behind or outside the camera view.
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > 1.0)
mask = np.logical_and(mask, points[0, :] > 1)
mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
mask = np.logical_and(mask, points[1, :] > 1)
mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
points = points[:, mask]
coloring = coloring[mask]
# Init axes.
fig, ax = plt.subplots(1, 1, figsize=(9, 16))
ax.imshow(im)
ax.scatter(points[0, :], points[1, :], c=coloring, s=5)
ax.axis('off')
plt.show()
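For reference, the four rotate/translate steps above can be collapsed into a single 4x4 homogeneous transform using the devkit's transform_matrix helper (nuscenes.utils.geometry_utils). The snippet below is a minimal sketch under that assumption: it reuses nusc, pointsensor, cam, and pcl_path from the code above, and the names lidar_to_cam, pts_h, and pts_cam are just illustrative.
# Sketch: compose the lidar -> ego(t_lidar) -> global -> ego(t_cam) -> camera chain into one matrix.
from nuscenes.utils.geometry_utils import transform_matrix
cs_lidar = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
pose_lidar = nusc.get('ego_pose', pointsensor['ego_pose_token'])
pose_cam = nusc.get('ego_pose', cam['ego_pose_token'])
cs_cam = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
lidar_to_ego = transform_matrix(cs_lidar['translation'], Quaternion(cs_lidar['rotation']), inverse=False)
ego_to_global = transform_matrix(pose_lidar['translation'], Quaternion(pose_lidar['rotation']), inverse=False)
global_to_ego = transform_matrix(pose_cam['translation'], Quaternion(pose_cam['rotation']), inverse=True)
ego_to_cam = transform_matrix(cs_cam['translation'], Quaternion(cs_cam['rotation']), inverse=True)
lidar_to_cam = ego_to_cam @ global_to_ego @ ego_to_global @ lidar_to_ego
# Apply the composed transform to a fresh copy of the point cloud in homogeneous coordinates.
pc2 = LidarPointCloud.from_file(pcl_path)
pts_h = np.vstack([pc2.points[:3, :], np.ones((1, pc2.nbr_points()))])
pts_cam = lidar_to_cam @ pts_h  # rows 0..2 are x, y, z in the camera frame, matching pc.points after the fourth step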