in datasets/depth_dataset.py [0:0]
def __init__(self, cfg):
self.split = "train" ### Default is training
self.label_objs = []
self.data_paths = []
self.label_paths = []
self.cfg = cfg
self.batchsize_per_replica = cfg["BATCHSIZE_PER_REPLICA"]
self.label_sources = []  # cfg["LABEL_SOURCES"]
self.dataset_names = cfg["DATASET_NAMES"]
self.label_type = cfg["LABEL_TYPE"]
self.AUGMENT_COORDS_TO_FEATS = False  # optional
self._labels_init = False
self._get_data_files("train")
self.data_objs = np.load(self.data_paths[0]) ### Only load the first one for now
#### Voxelizer setup
if ("Lidar" in cfg) and cfg["VOX"]:
self.VOXEL_SIZE = [0.1, 0.1, 0.2]
self.point_cloud_range = POINT_RANGE  # e.g. np.array([0., -75., -3., 75., 75., 3.], dtype=np.float32)
self.MAX_POINTS_PER_VOXEL = 5
self.MAX_NUMBER_OF_VOXELS = 16000
self.voxel_generator = VoxelGenerator(
voxel_size=self.VOXEL_SIZE,
point_cloud_range=self.point_cloud_range,
max_num_points=self.MAX_POINTS_PER_VOXEL,
max_voxels=self.MAX_NUMBER_OF_VOXELS
)
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(self.VOXEL_SIZE)
self.grid_size = np.round(grid_size).astype(np.int64)
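# e.g. with the example range [0., -75., -3., 75., 75., 3.] and VOXEL_SIZE [0.1, 0.1, 0.2],
# this gives a grid of (75-0)/0.1 x (75+75)/0.1 x (3+3)/0.2 = 750 x 1500 x 30 cells
# (see the hypothetical voxelization sketch after __init__ below).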
self.voxel_size = self.VOXEL_SIZE
elif cfg["VOX"]:
augment_data = (self.split == "train")  # self.split is stored lowercase, so comparing against "TRAIN" would never match
#### Vox parameters here
self.VOXEL_SIZE = 0.05  # 5 cm (0.02 is an alternative setting)
self.CLIP_BOUND = None  # e.g. (-1000, -1000, -1000, 1000, 1000, 1000)
self.data_aug_color_trans_ratio = 0.1
self.data_aug_color_jitter_std = 0.05
self.ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))
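# Presumably (granularity, magnitude) pairs for transforms.ElasticDistortion,
# i.e. elastic distortion applied at two spatial scales.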
if augment_data:
self.prevoxel_transform_train = [transforms.ElasticDistortion(self.ELASTIC_DISTORT_PARAMS)]
self.prevoxel_transform = transforms.Compose(self.prevoxel_transform_train)
self.input_transforms = [
transforms.RandomDropout(0.2),
transforms.RandomHorizontalFlip('z', False),
# transforms.ChromaticAutoContrast(),
transforms.ChromaticTranslation(self.data_aug_color_trans_ratio),
transforms.ChromaticJitter(self.data_aug_color_jitter_std),
# transforms.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
]
self.input_transforms = transforms.Compose(self.input_transforms)
# Coordinate augmentation arguments: unlike feature augmentation, coordinate
# augmentation has to be applied before voxelization (see the usage sketch after __init__ below).
self.SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
self.ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi))
self.TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))
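# i.e. uniform scaling by 0.9-1.1x, small tilts (up to pi/64) around the first two axes,
# a full rotation around the third (z) axis, and up to +/-20% random translation in x/y only.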
self.voxelizer = Voxelizer(
voxel_size=self.VOXEL_SIZE,
clip_bound=self.CLIP_BOUND,
use_augmentation=augment_data,
scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
ignore_label=True)
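# NOTE: the two methods below are NOT part of the original file. They are a minimal,
# hypothetical sketch of how the objects configured above are typically consumed.
# Assumptions: a spconv-style VoxelGenerator whose generate(points) returns voxels,
# voxel coordinates and per-voxel point counts (as a tuple or a dict, depending on the
# version), and MinkowskiNet-style transforms / Voxelizer operating on
# (coords, feats, labels) triples. Exact names and signatures in this repo may differ.
def _voxelize_lidar_sketch(self, points):
    """Hypothetical: quantize a raw LiDAR point cloud (N x C array) into fixed-size voxels."""
    voxel_output = self.voxel_generator.generate(points)
    if isinstance(voxel_output, dict):  # some generator versions return a dict
        voxels = voxel_output["voxels"]
        coordinates = voxel_output["coordinates"]
        num_points = voxel_output["num_points_per_voxel"]
    else:  # others return a plain tuple
        voxels, coordinates, num_points = voxel_output
    return voxels, coordinates, num_points

def _voxelize_dense_sketch(self, coords, feats, labels):
    """Hypothetical: coordinate augmentation before voxelization, feature (color)
    augmentation after it, as noted above the Voxelizer construction."""
    if self.split == "train":  # the augmentation transforms are only built for the train split
        coords, feats, labels = self.prevoxel_transform(coords, feats, labels)
    coords, feats, labels, *_ = self.voxelizer.voxelize(coords, feats, labels)
    if self.split == "train":
        coords, feats, labels = self.input_transforms(coords, feats, labels)
    return coords, feats, labels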