# configs/detection3d/bevformer/bevformer_tiny_r50_nuscenes.py
# BEVFormer-tiny consumes at least 6700MB of GPU memory.
# Compared to bevformer_base, bevformer_tiny has:
# smaller backbone: R101-DCN -> R50
# smaller BEV: 200*200 -> 50*50
# fewer encoder layers: 6 -> 3
# smaller input size: 1600*900 -> 800*450
# multi-scale features -> single-scale features (C5)
_base_ = ['configs/base.py']
# If the point cloud range is changed, the model's point cloud range should
# be changed accordingly.
point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size = [0.2, 0.2, 8]
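# (51.2 - (-51.2)) / 0.2 = 512, matching the train_cfg grid_size below.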
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# For nuScenes we usually do 10-class detection
CLASSES = [
'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
input_modality = dict(
use_lidar=False,
use_camera=True,
use_radar=False,
use_map=False,
use_external=True)
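# Transformer channel widths. pos_dim is embed_dim // 2 because the learned
# positional encoding concatenates row and column embeddings back to embed_dim.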
embed_dim = 256
pos_dim = embed_dim // 2
ffn_dim = embed_dim * 2
num_levels = 1
bev_h = 50
bev_w = 50
queue_length = 3 # each sequence contains `queue_length` frames.
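# During training, the earlier frames in each queue are only used to build
# history BEV features for the current frame.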
model = dict(
type='BEVFormer',
use_grid_mask=True,
video_test_mode=True,
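    # Extract history-frame image features one frame at a time instead of
    # batching the whole queue, trading speed for lower peak GPU memory.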
extract_feat_serially=True,
pretrained=dict(img='torchvision://resnet50'),
img_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
        # EasyCV's ResNet indexes res stages from 1, so (4, ) selects the final
        # C5 feature map (equivalent to out_indices=(3, ) in the mmdet ResNet).
        out_indices=(4, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
zero_init_residual=True),
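    # Single-level FPN projecting the 2048-channel C5 map down to embed_dim.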
img_neck=dict(
type='FPN',
in_channels=[2048],
out_channels=embed_dim,
start_level=0,
add_extra_convs='on_output',
num_outs=num_levels,
relu_before_extra_convs=True),
pts_bbox_head=dict(
type='BEVFormerHead',
bev_h=bev_h,
bev_w=bev_w,
num_query=900,
num_classes=10,
in_channels=embed_dim,
sync_cls_avg_factor=True,
with_box_refine=True,
as_two_stage=False,
transformer=dict(
type='PerceptionTransformer',
rotate_prev_bev=True,
use_shift=True,
use_can_bus=True,
embed_dims=embed_dim,
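            # Each encoder layer runs temporal self-attention against the
            # previous BEV, then spatial cross-attention that samples image
            # features at num_points_in_pillar heights per BEV pillar.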
encoder=dict(
type='BEVFormerEncoder',
num_layers=3,
pc_range=point_cloud_range,
num_points_in_pillar=4,
return_intermediate=False,
transformerlayers=dict(
type='BEVFormerLayer',
attn_cfgs=[
dict(
type='TemporalSelfAttention',
embed_dims=embed_dim,
num_levels=1),
dict(
type='SpatialCrossAttention',
pc_range=point_cloud_range,
deformable_attention=dict(
type='MSDeformableAttention3D',
embed_dims=embed_dim,
num_points=8,
num_levels=num_levels),
embed_dims=embed_dim,
)
],
ffn_cfgs=dict(
type='FFN',
embed_dims=256,
feedforward_channels=ffn_dim,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
),
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm'))),
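            # DETR-style decoder: 6 layers refining 900 object queries against
            # the BEV features, with iterative box refinement enabled above.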
decoder=dict(
type='Detr3DTransformerDecoder',
num_layers=6,
return_intermediate=True,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=embed_dim,
num_heads=8,
dropout=0.1),
dict(
type='CustomMSDeformableAttention',
embed_dims=embed_dim,
num_levels=1),
],
ffn_cfgs=dict(
type='FFN',
embed_dims=256,
feedforward_channels=ffn_dim,
num_fcs=2,
ffn_drop=0.1,
act_cfg=dict(type='ReLU', inplace=True),
),
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')))),
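        # NMS-free decoding: one-to-one Hungarian matching during training lets
        # the head keep the top max_num boxes directly at test time.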
bbox_coder=dict(
type='NMSFreeBBoxCoder',
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
pc_range=point_cloud_range,
max_num=300,
voxel_size=voxel_size,
num_classes=10),
positional_encoding=dict(
type='LearnedPositionalEncoding',
num_feats=pos_dim,
row_num_embed=bev_h,
col_num_embed=bev_w,
),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
loss_bbox=dict(type='L1Loss', loss_weight=0.25),
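        # GIoU loss is disabled (weight 0.0) and kept only for interface
        # compatibility, mirroring the fake iou_cost in the assigner below.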
loss_iou=dict(type='GIoULoss', loss_weight=0.0)),
# model training and testing settings
train_cfg=dict(
pts=dict(
grid_size=[512, 512, 1],
voxel_size=voxel_size,
point_cloud_range=point_cloud_range,
out_size_factor=4,
assigner=dict(
type='HungarianBBoxAssigner3D',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBox3DL1Cost', weight=0.25),
iou_cost=dict(
type='IoUCost', weight=0.0
), # Fake cost. This is just to make it compatible with DETR head.
pc_range=point_cloud_range))))
dataset_type = 'NuScenesDataset'
data_root = 'data/nuScenes/nuscenes-v1.0/'
train_pipeline = [
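    # The 0.5x scale turns the raw 1600x900 images into 800x450, which
    # PadMultiViewImage later pads to 800x480 (multiples of 32).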
dict(type='RandomScaleImageMultiViewImage', scales=[0.5]),
dict(type='PhotoMetricDistortionMultiViewImage'),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectNameFilter', classes=CLASSES),
dict(type='NormalizeMultiviewImage', **img_norm_cfg),
dict(type='PadMultiViewImage', size_divisor=32),
dict(type='DefaultFormatBundle3D', class_names=CLASSES),
dict(
type='Collect3D',
keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img',
'depth2img', 'cam2img', 'pad_shape', 'scale_factor', 'flip',
'pcd_horizontal_flip', 'pcd_vertical_flip', 'box_mode_3d',
'box_type_3d', 'img_norm_cfg', 'pcd_trans', 'sample_idx',
'prev_idx', 'next_idx', 'pcd_scale_factor', 'pcd_rotation',
'pts_filename', 'transformation_3d_flow', 'scene_token',
'can_bus'))
]
test_pipeline = [
dict(type='NormalizeMultiviewImage', **img_norm_cfg),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1600, 900),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(type='RandomScaleImageMultiViewImage', scales=[0.5]),
dict(type='PadMultiViewImage', size_divisor=32),
dict(
type='DefaultFormatBundle3D',
class_names=CLASSES,
with_label=False),
dict(
type='Collect3D',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img',
'depth2img', 'cam2img', 'pad_shape', 'scale_factor',
'flip', 'pcd_horizontal_flip', 'pcd_vertical_flip',
'box_mode_3d', 'box_type_3d', 'img_norm_cfg',
'pcd_trans', 'sample_idx', 'prev_idx', 'next_idx',
'pcd_scale_factor', 'pcd_rotation', 'pts_filename',
'transformation_3d_flow', 'scene_token', 'can_bus'))
])
]
data = dict(
    imgs_per_gpu=1,  # 8 GPUs, total batch size = 8
workers_per_gpu=4,
pin_memory=True,
# shuffler_sampler=dict(type='DistributedGroupSampler'),
# nonshuffler_sampler=dict(type='DistributedSampler'),
train=dict(
type=dataset_type,
data_source=dict(
type='Det3dSourceNuScenes',
data_root=data_root,
ann_file=data_root + 'nuscenes_infos_temporal_train.pkl',
pipeline=[
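                # The turbojpeg backend (requires PyTurboJPEG) decodes JPEGs
                # faster than the default imread backend.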
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
backend='turbojpeg'),
dict(
type='LoadAnnotations3D',
with_bbox_3d=True,
with_label_3d=True,
with_attr_label=False)
],
classes=CLASSES,
modality=input_modality,
test_mode=False,
use_valid_flag=True,
            # We use box_type_3d='LiDAR' for the KITTI and nuScenes datasets,
            # and box_type_3d='Depth' for the SUN RGB-D and ScanNet datasets.
box_type_3d='LiDAR'),
pipeline=train_pipeline,
queue_length=queue_length,
),
val=dict(
imgs_per_gpu=1,
type=dataset_type,
data_source=dict(
type='Det3dSourceNuScenes',
data_root=data_root,
ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
pipeline=[
dict(
type='LoadMultiViewImageFromFiles',
to_float32=True,
backend='turbojpeg')
],
classes=CLASSES,
modality=input_modality,
test_mode=True),
pipeline=test_pipeline))
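# Fine-tune the ImageNet-pretrained backbone with a 10x smaller learning rate
# than the rest of the model.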
paramwise_cfg = {'img_backbone': dict(lr_mult=0.1)}
optimizer = dict(
type='AdamW', lr=2e-4, paramwise_options=paramwise_cfg, weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
min_lr_ratio=1e-3)
total_epochs = 24
eval_config = dict(initial=False, interval=1, gpu_collect=False)
eval_pipelines = [
dict(
mode='test',
data=data['val'],
dist_eval=True,
evaluators=[
dict(
type='NuScenesEvaluator',
classes=CLASSES,
result_names=['pts_bbox'])
],
)
]
log_config = dict(
interval=50,
hooks=[dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')])
checkpoint_config = dict(interval=1)
cudnn_benchmark = True
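# Export with PAI-Blade: fp16 is enabled, and the ops listed in
# customize_op_black_list are excluded from Blade optimization.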
export = dict(
export_type='blade',
blade_config=dict(
enable_fp16=True,
fp16_fallback_op_ratio=0.0,
customize_op_black_list=[
'aten::select', 'aten::index', 'aten::slice', 'aten::view',
'aten::upsample', 'aten::clamp'
]))