configs/video_recognition/swin/video_swin_b.py [29:116]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
)

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
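# The mean/std above are the standard ImageNet statistics in RGB order;
# to_bgr=False keeps frames RGB as produced by DecordDecode. A minimal
# sketch of the equivalent per-pixel operation (illustrative only; `frame`
# is an assumed float32 HWC numpy array, not part of this config):
#
#   import numpy as np
#   normalized = (frame - np.array(img_norm_cfg['mean'])) \
#       / np.array(img_norm_cfg['std'])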

# Training: one randomly sampled 32-frame clip per video, with random
# resized crop and horizontal flip augmentation.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 256)),
    dict(type='VideoRandomResizedCrop'),
    dict(type='VideoResize', scale=(224, 224), keep_ratio=False),
    dict(type='VideoFlip', flip_ratio=0.5),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs', 'label'])
]
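# A quick sanity sketch of what this pipeline emits (shapes follow from the
# steps above, not from anything else in this excerpt): SampleFrames with
# clip_len=32 and frame_interval=2 covers a ~64-frame temporal window
# (clip_len * frame_interval), and FormatShape('NCTHW') with num_clips=1
# packs each sample, before batching, as
#
#   imgs.shape == (1, 3, 32, 224, 224)  # (num_clips, C, T, H, W)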

# Validation: one deterministic clip per video with a center crop.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 256)),
    dict(type='VideoCenterCrop', crop_size=224),
    dict(type='VideoFlip', flip_ratio=0),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs'])
]
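# Validation stays cheap: a single test_mode clip with a deterministic
# center crop, i.e. one 3x32x224x224 view per video. Test-time accuracy is
# usually reported with the heavier multi-view protocol defined below.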

# Testing: multi-clip, three-crop evaluation; see the view-count sketch
# after this pipeline.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 224)),
    dict(type='VideoThreeCrop', crop_size=224),
    dict(type='VideoFlip', flip_ratio=0),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs'])
]
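# A minimal sketch of the multi-view arithmetic this pipeline sets up
# (score averaging is an assumption about the model's test_cfg, which is
# outside this excerpt):
#
#   views_per_video = 4 * 3            # num_clips x VideoThreeCrop = 12
#   video_score = view_scores.mean(0)  # fuse per-view class scores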

data_root = 'data/video/'
# NOTE: both annotation files point at the Kinetics-400 test list; swap in
# the real train/val lists before launching an actual run.
train_ann_file = 'data/video/kinetics400/test.txt'
val_ann_file = 'data/video/kinetics400/test.txt'
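# With split=' ' below, each annotation line is expected to hold a video
# path (relative to data_root) and an integer class label separated by a
# single space, e.g. (hypothetical entry):
#
#   kinetics400/val_256/abseiling/xxxxxx.mp4 0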
train_dataset = dict(
    type='VideoDataset',
    data_source=dict(
        type='VideoDatasource',
        ann_file=train_ann_file,
        data_root=data_root,
        split=' ',
    ),
    pipeline=train_pipeline,
)

val_dataset = dict(
    type='VideoDataset',
    imgs_per_gpu=1,  # evaluation overrides the global imgs_per_gpu=8
    data_source=dict(
        type='VideoDatasource',
        ann_file=val_ann_file,
        data_root=data_root,
        split=' ',
    ),
    pipeline=val_pipeline,
)

data = dict(
    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
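# Global batch size is imgs_per_gpu x number of GPUs; e.g. a common 8-GPU
# Kinetics setup would give 8 * 8 = 64 clips per step (the GPU count is an
# assumption about the launch command, not fixed by this file).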

# optimizer
total_epochs = 30
optimizer = dict(
    type='AdamW',
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



configs/video_recognition/swin/video_swin_s.py [29:116]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
)

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)

# Training: one randomly sampled 32-frame clip per video, with random
# resized crop and horizontal flip augmentation.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 256)),
    dict(type='VideoRandomResizedCrop'),
    dict(type='VideoResize', scale=(224, 224), keep_ratio=False),
    dict(type='VideoFlip', flip_ratio=0.5),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs', 'label'])
]

# Validation: one deterministic clip per video with a center crop.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 256)),
    dict(type='VideoCenterCrop', crop_size=224),
    dict(type='VideoFlip', flip_ratio=0),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs'])
]

# Testing: multi-clip, three-crop evaluation (4 clips x 3 crops = 12 views
# per video).
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='VideoResize', scale=(-1, 224)),
    dict(type='VideoThreeCrop', crop_size=224),
    dict(type='VideoFlip', flip_ratio=0),
    dict(type='VideoNormalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='VideoToTensor', keys=['imgs'])
]

data_root = 'data/video/'
# NOTE: both annotation files point at the Kinetics-400 test list; swap in
# the real train/val lists before launching an actual run.
train_ann_file = 'data/video/kinetics400/test.txt'
val_ann_file = 'data/video/kinetics400/test.txt'
train_dataset = dict(
    type='VideoDataset',
    data_source=dict(
        type='VideoDatasource',
        ann_file=train_ann_file,
        data_root=data_root,
        split=' ',
    ),
    pipeline=train_pipeline,
)

val_dataset = dict(
    type='VideoDataset',
    imgs_per_gpu=1,  # evaluation overrides the global imgs_per_gpu=8
    data_source=dict(
        type='VideoDatasource',
        ann_file=val_ann_file,
        data_root=data_root,
        split=' ',
    ),
    pipeline=val_pipeline,
)

data = dict(
    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)

# optimizer
total_epochs = 30
optimizer = dict(
    type='AdamW',
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
