configs/deit_unept_ade20k.py [57:84]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        pipeline=test_pipeline))

# model settings
# Normalization config passed to modules that take norm_cfg (SyncBN across GPUs).
norm_cfg = {'type': 'SyncBN', 'requires_grad': True}

# UN-EPT model built from a DeiT backbone; exact key semantics (k, L, depth)
# are defined by the UN_EPT constructor — not visible here, confirm there.
model = {
    'type': 'UN_EPT',
    'feat_dim': 256,
    'k': 16,
    'L': 3,
    'dropout': 0.1,
    'heads': 8,
    'hidden_dim': 2048,
    'depth': 2,
    # ImageNet-pretrained DeiT-Base (distilled, 384) checkpoint file.
    'pretrained': 'deit_base_distilled_patch16_384-d0272ac0.pth',
    # Backbone: DeiT vision transformer, 480px input, 16x16 patches.
    'backbone_cfg': {
        'type': 'DeiT',
        'img_size': 480,
        'patch_size': 16,
        'embed_dim': 768,
        'bb_depth': 12,
        'num_heads': 12,
        'mlp_ratio': 4,
    },
    # Decode loss: softmax cross-entropy (use_sigmoid=False), weight 1.0.
    'loss_decode': {
        'type': 'CrossEntropyLoss',
        'use_sigmoid': False,
        'loss_weight': 1.0,
    },
}
# model training and testing settings
train_cfg = {}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



configs/deit_unept_pcontext.py [60:87]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        pipeline=test_pipeline))

# model settings
# Normalization config passed to modules that take norm_cfg (SyncBN across GPUs).
norm_cfg = {'type': 'SyncBN', 'requires_grad': True}

# UN-EPT model built from a DeiT backbone; exact key semantics (k, L, depth)
# are defined by the UN_EPT constructor — not visible here, confirm there.
model = {
    'type': 'UN_EPT',
    'feat_dim': 256,
    'k': 16,
    'L': 3,
    'dropout': 0.1,
    'heads': 8,
    'hidden_dim': 2048,
    'depth': 2,
    # ImageNet-pretrained DeiT-Base (distilled, 384) checkpoint file.
    'pretrained': 'deit_base_distilled_patch16_384-d0272ac0.pth',
    # Backbone: DeiT vision transformer, 480px input, 16x16 patches.
    'backbone_cfg': {
        'type': 'DeiT',
        'img_size': 480,
        'patch_size': 16,
        'embed_dim': 768,
        'bb_depth': 12,
        'num_heads': 12,
        'mlp_ratio': 4,
    },
    # Decode loss: softmax cross-entropy (use_sigmoid=False), weight 1.0.
    'loss_decode': {
        'type': 'CrossEntropyLoss',
        'use_sigmoid': False,
        'loss_weight': 1.0,
    },
}
# model training and testing settings
train_cfg = {}
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



