def _gen_mobilenet_edgetpu()

in timm/models/efficientnet.py


def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """
    Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision
    """
    if 'edgetpu_v2' in variant:
        stem_size = 64
        stem_kernel_size = 5
        group_size = 64
        num_features = 1280
        act_layer = resolve_act_layer(kwargs, 'relu')

        def _arch_def(chs: List[int], group_size: int):
            return [
                # stage 0, 112x112 in
                [f'cn_r1_k1_s1_c{chs[0]}'],  # NOTE with expansion==1, official impl block is just the 1x1 pointwise-linear conv
                # stage 1, 112x112 in
                [f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'],
                # stage 2, 56x56 in
                [
                    f'er_r1_k3_s2_e8_c{chs[2]}',
                    f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
                    f'er_r1_k3_s1_e4_c{chs[2]}',
                    f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
                ],
                # stage 3, 28x28 in
                [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'],
                # stage 4, 14x14 in
                [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'],
                # stage 5, 14x14 in
                [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'],
                # stage 6, 7x7 in
                [f'ir_r1_k3_s1_e8_c{chs[6]}'],
            ]

        if 'edgetpu_v2_xs' in variant:
            stem_size = 32
            stem_kernel_size = 3
            channels = [16, 32, 48, 96, 144, 160, 192]
        elif 'edgetpu_v2_s' in variant:
            channels = [24, 48, 64, 128, 160, 192, 256]
        elif 'edgetpu_v2_m' in variant:
            channels = [32, 64, 80, 160, 192, 240, 320]
            num_features = 1344
        elif 'edgetpu_v2_l' in variant:
            stem_kernel_size = 7
            group_size = 128
            channels = [32, 64, 96, 192, 240, 256, 384]
            num_features = 1408
        else:
            assert False, f'Unknown MobileNet-EdgeTPU v2 variant: {variant}'

        arch_def = _arch_def(channels, group_size)
    else:
        # v1 (original MobileNet-EdgeTPU)
        stem_size = 32
        stem_kernel_size = 3
        num_features = 1280
        act_layer = resolve_act_layer(kwargs, 'relu')
        arch_def = [
            # stage 0, 112x112 in
            ['cn_r1_k1_s1_c16'],
            # stage 1, 112x112 in
            ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'],
            # stage 2, 56x56 in
            ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'],
            # stage 3, 28x28 in
            ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'],
            # stage 4, 14x14 in
            ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'],
            # stage 5, 14x14 in
            ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'],
            # stage 6, 7x7 in
            ['ir_r1_k3_s1_e8_c192'],
        ]

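    # decode_arch_def expands the stage strings into block args, scaling each
    # stage's repeat counts by depth_multiplier; round_chs_fn scales channel
    # widths by channel_multiplier (rounded to a multiple of 8 by default).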
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier),
        num_features=num_features,
        stem_size=stem_size,
        stem_kernel_size=stem_kernel_size,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=act_layer,
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model
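
Usage sketch (a minimal example, not part of this file; it assumes the EdgeTPU
variants are registered with timm under names matching the checks above, e.g.
'mobilenet_edgetpu_v2_s'):

    import timm
    import torch

    # Build an EdgeTPU-v2 small model; set pretrained=True only if weights
    # exist for this variant in the installed timm release.
    model = timm.create_model('mobilenet_edgetpu_v2_s', pretrained=False)
    model.eval()

    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])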