def compile_mxnet_gulon_resnet(_env, _model)

in apps/deploy/resnet_export.py


from mxnet.gluon.model_zoo import vision
from tvm import autotvm, relay
import vta
from vta.top import graph_pack

# Note: PACK_DICT and merge_transform_to_mxnet_model are defined elsewhere
# in resnet_export.py


def compile_mxnet_gulon_resnet(_env, _model):
    """Quantize and compile a pretrained MXNet Gluon model for VTA;
    returns the (graph, lib, params) deployment triple."""
    # Generate TVM IR from the MXNet Gluon model
    # Populate the shape and data type dictionaries for the ImageNet classifier input
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (_env.BATCH, 3, 224, 224)}
    # Grab the off-the-shelf pretrained Gluon model and convert it to Relay
    gluon_model = vision.get_model(_model, pretrained=True)
    # Start front-end compilation: import the Gluon model into Relay
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Merge the input preprocessing transform into the Relay module
    # (merge_transform_to_mxnet_model is defined elsewhere in this file)
    mod = merge_transform_to_mxnet_model(mod)
    # Update the shape and dtype dictionaries with the model parameters
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})

    # Load pre-configured AutoTVM schedules
    with autotvm.tophub.context(_env.target):
        # Perform quantization in Relay
        # Note: We set opt_level to 3 in order to fold batch norm
        with relay.build_config(opt_level=3):
            with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
                mod = relay.quantize.quantize(mod, params=params)
            # Perform graph packing and constant folding for VTA target
            relay_prog = graph_pack(
                mod["main"],
                _env.BATCH,
                _env.BLOCK_IN,
                _env.WGT_WIDTH,
                start_name=PACK_DICT[_model][0],
                stop_name=PACK_DICT[_model][1])

    # Compile Relay program with AlterOpLayout disabled
    with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
        with vta.build_config(debug_flag=0):
            graph, lib, params = relay.build(
                relay_prog, target=_env.target,
                params=params, target_host=_env.target_host)

    return graph, lib, params
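
Example usage (a minimal sketch, not part of resnet_export.py): it assumes the
standard VTA environment returned by vta.get_env() and that the chosen model
name has an entry in PACK_DICT; the output file names are illustrative.

import vta
from tvm import relay

env = vta.get_env()
graph, lib, params = compile_mxnet_gulon_resnet(env, "resnet18_v1")

# Export the compiled artifacts for deployment on the VTA device
lib.export_library("resnet18_v1_deploy.tar")        # compiled operator library
with open("resnet18_v1_graph.json", "w") as f:      # execution graph (JSON)
    f.write(graph)
with open("resnet18_v1.params", "wb") as f:         # serialized parameters
    f.write(relay.save_param_dict(params))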