in mobile_cv/model_zoo/tools/model_exporter.py [0:0]
def parse_args(args_list=None):
    """Parse command-line arguments for the model zoo model exporter.

    Args:
        args_list: Optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (useful for tests and programmatic invocation).
            ``None`` means parse the real command line.

    Returns:
        argparse.Namespace with the parsed exporter options.
    """
    # Fail fast: if no export formats are registered there is nothing this
    # tool can produce, so bail out before building the whole parser.
    assert len(ExportFactory.keys()) > 0, "No export formats are registered"

    parser = argparse.ArgumentParser(description="Model zoo model exporter")
    parser.add_argument(
        "--task",
        type=str,
        default=None,
        help="Task name, if @ is inside the name, use the str after it as the "
        "path to import",
    )
    # task_args is parsed from a JSON string on the command line into a dict.
    parser.add_argument("--task_args", type=json.loads, default={}, help="Task args")
    parser.add_argument("--output_dir", type=str, required=True, help="Output base dir")
    parser.add_argument(
        "--export_types",
        type=str,
        nargs="+",
        default=DEFAULT_EXPORT_FORMATS,
        help=f"Export format, supported formats: {ExportFactory.keys()}",
    )
    parser.add_argument(
        "--raise_if_failed",
        type=int,
        default=0,
        help="Throw an exception if conversion failed, otherwise skipped",
    )
    parser.add_argument(
        "--post_quant_backend",
        type=str,
        choices=["qnnpack", "fbgemm", "default"],
        default="fbgemm",
        help="Post quantization: backend",
    )
    parser.add_argument(
        "--post_quant_calibration_batches",
        type=int,
        default=1,
        help="Post quantization: Num of batches of images for calibration",
    )
    parser.add_argument(
        "--use_graph_mode_quant",
        action="store_true",
        help="Use fx quantization for int8 models",
    )
    parser.add_argument(
        "--use_get_traceable",
        type=int,
        default=0,
        help="Use get_traceable_model to convert the model before tracing if 1",
    )
    parser.add_argument(
        "--trace_type",
        type=str,
        default="trace",
        choices=["trace", "script"],
        help="Use trace or script to get the torchscript model",
    )
    parser.add_argument(
        "--opt_for_mobile",
        type=int,
        default=0,
        help="Run optimize for mobile if 1",
    )
    parser.add_argument(
        "--save_for_lite_interpreter",
        action="store_true",
        help="Also export lite interpreter model",
    )
    parser.add_argument(
        "--batch_mode",
        type=str,
        default=None,
        help="Specify the registered name to run export in batch",
    )
    ret = parser.parse_args(args_list)
    return ret