in pytext/main.py [0:0]
def torchscript_export(context, export_json, model, output_path, quantize, target):
    """Convert a pytext model snapshot to a torchscript model.

    Args:
        context: CLI context; ``context.obj.load_config()`` supplies defaults
            for ``model`` and ``output_path`` when they are not given.
        export_json: optional path to a json file holding either a single
            "export" section or an "export_list" of sections.
        model: path to the model snapshot to export (falls back to the
            config's ``save_snapshot_path``).
        output_path: destination torchscript file (falls back to
            ``"{save_snapshot_path}.torchscript"``).
        quantize: CLI flag; when set it overrides the json's quantize setting.
        target: optional export-target name used to filter ``export_list``.

    Command-line options (quantize / output_path / target) take precedence
    over the contents of the export-json file.
    """
    export_cfg = ExportConfig()
    # Only populate from export_json if no export option is configured from
    # the command line.
    if export_json:
        export_json_config = _load_and_validate_export_json_config(export_json)
        # read_chunk_size is not supported by this command; drop it so
        # config_from_json does not see an unknown field.  (The previous
        # second check of the key after pop() was dead code and is removed.)
        if export_json_config.pop("read_chunk_size", None) is not None:
            print("Warning: Ignoring read_chunk_size.")
        # The json may hold either a single "export" section or an
        # "export_list" of sections; normalize to a list.
        if "export" in export_json_config:
            export_cfgs = [export_json_config["export"]]
        else:
            export_cfgs = export_json_config["export_list"]
        if target:
            print(
                "A single export was specified in the command line. Filtering out all other export options"
            )
            export_cfgs = [cfg for cfg in export_cfgs if cfg["target"] == target]
            if not export_cfgs:
                print(
                    "No ExportConfig matches the target name specified in the command line."
                )
        # NOTE(review): each iteration overwrites export_cfg, so only the LAST
        # matching entry is exported below — confirm whether multi-export was
        # intended here.
        for partial_export_cfg in export_cfgs:
            if not quantize and not output_path:
                export_cfg = config_from_json(ExportConfig, partial_export_cfg)
            else:
                print(
                    "the export-json config is ignored because export options are found the command line"
                )
                # Ignore the caffe2/onnx paths from the json; CLI options win.
                export_cfg = config_from_json(
                    ExportConfig,
                    partial_export_cfg,
                    ("export_caffe2_path", "export_onnx_path"),
                )
                export_cfg.torchscript_quantize = quantize
        # If the config has export_torchscript_path, use it; otherwise keep
        # the default from the CLI.
        if export_cfg.export_torchscript_path is not None:
            output_path = export_cfg.export_torchscript_path
    if not model or not output_path:
        # Fall back to the saved snapshot path from the loaded config.
        config = context.obj.load_config()
        model = model or config.save_snapshot_path
        output_path = output_path or f"{config.save_snapshot_path}.torchscript"
    print(f"Exporting {model} to torchscript file: {output_path}")
    print(export_cfg)
    export_saved_model_to_torchscript(model, output_path, export_cfg)