in src/sagemaker_pytorch_serving_container/torchserve.py [0:0]
def start_torchserve(handler_service=DEFAULT_HANDLER_SERVICE):
    """Configure and start the TorchServe model server, blocking until it exits.

    Args:
        handler_service (str): Python path pointing to a module that defines
            a class with the following:

                - A ``handle`` method, which is invoked for all incoming inference
                  requests to the model server.
                - An ``initialize`` method, which is invoked at model server start up
                  for loading the model.

            Defaults to ``sagemaker_pytorch_serving_container.default_handler_service``.
    """
    if ENABLE_MULTI_MODEL:
        # Multi-model mode: expose the handler via the environment so each
        # model load picks it up; don't pre-bake a single model archive.
        if "SAGEMAKER_HANDLER" not in os.environ:
            os.environ["SAGEMAKER_HANDLER"] = handler_service
        _set_python_path()
    else:
        # Single-model mode: lay out the model directory in TorchServe's
        # expected format for the given handler.
        _adapt_to_ts_format(handler_service)

    _create_torchserve_config_file()

    # Install customer-provided dependencies, if any were shipped with the model.
    if os.path.exists(REQUIREMENTS_PATH):
        _install_requirements()

    ts_torchserve_cmd = [
        "torchserve",
        "--start",
        "--model-store",
        MODEL_STORE,
        "--ts-config",
        TS_CONFIG_FILE,
        "--log-config",
        DEFAULT_TS_LOG_FILE,
        "--models",
        "model.mar",
    ]

    # Lazy %-formatting; avoids passing a list as the log message itself.
    logger.info("%s", ts_torchserve_cmd)
    subprocess.Popen(ts_torchserve_cmd)

    # "torchserve --start" launches and daemonizes; the Popen handle above is
    # not the long-lived server, so locate the real server process to supervise.
    ts_process = _retrieve_ts_server_process()
    _add_sigterm_handler(ts_process)
    # Block until the server terminates so the container stays alive while serving.
    ts_process.wait()