in torchx/apps/serve/serve.py [0:0]
import argparse
from typing import List


def parse_args(argv: List[str]) -> argparse.Namespace:
    """Builds and parses the CLI arguments for uploading a model to TorchServe."""
    parser = argparse.ArgumentParser(
        description="uploads the provided model to torchserve",
    )
    parser.add_argument(
        "--model_path",
        type=str,
        help="model to serve",
        required=True,
    )
    parser.add_argument(
        "--management_api",
        type=str,
        help="address of the management API, e.g. http://localhost:8081",
        required=True,
    )
    parser.add_argument(
        "--timeout",
        type=int,
        help="timeout for requests to the management API",
        default=60,
    )
    parser.add_argument(
        "--dryrun",
        action="store_true",
        help=argparse.SUPPRESS,  # hidden flag; not shown in --help output
    )
    parser.add_argument(
        "--port",
        type=int,
        help="""port for the HTTP file server to listen on when torchserve is loading the model.