# Excerpt: def main() — from torchx/apps/serve/serve.py [0:0]

def main(argv: List[str]) -> None:
    """Download a model archive, serve it over HTTP, and register it with TorchServe.

    Steps:
      1. Parse CLI args (``--dryrun`` short-circuits after a success message).
      2. Copy the ``.mar`` file from ``args.model_path`` into a temp dir via fsspec.
      3. Serve the temp dir with a background ``ThreadingHTTPServer``.
      4. POST the served model URL to the TorchServe management API
         (``{management_api}/models``), forwarding any set TORCHSERVE_PARAMS.

    Args:
        argv: Command-line arguments (excluding the program name).

    Raises:
        requests.HTTPError: If the management API returns an error status.
        AssertionError: If ``args.model_path`` resolves to more than one path.
    """
    args = parse_args(argv)
    if args.dryrun:
        print("App serve started successfully")
        return
    with tempfile.TemporaryDirectory() as tmpdir:
        model_name = args.model_name or "model"
        # Random suffix avoids TorchServe caching a previously-registered
        # archive with the same file name.
        model_file = f"{model_name}_{rand_id()}.mar"
        model_path = os.path.join(tmpdir, model_file)
        print(f"downloading model from {args.model_path} to {model_path}...")
        fs, _, rpaths = fsspec.get_fs_token_paths(args.model_path)
        assert len(rpaths) == 1, "must have single path"
        fs.get(rpaths[0], model_path)

        # Bind to all interfaces; port may be 0 (OS-assigned), so read the
        # actual port back from server.server_port below.
        addr = ("", args.port)
        print(f"starting HTTP server at {addr}...")

        handler_class = partial(SimpleHTTPRequestHandler, directory=tmpdir)
        server: ThreadingHTTPServer = ThreadingHTTPServer(addr, handler_class)

        try:

            def serve() -> None:
                server.serve_forever()

            t = threading.Thread(target=serve)
            t.start()

            # Pick an IP that the management API host can route back to us on.
            ip_address = get_routable_ip_to(args.management_api)
            model_url = f"http://{ip_address}:{server.server_port}/{model_file}"
            print(f"serving file at {model_url}")

            url = f"{args.management_api}/models"
            print(f"POST {url}")
            payload = {
                "url": model_url,
            }
            # Forward only the TorchServe params the user actually set.
            for param in TORCHSERVE_PARAMS:
                v = getattr(args, param)
                if v is not None:
                    payload[param] = v
            r = requests.post(url, params=payload, timeout=args.timeout)
            print(r.text)
            r.raise_for_status()

        finally:
            print("shutting down...")
            # shutdown() stops serve_forever(); server_close() releases the
            # listening socket (shutdown alone leaks it). Join the thread so
            # we don't return while it is still winding down.
            server.shutdown()
            server.server_close()
            t.join()