def server()

in ludwig/serve.py


def server(model):
    app = FastAPI()

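    # Names of every input feature the model expects, used to validate requests.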
    input_features = {
        f['name'] for f in model.model_definition['input_features']
    }

    @app.get('/')
    def check_health():
        return JSONResponse({"message": "Ludwig server is up"})

    @app.post('/predict')
    async def predict(request: Request):
        form = await request.form()
        files, entry = convert_input(form)

        try:
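            # Reject the request unless every expected input feature was provided.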
            if (entry.keys() & input_features) != input_features:
                return JSONResponse(ALL_FEATURES_PRESENT_ERROR,
                                    status_code=400)
            try:
                resp = model.predict(data_dict=[entry]).to_dict('records')[0]
                return JSONResponse(resp)
            except Exception as e:
                logger.error("Error: {}".format(str(e)))
                return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR,
                                    status_code=500)
        finally:
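            # Remove any temporary files created by convert_input from uploaded form data.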
            for f in files:
                os.remove(f.name)

    return app
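
The returned FastAPI app is a standard ASGI application. FastAPI, Request, and JSONResponse come from the fastapi/starlette packages, while convert_input, logger, and the error constants are defined elsewhere in ludwig/serve.py. A minimal sketch of serving the app with uvicorn, assuming a model previously trained and saved with Ludwig (the model directory, host, and port are illustrative):

# Sketch: serve the app with uvicorn; the model path, host, and port are illustrative.
import uvicorn

from ludwig.api import LudwigModel
from ludwig.serve import server

model = LudwigModel.load('results/experiment_run/model')  # hypothetical path
app = server(model)
uvicorn.run(app, host='0.0.0.0', port=8000)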
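
Because the /predict handler reads the request form, a client submits one form field per input feature. A sketch using the requests library, where the feature name 'text' is hypothetical and depends on the model definition:

# Sketch: query the /predict endpoint; the feature name 'text' is hypothetical.
import requests

response = requests.post(
    'http://localhost:8000/predict',
    data={'text': 'an example input for the model'},
)
print(response.json())

File-type features (images, for example) would be sent through the files argument instead, which is why the handler removes the temporary files created by convert_input once prediction finishes.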