_process_input — request-deserialization handler

Defined in tensorflow_script_mode_local_model_inference/code/inference.py [0:0]


def _process_input(data, context):
    """Deserialize an incoming inference request for the model server.

    Dispatches on ``context.request_content_type``:

    * ``application/json`` — the payload is a JSON document with
      ``bucket_name`` and ``object_name`` keys; the referenced S3 object
      is downloaded and its raw bytes are returned as the model input.
      (Assumes a module-level boto3 ``s3`` resource — defined elsewhere
      in this file.)
    * ``text/csv`` — the payload is a single comma-separated line of
      numbers; returns a JSON string of the form
      ``{"instances": [...]}`` with the values parsed as floats.

    Any other content type raises ``ValueError`` whose message is a
    JSON-formatted error body.

    :param data: file-like request stream; ``data.read()`` yields bytes.
    :param context: request context exposing ``request_content_type``.
    :return: bytes (JSON branch) or a JSON string (CSV branch).
    :raises ValueError: on an unsupported content type.
    """
    content_type = context.request_content_type

    if content_type == 'application/json':
        # Pass-through JSON pointing at an object in S3 (assumed well-formed).
        payload = data.read().decode('utf-8')
        print('input data: {}'.format(payload))

        request = json.loads(payload)

        print('reading object from S3')
        s3_object = s3.Object(request['bucket_name'], request['object_name'])
        object_bytes = s3_object.get()['Body'].read()
        print('body: {}'.format(object_bytes))

        return object_bytes

    if content_type == 'text/csv':
        # Minimal CSV handling: one comma-separated row of numeric values.
        fields = data.read().decode('utf-8').split(',')
        return json.dumps({'instances': [float(field) for field in fields]})

    # Anything else is rejected with a JSON-shaped error message.
    raise ValueError('{{"error": "unsupported content type {}"}}'.format(
        content_type or "unknown"))