in source_directory/inference/inference.py
from string import whitespace


def _process_output(response, context):
    """Post-process TensorFlow Serving output before it is returned to the client.

    Args:
        response (obj): the TensorFlow Serving response
        context (Context): an object containing request and configuration details

    Returns:
        (string, string): data to return to the client, and the response content type
    """
    if response.status_code != 200:
        # Propagate non-200 responses to the client as an error.
        _return_error(response.status_code, response.content.decode('utf-8'))
    response_content_type = context.accept_header
    # Debug: log the raw serving response.
    print("response.json():", response.json())
    # Remove all whitespace characters from the output JSON string by mapping
    # each whitespace code point to None in the translation table.
    prediction = response.content.decode('utf-8').translate(dict.fromkeys(map(ord, whitespace)))
    return prediction, response_content_type
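
The `_return_error` helper is referenced above but not shown in this excerpt. A minimal sketch, assuming it follows the common AWS SageMaker TensorFlow Serving example pattern of raising a ValueError that the serving container converts into an error response; the actual helper in this repository may differ:

def _return_error(code, message):
    """Raise an error so the serving container returns it to the client.

    Assumed implementation, not taken from this repository.
    """
    raise ValueError('Error: {}, {}'.format(str(code), message))

A quick smoke test with stubbed objects (the stub names and fields below are illustrative, not part of the real serving container API) shows the whitespace stripping in action:

class _FakeResponse:
    status_code = 200
    content = b'{ "predictions": [0.1, 0.9] }'

    def json(self):
        return {"predictions": [0.1, 0.9]}

class _FakeContext:
    accept_header = 'application/json'

body, content_type = _process_output(_FakeResponse(), _FakeContext())
print(body)          # '{"predictions":[0.1,0.9]}'
print(content_type)  # 'application/json'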