# source_directory/inference/inference.py
def _process_input(data, context):
    """Pre-process request input before it is sent to TensorFlow Serving REST API.

    Decodes the request payload into a single RGB uint8 image, resizes it to
    the model's expected input size, applies ResNet50-v2 preprocessing, and
    wraps it in the TF Serving ``{"instances": [...]}`` JSON format.

    Args:
        data (obj): the request data stream
        context (Context): an object containing request and configuration
            details; only ``request_content_type`` is read here

    Returns:
        str: a JSON-serialized request body of the form
        ``{"instances": [[...]]}`` suitable for the TF Serving REST API

    Raises:
        Whatever ``_return_error`` raises for unsupported content types.
        NOTE(review): the original fell through after ``_return_error`` and
        would have hit a NameError on ``image_np``; the error path now
        returns explicitly — confirm against ``_return_error``'s definition.
    """
    read_data = data.read()

    # Real-time endpoint: body is a JSON-encoded pixel array.
    if context.request_content_type == 'application/json':
        image_np = np.asarray(json.loads(read_data)).astype(np.dtype('uint8'))
        image_np = _resize_to_model_input(image_np)
    # Batch transform of raw image (JPEG) bytes.
    elif context.request_content_type == 'application/x-image':
        image = Image.open(BytesIO(read_data)).convert('RGB')
        # PIL's resize() takes (width, height); the original passed
        # (INPUT_HEIGHT, INPUT_WIDTH), swapping axes for non-square inputs.
        image_np = np.array(image.resize((INPUT_WIDTH, INPUT_HEIGHT)))
    # Batch transform of serialized tf.train.Example (TFRecord) payloads.
    elif context.request_content_type == 'application/x-tfexample':
        example = tf.train.Example()
        example.ParseFromString(read_data)
        example_feature = MessageToDict(example.features)
        # MessageToDict base64-encodes bytes features; decode back to raw bytes.
        image_encoded = str.encode(example_feature['feature']['image']['bytesList']['value'][0])
        image_bytes = base64.decodebytes(image_encoded)
        # NOTE(review): 32x32x3 assumes CIFAR-sized source images — confirm
        # against the dataset that produced these records.
        image_np = np.frombuffer(image_bytes, dtype=np.dtype('uint8')).reshape(32, 32, 3)
        image_np = _resize_to_model_input(image_np)
    else:
        # Unsupported content type: delegate to the shared error helper and
        # stop — the original continued and dereferenced an unbound image_np.
        # (Also removed a stray empty print() from the original.)
        return _return_error(415, 'Unsupported content type "{}"'.format(
            context.request_content_type or 'Unknown'))

    # ResNet50-v2 preprocessing (scales pixel values for the model).
    image_np = tf.keras.applications.resnet_v2.preprocess_input(image_np)
    # TF Serving REST API expects a JSON body with an "instances" list.
    return json.dumps({"instances": [image_np.tolist()]})


def _resize_to_model_input(image_np):
    """Resize a uint8 HxWxC numpy image to the model input size via PIL."""
    # PIL's resize() takes (width, height).
    return np.array(Image.fromarray(image_np).resize((INPUT_WIDTH, INPUT_HEIGHT)))