in deployment/handler.py
import base64

import numpy as np


def postprocess(self, inference_output):
    # Take the first output in the batch and collapse the class dimension
    # into a per-pixel class-index map.
    prediction = inference_output[0].argmax(dim=0)
    # .cpu() is a no-op for tensors already on the CPU, so no
    # torch.cuda.is_available() branch is needed; the original check was
    # also the wrong test, since CUDA being available does not mean this
    # particular tensor lives on the GPU.
    prediction = prediction.cpu()
    return [
        {
            # Base64-encode the raw uint8 bytes so the mask survives a
            # JSON response. uint8 assumes at most 256 classes.
            "base64_prediction": base64.b64encode(
                prediction.numpy().astype(np.uint8).tobytes()
            ).decode("utf-8")
        }
    ]
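
# A minimal round-trip sketch of how a consumer might decode the
# "base64_prediction" field. The array, shape, and variable names below
# are hypothetical illustrations, not part of the handler: the raw bytes
# carry no dimension metadata, so the client must know the mask's shape
# out of band.
import base64

import numpy as np

# Encode a dummy 4x4 class-index mask the same way the handler does.
mask = np.random.randint(0, 3, size=(4, 4)).astype(np.uint8)
payload = base64.b64encode(mask.tobytes()).decode("utf-8")

# Client side: decode the bytes and restore the assumed shape.
decoded = np.frombuffer(base64.b64decode(payload), dtype=np.uint8)
restored = decoded.reshape(4, 4)
assert np.array_equal(mask, restored)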