# transform_fn() — inference entry point, from src/inference_pytorch.py


def transform_fn(model, request_body, content_type='application/x-image', accept_type=None):
    '''
    Deserialize the request body and run detection inference on it with the
    model returned by model_fn().

    Parameters
    ----------
    model : callable
        Detection model from model_fn(); called as ``model(batch_tensor)`` and
        expected to return a list of dicts with 'scores', 'boxes', 'labels'
        tensors (torchvision detection-model convention).
    request_body : bytes
        Raw request payload: an encoded image file, or a raw uint8 buffer.
    content_type : str
        'application/x-image' for an encoded image, or 'application/x-npy'
        for a raw uint8 buffer of shape (137, 236).
    accept_type : str, optional
        Unused; kept for SageMaker inference-handler signature compatibility.

    Returns
    -------
    str
        JSON string with keys 'score', 'bbox', 'cid' (plain Python lists).

    Raises
    ------
    ValueError
        If content_type is not one of the supported types.
    '''
    if content_type == 'application/x-image':
        img = np.array(Image.open(io.BytesIO(request_body)))
    elif content_type == 'application/x-npy':
        # NOTE(review): hard-coded (137, 236) assumes a fixed single-channel
        # input resolution — confirm against the training pipeline.
        img = np.frombuffer(request_body, dtype='uint8').reshape(137, 236)
    else:
        raise ValueError(
            'Requested unsupported ContentType in content_type : ' + content_type)

    # perf_counter() is monotonic — the correct clock for elapsed-time
    # measurement (time.time() can jump with wall-clock adjustments).
    t0 = time.perf_counter()

    test_transforms = transforms.Compose([
        transforms.ToTensor()
    ])
    # Add a leading batch dimension: the model expects a batched input.
    img_tensor = test_transforms(img).to(device).unsqueeze(0)

    with torch.no_grad():
        result = model(img_tensor)

    t1 = time.perf_counter() - t0
    print("--- Elapsed time: %s secs ---" % t1)

    # .detach() is redundant under torch.no_grad(); .cpu().numpy() suffices.
    scores = result[0]['scores'].cpu().numpy()
    bboxes = result[0]['boxes'].cpu().numpy()
    cids = result[0]['labels'].cpu().numpy()

    return json.dumps({'score': scores.tolist(),
                       'bbox': bboxes.tolist(),
                       'cid': cids.tolist()})