def predict()

in services/read-gauge-ml.py [0:0]


def predict(image):
    print("start predict")
    # Workaround: mark CUDA as already initialized (private torch flag), typically used to avoid
    # re-initialization errors in forked workers; inference itself runs on CPU below
    torch.cuda._initialized = True
    device = torch.device('cpu')
    # Path to the serialized model weights; model_regr and load_model() are defined elsewhere in this module
    resourcePath = os.environ['MODEL_FILE_NAME']
    try:
        with open(resourcePath, 'rb') as f:
            print('model found in local drive !!!')
            # Load the saved weights and put the model in evaluation mode
            model_regr.load_state_dict(torch.load(f, map_location=device))
            model_regr.eval()
            # Pass the image through our model
            output = model_regr(image)
            print(output)
            print(output[0].detach().numpy())
            # Detach the prediction tensor and return it as a plain Python list
            return output[0].detach().numpy().tolist()
    except IOError:
        print('model not found !!!')
        # Weights file is missing locally: fetch it with load_model(), then run the same inference path
        load_model()
        with open(resourcePath, 'rb') as f:
            model_regr.load_state_dict(torch.load(f, map_location=device))
            model_regr.eval()
            # Pass the image through our model
            output = model_regr(image)
            print(output)
            print(output[0].detach().numpy())
            # Return a plain Python list so both branches return the same type
            return output[0].detach().numpy().tolist()
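
A minimal sketch of how this function might be driven from the rest of the service. The preprocessing below (RGB decode, resize, ToTensor) and the helper name prepare_image are illustrative assumptions, not taken from read-gauge-ml.py; the real transform has to match whatever model_regr was trained with.

import io

from PIL import Image
from torchvision import transforms

# Hypothetical preprocessing pipeline; the actual one must mirror the training transforms
_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

def prepare_image(raw_bytes):
    """Decode uploaded image bytes into a batched float tensor for model_regr."""
    img = Image.open(io.BytesIO(raw_bytes)).convert('RGB')
    return _transform(img).unsqueeze(0)  # shape: [1, 3, 224, 224]

# Example call (assumes MODEL_FILE_NAME is already set in the environment and the
# weights file exists locally or can be fetched by load_model()):
# with open('gauge.jpg', 'rb') as fh:
#     reading = predict(prepare_image(fh.read()))
# print(reading)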