in services/read-gauge-ml.py [0:0]
import numpy as np
import torch


def process_image(img):
    """Resize, center-crop, and normalize a PIL image into a batched float tensor."""
    # Get the dimensions of the image
    width, height = img.size
    # Convert to RGB to guarantee three color channels
    img = img.convert('RGB')
    # Resize while keeping the aspect ratio, so the shortest side is 255px
    if width < height:
        img = img.resize((255, int(255 * height / width)))
    else:
        img = img.resize((int(255 * width / height), 255))
    # Get the dimensions of the resized image
    width, height = img.size
    # Set the coordinates to do a center crop of 224 x 224
    left = (width - 224) // 2
    top = (height - 224) // 2
    right = (width + 224) // 2
    bottom = (height + 224) // 2
    img = img.crop((left, top, right, bottom))
    # Turn the image into a numpy array
    img = np.array(img)
    # Make the color channel dimension first instead of last
    img = img.transpose((2, 0, 1))
    # Make all values between 0 and 1
    img = img / 255
    # Normalize each channel with the ImageNet mean and standard deviation
    img[0] = (img[0] - 0.485) / 0.229
    img[1] = (img[1] - 0.456) / 0.224
    img[2] = (img[2] - 0.406) / 0.225
    # Add a batch dimension to the beginning
    img = img[np.newaxis, :]
    # Turn into a float torch tensor
    image = torch.from_numpy(img).float()
    print("image processed")
    return image
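
# Example usage: a minimal sketch, assuming Pillow is installed and that
# "gauge.jpg" is a placeholder path to a local gauge photo.
if __name__ == "__main__":
    from PIL import Image

    sample = Image.open("gauge.jpg")
    tensor = process_image(sample)
    print(tensor.shape)  # expected: torch.Size([1, 3, 224, 224])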