in tensorflow_similarity/visualization/projector.py [0:0]
import base64
import io
from typing import List, Optional

import numpy as np
import PIL.Image
from tqdm.auto import tqdm

from tensorflow_similarity.types import Tensor


def tensor2images(tensor: Tensor, size: Optional[int] = 64) -> List[str]:
    """Convert a batch of image tensors to in-memory images encoded as
    base64 data URIs.

    Args:
        tensor: 4D tensor representing a list of images.
        size: Output image size in pixels. Defaults to 64.

    Returns:
        List of images encoded as base64 data URI strings.
    """
    # Cast to a NumPy array, as iterating over a Tensor is slow.
    data = np.array(tensor)

    # If the images provided are scaled to [0, 1], rescale them to [0, 255].
    if np.max(data) <= 1:
        data = data * 255

    # Cast to uint8 so PIL accepts it.
    data = data.astype(np.uint8)

    imgs_b64 = []
    for a in tqdm(data, desc="generating displayable images"):
        # A single channel is treated as a grayscale image.
        if a.shape[-1] == 1:
            a = np.reshape(a, (a.shape[0], a.shape[1]))
            img = PIL.Image.fromarray(a, 'L')
        else:
            img = PIL.Image.fromarray(a)

        # Resize and re-encode as PNG so the format matches the data URI below.
        img_resized = img.resize((size, size))
        buffer = io.BytesIO()
        img_resized.save(buffer, format='PNG')
        img_bytes = buffer.getvalue()
        img64 = 'data:image/png;base64,%s' % base64.b64encode(
            img_bytes).decode('utf-8')
        imgs_b64.append(img64)
    return imgs_b64
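
A minimal usage sketch, not part of projector.py: it runs tensor2images on a small random batch of single-channel images. The batch shape, dtype, and the size=32 argument are illustrative assumptions.

import numpy as np

# Hypothetical batch: 8 grayscale 28x28 images with values in [0, 1].
batch = np.random.rand(8, 28, 28, 1).astype("float32")

# Produce 32x32 thumbnails encoded as base64 data URIs, ready to embed
# directly in the projector's HTML page.
data_uris = tensor2images(batch, size=32)

print(len(data_uris))                                      # 8
print(data_uris[0].startswith("data:image/png;base64,"))   # True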