in seamseg/data/transform.py [0:0]
def __call__(self, img, msk, cat, iscrowd):
    # Random flip
    if self.random_flip:
        img, msk = self._random_flip(img, msk)

    # Adjust scale, possibly at random
    if self.random_scale is not None:
        target_size = self._random_target_size()
    else:
        target_size = self.shortest_size
    scale = self._adjusted_scale(img.size[0], img.size[1], target_size)

    out_size = tuple(int(dim * scale) for dim in img.size)
    img = img.resize(out_size, resample=Image.BILINEAR)
    msk = [m.resize(out_size, resample=Image.NEAREST) for m in msk]

    # Wrap in np.array
    cat = np.array(cat, dtype=np.int32)
    iscrowd = np.array(iscrowd, dtype=np.uint8)

    # Image transformations
    img = tfn.to_tensor(img)
    img = self._normalize_image(img)
    # Label transformations
    msk = np.stack([np.asarray(m, dtype=np.int32) for m in msk], axis=0)
    msk, cat, iscrowd = self._compact_labels(msk, cat, iscrowd)
    # Convert labels to torch and extract bounding boxes
    # (int64 so the resulting tensors are torch.long)
    msk = torch.from_numpy(msk.astype(np.int64))
    cat = torch.from_numpy(cat.astype(np.int64))
    iscrowd = torch.from_numpy(iscrowd)

    bbx = extract_boxes(msk, cat.numel())

    return dict(img=img, msk=msk, cat=cat, iscrowd=iscrowd, bbx=bbx)
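
# --- Usage sketch ---
# A minimal, standalone example of driving the __call__ above. It assumes the
# method belongs to seamseg's ISSTransform class; the constructor arguments and
# the dummy image, mask, category, and iscrowd values below are illustrative
# only -- check the class definition in this file for the real signature.
import numpy as np
from PIL import Image

from seamseg.data.transform import ISSTransform

transform = ISSTransform(
    shortest_size=512,
    longest_max_size=1024,
    rgb_mean=(0.485, 0.456, 0.406),
    rgb_std=(0.229, 0.224, 0.225),
    random_flip=True,
    random_scale=(0.8, 1.2),
)

# Dummy sample: one RGB image, one segment-id mask (0 = background,
# 1 = a single instance), plus per-segment category and iscrowd labels
img = Image.new("RGB", (640, 480))
msk_arr = np.zeros((480, 640), dtype=np.uint8)
msk_arr[100:200, 150:300] = 1
msk = [Image.fromarray(msk_arr)]
cat = [255, 7]      # segment 0 -> void, segment 1 -> class 7
iscrowd = [0, 0]

out = transform(img, msk, cat, iscrowd)
print(out["img"].shape, out["msk"].shape, out["cat"])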