# src/cloud/pipelines/semantic_segmentation/preprocessing.py
def augment_data(path, augment=True):
    """Augment the image/mask pairs under *path* and save the results in place.

    Reads every file from ``path/PREFIX_NAME_IMAGE`` and its matching mask
    from ``path/PREFIX_NAME_MASK`` (pairing relies on identical sorted
    filename order in both directories — TODO confirm that invariant holds
    for the datasets fed through this pipeline), optionally generates five
    augmented variants per pair, resizes everything to
    (IMAGE_WIDTH, IMAGE_HEIGHT), and writes the results back into the same
    sub-directories.

    Args:
        path: Dataset root containing the image and mask sub-directories.
        augment: When True, save the original plus five augmented variants
            per pair; when False, only resize and re-save the originals.
    """
    save_path = path
    images = sorted(glob(os.path.join(path, PREFIX_NAME_IMAGE + "/*")))
    masks = sorted(glob(os.path.join(path, PREFIX_NAME_MASK + "/*")))

    # Output directories are loop-invariant — create them once up front.
    Path(os.path.join(save_path, PREFIX_NAME_IMAGE)).mkdir(parents=True, exist_ok=True)
    Path(os.path.join(save_path, PREFIX_NAME_MASK)).mkdir(parents=True, exist_ok=True)

    for image_path_in, mask_path_in in tqdm(zip(images, masks), total=len(images)):
        # os.path.basename handles both "/" and "\" separators (the old
        # split("/") broke on Windows paths), and splitext keeps dots that
        # are part of the file name itself (the old split(".") returned the
        # wrong extension for names like "a.b.png").
        img_name, image_extn = os.path.splitext(os.path.basename(image_path_in))
        image_extn = image_extn.lstrip(".")
        mask_name, mask_extn = os.path.splitext(os.path.basename(mask_path_in))
        mask_extn = mask_extn.lstrip(".")

        # Read image and mask.
        x = cv2.imread(image_path_in, cv2.IMREAD_COLOR)
        y = cv2.imread(mask_path_in, cv2.IMREAD_COLOR)
        if x is None or y is None:
            # cv2.imread returns None for unreadable/corrupt files; skip the
            # pair instead of crashing later in cv2.resize.
            continue

        # Build the list of variants to save (original always included).
        save_images = [x]
        save_masks = [y]
        if augment:
            # RandomRotate90 picks a random multiple of 90 degrees, so the
            # two applications can yield different variants — presumably
            # intentional; verify against the training recipe.
            transforms = (
                RandomRotate90(p=1.0),
                RandomRotate90(p=1.0),
                GridDistortion(p=1.0),
                HorizontalFlip(p=1.0),
                VerticalFlip(p=1.0),
            )
            for transform in transforms:
                augmented = transform(image=x, mask=y)
                save_images.append(augmented['image'])
                save_masks.append(augmented['mask'])

        # Save the images and masks. Suffix with an index whenever more than
        # one variant exists for THIS pair. (The old code keyed this on
        # len(images) == 1, so a single-image dataset with augment=True wrote
        # all six variants to the same filename, silently losing five.)
        for idx, (img, msk) in enumerate(zip(save_images, save_masks)):
            img = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))
            msk = cv2.resize(msk, (IMAGE_WIDTH, IMAGE_HEIGHT))
            if len(save_images) == 1:
                tmp_img_name = f"{img_name}.{image_extn}"
                tmp_mask_name = f"{mask_name}.{mask_extn}"
            else:
                tmp_img_name = f"{img_name}_{idx}.{image_extn}"
                tmp_mask_name = f"{mask_name}_{idx}.{mask_extn}"
            cv2.imwrite(os.path.join(save_path, PREFIX_NAME_IMAGE, tmp_img_name), img)
            cv2.imwrite(os.path.join(save_path, PREFIX_NAME_MASK, tmp_mask_name), msk)