in separate_vae/data/base_dataset.py [0:0]
def get_transform(opt, params, method=Image.BICUBIC, normalize=True, labimg=False):
    transform_list = []
    if 'pad' in opt.resize_or_crop:
        # Pad to a square before any resizing; fill value depends on the color space
        if labimg:
            transform_list.append(PadToSquare((0, 128, 128)))  # Black in Lab space is (0, 128, 128)
        else:
            transform_list.append(PadToSquare(0))
    if 'resize' in opt.resize_or_crop:
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, method))
    elif 'scale_width' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))
    if 'crop' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
    if opt.resize_or_crop == 'none':
        # No resize/crop: snap dimensions to a multiple of the generator's total downsampling factor
        base = float(2 ** opt.n_downsample_global)
        if opt.netG == 'local':
            base *= (2 ** opt.n_local_enhancers)
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
    transform_list += [transforms.ToTensor()]
    # transform_list.append(transforms.Lambda(lambda tensor: __one_hot_tensor(tensor, opt.output_nc)))
    if normalize:
        transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
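
The 'pad' branch relies on a PadToSquare transform defined elsewhere in base_dataset.py and not shown in this excerpt. As a rough illustration of the constructor signature used above (a single constant fill, given as a per-channel tuple for Lab images), a minimal sketch could look like the following; this is an assumption about its behavior, and the repository's actual implementation may align or fill the padding differently.

from PIL import Image

class PadToSquare(object):
    """Sketch only (not the repo's implementation): pad a PIL image with a
    constant fill so that height == width, keeping the original content centered."""

    def __init__(self, fill):
        self.fill = fill  # scalar for single-channel, tuple for 3-channel (e.g. Lab)

    def __call__(self, img):
        w, h = img.size
        if w == h:
            return img
        side = max(w, h)
        canvas = Image.new(img.mode, (side, side), self.fill)
        # Paste the original image at the center of the square canvas
        canvas.paste(img, ((side - w) // 2, (side - h) // 2))
        return canvas

Because the spatially random choices (crop position, flip flag) come in through the params argument rather than being drawn inside the transform, the same params dict can be passed to get_transform for paired inputs (e.g. an image and its corresponding label map) so both receive identical augmentation.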