# transforms_v2.py
import torchvision.transforms as transforms

# Note: Resize and CenterCrop used bare below are assumed to be this module's
# custom variants (torchvision's Resize has no `largest` argument), presumably
# defined elsewhere in transforms_v2.py.


def get_transforms(input_size=224, test_size=224, kind='full', crop=True,
                   need=('train', 'val'), backbone=None):
    """Build train/val transform pipelines.

    Returns a dict containing a 'train' entry when 'train' is in `need` and,
    when 'val' is in `need`, either 'val_test'/'val_train' entries (crop=True)
    or a single 'val' entry (crop=False).
    """
    # ImageNet statistics by default; PNASNet/NASNet backbones expect inputs
    # normalized with mean/std of 0.5.
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    if backbone is not None and backbone in ['pnasnet5large', 'nasnetamobile']:
        mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]

    transformations = {}
    if 'train' in need:
        if kind == 'torch':
            # Standard torchvision training pipeline.
            transformations['train'] = transforms.Compose([
                transforms.RandomResizedCrop(input_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
        elif kind == 'full':
            # Same as 'torch' plus color-jitter augmentation.
            transformations['train'] = transforms.Compose([
                transforms.RandomResizedCrop(input_size),
                transforms.RandomHorizontalFlip(),
                transforms.ColorJitter(0.3, 0.3, 0.3),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
        else:
            raise ValueError('Transforms kind {} unknown'.format(kind))

    if 'val' in need:
        if crop:
            # Deterministic evaluation pipeline; the resize keeps the same
            # resize/crop ratio as the standard 256 -> 224 setup.
            transformations['val_test'] = transforms.Compose([
                Resize(int((256 / 224) * test_size)),
                transforms.CenterCrop(test_size),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
            # Lightly augmented variant for fine-tuning at test resolution.
            transformations['val_train'] = transforms.Compose([
                Resize(int((256 / 224) * test_size)),
                transforms.RandomHorizontalFlip(),
                transforms.ColorJitter(0.05, 0.05, 0.05),
                CenterCrop(test_size),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
        else:
            # No cropping: resize the image's largest side to test_size.
            transformations['val'] = transforms.Compose([
                Resize(test_size, largest=True),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])
    return transformations
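

# Usage sketch (illustrative, not part of the original module): build the
# pipelines and attach them to torchvision datasets. The directory paths and
# the choice of test_size below are hypothetical.
if __name__ == '__main__':
    from torchvision.datasets import ImageFolder

    tfs = get_transforms(input_size=224, test_size=288, kind='full',
                         crop=True, need=('train', 'val'))
    train_set = ImageFolder('data/train', transform=tfs['train'])
    # 'val_test' is the deterministic evaluation pipeline; 'val_train' adds
    # light augmentation for fine-tuning at the test resolution.
    val_set = ImageFolder('data/val', transform=tfs['val_test'])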