in part_selector.py [0:0]
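# Note: this excerpt assumes module-level imports of json, numpy as np, torch, and
# pathlib.Path, plus helper methods (__len_test__, processed_part_to_raster) defined
# elsewhere in the class.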
def __init__(self, base_path, name, image_size):
    super().__init__()
    self.image_size = image_size
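    # Per-dataset part vocabulary. target_parts is the label space used for the selector
    # (the trailing 'none' presumably means "no further part"); id_to_part maps the stored
    # part ids (0 = the initial stroke) to part names.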
    if 'bird' in name:
        self.target_parts = ['eye', 'head', 'body', 'beak', 'legs', 'wings', 'mouth', 'tail', 'none']
        self.id_to_part = {0: 'initial', 1: 'eye', 4: 'head', 3: 'body', 2: 'beak', 5: 'legs', 8: 'wings', 6: 'mouth', 7: 'tail'}
    elif 'generic' in name or 'fin' in name or 'horn' in name:
        self.target_parts = ['eye', 'arms', 'beak', 'mouth', 'body', 'ears', 'feet', 'fin',
                             'hair', 'hands', 'head', 'horns', 'legs', 'nose', 'paws', 'tail', 'wings', 'none']
        self.id_to_part = {0: 'initial', 1: 'eye', 2: 'arms', 3: 'beak', 4: 'mouth', 5: 'body', 6: 'ears', 7: 'feet', 8: 'fin',
                           9: 'hair', 10: 'hands', 11: 'head', 12: 'horns', 13: 'legs', 14: 'nose', 15: 'paws', 16: 'tail', 17: 'wings'}
    folder = base_path + '%s_json_' + '%d_train' % image_size
    self.paths = []
    self.paths_test = []
    # split the training data based on the ids of the eye sketches
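    # every fifth eye sketch is held out for testing, the rest go to training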
    for i, p in enumerate(Path(folder % self.target_parts[0]).glob('**/*.json')):
        if i % 5 == 0:
            self.paths_test.append(p)
        else:
            self.paths.append(p)
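    # the remaining parts follow the eye split: a sketch goes to the test set iff the
    # eye sketch with the matching filename was held out above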
    for part in self.target_parts[1:]:
        for i, p in enumerate(Path(folder % part).glob('**/*.json')):
            if Path(str(p).replace('_' + part, '_' + self.target_parts[0])) in self.paths_test:
                self.paths_test.append(p)
            else:
                self.paths.append(p)
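    # the target part name is the fifth-from-last '_'-separated token of each path;
    # map it to its index in target_parts to get the class label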
    self.parts_id = [self.target_parts.index(str(path).split('_')[-5]) for path in self.paths]
    self.parts_id_test = [self.target_parts.index(str(path).split('_')[-5]) for path in self.paths_test]
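    # augmentation ranges: rotation of +/- pi/12 (15 degrees), translation 0.01, scale 0.9-1.1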
    self.rotate = [-1/12*np.pi, 1/12*np.pi]
    self.trans = 0.01
    self.scale = [0.9, 1.1]
    self.n_part = len(self.id_to_part)
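    # precompute the test-split labels and rasterized partial inputs once, up front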
    self.samples_partid_test = [torch.LongTensor([self.parts_id_test[sample_id]]) for sample_id in range(self.__len_test__())]
    self.samples_partial_test = []
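    # for every test sample, rasterize each input part separately and then the union of
    # all input parts, concatenating the rasters along dim 0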
    for sample_id in range(self.__len_test__()):
        input_parts_json = json.load(open(self.paths_test[sample_id]))['input_parts']
        img_partial_test = []
        vector_input_part = []
        for i in range(self.n_part):
            key = self.id_to_part[i]
            vector_input_part += input_parts_json[key]
            img_partial_test.append(self.processed_part_to_raster(input_parts_json[key], side=self.image_size))
        img_partial_test.append(self.processed_part_to_raster(vector_input_part, side=self.image_size))
        self.samples_partial_test.append(torch.cat(img_partial_test, 0))
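    # stack the per-sample tensors for the fixed test split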
    self.samples_partid_test = torch.stack(self.samples_partid_test)
    self.samples_partial_test = torch.stack(self.samples_partial_test)
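    # report how many test sketches target each part, plus the overall count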
    print(' | '.join(['%s : %d' % (target_part, (self.samples_partid_test == i).sum()) for i, target_part in enumerate(self.target_parts)]) +
          ' | overall : %d' % len(self.samples_partid_test))