# anticipation/anticipation/datasets/epic_future_labels.py
import collections

import torch


def visits_to_labels(self, visits):
    # No future visits: return sentinel vectors of -1 so downstream code can ignore them.
    if len(visits) == 0:
        return (torch.zeros(self.num_verbs) - 1,
                torch.zeros(self.num_nouns) - 1,
                torch.zeros(self.num_actions) - 1)

    # Collect every (video_id, frame_id) pair covered by the visits.
    v_id = visits[0]['start'][0]
    n_frames = []
    for visit in visits:
        n_frames += [(v_id, f_id) for f_id in range(visit['start'][1], visit['stop'][1] + 1)]

    # Look up the annotation record for each covered frame and drop duplicate records.
    records = [self.frame_to_record[frame] for frame in n_frames if frame in self.frame_to_record]
    records = {record.uid: record for record in records}.values()  # remove duplicate entries

    def get_dist(recs, N, label_fn, manyshot=None):
        # Build a multi-hot vector of length N marking which labels occur in recs.
        labels = [label_fn(record) for record in recs]
        if manyshot is not None:
            # Keep only many-shot classes and re-index into the many-shot vocabulary.
            labels = [manyshot.index(x) for x in labels if x in manyshot]
        counts = collections.Counter(labels)
        dist = torch.zeros(N)
        if len(counts) > 0:
            for item, count in counts.items():
                dist[item] = 1  # presence indicator, not count
            # dist = dist/dist.sum()
        return dist

    verb_dist = get_dist(records, self.num_verbs, lambda record: record.label[0],
                         self.manyshot_verbs if self.train_many_shot else None)
    noun_dist = get_dist(records, self.num_nouns, lambda record: record.label[1],
                         self.manyshot_nouns if self.train_many_shot else None)
    int_dist = get_dist([record for record in records if (record.label[0], record.label[1]) in self.int_to_idx],
                        self.num_actions,
                        lambda record: self.int_to_idx[(record.label[0], record.label[1])])
    return verb_dist, noun_dist, int_dist
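

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the repository): the record layout and the
# dataset attributes below are assumptions inferred from how visits_to_labels
# reads them — frame_to_record keyed by (video_id, frame_id), record.label as a
# (verb, noun) pair, and int_to_idx mapping (verb, noun) -> action index.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    Record = collections.namedtuple("Record", ["uid", "label"])

    # Hypothetical dataset-like object standing in for the class this method belongs to.
    dataset = SimpleNamespace(
        num_verbs=3,
        num_nouns=4,
        num_actions=2,
        train_many_shot=False,
        manyshot_verbs=None,
        manyshot_nouns=None,
        frame_to_record={
            ("P01_01", 10): Record(uid=0, label=(1, 2)),  # verb 1, noun 2
            ("P01_01", 11): Record(uid=0, label=(1, 2)),  # same uid, removed by dedup
            ("P01_01", 12): Record(uid=1, label=(2, 3)),  # verb 2, noun 3
        },
        int_to_idx={(1, 2): 0, (2, 3): 1},
    )

    visits = [{"start": ("P01_01", 10), "stop": ("P01_01", 12)}]
    verb_dist, noun_dist, int_dist = visits_to_labels(dataset, visits)
    print(verb_dist)  # tensor([0., 1., 1.]) — multi-hot over verbs present
    print(noun_dist)  # tensor([0., 0., 1., 1.])
    print(int_dist)   # tensor([1., 1.])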