in src/models.py [0:0]
import torch


def filtering(scores, these_queries, filters, n_rel, n_ent,
              c_begin, chunk_size, query_type):
    # set filtered and true scores to -1e6 so they are ignored during ranking
    # take care that `scores` only covers the candidate chunk starting at c_begin
    for i, query in enumerate(these_queries):
        # reciprocal training always has candidates = rhs
        existing_s = (query[0].item(), query[1].item()) in filters
        # standard training separates rhs and lhs
        existing_r = (query[2].item(), query[1].item() + n_rel) in filters

        filter_out = []  # entity ids whose scores must be masked
        if query_type == 'rhs':
            if existing_s:
                # build a copy instead of using += so the list stored in `filters` is not mutated
                filter_out = filters[(query[0].item(), query[1].item())] + [query[2].item()]
        if query_type == 'lhs':
            if existing_r:
                filter_out = filters[(query[2].item(), query[1].item() + n_rel)] + [query[0].item()]
        if query_type == 'rel':
            # relation prediction is left unfiltered
            pass

        if chunk_size < n_ent:
            # keep only the filtered ids that fall inside the current chunk,
            # shifted to chunk-local indices
            filter_in_chunk = [
                int(x - c_begin) for x in filter_out
                if c_begin <= x < c_begin + chunk_size
            ]
            scores[i, torch.LongTensor(filter_in_chunk)] = -1e6
        else:
            scores[i, torch.LongTensor(filter_out)] = -1e6
    return scores
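

# Illustrative sketch only: one way `filtering` could be invoked inside a chunked
# ranking loop. The `model.score_candidates` / `model.score` methods, the layout of
# `queries` (rows of [lhs, rel, rhs] ids), and the loop structure below are
# assumptions for this example, not code taken from this file.
def rank_rhs(model, queries, filters, n_rel, n_ent, batch_size=500, chunk_size=-1):
    chunk_size = n_ent if chunk_size < 0 else chunk_size
    ranks = torch.ones(len(queries))  # ranks start at 1
    with torch.no_grad():
        for c_begin in range(0, n_ent, chunk_size):
            for b_begin in range(0, len(queries), batch_size):
                these_queries = queries[b_begin:b_begin + batch_size]
                # scores over the candidate chunk [c_begin, c_begin + chunk_size), shape (batch, chunk)
                scores = model.score_candidates(these_queries, c_begin, chunk_size)
                # score of the true rhs for each query, shape (batch, 1)
                targets = model.score(these_queries)
                # mask known-true candidates (including the true rhs itself)
                scores = filtering(scores, these_queries, filters, n_rel, n_ent,
                                   c_begin, chunk_size, query_type='rhs')
                # count unfiltered candidates scoring at least as high as the true rhs
                ranks[b_begin:b_begin + batch_size] += torch.sum(
                    (scores >= targets).float(), dim=1).cpu()
    return ranks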