in python/dglke/models/infer.py
def topK(self, head=None, rel=None, tail=None, exec_mode='all', k=10):
    """Return the top-k scored triples.

    If head, rel or tail is None, all entities/relations are considered for
    that slot. exec_mode controls whether scores are computed per aligned
    triplet, over the full cartesian product, or batched per head/rel/tail.
    """
    # Default to all entities/relations when a slot is not specified.
    if head is None:
        head = F.arange(0, self.model.num_entity)
    else:
        head = F.tensor(head)
    if rel is None:
        rel = F.arange(0, self.model.num_rel)
    else:
        rel = F.tensor(rel)
    if tail is None:
        tail = F.arange(0, self.model.num_entity)
    else:
        tail = F.tensor(tail)

    num_head = F.shape(head)[0]
    num_rel = F.shape(rel)[0]
    num_tail = F.shape(tail)[0]
    if exec_mode == 'triplet_wise':
        result = []
        assert num_head == num_rel, \
            'For triplet wise execution mode, head, relation and tail lists should have the same length'
        assert num_head == num_tail, \
            'For triplet wise execution mode, head, relation and tail lists should have the same length'

        # Score the i-th head against the i-th relation and the i-th tail.
        raw_score = self.model.score(head, rel, tail, triplet_wise=True)
        score = self.score_func(raw_score)
        idx = F.arange(0, num_head)

        sidx = F.argsort(score, dim=0, descending=True)
        sidx = sidx[:k]
        score = score[sidx]
        idx = idx[sidx]

        result.append((F.asnumpy(head[idx]),
                       F.asnumpy(rel[idx]),
                       F.asnumpy(tail[idx]),
                       F.asnumpy(score)))
    elif exec_mode == 'all':
        result = []
        # Score the full cartesian product of head x rel x tail.
        raw_score = self.model.score(head, rel, tail)
        score = self.score_func(raw_score)
        idx = F.arange(0, num_head * num_rel * num_tail)

        sidx = F.argsort(score, dim=0, descending=True)
        sidx = sidx[:k]
        score = score[sidx]
        idx = idx[sidx]

        # Decode the flattened index back into (head, rel, tail) positions;
        # tail varies fastest, then rel, then head.
        tail_idx = idx % num_tail
        idx = floor_divide(idx, num_tail)
        rel_idx = idx % num_rel
        idx = floor_divide(idx, num_rel)
        head_idx = idx % num_head

        result.append((F.asnumpy(head[head_idx]),
                       F.asnumpy(rel[rel_idx]),
                       F.asnumpy(tail[tail_idx]),
                       F.asnumpy(score)))
    elif exec_mode == 'batch_head':
        result = []
        # One top-k list per head entity.
        for i in range(num_head):
            raw_score = self.model.score(F.unsqueeze(head[i], 0), rel, tail)
            score = self.score_func(raw_score)
            idx = F.arange(0, num_rel * num_tail)

            sidx = F.argsort(score, dim=0, descending=True)
            sidx = sidx[:k]
            score = score[sidx]
            idx = idx[sidx]
            tail_idx = idx % num_tail
            idx = floor_divide(idx, num_tail)
            rel_idx = idx % num_rel

            result.append((np.full((k,), F.asnumpy(head[i])),
                           F.asnumpy(rel[rel_idx]),
                           F.asnumpy(tail[tail_idx]),
                           F.asnumpy(score)))
    elif exec_mode == 'batch_rel':
        result = []
        for i in range(num_rel):
            raw_score = self.model.score(head, F.unsqueeze(rel[i], 0), tail)
            score = self.score_func(raw_score)
            idx = F.arange(0, num_head * num_tail)

            sidx = F.argsort(score, dim=0, descending=True)
            sidx = sidx[:k]
            score = score[sidx]
            idx = idx[sidx]
            tail_idx = idx % num_tail
            idx = floor_divide(idx, num_tail)
            head_idx = idx % num_head

            result.append((F.asnumpy(head[head_idx]),
                           np.full((k,), F.asnumpy(rel[i])),
                           F.asnumpy(tail[tail_idx]),
                           F.asnumpy(score)))
    elif exec_mode == 'batch_tail':
        result = []
        for i in range(num_tail):
            raw_score = self.model.score(head, rel, F.unsqueeze(tail[i], 0))
            score = self.score_func(raw_score)
            idx = F.arange(0, num_head * num_rel)

            sidx = F.argsort(score, dim=0, descending=True)
            sidx = sidx[:k]
            score = score[sidx]
            idx = idx[sidx]
            rel_idx = idx % num_rel
            idx = floor_divide(idx, num_rel)
            head_idx = idx % num_head

            result.append((F.asnumpy(head[head_idx]),
                           F.asnumpy(rel[rel_idx]),
                           np.full((k,), F.asnumpy(tail[i])),
                           F.asnumpy(score)))
    else:
        assert False, 'unknown execution mode type {}'.format(exec_mode)

    return result
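
A minimal, self-contained sketch (not part of infer.py) of how the flattened top-k index is decoded back into (head, rel, tail) positions in the 'all' branch above, using plain numpy in place of the F backend; the sizes and scores below are made-up illustrative values:

import numpy as np

# Hypothetical sizes and random scores for illustration only.
num_head, num_rel, num_tail = 3, 2, 4
k = 3
score = np.random.rand(num_head * num_rel * num_tail)

# Top-k flattened indices, highest score first
# (mirrors F.argsort(..., descending=True)[:k]).
sidx = np.argsort(score)[::-1][:k]

# tail varies fastest, then rel, then head, so peel dimensions off
# with mod / floor-divide exactly as the 'all' branch does.
tail_idx = sidx % num_tail
rest = sidx // num_tail
rel_idx = rest % num_rel
head_idx = (rest // num_rel) % num_head

print(head_idx, rel_idx, tail_idx, score[sidx])

Calling the method itself is expected to look roughly like result = infer.topK(head=[0, 1], rel=None, tail=None, exec_mode='batch_head', k=5) on an object of the surrounding class (construction elided here); each entry of the returned list is a (head_ids, rel_ids, tail_ids, scores) tuple of numpy arrays.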