in python/dglke/models/ke_model.py [0:0]
def _infer_score_func(self, head, rel, tail, triplet_wise=False, batch_size=DEFAULT_INFER_BATCHSIZE):
    """Compute knowledge-graph-embedding scores for the given id tensors.

    Parameters
    ----------
    head : 1-D tensor of entity ids used as heads.
    rel : 1-D tensor of relation ids.
    tail : 1-D tensor of entity ids used as tails.
    triplet_wise : bool
        If True, ``head``, ``rel`` and ``tail`` are treated as aligned
        triplets (they must have the same length N) and the result is a
        1-D tensor of N scores. If False, scores are computed for the
        full cross product and the result has shape
        ``(len(head), len(rel), len(tail))``.
    batch_size : int
        Mini-batch size used to bound device memory when scoring on GPU.

    Returns
    -------
    torch.Tensor
        Scores, always moved back to CPU.
    """
    head_emb = self.entity_embed[head]
    rel_emb = self.relation_embed[rel]
    tail_emb = self.entity_embed[tail]
    num_head = head.shape[0]
    num_rel = rel.shape[0]
    num_tail = tail.shape[0]
    score = []
    if triplet_wise:
        # Triplet-wise scoring: score[i] = score(head[i], rel[i], tail[i]).
        # NOTE(review): this branch assumes num_head == num_rel == num_tail;
        # the original code silently relied on this (it even clamped the rel
        # and tail slices against num_head), so the contract is kept as-is.

        # Minimal stand-in for a DGL edge batch, so the score function's
        # edge_func can be reused outside of an actual graph.
        class FakeEdge(object):
            def __init__(self, head_emb, rel_emb, tail_emb, device=-1):
                self._hobj = {'emb': head_emb.to(device)}
                self._robj = {'emb': rel_emb.to(device)}
                self._tobj = {'emb': tail_emb.to(device)}

            @property
            def src(self):
                return self._hobj

            @property
            def dst(self):
                return self._tobj

            @property
            def data(self):
                return self._robj

        # Score in mini-batches so a GPU can be used without running OOM.
        # Python slicing clamps out-of-range stops automatically, so the
        # explicit upper-bound conditionals of the original are unnecessary
        # (and were mistakenly computed against num_head for rel/tail).
        for i in range((num_head + batch_size - 1) // batch_size):
            sh_emb = head_emb[i * batch_size : (i + 1) * batch_size]
            sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size]
            st_emb = tail_emb[i * batch_size : (i + 1) * batch_size]
            edata = FakeEdge(sh_emb, sr_emb, st_emb, self._device)
            score.append(self._score_func.edge_func(edata)['score'].to(th.device('cpu')))
        return th.cat(score, dim=0)
    else:
        # Cross-product scoring:
        #   result[h, r, t] = score(head[h], rel[r], tail[t])
        # yielding a (num_head, num_rel, num_tail) tensor. Head and tail
        # embeddings are streamed through the device in mini-batches to
        # avoid OOM; the (typically small) relation batch stays resident.
        rel_emb = rel_emb.to(self._device)
        for i in range((num_head + batch_size - 1) // batch_size):
            sh_emb = head_emb[i * batch_size : (i + 1) * batch_size].to(self._device)
            s_score = []
            for j in range((num_tail + batch_size - 1) // batch_size):
                st_emb = tail_emb[j * batch_size : (j + 1) * batch_size].to(self._device)
                # NOTE(review): infer() is assumed to return a
                # (n_head_batch, num_rel, n_tail_batch) tensor — consistent
                # with the cat dims below; confirm against the score funcs.
                s_score.append(self._score_func.infer(sh_emb, rel_emb, st_emb).to(th.device('cpu')))
            # Stitch tail batches along dim 2, then head batches along dim 0.
            score.append(th.cat(s_score, dim=2))
        score = th.cat(score, dim=0)
        return th.reshape(score, (num_head, num_rel, num_tail))