in python/featgraph/op/vanilla_spmm.py
from tvm import te
from tvm.topi.utils import get_const_tuple  # on older TVM versions: from topi.util import get_const_tuple


def vanilla_spmm_csr_x86(SrcFeat,
                         Adj_indptr,
                         Adj_indices,
                         Adj_vals,
                         num_feat_partitions=1):
"""Compute sparse-dense matrix multiplication of Adj and SrcFeat on x86.
This implementation applies feature dimension partitioning, which requires transforming the layout of SrcFeat.
Parameters
----------
SrcFeat : tvm.te.Tensor
2-D with shape [num_src_vertices, feat_len]
Adj_indptr : tvm.te.Tensor
1-D with shape [num_dst_vertices + 1] (CSR)
Adj_indices : tvm.te.Tensor
1-D with shape [nnz] (CSR)
Adj_vals : tvm.te.Tensor
1-D with shape [nnz] (CSR)
num_feat_partitions : int
Doing feature dimension tiling
Returns
-------
Out : tvm.te.Tensor
2-D with shape [num_dst_vertices, feat_len]
"""
    assert Adj_indices.shape[0].value == Adj_vals.shape[0].value
    num_src_vertices, feat_len = get_const_tuple(SrcFeat.shape)
    num_dst_vertices = Adj_indptr.shape[0].value - 1
    oshape = (num_dst_vertices, feat_len)
    assert feat_len % num_feat_partitions == 0, \
        "feat_len must be divisible by num_feat_partitions"
    feat_len_per_partition = feat_len // num_feat_partitions
    # Pack SrcFeat into [num_feat_partitions, num_src_vertices, feat_len_per_partition].
    ReshapedSrcFeat = te.compute((num_feat_partitions, num_src_vertices, feat_len_per_partition),
                                 lambda fo, nn, fi: SrcFeat[nn, fo * feat_len_per_partition + fi],
                                 name='ReshapedSrcFeat')
    def msgfunc(fo, row, fi):
        # Reduce over the nonzeros of one CSR row:
        # Out[row, f] = sum_j Adj[row, j] * SrcFeat[j, f]
        row_start = Adj_indptr[row]
        row_end = Adj_indptr[row + 1]
        row_num_elems = row_end - row_start
        elem_idx = te.reduce_axis((0, row_num_elems), name="elem_idx")
        adj_val = Adj_vals[row_start + elem_idx]
        feat_val = ReshapedSrcFeat[fo, Adj_indices[row_start + elem_idx], fi]
        return te.sum(adj_val * feat_val, axis=elem_idx)
    ReshapedOut = te.compute((num_feat_partitions, num_dst_vertices, feat_len_per_partition),
                             msgfunc, name='ReshapedOut')
    # Unpack the partitioned result back to [num_dst_vertices, feat_len].
    Out = te.compute(oshape,
                     lambda nn, ff: ReshapedOut[ff // feat_len_per_partition, nn,
                                                ff % feat_len_per_partition],
                     name='Out')
    return Out
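

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): builds the kernel with a
# default te schedule, runs it on a random SciPy CSR matrix, and checks the
# result against SciPy's own SpMM. The sizes, density, and 'llvm' target are
# illustrative assumptions; a real run would apply a tuned x86 schedule.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    import scipy.sparse as sp
    import tvm

    num_src, num_dst, feat_len = 8, 8, 4  # hypothetical sizes
    adj = sp.random(num_dst, num_src, density=0.5, format='csr', dtype='float32')

    SrcFeat = te.placeholder((num_src, feat_len), dtype='float32', name='SrcFeat')
    Adj_indptr = te.placeholder((num_dst + 1,), dtype='int32', name='Adj_indptr')
    Adj_indices = te.placeholder((adj.nnz,), dtype='int32', name='Adj_indices')
    Adj_vals = te.placeholder((adj.nnz,), dtype='float32', name='Adj_vals')

    Out = vanilla_spmm_csr_x86(SrcFeat, Adj_indptr, Adj_indices, Adj_vals,
                               num_feat_partitions=2)
    s = te.create_schedule(Out.op)  # default schedule, no x86-specific tuning
    f = tvm.build(s, [SrcFeat, Adj_indptr, Adj_indices, Adj_vals, Out],
                  target='llvm')

    dev = tvm.cpu(0)
    src_feat = np.random.rand(num_src, feat_len).astype('float32')
    out_nd = tvm.nd.empty((num_dst, feat_len), 'float32', dev)
    f(tvm.nd.array(src_feat, dev),
      tvm.nd.array(adj.indptr.astype('int32'), dev),
      tvm.nd.array(adj.indices.astype('int32'), dev),
      tvm.nd.array(adj.data, dev),
      out_nd)
    # adj.dot(src_feat) is the dense reference; use .numpy() on newer TVM.
    np.testing.assert_allclose(out_nd.asnumpy(), adj.dot(src_feat), rtol=1e-4)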