in experiments/codes/model/rgcn/rgcn.py [0:0]
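The constructor below registers, for each RGCN layer, a shared basis tensor plus optional root (self-loop) and bias weights through add_weight, appends the corresponding RGCNConv, and finally sets up the classification weights.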
def __init__(self, config):
    super(CompositionRGCNEncoder, self).__init__(config)
    self.name = "CompositionRGCNConv"
    # One RGCNConv per layer; the layer weights themselves are registered
    # separately below via add_weight.
    self.rgcn_layers = []
    for l in range(config.model.rgcn.num_layers):
        in_channels = config.model.relation_embedding_dim
        out_channels = config.model.relation_embedding_dim
        num_bases = config.model.relation_embedding_dim
        uniform_size = num_bases * in_channels
        # Basis tensor for this layer: (num_bases, in_channels, out_channels).
        basis = torch.Tensor(size=(num_bases, in_channels, out_channels)).to(
            config.general.device
        )
        basis.requires_grad = True
        self.add_weight(
            basis,
            "{}.{}.basis".format(self.name, l),
            initializer=(uniform, uniform_size),
            weight_norm=config.model.weight_norm,
        )
        if config.model.rgcn.root_weight:
            # Optional root (self-connection) weight: (in_channels, out_channels).
            root = torch.Tensor(size=(in_channels, out_channels)).to(
                config.general.device
            )
            root.requires_grad = True
            self.add_weight(
                root,
                "{}.{}.root".format(self.name, l),
                initializer=(uniform, uniform_size),
                weight_norm=config.model.weight_norm,
            )
        if config.model.rgcn.bias:
            # Optional per-layer bias: (out_channels,).
            bias = torch.Tensor(size=(out_channels,)).to(config.general.device)
            bias.requires_grad = True
            self.add_weight(
                bias,
                "{}.{}.bias".format(self.name, l),
                initializer=(uniform, uniform_size),
                weight_norm=config.model.weight_norm,
            )
        # Construct the relational graph conv layer for this depth.
        self.rgcn_layers.append(
            RGCNConv(
                in_channels,
                out_channels,
                config.model.num_classes,
                num_bases,
                root_weight=config.model.rgcn.root_weight,
                bias=config.model.rgcn.bias,
            )
        )
    ## Add classify params
    in_class_dim = (
        config.model.relation_embedding_dim * 2
        + config.model.relation_embedding_dim
    )
    self.add_classify_weights(in_class_dim)
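
For context, the basis tensor registered above matches the standard basis-decomposition idea from relational GCNs, where each relation's weight matrix is a learned mixture of shared basis matrices. The sketch below is illustrative only and not part of this file; `att` (the per-relation mixing coefficients) and the dimensions are hypothetical, and the actual combination may well happen inside RGCNConv rather than as written here.

import torch

num_relations, num_bases, in_dim, out_dim = 4, 8, 8, 8
basis = torch.randn(num_bases, in_dim, out_dim)  # shared basis matrices, same shape as the registered "basis" weight
att = torch.randn(num_relations, num_bases)      # hypothetical relation-specific mixing coefficients
# W_r = sum_b att[r, b] * basis[b]  ->  one (in_dim, out_dim) weight matrix per relation
w_per_relation = torch.einsum("rb,bio->rio", att, basis)
assert w_per_relation.shape == (num_relations, in_dim, out_dim)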