experiments/codes/model/gat/layers.py [164:202]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        weight = get_param(params, param_name_dict, "weight")
        if torch.is_tensor(x):
            x = torch.matmul(x, weight)
        else:
            x = (
                None if x[0] is None else torch.matmul(x[0], weight),
                None if x[1] is None else torch.matmul(x[1], weight),
            )
        # x may be a (source, target) tuple here, in which case x.size(0)
        # would raise; take the node count from whichever tensor is present.
        if torch.is_tensor(x):
            num_nodes = x.size(0)
        else:
            num_nodes = (x[0] if x[0] is not None else x[1]).size(0)
        return self.propagate(
            edge_index, size=size, x=x, num_nodes=num_nodes, edge_attr=edge_attr
        )

    def message(self, edge_index_i, x_i, x_j, size_i, num_nodes, edge_attr):
        # Compute attention coefficients.
        # N.B. edge_attr is threaded through the message signature so that
        # attention can be computed with edge attributes, but it is unused
        # in the lines shown here.
        # Reshape neighbor features to [num_edges, heads, out_channels].
        x_j = x_j.view(-1, self.heads, self.out_channels)

        if x_i is None:
            alpha = (x_j * self.att[:, :, self.out_channels :]).sum(dim=-1)
        else:
            x_i = x_i.view(-1, self.heads, self.out_channels)
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)

        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index_i, size_i)

        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)

        # Weight each neighbor's features by its attention coefficient.
        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        if self.concat:
            aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
        else:
            aggr_out = aggr_out.mean(dim=1)  # N x out_channels (mean over heads)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
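
The softmax(alpha, edge_index_i, size_i) call above normalizes attention per
target node rather than over the whole edge set. Below is a minimal reference
sketch of that behavior in plain torch; per_node_softmax is an illustrative
name, not part of this repository or of torch_geometric.

import torch

def per_node_softmax(alpha, index, num_nodes):
    # alpha: [num_edges, heads]; index: [num_edges], target node of each edge.
    # The softmax is taken over the edges that share the same target node.
    alpha = alpha - alpha.max()  # constant shift within each group; stable, same result
    exp = alpha.exp()
    denom = torch.zeros(num_nodes, exp.size(1), device=alpha.device)
    denom = denom.index_add(0, index, exp)  # per-node sum of exponentials
    return exp / (denom[index] + 1e-16)

# Toy check: three edges into node 0, one edge into node 1.
alpha = torch.tensor([[1.0], [2.0], [3.0], [0.5]])
index = torch.tensor([0, 0, 0, 1])
out = per_node_softmax(alpha, index, num_nodes=2)
# out[:3] sums to 1 (edges into node 0); out[3] is 1.0 (sole edge into node 1).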



experiments/codes/model/gat/layers.py [392:428]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        weight = get_param(params, param_name_dict, "weight")
        if torch.is_tensor(x):
            x = torch.matmul(x, weight)
        else:
            x = (
                None if x[0] is None else torch.matmul(x[0], weight),
                None if x[1] is None else torch.matmul(x[1], weight),
            )
        # x may be a (source, target) tuple here, in which case x.size(0)
        # would raise; take the node count from whichever tensor is present.
        if torch.is_tensor(x):
            num_nodes = x.size(0)
        else:
            num_nodes = (x[0] if x[0] is not None else x[1]).size(0)
        return self.propagate(
            edge_index, size=size, x=x, num_nodes=num_nodes, edge_attr=edge_attr
        )

    def message(self, edge_index_i, x_i, x_j, size_i, num_nodes, edge_attr):
        # Compute attention coefficients.
        # N.B. edge_attr is threaded through the message signature so that
        # attention can be computed with edge attributes, but it is unused
        # in the lines shown here.
        # Reshape neighbor features to [num_edges, heads, out_channels].
        x_j = x_j.view(-1, self.heads, self.out_channels)

        if x_i is None:
            alpha = (x_j * self.att[:, :, self.out_channels :]).sum(dim=-1)
        else:
            x_i = x_i.view(-1, self.heads, self.out_channels)
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)

        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index_i, size_i)

        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)

        # Weight each neighbor's features by its attention coefficient.
        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        if self.concat:
            aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
        else:
            aggr_out = aggr_out.mean(dim=1)  # N x out_channels (mean over heads)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
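
The two spans above are byte-identical, so the shared message/update logic is
a natural candidate for hoisting into a common base. A minimal sketch of one
such refactor follows; AttentionMessageMixin is an illustrative name, and the
attribute set (heads, out_channels, att, negative_slope, dropout, concat) is
assumed from the duplicated code, not taken from the repository's actual class
hierarchy.

import torch
import torch.nn.functional as F
from torch_geometric.utils import softmax

class AttentionMessageMixin:
    # Intended to be mixed into an nn.Module subclass that defines heads,
    # out_channels, att, negative_slope, dropout, and concat.

    def message(self, edge_index_i, x_i, x_j, size_i, num_nodes, edge_attr):
        x_j = x_j.view(-1, self.heads, self.out_channels)
        if x_i is None:
            alpha = (x_j * self.att[:, :, self.out_channels:]).sum(dim=-1)
        else:
            x_i = x_i.view(-1, self.heads, self.out_channels)
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index_i, size_i)
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)
        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        if self.concat:
            return aggr_out.view(-1, self.heads * self.out_channels)
        return aggr_out.mean(dim=1)  # N x out_channels (mean over heads)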



