experiments/codes/model/gat/layers.py [286:341]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if x_i is None:
            alpha = (x_j * self.att[:, :, self.out_channels :]).sum(dim=-1)
        else:
            x_i = x_i.view(-1, self.heads, self.out_channels)
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)

        alpha = F.leaky_relu(alpha, self.negative_slope)
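        # normalize the logits over all edges that share the same target node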
        alpha = softmax(alpha, edge_index_i, size_i)

        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)

        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        if self.concat is True:
            aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
        else:
            aggr_out = aggr_out.mean(dim=1)  # N x (out_channels + edge_dim)
        # Gated update
        aggr_out = self.gru_cell(aggr_out, self.gru_hx)
        # aggr_out = torch.mm(aggr_out, self.edge_update)  # N x out_channels

        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out  # N x out_channels

    def gru_cell(self, x, hx):
        """
        implementation of GRUCell which is compatible with functional elements
        :param x:
        :return:
        """
        x = x.view(-1, x.size(1))

        gate_x = F.linear(x, self.gru_weight_ih.t(), self.gru_bias_ih)
        gate_h = F.linear(hx, self.gru_weight_hh.t(), self.gru_bias_hh)

        gate_x = gate_x.squeeze()
        gate_h = gate_h.squeeze()

        i_r, i_i, i_n = gate_x.chunk(3, 1)
        h_r, h_i, h_n = gate_h.chunk(3, 1)

        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + (resetgate * h_n))

        # standard GRU update: h' = (1 - z) * n + z * h, with z = inputgate, n = newgate
        hy = newgate + inputgate * (hx - newgate)
        return hy

    def __repr__(self):
        return "{}({}, {}, heads={})".format(
            self.__class__.__name__, self.in_channels, self.out_channels, self.heads
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
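The softmax(alpha, edge_index_i, size_i) call in the message step above is torch_geometric's segment-wise softmax: attention logits are normalized over the set of edges that point to the same target node, not over the whole tensor. Below is a minimal plain-PyTorch sketch of that behaviour (the function name edge_softmax and the toy shapes are illustrative, not part of the repo); the real PyG implementation also subtracts the per-node maximum before exponentiating, for numerical stability.

import torch

def edge_softmax(alpha, index, num_nodes):
    """Normalize per-edge logits over all edges that share the same target node."""
    alpha = alpha.exp()
    denom = alpha.new_zeros((num_nodes, alpha.size(1)))
    denom = denom.scatter_add(0, index.unsqueeze(-1).expand_as(alpha), alpha)
    return alpha / (denom[index] + 1e-16)

if __name__ == "__main__":
    logits = torch.randn(5, 2)             # 5 edges, 2 attention heads
    index = torch.tensor([0, 0, 1, 1, 1])  # target node of each edge
    att = edge_softmax(logits, index, num_nodes=2)
    # coefficients sum to 1 over the incoming edges of each node
    sums = torch.zeros(2, 2).scatter_add(0, index.unsqueeze(-1).expand_as(att), att)
    print(sums)  # ~[[1., 1.], [1., 1.]]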



experiments/codes/model/gat/layers.py [409:462]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        if x_i is None:
            alpha = (x_j * self.att[:, :, self.out_channels :]).sum(dim=-1)
        else:
            x_i = x_i.view(-1, self.heads, self.out_channels)
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)

        alpha = F.leaky_relu(alpha, self.negative_slope)
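        # normalize the logits over all edges that share the same target node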
        alpha = softmax(alpha, edge_index_i, size_i)

        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)

        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        if self.concat is True:
            aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
        else:
            aggr_out = aggr_out.mean(dim=1)  # N x (out_channels + edge_dim)
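        # Gated update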
        aggr_out = self.gru_cell(aggr_out, self.gru_hx)

        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out  # N x out_channels

    def gru_cell(self, x, hx):
        """
        implementation of GRUCell which is compatible with functional elements
        :param x:
        :return:
        """
        x = x.view(-1, x.size(1))

        gate_x = F.linear(x, self.gru_weight_ih.t(), self.gru_bias_ih)
        gate_h = F.linear(hx, self.gru_weight_hh.t(), self.gru_bias_hh)

        gate_x = gate_x.squeeze()
        gate_h = gate_h.squeeze()

        i_r, i_i, i_n = gate_x.chunk(3, 1)
        h_r, h_i, h_n = gate_h.chunk(3, 1)

        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + (resetgate * h_n))

        # standard GRU update: h' = (1 - z) * n + z * h, with z = inputgate, n = newgate
        hy = newgate + inputgate * (hx - newgate)
        return hy

    def __repr__(self):
        return "{}({}, {}, heads={})".format(
            self.__class__.__name__, self.in_channels, self.out_channels, self.heads
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
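The gru_cell helper above is the standard GRU: reset gate r, update gate z (inputgate), candidate state n (newgate), and h' = (1 - z) * n + z * h, written against the layer's own weight tensors so it works with functional-style parameters. As a sanity check, here is a self-contained sketch (names and the equivalence check are illustrative, not from the repo) showing that the same gating math reproduces torch.nn.GRUCell when given its weights in their native (3H, in) layout; the layer itself appears to store the transposed layout, hence the .t() calls in F.linear above.

import torch
import torch.nn.functional as F

def functional_gru_cell(x, hx, weight_ih, weight_hh, bias_ih, bias_hh):
    """Same gating math as gru_cell above, written as a free function."""
    gate_x = F.linear(x, weight_ih, bias_ih)   # N x 3H, blocks ordered [r | z | n]
    gate_h = F.linear(hx, weight_hh, bias_hh)  # N x 3H

    i_r, i_i, i_n = gate_x.chunk(3, dim=1)
    h_r, h_i, h_n = gate_h.chunk(3, dim=1)

    resetgate = torch.sigmoid(i_r + h_r)
    inputgate = torch.sigmoid(i_i + h_i)         # update gate z
    newgate = torch.tanh(i_n + resetgate * h_n)  # candidate state n

    # h' = (1 - z) * n + z * h, written as n + z * (h - n)
    return newgate + inputgate * (hx - newgate)

if __name__ == "__main__":
    torch.manual_seed(0)
    cell = torch.nn.GRUCell(16, 32)
    x, hx = torch.randn(4, 16), torch.randn(4, 32)
    out = functional_gru_cell(
        x, hx, cell.weight_ih, cell.weight_hh, cell.bias_ih, cell.bias_hh
    )
    assert torch.allclose(out, cell(x, hx), atol=1e-5)
    print("functional GRU matches torch.nn.GRUCell")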



