opacus/grad_sample/dp_rnn.py [17:36]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
) -> Dict[nn.Parameter, torch.Tensor]:
    """
    Computes per sample gradients for the ``RNNLinear`` layer. The RNN-like models
    (DPLSTM, DPGRU) are written using this layer as their building block.

    Args:
        layer: ``RNNLinear`` layer whose per-sample gradients are computed
        activations: Activations, i.e. the inputs captured for this layer during the forward pass
        backprops: Backpropagations, i.e. gradients of the loss with respect to the layer's output
    """

    # Keep the batch dim ``n``; sum outer products over the elided (time) dims
    gs = torch.einsum("n...i,n...j->nij", backprops, activations)

    ret = {layer.weight: gs}
    if layer.bias is not None:
        # Per-sample bias gradient: sum backprops over all non-batch dims
        ret[layer.bias] = torch.einsum("n...k->nk", backprops)

    return ret
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
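
The einsum in this sampler keeps the batch dimension ``n`` and sums the outer products of backprops and activations over every elided dimension, which for ``RNNLinear`` means summing over time steps. A minimal sketch of that contraction, with hypothetical shapes (batch 4, sequence length 5, 3 output and 2 input features) that are not part of the Opacus source:

import torch

# Hypothetical per-sample, per-time-step tensors:
# backprops   ~ dL/d(output), shape (batch, time, out_features)
# activations ~ layer inputs, shape (batch, time, in_features)
backprops = torch.randn(4, 5, 3)
activations = torch.randn(4, 5, 2)

# "n...i,n...j->nij" keeps the batch dim and sums over the elided
# time dim, yielding one weight gradient per sample.
per_sample_grads = torch.einsum("n...i,n...j->nij", backprops, activations)
assert per_sample_grads.shape == (4, 3, 2)  # (batch, out_features, in_features)

# Equivalent explicit form: per-sample sum of per-time-step outer products.
manual = torch.stack([
    sum(torch.outer(backprops[n, t], activations[n, t]) for t in range(5))
    for n in range(4)
])
assert torch.allclose(per_sample_grads, manual, atol=1e-5)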



opacus/grad_sample/linear.py [15:29]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
) -> Dict[nn.Parameter, torch.Tensor]:
    """
    Computes per sample gradients for the ``nn.Linear`` layer

    Args:
        layer: ``nn.Linear`` layer whose per-sample gradients are computed
        activations: Activations, i.e. the inputs captured for this layer during the forward pass
        backprops: Backpropagations, i.e. gradients of the loss with respect to the layer's output
    """
    # Outer product of backprops and activations per sample; any extra
    # (non-batch) dims are summed out by the elided ``...``
    gs = torch.einsum("n...i,n...j->nij", backprops, activations)
    ret = {layer.weight: gs}
    if layer.bias is not None:
        # Per-sample bias gradient: sum backprops over all non-batch dims
        ret[layer.bias] = torch.einsum("n...k->nk", backprops)

    return ret
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
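
For ``nn.Linear`` with no extra dimensions, the contraction reduces to a single outer product per sample. As a sanity check, these per-sample gradients should match what autograd produces when each sample is processed on its own; the verification loop below is illustrative only, not part of Opacus:

import torch
import torch.nn as nn

torch.manual_seed(0)
layer = nn.Linear(in_features=2, out_features=3)
x = torch.randn(4, 2)

# Run the whole batch and capture dL/d(output) as the backprops.
out = layer(x)
backprops = torch.autograd.grad(out.sum(), out)[0]  # shape (4, 3)

# Per-sample gradients via the same einsums as in the samplers above.
per_sample_w = torch.einsum("n...i,n...j->nij", backprops, x)  # (4, 3, 2)
per_sample_b = torch.einsum("n...k->nk", backprops)            # (4, 3)

# Reference: autograd run one sample at a time must agree.
for n in range(4):
    g_w, g_b = torch.autograd.grad(layer(x[n]).sum(), (layer.weight, layer.bias))
    assert torch.allclose(per_sample_w[n], g_w, atol=1e-6)
    assert torch.allclose(per_sample_b[n], g_b, atol=1e-6)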



