fast_grad_models.py
def forward(self, x):
    """
    Forward pass that also returns
    * the activations (H) and
    * the linear combinations (Z)
    of each layer, so that the per-example gradient trick from [1] can be used.

    Args:
    - x : The inputs of the network

    Returns:
    - logits
    - activations at each layer (including the inputs)
    - linear combinations at each layer

    > [1] "Efficient Per-Example Gradient Computations"
    > by Ian Goodfellow
    > https://arxiv.org/pdf/1510.01799.pdf
    """
    x = x.view(-1, self.input_size)
    out = x
    # Save the model inputs, which are considered the activations of the 0'th layer.
    activations = [out]
    linearCombs = []
    for layer in self.hidden_layers:
        linearComb = layer(out)
        out = self.act(linearComb)
        # Save the activations and linear combinations from this layer.
        activations.append(out)
        # Z is a non-leaf tensor, so autograd must be told to keep its .grad
        # after backward(); the trick from [1] reads dL/dZ for every layer.
        linearComb.requires_grad_(True)
        linearComb.retain_grad()
        linearCombs.append(linearComb)
    logits = self.output_layer(out)
    logits.requires_grad_(True)
    logits.retain_grad()
    linearCombs.append(logits)
    return (logits, activations, linearCombs)
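
For context, a minimal sketch of how these return values are typically consumed to obtain per-example gradients with the trick from [1]. The names `model`, `x`, and `y` and the choice of a cross-entropy loss are assumptions for illustration, not part of this file:

import torch
import torch.nn.functional as F

# Hypothetical batch: `model` is assumed to be an instance of this network,
# `x` its inputs and `y` the integer class targets.
logits, activations, linearCombs = model(x)

# Backprop the SUM of per-example losses so that each Z.grad holds exactly
# dL_n/dZ_n for every example n (a mean loss would scale this by 1/N).
loss = F.cross_entropy(logits, y, reduction="sum")
loss.backward()

# Per [1], the per-example weight gradient of layer i is the outer product
# of dL/dZ_i (shape N x out) with that layer's input H_{i-1} (shape N x in).
# activations[i] is the input to the layer that produced linearCombs[i].
per_example_grads = []
for H, Z in zip(activations, linearCombs):
    grad_W = torch.einsum("no,ni->noi", Z.grad, H)  # (N, out, in)
    grad_b = Z.grad                                 # (N, out)
    per_example_grads.append((grad_W, grad_b))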