neuron_explainer/activations/derived_scalars/logprobs.py [33:47]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            transformer=transformer,
            detach_layer_norm_scale=self.detach_layer_norm_scale,
        )

    def reconstitute_activations(
        self,
        resid: torch.Tensor,
        other_arg: torch.Tensor | None,
        layer_index: LayerIndex,
        pass_type: PassType,
    ) -> torch.Tensor:
        """Apply the configured reconstitution function to a residual-stream tensor.

        Only the forward pass at the final layer is supported, and ``other_arg``
        must be None; violations raise AssertionError (note: stripped under -O).
        """
        assert other_arg is None
        final_layer = self._model_context.n_layers - 1
        assert layer_index == final_layer
        assert pass_type == PassType.FORWARD
        return self._reconstitute_activations_fn(resid)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



neuron_explainer/activations/derived_scalars/logprobs.py [67:81]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            transformer=transformer,
            detach_layer_norm_scale=self.detach_layer_norm_scale,
        )

    def reconstitute_activations(
        self,
        resid: torch.Tensor,
        other_arg: torch.Tensor | None,
        layer_index: LayerIndex,
        pass_type: PassType,
    ) -> torch.Tensor:
        """Reconstitute activations from a final-layer forward-pass residual tensor.

        Preconditions (checked with ``assert``, so skipped under -O):
        ``other_arg`` is None, ``layer_index`` is the model's last layer, and
        ``pass_type`` is FORWARD.
        """
        assert other_arg is None
        last_layer_index = self._model_context.n_layers - 1
        assert layer_index == last_layer_index
        assert pass_type == PassType.FORWARD
        return self._reconstitute_activations_fn(resid)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



