def device()

in neuron_explainer/activations/derived_scalars/activations_and_metadata.py


    def device(self) -> torch.device | None:
        if len(self.activations_by_layer_index) > 0:
            return next(iter(self.activations_by_layer_index.values())).device
        else:
            # If there are no activations in this object (e.g. when performing a backward pass
            # from a layer 0 attention head and examining MLP activations), then there are no
            # tensors and thus no device to report.
            return None
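
For context, here is a minimal, self-contained sketch of how this method might sit in its containing class and be used. The class name ActivationsAndMetadata and the exact type of activations_by_layer_index (a dict mapping layer index to a tensor) are assumptions inferred from the snippet and file name, not the repository's actual definitions:

    # Hypothetical sketch; the real ActivationsAndMetadata class has more fields and methods.
    from dataclasses import dataclass, field

    import torch


    @dataclass
    class ActivationsAndMetadata:
        # Assumed shape of the container: layer index -> activation tensor for that layer.
        activations_by_layer_index: dict[int, torch.Tensor] = field(default_factory=dict)

        def device(self) -> torch.device | None:
            if len(self.activations_by_layer_index) > 0:
                # All stored tensors are assumed to live on the same device,
                # so inspecting any one of them is sufficient.
                return next(iter(self.activations_by_layer_index.values())).device
            else:
                # No activations stored, so there is no device to report.
                return None


    # Usage: a populated object reports its tensors' device; an empty one returns None.
    populated = ActivationsAndMetadata({0: torch.zeros(4), 1: torch.ones(4)})
    empty = ActivationsAndMetadata()
    assert populated.device() == torch.device("cpu")
    assert empty.device() is None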