def calculate_max_activation()

in neuron-explainer/neuron_explainer/activations/activation_records.py


from typing import Sequence

from neuron_explainer.activations.activations import ActivationRecord


def relu(x: float) -> float:
    """Clamp negative activation values to zero."""
    return max(0.0, x)


def calculate_max_activation(activation_records: Sequence[ActivationRecord]) -> float:
    """Return the maximum activation value of the neuron across all the activation records."""
    flattened = [
        # ReLU is applied so that any value below 0 is treated as the neuron being in its
        # resting state. This is a simplifying assumption that holds for relu/gelu activations.
        max(relu(x) for x in activation_record.activations)
        for activation_record in activation_records
    ]
    return max(flattened)
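
For context, here is a minimal usage sketch. It assumes ActivationRecord is a dataclass carrying parallel tokens and activations lists (its fields in neuron_explainer/activations/activations.py); the example records and values are hypothetical.

from neuron_explainer.activations.activations import ActivationRecord

# Two hypothetical records for the same neuron over different text sequences.
records = [
    ActivationRecord(tokens=["The", " cat"], activations=[0.2, -0.5]),
    ActivationRecord(tokens=["A", " dog"], activations=[1.7, 0.3]),
]

# Negative values are clamped by relu, so the result is the largest
# non-negative activation across all records.
print(calculate_max_activation(records))  # 1.7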