captum/robust/_core/metrics/attack_comparator.py [237:324]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        r"""
        Evaluate model and attack performance on provided inputs.

        Args:

        inputs (any): Input for which attack metrics
                are computed. It can be provided as a tensor, tuple of tensors,
                or any raw input type (e.g. PIL image or text string).
                This input is provided directly as input to preproc function as well
                as any attack applied before preprocessing. If no pre-processing
                function is provided, this input is provided directly to the main
                model and all attacks.

        additional_forward_args (any, optional): If the forward function
                requires additional arguments other than the preprocessing
                outputs (or inputs if preproc_fn is None), this argument
                can be provided. It must be either a single additional
                argument of a Tensor or arbitrary (non-tuple) type or a
                tuple containing multiple additional arguments including
                tensors or any arbitrary python types. These arguments
                are provided to forward_func in order following the
                arguments in inputs.
                For a tensor, the first dimension of the tensor must
                correspond to the number of examples. For all other types,
                the given argument is used for all forward evaluations.
                Default: None
        perturbations_per_eval (int, optional): Allows perturbations of multiple
                attacks to be grouped and evaluated in one call of forward_func.
                Each forward pass will contain a maximum of
                perturbations_per_eval * #examples samples.
                For DataParallel models, each batch is split among the
                available devices, so evaluations on each available
                device contain at most
                (perturbations_per_eval * #examples) / num_devices
                samples.
                In order to apply this functionality, the output of preproc_fn
                (or inputs itself if no preproc_fn is provided) must be a tensor
                or tuple of tensors.
                Default: 1
        kwargs (any, optional): Additional keyword arguments provided to the
                metric function, as well as to any attacks whose additional_args
                include the corresponding argument names.

        Returns:

        - **attack results** (Dict[str, Dict[str, Union[Tensor, Tuple[Tensor, ...]]]]):
                Dictionary containing attack results for the provided batch.
                It maps each attack name to a dictionary with the best-case,
                worst-case and average-case results for that attack. The inner
                dictionary contains the keys "mean", "max" and "min" when
                num_attempts > 1, and only "mean" (the single metric result
                for the one attempt) when num_attempts = 1.
                An additional key 'Original' is included with metric results
                without any perturbations.


        Examples::

        >>> def accuracy_metric(model_out: Tensor, targets: Tensor):
        >>>     return (torch.argmax(model_out, dim=1) == targets).float()

        >>> attack_metric = AttackComparator(forward_func=resnet18,
        >>>                                  metric=accuracy_metric,
        >>>                                  preproc_fn=normalize)

        >>> random_rotation = transforms.RandomRotation()
        >>> jitter = transforms.ColorJitter()

        >>> attack_metric.add_attack(random_rotation, "Random Rotation",
        >>>                          num_attempts=5)
        >>> attack_metric.add_attack(jitter, "Jitter", num_attempts=1)
        >>> attack_metric.add_attack(FGSM(resnet18), "FGSM 0.1", num_attempts=1,
        >>>                          apply_before_preproc=False,
        >>>                          attack_kwargs={"epsilon": 0.1},
        >>>                          additional_args=["targets"])

        >>> for images, labels in dataloader:
        >>>     batch_results = attack_metric.evaluate(inputs=images, targets=labels)
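
        Each attack's results can then be read from the returned dictionary;
        the keys below are illustrative, based on the attacks added above:

        >>>     rotation_mean = batch_results["Random Rotation"]["mean"]
        >>>     unperturbed = batch_results["Original"]["mean"]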
        """
        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        expanded_additional_args = (
            _expand_additional_forward_args(
                additional_forward_args, perturbations_per_eval
            )
            if perturbations_per_eval > 1
            else additional_forward_args
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
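
For context, a minimal sketch of the expansion contract assumed in the snippet
above: when perturbations_per_eval > 1, tensor additional arguments are
repeated along the batch dimension so they line up with the grouped perturbed
batch, while non-tensor arguments pass through unchanged. The helper name and
signature below are illustrative, not Captum's actual implementation.

    import torch
    from torch import Tensor
    from typing import Any, Tuple

    def expand_additional_args_sketch(
        args: Tuple[Any, ...], perturbations_per_eval: int
    ) -> Tuple[Any, ...]:
        # Repeat each tensor argument once per grouped perturbation so its
        # first dimension matches the stacked (perturbed) input batch;
        # non-tensor arguments are reused as-is for every evaluation.
        return tuple(
            torch.cat([arg] * perturbations_per_eval, dim=0)
            if isinstance(arg, Tensor)
            else arg
            for arg in args
        )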



captum/robust/_core/metrics/min_param_perturbation.py [345:436]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        r"""
        This method evaluates the model at each perturbed input and identifies
        the minimum perturbation that leads to an incorrect model prediction.

        It is recommended to provide a single input (batch size = 1) when using
        this to identify a minimal perturbation for the chosen example. If a
        batch of examples is provided, the default correct function identifies
        the minimal perturbation for at least 1 example in the batch to be
        misclassified. A custom correct_fn can be provided to customize
        this behavior and define correctness for the batch.

        Args:

            inputs (Any): Input for which minimal perturbation
                    is computed. It can be provided as a tensor, tuple of tensors,
                    or any raw input type (e.g. PIL image or text string).
                    This input is provided directly as input to preproc function
                    as well as any attack applied before preprocessing. If no
                    pre-processing function is provided,
                    this input is provided directly to the main model and all attacks.

            additional_forward_args (any, optional): If the forward function
                    requires additional arguments other than the preprocessing
                    outputs (or inputs if preproc_fn is None), this argument
                    can be provided. It must be either a single additional
                    argument of a Tensor or arbitrary (non-tuple) type or a
                    tuple containing multiple additional arguments including
                    tensors or any arbitrary python types. These arguments
                    are provided to forward_func in order following the
                    arguments in inputs.
                    For a tensor, the first dimension of the tensor must
                    correspond to the number of examples. For all other types,
                    the given argument is used for all forward evaluations.
                    Default: None
            target (TargetType): Target class for classification. This is
                    required if using the default correct_fn.

            perturbations_per_eval (int, optional): Allows perturbations with
                    multiple param values to be grouped and evaluated in one
                    call of forward_func. Each forward pass will contain a
                    maximum of perturbations_per_eval * #examples samples.
                    For DataParallel models, each batch is split among the
                    available devices, so evaluations on each available
                    device contain at most
                    (perturbations_per_eval * #examples) / num_devices
                    samples.
                    In order to apply this functionality, the output of preproc_fn
                    (or inputs itself if no preproc_fn is provided) must be a tensor
                    or tuple of tensors.
                    Default: 1
            attack_kwargs (dictionary, optional): Optional dictionary of keyword
                    arguments provided to the attack function.
            correct_fn_kwargs (dictionary, optional): Optional dictionary of keyword
                    arguments provided to the correct function.

        Returns:

            Tuple of (perturbed_inputs, param_val) if successful
            else Tuple of (None, None)

            - **perturbed inputs** (Any):
                    Perturbed input (output of attack) which results in an
                    incorrect prediction.
            - **param_val** (int or float):
                    Param value leading to perturbed inputs causing
                    misclassification.

        Examples::

        >>> def gaussian_noise(inp: Tensor, std: float) -> Tensor:
        >>>     return inp + std * torch.randn_like(inp)

        >>> min_pert = MinParamPerturbation(forward_func=resnet18,
        >>>                                 attack=gaussian_noise,
        >>>                                 arg_name="std",
        >>>                                 arg_min=0.0,
        >>>                                 arg_max=2.0,
        >>>                                 arg_step=0.01)
        >>> for images, labels in dataloader:
        >>>     noised_image, min_std = min_pert.evaluate(inputs=images, target=labels)
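
        The evaluate call returns (None, None) when no param value in the
        searched range causes an incorrect prediction, so results can be
        checked before use:

        >>>     if noised_image is None:
        >>>         continue  # no std in [0.0, 2.0] flipped the prediction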

        """
        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        expanded_additional_args = (
            _expand_additional_forward_args(
                additional_forward_args, perturbations_per_eval
            )
            if perturbations_per_eval > 1
            else additional_forward_args
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
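
As the docstring above notes, a custom correct_fn can redefine correctness for
a batch. A minimal sketch of one such function, assuming correct_fn receives
the model output plus any entries passed via correct_fn_kwargs (the `targets`
key and the any_correct helper here are hypothetical, for illustration only):

    import torch
    from torch import Tensor

    def any_correct(model_out: Tensor, targets: Tensor) -> bool:
        # Treat the batch as still "correct" while any example is classified
        # correctly; evaluate() then searches for the smallest param value
        # that misclassifies every example in the batch.
        return bool((torch.argmax(model_out, dim=1) == targets).any())

    min_pert = MinParamPerturbation(
        forward_func=resnet18,
        attack=gaussian_noise,
        arg_name="std",
        arg_min=0.0,
        arg_max=2.0,
        arg_step=0.01,
        correct_fn=any_correct,
    )
    noised_image, min_std = min_pert.evaluate(
        inputs=images, correct_fn_kwargs={"targets": labels}
    )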



