captum/attr/_core/gradient_shap.py [266:279]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        nt = NoiseTunnel(input_min_baseline_x_grad)

        # NOTE: using attribute.__wrapped__ to not log
        attributions = nt.attribute.__wrapped__(
            nt,  # self
            inputs,
            nt_type="smoothgrad",
            nt_samples=n_samples,
            stdevs=stdevs,
            draw_baseline_from_distrib=True,
            baselines=baselines,
            target=target,
            additional_forward_args=additional_forward_args,
            return_convergence_delta=return_convergence_delta,
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



captum/attr/_core/layer/layer_gradient_shap.py [301:313]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        nt = NoiseTunnel(input_min_baseline_x_grad)

        # NOTE: using attribute.__wrapped__ to not log
        attributions = nt.attribute.__wrapped__(
            nt,  # self
            inputs,
            nt_type="smoothgrad",
            nt_samples=n_samples,
            stdevs=stdevs,
            draw_baseline_from_distrib=True,
            baselines=baselines,
            target=target,
            additional_forward_args=additional_forward_args,
            return_convergence_delta=return_convergence_delta,
        )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
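
Both excerpts rely on the same pattern: NoiseTunnel.attribute is wrapped by a
logging decorator, so the code calls attribute.__wrapped__ (conventionally the
undecorated function, as set by functools.wraps) and passes nt explicitly as
self, keeping this internal call out of the usage logs. Below is a minimal,
self-contained sketch of that mechanism; the log_usage decorator and the
NoiseTunnel body here are simplified stand-ins, not Captum's actual
implementations.

import functools


def log_usage(func):
    # functools.wraps copies metadata and sets wrapper.__wrapped__ = func
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        print(f"logged call to {func.__name__}")
        return func(self, *args, **kwargs)

    return wrapper


class NoiseTunnel:
    @log_usage
    def attribute(self, inputs):
        return inputs * 2


nt = NoiseTunnel()
nt.attribute(3)                  # goes through the wrapper and is logged
nt.attribute.__wrapped__(nt, 3)  # bypasses the wrapper; self passed explicitly

Because __wrapped__ is a plain function rather than a bound method, the
instance must be supplied as the first argument, which is why both snippets
pass nt with the "# self" comment.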



