def sensitivity_scale()

in private_prediction.py


# imports assumed by this function (`get_b_function` is a helper defined
# elsewhere in private_prediction.py; `util` is the repo-local utility module
# that provides `binary_search`):
import logging
import math

import torch
import torch.nn as nn

import util


def sensitivity_scale(epsilon, delta, weight_decay,
                      criterion, dataset_size, noise_dist,
                      chaudhuri=True):
    """
    Given differential privacy parameters `epsilon` and `delta`, L2
    regularization parameter `weight_decay`, the specified `criterion`, dataset
    size `dataset_size`, and the noise distribution `noise_dist`, compute the
    `scale` of the noise distribution to be used by the model and logit
    sensitivity methods.

    If `chaudhuri` is True, we use assumptions from Chaudhuri et al. to compute
    the scale.
    """
    if noise_dist in ["gaussian", "advanced_gaussian"]:
        if delta <= 0:
            raise ValueError(f"Delta must be > 0 for Gaussian noise (not {delta}).")
    elif delta != 0:
        raise ValueError(f"Delta must be 0 for non-Gaussian noise (not {delta}).")

    # standard Gaussian mechanism of Dwork (2014):
    if noise_dist == "gaussian":
        if epsilon <= 0 or epsilon >= 1:
            raise ValueError(
                f"Epsilon must be in (0, 1) for Gaussian noise (not {epsilon}).")
        scale = epsilon / math.sqrt(2 * math.log(1.25 / delta))

    # advanced Gaussian mechanism of Balle and Wang (2018):
    elif noise_dist == "advanced_gaussian":

        # compute delta knot (the delta achieved at alpha = 1); it determines which
        # branch of Balle & Wang's Algorithm 1 applies below (tensor arguments keep
        # `cdf` working when distribution argument validation is enabled):
        gaussian = torch.distributions.normal.Normal(0, 1)
        delta0 = (gaussian.cdf(torch.tensor(0.0)) - math.exp(epsilon)
                  * gaussian.cdf(torch.tensor(-math.sqrt(2.0 * epsilon)))).item()

        # define B-function:
        supremum = (delta >= delta0)
        b_func = get_b_function(epsilon, delta, supremum=supremum)

        # define constraint on output of B-function:
        def constraint(x):
            return x <= delta if supremum else x < -delta

        # find an upper bound for the binary search by testing powers of two:
        try:
            maximum = next(2 ** k for k in range(128) if not constraint(b_func(2 ** k)))
        except StopIteration:
            logging.error("Optimal value for v* out of range [0, 2 ** 127].")
            raise
        tol = 1e-5
        v_star = util.binary_search(b_func, constraint, 0, maximum, tol=tol)

        # compute noise multiplier:
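        # Balle & Wang calibrate sigma = alpha / sqrt(2 * epsilon) for unit
        # sensitivity, so the returned `scale` is again 1 / sigma: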
        if supremum:
            alpha = math.sqrt(1 + v_star / 2.0) - math.sqrt(v_star / 2.0)
        else:
            v_star += tol  # binary search returns value that is slightly too small
            alpha = math.sqrt(1 + v_star / 2.0) + math.sqrt(v_star / 2.0)
        scale = math.sqrt(2. * epsilon) / alpha

    # standard bounds for exponential / gamma mechanism:
    elif noise_dist == "laplacian" or noise_dist == "sqrt_gaussian":
        scale = epsilon
    else:
        raise ValueError(f"Unknown noise distribution: {noise_dist}")

    # look up the Lipschitz constant of the given loss:
    if isinstance(criterion, nn.CrossEntropyLoss):
        k = math.sqrt(2.0)
    elif isinstance(criterion, nn.BCELoss):
        k = 1.0
    else:
        raise ValueError("Lipschitz constant of loss unknown.")

    # compute final sensitivity scale:
    if chaudhuri:
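        # Chaudhuri et al. bound the sensitivity of L2-regularized ERM by
        # 2k / (weight_decay * dataset_size); dividing the unit-sensitivity
        # scale by this bound yields the final scale: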
        scale *= (weight_decay * dataset_size / (2.0 * k))
    return scale
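
The helper `get_b_function` is defined elsewhere in private_prediction.py. For
reference, below is a minimal sketch of what it plausibly computes, following
Algorithm 1 of Balle and Wang (2018); the negated sign convention in the
infimum branch is an assumption inferred from the `x < -delta` constraint
above, not the repository's actual code.

import math
import torch


def get_b_function(epsilon, delta, supremum=True):
    """Sketch of the B-function of Balle & Wang (2018), Algorithm 1.

    ASSUMPTION: mirrors the paper, not necessarily the actual helper in
    private_prediction.py; `delta` is accepted only for signature parity.
    """
    gaussian = torch.distributions.normal.Normal(0, 1)

    def phi(x):
        return gaussian.cdf(torch.tensor(x)).item()

    def b_func(v):
        if supremum:
            # B+(v) = Phi(sqrt(eps * v)) - exp(eps) * Phi(-sqrt(eps * (v + 2)))
            return (phi(math.sqrt(epsilon * v))
                    - math.exp(epsilon) * phi(-math.sqrt(epsilon * (v + 2))))
        # B-(v) = Phi(-sqrt(eps * v)) - exp(eps) * Phi(-sqrt(eps * (v + 2))),
        # negated so the doubling search and the `x < -delta` constraint see
        # a monotonically increasing function:
        return -(phi(-math.sqrt(epsilon * v))
                 - math.exp(epsilon) * phi(-math.sqrt(epsilon * (v + 2))))

    return b_func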
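
A minimal usage sketch, assuming the module-level helpers exist; all
hyperparameter values below are illustrative:

import torch.nn as nn

# scale for the analytic Gaussian mechanism of Balle & Wang (2018):
scale = sensitivity_scale(
    epsilon=1.0,
    delta=1e-5,
    weight_decay=1e-4,
    criterion=nn.CrossEntropyLoss(),
    dataset_size=50000,
    noise_dist="advanced_gaussian",
    chaudhuri=True,
)

With `chaudhuri=True`, the returned value already folds in the
2k / (weight_decay * dataset_size) sensitivity bound from Chaudhuri et al.,
so the caller does not need to rescale by the ERM sensitivity separately.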