# logit_sensitivity_method() — from private_prediction.py

def logit_sensitivity_method(data, args, visualizer=None, title=None):
    """
    Given a dataset `data` and arguments `args`, run a full test of the logit
    sensitivity method. Returns a `dict` containing the `predictions` for the
    training and test data, keyed as `predictions[split][inference_budget]`.

    Note: This algorithm only guarantees privacy for models with convex losses.
    """
    assert args.model == "linear", f"Model {args.model} not supported."

    # an unspecified inference budget (-1) means we sweep over many values:
    if args.inference_budget == -1:
        inference_budgets = INFERENCE_BUDGETS
    else:
        inference_budgets = [args.inference_budget]

    # initialize model and criterion:
    num_classes = int(data["train"]["targets"].max()) + 1
    num_samples, num_features = data["train"]["features"].size()
    model = modeling.initialize_model(num_features, num_classes, device=args.device)
    criterion = nn.CrossEntropyLoss()
    regularized_criterion = modeling.add_l2_regularization(
        criterion, model, args.weight_decay
    )

    # train classifier (non-private training; privacy comes from noising the
    # logits at inference time):
    logging.info("Training non-private classifier...")
    modeling.train_model(model, data["train"],
                         criterion=regularized_criterion,
                         optimizer=args.optimizer,
                         num_epochs=args.num_epochs,
                         learning_rate=args.learning_rate,
                         batch_size=args.batch_size,
                         visualizer=visualizer,
                         title=title)

    # perform inference on both training and test set:
    logging.info("Performing inference with private predictor...")
    predictions = {split: {} for split in data.keys()}
    for split in data.keys():
        for inference_budget in inference_budgets:

            # naive composition: split the (epsilon, delta) budget evenly
            # across the `inference_budget` queries:
            scale = sensitivity_scale(
                args.epsilon / float(inference_budget),
                args.delta / float(inference_budget), args.weight_decay,
                criterion, num_samples, args.noise_dist)
            if args.delta > 0:
                # linearly search for the optimal noise scale under advanced
                # composition:
                del_primes = torch.linspace(0, args.delta, 1000)[1:-1]
                ind_eps_del = [advanced_compose(
                    args.epsilon, args.delta, inference_budget, dp)
                    for dp in del_primes]
                scales = [sensitivity_scale(
                    epsilon, delta, args.weight_decay,
                    criterion, num_samples, args.noise_dist)
                    for epsilon, delta in ind_eps_del]
                # for small budgets the naive scale may be better:
                scale = max(max(scales), scale)

            # make private predictions by adding calibrated noise to the
            # logits (the advanced Gaussian mechanism still samples Gaussian
            # noise; only the scale computation differs):
            noise_dist = "gaussian" if args.noise_dist in ["gaussian", "advanced_gaussian"] \
                else args.noise_dist
            preds = modeling.test_model(model, data[split])
            mean = torch.zeros_like(preds).T
            preds += getattr(noise, noise_dist)(mean, scale).T

            # record the noisy argmax as the private prediction:
            predictions[split][inference_budget] = preds.argmax(dim=1)

    # return predictions:
    return predictions