# dpsgd_method() — defined in private_prediction.py


def dpsgd_method(data, args, visualizer=None, title=None):
    """
    Given a dataset `data` and arguments `args`, run a full test of private
    prediction using differentially private SGD training (DP-SGD;
    Abadi et al., 2016).

    Args:
        data: dict of splits (must include "train"); each split is a dict
            with "features" (N x D tensor) and, for "train", "targets"
            (N tensor of class indices).
        args: parsed arguments; must specify optimizer ("sgd" only), a
            positive delta, epsilon, clip, model, device, weight_decay,
            batch_size, num_epochs, learning_rate, and use_lr_scheduler.
        visualizer: optional visualizer passed through to training.
        title: optional plot title passed through to training.

    Returns:
        dict mapping each split name in `data` to a 1-D tensor of predicted
        class indices for that split.

    Raises:
        ValueError: if the optimizer is not "sgd" or delta is not positive.
    """

    # assertions: DP-SGD's privacy accounting assumes plain SGD updates with
    # per-sample gradient clipping, and (epsilon, delta)-DP requires delta > 0.
    if args.optimizer != "sgd":
        raise ValueError(f"DP-SGD does not work with {args.optimizer} optimizer.")
    if args.delta <= 0.:
        raise ValueError(f"Specified delta must be positive (not {args.delta}).")

    # initialize model and criterion:
    num_classes = int(data["train"]["targets"].max()) + 1
    num_samples = data["train"]["features"].size(0)
    num_features = data["train"]["features"].size(1)
    model = modeling.initialize_model(
        num_features, num_classes, model=args.model, device=args.device
    )
    regularized_criterion = modeling.add_l2_regularization(
        nn.CrossEntropyLoss(), model, args.weight_decay
    )

    # compute standard deviation of noise to add to gradient (num_samples was
    # already computed above; do not recompute it):
    std, eps = dpsgd_privacy.compute_noise_multiplier(
        args.epsilon, args.delta, num_samples, args.batch_size, args.num_epochs)
    logging.info(f"DP-SGD with noise multiplier (sigma) of {std}.")
    logging.info(f"Epsilon error is {abs(eps - args.epsilon):.5f}.")

    # convert model to make differentially private gradient updates:
    model = modeling.privatize_model(model, args.clip, std)

    # train classifier; data augmentation is used for all non-linear models:
    logging.info("Training classifier using private SGD...")
    augmentation = (args.model != "linear")
    modeling.train_model(model, data["train"],
                         optimizer=args.optimizer,
                         criterion=regularized_criterion,
                         num_epochs=args.num_epochs,
                         learning_rate=args.learning_rate,
                         batch_size=args.batch_size,
                         momentum=0.0,
                         use_lr_scheduler=args.use_lr_scheduler,
                         augmentation=augmentation,
                         visualizer=visualizer,
                         title=title)

    # convert model back to "regular" model:
    model = modeling.unprivatize_model(model)

    # perform inference on both training and test set:
    logging.info("Performing inference with DP-SGD predictor...")
    predictions = {split: modeling.test_model(
                   model, data_split, augmentation=augmentation
                   ).argmax(dim=1) for split, data_split in data.items()}
    return predictions