def train_private()

in train_func.py [0:0]


import math


def train_private(args, extr, clf, loss_fn, device, train_loader, optimizer, epoch, C, std, include_linear=False, verbose=True):
    """Train (extr, clf) for one epoch with differentially private updates:
    per-example gradients are computed in micro-batches, clipped to norm C,
    averaged over the batch, perturbed with noise of scale std / batch_size,
    and applied via optimizer.step().
    """
    extr.train()
    clf.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # compute per-example gradients in micro-batches of size args.process_batch_size
        num_batches = int(math.ceil(float(data.size(0)) / args.process_batch_size))
        loss = 0
        grad_vec = None
        for i in range(num_batches):
            start = i * args.process_batch_size
            end = min((i+1) * args.process_batch_size, data.size(0))
            data_batch = data[start:end]
            target_batch = target[start:end]
            loss_batch, gradients_batch = per_example_gradient(extr, clf, data_batch, target_batch, loss_fn, include_linear=include_linear)
            # accumulate loss, weighted by the micro-batch size
            loss += data_batch.size(0) * loss_batch.item()
            # clip each per-example gradient to norm C and sum into grad_vec
            if i == 0:
                grad_vec = clip_and_sum_gradients(gradients_batch, C)
            else:
                grad_vec += clip_and_sum_gradients(gradients_batch, C)
        # average the loss and the clipped gradient sum over the full batch
        loss /= data.size(0)
        grad_vec /= data.size(0)
        # add noise of scale std / batch_size to the averaged gradient, set it on
        # the model parameters, and take the optimizer step
        noise = add_noisy_gradient(extr, clf, device, grad_vec, C, std / data.size(0), include_linear=include_linear)
        optimizer.step()
        if verbose and (batch_idx + 1) % args.log_interval == 0:
            print('Epoch %d [%d/%d]: loss = %.4f, grad_norm = %.4f, noise_norm = %.4f' % (
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset), loss,
                grad_vec.norm(), noise.norm()))
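
For context, a minimal sketch of how train_private might be driven from an outer loop. The helper functions and the args fields process_batch_size and log_interval come from train_func.py; everything else here (run_training, the MNIST data pipeline, the cross-entropy loss, and the args fields batch_size, lr, epochs, clip, noise_std) is an assumption for illustration, not part of the repository.

# Hypothetical driver; the model split (extr, clf) and most args fields below
# are assumptions for illustration only.
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

def run_training(args, extr, clf, device):
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    loss_fn = nn.CrossEntropyLoss()
    # optimize the feature extractor and the classifier head jointly
    params = list(extr.parameters()) + list(clf.parameters())
    optimizer = optim.SGD(params, lr=args.lr)
    for epoch in range(1, args.epochs + 1):
        train_private(args, extr, clf, loss_fn, device, train_loader, optimizer,
                      epoch, C=args.clip, std=args.noise_std, include_linear=False)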