def calibrated_gradient_attack()

in attacks/privacy_attacks.py [0:0]


import math
import os

import numpy as np
import torch
from opacus.grad_sample import GradSampleModule  # assumed import: GradSampleModule ships with Opacus

# train, build_model, to_mask and get_calibrated_gradnorm are helpers assumed
# to be defined elsewhere in this repo.


def calibrated_gradient_attack(params):
    """
    Run a calibrated gradient attack: train an auxiliary (attack) model on
    public data, then score the private train and heldout examples with a
    per-example gradient statistic calibrated against that auxiliary model.
    """
    # load the public/private split masks and the hidden train/heldout masks
    known_masks, hidden_masks = {}, {}
    hidden_masks['public'], hidden_masks['private'] = {}, {}
    known_masks['public'] = torch.load(params.mask_path + "public.pth")
    known_masks['private'] = torch.load(params.mask_path + "private.pth")
    hidden_masks['private']['train'] = torch.load(params.mask_path + "hidden/train.pth")
    hidden_masks['private']['heldout'] = torch.load(params.mask_path + "hidden/heldout.pth")
    hidden_masks['public']['train'] = torch.load(params.mask_path + "hidden/public_train.pth")
    hidden_masks['public']['heldout'] = torch.load(params.mask_path + "hidden/public_heldout.pth")
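
    # Each mask is assumed to be a 1-D boolean tensor over dataset indices
    # (as suggested by the (mask == True).nonzero() and to_mask(...) calls
    # below): known_masks splits the dataset into public/private, and
    # hidden_masks further splits each side into train/heldout halves.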

    # train the auxiliary (attack) model on the chosen subset of public data
    if params.public_data == 'train':
        print('Using public training data for auxiliary model')
        attack_model = train(params, hidden_masks['public']['train'])
    elif params.public_data[:4] == 'rand':
        # 'randXX' selects a random XX% of all public points
        print('Using random subset of public data for auxiliary model')
        public_ids = (known_masks['public'] == True).nonzero().flatten().numpy()
        prop_selected = float(params.public_data[4:]) / 100
        num_selected = math.ceil(prop_selected * len(public_ids))
        permuted_ids = np.random.permutation(public_ids)
        aux_data_mask = to_mask(len(known_masks['public']), permuted_ids[:num_selected])
        print('Number of public model training points:',
              len((aux_data_mask == True).nonzero().flatten().numpy()))
        attack_model = train(params, aux_data_mask)
    else:
        print('Using all public data for auxiliary model')
        attack_model = train(params, known_masks['public'])
    attack_model = attack_model.cuda()
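
    # Illustration (not from the original): public_data='rand25' with 10,000
    # public points gives prop_selected = 0.25 and num_selected = 2,500.
    # In every branch the auxiliary model is trained only on public points,
    # so it has never seen the private examples it will later calibrate.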

    # flatten the attack model parameters into a single vector
    original_attack_model = []
    for p in attack_model.parameters():
        original_attack_model.append(p.view(-1))
    original_attack_model = torch.cat(original_attack_model)

    # load the final private model from its checkpoint
    private_model = build_model(params)
    private_model_path = os.path.join(params.model_path, "checkpoint.pth")
    state_dict_private = torch.load(private_model_path)
    private_model.load_state_dict(state_dict_private['model'])
    private_model = private_model.cuda()

    # flatten the private model parameters into a single vector
    original_private_model = []
    for p in private_model.parameters():
        original_private_model.append(p.view(-1))
    original_private_model = torch.cat(original_private_model)
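
    # Both flattened parameter vectors are handed to get_calibrated_gradnorm
    # below; given the *_dots naming of its outputs, they are presumably the
    # fixed side of per-example gradient dot products (an assumption, since
    # get_calibrated_gradnorm is defined elsewhere in the repo).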

    # indices of the private train and heldout examples to score
    private_train_ids = (hidden_masks['private']['train'] == True).nonzero().flatten().numpy()
    private_heldout_ids = (hidden_masks['private']['heldout'] == True).nonzero().flatten().numpy()

    # reload the attack model from its checkpoint so that a fresh copy can be
    # wrapped in GradSampleModule
    new_model = build_model(params)
    new_model_path = os.path.join(params.dump_path, "checkpoint.pth")
    state_dict_new = torch.load(new_model_path)
    new_model.load_state_dict(state_dict_new['model'])
    new_model = new_model.cuda()

    private_model = GradSampleModule(private_model)
    attack_model = GradSampleModule(new_model)
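
    # GradSampleModule (from Opacus) hooks the wrapped module so that, after a
    # backward pass, each parameter p additionally carries p.grad_sample: one
    # gradient per example in the batch rather than a single summed gradient.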

    # calibrated scores for the private train and heldout examples
    train_dots = get_calibrated_gradnorm(params, private_model, original_private_model,
                                         attack_model, original_attack_model,
                                         private_train_ids, hidden_masks['private']['train'])
    heldout_dots = get_calibrated_gradnorm(params, private_model, original_private_model,
                                           attack_model, original_attack_model,
                                           private_heldout_ids, hidden_masks['private']['heldout'])

    return train_dots, heldout_dots
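
For context, a hedged sketch of the kind of per-example statistic get_calibrated_gradnorm may compute. This is an illustration, not the repo's implementation: it assumes the score is the per-example gradient norm under the private model minus the norm under the auxiliary model, that both models are wrapped in GradSampleModule as above, and that (x, y) is a hypothetical labeled batch for a cross-entropy task.

import torch
import torch.nn.functional as F

def per_example_grad_norms(wrapped_model, x, y):
    # after backward(), GradSampleModule leaves p.grad_sample on each trainable
    # parameter: a (batch, *p.shape) tensor of per-example gradients
    wrapped_model.zero_grad()
    F.cross_entropy(wrapped_model(x), y).backward()
    per_ex = [p.grad_sample.reshape(p.grad_sample.shape[0], -1)
              for p in wrapped_model.parameters()
              if getattr(p, 'grad_sample', None) is not None]
    return torch.cat(per_ex, dim=1).norm(dim=1)

# hypothetical calibrated score: large when an example pulls much harder on the
# private model than on the public auxiliary model
scores = per_example_grad_norms(private_model, x, y) \
         - per_example_grad_norms(attack_model, x, y)

And a minimal evaluation harness, assuming scikit-learn is available, that `params` is the same argument object used above, and that the returned train_dots/heldout_dots are array-like scores. Members (train) and non-members (heldout) should score differently, so the AUC summarizes attack success:

import numpy as np
from sklearn.metrics import roc_auc_score

train_dots, heldout_dots = calibrated_gradient_attack(params)
labels = np.concatenate([np.ones(len(train_dots)), np.zeros(len(heldout_dots))])
scores = np.concatenate([train_dots, heldout_dots])
print('Membership-inference AUC: %.3f' % roc_auc_score(labels, scores))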