attacks/privacy_attacks.py [621:655]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    known_masks, hidden_masks = {}, {}
    hidden_masks['public'], hidden_masks['private'] = {}, {}

    # Load the known public/private split and the hidden train/heldout splits.
    known_masks['public'] = torch.load(params.mask_path + "public.pth")
    known_masks['private'] = torch.load(params.mask_path + "private.pth")
    hidden_masks['private']['train'] = torch.load(params.mask_path + "hidden/train.pth")
    hidden_masks['private']['heldout'] = torch.load(params.mask_path + "hidden/heldout.pth")
    hidden_masks['public']['train'] = torch.load(params.mask_path + "hidden/public_train.pth")
    hidden_masks['public']['heldout'] = torch.load(params.mask_path + "hidden/public_heldout.pth")

    # Train the auxiliary (attack) model on the chosen subset of the public data.
    if params.public_data == 'train':
        print('Using public training data for auxiliary model')
        attack_model = train(params, hidden_masks['public']['train'])
    elif params.public_data[:4] == 'rand':
        # 'rand<p>' selects a random p% subset of the public data.
        print('Using random subset for auxiliary model')
        public_ids = (known_masks['public'] == True).nonzero().flatten().numpy()
        prop_selected = float(params.public_data[4:]) / 100
        num_selected = math.ceil(prop_selected * len(public_ids))
        permuted_ids = np.random.permutation(public_ids)
        aux_data_mask = to_mask(len(known_masks['public']), permuted_ids[:num_selected])
        print('Number of public model training points', len((aux_data_mask == True).nonzero().flatten().numpy()))
        attack_model = train(params, aux_data_mask)
    else:
        print('Using all public data for auxiliary model')
        attack_model = train(params, known_masks['public'])
    attack_model = attack_model.cuda()

    # Get the final (private) model parameters from the saved checkpoint.
    private_model = build_model(params)
    private_model_path = os.path.join(params.model_path, "checkpoint.pth")
    state_dict_private = torch.load(private_model_path)
    private_model.load_state_dict(state_dict_private['model'])
    private_model = private_model.cuda()

    # Get the ids of the private training points to dot product against.
    private_train_ids = (hidden_masks['private']['train'] == True).nonzero().flatten().numpy()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
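
The to_mask helper called in the random-subset branch is defined elsewhere in the repository. Judging from how it is called (a mask length plus an array of selected indices) and how its output is consumed (compared with == True, then .nonzero()), it presumably builds a boolean tensor. A minimal sketch under that assumption, not the repository's actual definition:

import torch

def to_mask(n, ids):
    # Boolean mask of length n that is True exactly at the selected indices.
    mask = torch.zeros(n, dtype=torch.bool)
    mask[list(ids)] = True
    return mask

# Example: a 10-element mask selecting indices 2, 5 and 7.
print(to_mask(10, [2, 5, 7]).nonzero().flatten())  # tensor([2, 5, 7])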



attacks/privacy_attacks.py [1050:1084]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    known_masks, hidden_masks = {}, {}
    hidden_masks['public'], hidden_masks['private'] = {}, {}

    # Load the known public/private split and the hidden train/heldout splits.
    known_masks['public'] = torch.load(params.mask_path + "public.pth")
    known_masks['private'] = torch.load(params.mask_path + "private.pth")
    hidden_masks['private']['train'] = torch.load(params.mask_path + "hidden/train.pth")
    hidden_masks['private']['heldout'] = torch.load(params.mask_path + "hidden/heldout.pth")
    hidden_masks['public']['train'] = torch.load(params.mask_path + "hidden/public_train.pth")
    hidden_masks['public']['heldout'] = torch.load(params.mask_path + "hidden/public_heldout.pth")

    # Train the auxiliary (attack) model on the chosen subset of the public data.
    if params.public_data == 'train':
        print('Using public training data for auxiliary model')
        attack_model = train(params, hidden_masks['public']['train'])
    elif params.public_data[:4] == 'rand':
        # 'rand<p>' selects a random p% subset of the public data.
        print('Using random subset for auxiliary model')
        public_ids = (known_masks['public'] == True).nonzero().flatten().numpy()
        prop_selected = float(params.public_data[4:]) / 100
        num_selected = math.ceil(prop_selected * len(public_ids))
        permuted_ids = np.random.permutation(public_ids)
        aux_data_mask = to_mask(len(known_masks['public']), permuted_ids[:num_selected])
        print('Number of public model training points', len((aux_data_mask == True).nonzero().flatten().numpy()))
        attack_model = train(params, aux_data_mask)
    else:
        print('Using all public data for auxiliary model')
        attack_model = train(params, known_masks['public'])
    attack_model = attack_model.cuda()

    # Get the final (private) model parameters from the saved checkpoint.
    private_model = build_model(params)
    private_model_path = os.path.join(params.model_path, "checkpoint.pth")
    state_dict_private = torch.load(private_model_path)
    private_model.load_state_dict(state_dict_private['model'])
    private_model = private_model.cuda()

    # Get the ids of the private training points.
    private_train_ids = (hidden_masks['private']['train'] == True).nonzero().flatten().numpy()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
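
Both excerpts stop right where private_train_ids is about to be used, and the trailing comment in the first excerpt ("to dot product") only hints at what follows. As a hypothetical illustration only, not the repository's implementation, one common way such a dot-product membership statistic is formed is the inner product between a candidate example's loss gradient under the auxiliary model and the parameter difference between the private and auxiliary models. The function below is a sketch under that assumption; the cross-entropy loss and the single-example interface are stand-ins for whatever the actual code uses:

import torch
import torch.nn.functional as F

def dot_product_score(attack_model, private_model, x, y):
    # Flattened parameter difference between the private and auxiliary models
    # (assumes both share the architecture produced by build_model(params)).
    delta = torch.cat([(p_priv - p_aux).detach().flatten()
                       for p_priv, p_aux in zip(private_model.parameters(),
                                                attack_model.parameters())])
    # Per-example gradient of the loss w.r.t. the auxiliary model's parameters.
    loss = F.cross_entropy(attack_model(x.unsqueeze(0)), y.unsqueeze(0))
    grads = torch.autograd.grad(loss, list(attack_model.parameters()))
    grad_flat = torch.cat([g.flatten() for g in grads])
    # The resulting scalar can be thresholded to guess membership of (x, y).
    return torch.dot(grad_flat, delta).item()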



