# get_calibrated_gradnorm()
#
# extracted from attacks/privacy_attacks.py [0:0]


def _resolve_ord(norm_type):
    """Map the caller's norm spec ('1'/'2'/'3'/'inf'/None) to the `ord`
    argument expected by np.linalg.norm."""
    if norm_type == 'inf':
        return np.inf
    if norm_type in ('1', '2', '3'):
        return int(norm_type)
    return norm_type  # None -> NumPy's default vector 2-norm; ints pass through


def _per_sample_norm_gaps(private_model, attack_model, dataset, b_ids, ord_):
    """Backprop one batch through both models and return, per sample, the
    calibrated gap  norm(private grad) - norm(attack grad).

    Requires both models to expose Opacus-style per-sample gradients via
    `param.grad_sample` after backward() -- TODO confirm models are wrapped.
    """
    image_data = torch.stack([dataset[i][0] for i in b_ids]).cuda()
    target_data = torch.stack([torch.tensor(dataset[i][1]) for i in b_ids]).cuda()

    # One forward/backward pass per model populates grad_sample for the batch.
    for model in (private_model, attack_model):
        model.zero_grad()
        loss = F.cross_entropy(model(image_data), target_data)
        loss.backward()

    gaps = []
    for i in range(len(b_ids)):
        g_private = torch.cat(
            [p.grad_sample[i].view(-1) for p in private_model.parameters()]
        ).cpu().numpy()
        g_attack = torch.cat(
            [p.grad_sample[i].view(-1) for p in attack_model.parameters()]
        ).cpu().numpy()
        gaps.append(np.linalg.norm(g_private, ord=ord_)
                    - np.linalg.norm(g_attack, ord=ord_))
    return gaps


def get_calibrated_gradnorm(params, private_model, private_params, attack_model,
                            attack_params, ids, mask, aug_style='mean',
                            norm_type=None):
    """
    Return calibrated gradient-norm values, one slot per position in `mask`.

    For each sample id the value is
        ||grad(private_model)||_p - ||grad(attack_model)||_p
    computed on per-sample gradients. When `params.aug` is set, the dataset is
    evaluated 10 times (the dataset transform is presumably stochastic --
    TODO confirm) and the per-pass gaps are reduced with `aug_style`.

    Args:
        params: experiment config; `params.aug` selects augmented mode and
            `get_dataset(params)` loads the data.
        private_model, attack_model: models exposing per-sample gradients
            (`param.grad_sample`), e.g. wrapped by Opacus.
        private_params, attack_params: unused here; kept for signature
            compatibility with callers.
        ids: sample indices to evaluate; split into up to 1000 batches.
        mask: only its length is used, to size the output array.
        aug_style: reduction across augmentation passes in augmented mode:
            'mean' / 'max' / 'median' / 'std'. Any other value leaves the
            entries at 0, matching the original silent fallback.
        norm_type: '1', '2', '3', 'inf', or None (NumPy default 2-norm).

    Returns:
        np.ndarray of shape (len(mask),); ids not evaluated stay 0.

    NOTE(review): the original computed the 'inf' case as
    max(g_private - g_attack) -- the max of the element-wise difference --
    which is inconsistent with every other branch (difference of norms).
    Fixed here to the difference of infinity norms.
    """
    dataset = get_dataset(params)
    grad_norms = np.zeros(len(mask))
    # Hoist the norm-spec mapping out of the loops (the original re-checked
    # and mutated `norm_type` once per sample).
    ord_ = _resolve_ord(norm_type)

    if params.aug:
        # One list of per-pass gaps for every possible id.
        per_id_vals = [[] for _ in range(len(mask))]
        for _ in range(10):
            for b_ids in np.array_split(ids, 1000):
                if len(b_ids) == 0:
                    # array_split emits empty chunks when len(ids) < 1000;
                    # torch.stack([]) would raise.
                    continue
                gaps = _per_sample_norm_gaps(private_model, attack_model,
                                             dataset, b_ids, ord_)
                for sample_id, gap in zip(b_ids, gaps):
                    per_id_vals[sample_id].append(gap)

        reducers = {'mean': np.mean, 'max': np.max,
                    'median': np.median, 'std': np.std}
        reduce_fn = reducers.get(aug_style)
        if reduce_fn is not None:
            for sample_id in ids:
                grad_norms[sample_id] = reduce_fn(per_id_vals[sample_id])
    else:
        for b_ids in np.array_split(ids, 1000):
            if len(b_ids) == 0:
                continue
            gaps = _per_sample_norm_gaps(private_model, attack_model,
                                         dataset, b_ids, ord_)
            for sample_id, gap in zip(b_ids, gaps):
                grad_norms[sample_id] = gap

    return grad_norms