attacks/privacy_attacks.py [334:367]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Split ids into 1000 roughly equal batches (np.array_split takes
            # a number of sections, not a section size).
            batched_ids = np.array_split(ids, 1000)
            for b_ids in batched_ids:
                image_data = torch.stack([dataset[i][0] for i in b_ids]).cuda()
                target_data = torch.stack([torch.tensor(dataset[i][1]) for i in b_ids]).cuda()

                # Forward/backward through the private model; per-sample
                # gradients land in param.grad_sample (Opacus-style wrapping).
                private_model.zero_grad()
                out_private = private_model(image_data)
                loss_private = F.cross_entropy(out_private, target_data)
                loss_private.backward()

                # Identical forward/backward through the attack model.
                attack_model.zero_grad()
                out_attack = attack_model(image_data)
                loss_attack = F.cross_entropy(out_attack, target_data)
                loss_attack.backward()

                for i, id in enumerate(b_ids):
                    # Flatten this sample's gradient for every parameter and
                    # concatenate into one vector per model.
                    private_grads = torch.cat(
                        [param.grad_sample[i].view(-1) for param in private_model.parameters()]
                    )
                    attack_grads = torch.cat(
                        [param.grad_sample[i].view(-1) for param in attack_model.parameters()]
                    )

                    g_private = private_grads.cpu().numpy()
                    g_attack = attack_grads.cpu().numpy()

                    if norm_type == 'inf':
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
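The param.grad_sample attribute indexed above is not populated by plain
PyTorch; it assumes private_model and attack_model were wrapped for
per-sample gradient computation, as Opacus does. A minimal sketch of that
setup, assuming Opacus's GradSampleModule (the toy model and shapes are
illustrative, not taken from this repository):

    import torch
    import torch.nn as nn
    from opacus.grad_sample import GradSampleModule

    # Wrapping an nn.Module so that, after loss.backward(), every parameter
    # carries param.grad_sample of shape (batch_size, *param.shape).
    base = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    private_model = GradSampleModule(base)

    x = torch.randn(4, 3, 32, 32)   # toy batch of four "images"
    y = torch.randint(0, 10, (4,))
    loss = nn.functional.cross_entropy(private_model(x), y)
    loss.backward()

    for param in private_model.parameters():
        # One gradient per sample, which the loops above index with [i].
        print(param.grad_sample.shape)   # e.g. torch.Size([4, 10, 3072])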



attacks/privacy_attacks.py [388:421]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Same batching as in the fragment above: 1000 roughly equal chunks.
        batched_ids = np.array_split(ids, 1000)
        for b_ids in batched_ids:
            image_data = torch.stack([dataset[i][0] for i in b_ids]).cuda()
            target_data = torch.stack([torch.tensor(dataset[i][1]) for i in b_ids]).cuda()

            # Forward/backward through the private model to populate
            # per-sample gradients (param.grad_sample).
            private_model.zero_grad()
            out_private = private_model(image_data)
            loss_private = F.cross_entropy(out_private, target_data)
            loss_private.backward()

            # Identical forward/backward through the attack model.
            attack_model.zero_grad()
            out_attack = attack_model(image_data)
            loss_attack = F.cross_entropy(out_attack, target_data)
            loss_attack.backward()

            for i, id in enumerate(b_ids):
                # Flatten and concatenate this sample's per-parameter
                # gradients into one vector per model.
                private_grads = torch.cat(
                    [param.grad_sample[i].view(-1) for param in private_model.parameters()]
                )
                attack_grads = torch.cat(
                    [param.grad_sample[i].view(-1) for param in attack_model.parameters()]
                )

                g_private = private_grads.cpu().numpy()
                g_attack = attack_grads.cpu().numpy()

                if norm_type == 'inf':
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
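Both excerpts cut off at the norm_type check, so the actual distance
computation is not shown. For orientation only, comparing two flattened
gradient vectors under a configurable norm is commonly done as below; this
is a generic sketch (the helper name grad_distance is hypothetical), not
the elided code from privacy_attacks.py:

    import numpy as np

    def grad_distance(g_private, g_attack, norm_type='inf'):
        # Hypothetical helper: distance between two flattened per-sample
        # gradient vectors; the repository's elided branch may differ.
        diff = g_private - g_attack
        if norm_type == 'inf':
            # L-infinity norm: largest absolute coordinate of the difference.
            return float(np.linalg.norm(diff, ord=np.inf))
        # Otherwise interpret norm_type as an integer order, e.g. '1' or '2'.
        return float(np.linalg.norm(diff, ord=int(norm_type)))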



