in attacks/privacy_attacks.py [0:0]
import numpy as np
import torch

# get_dataset and hop_skip_jump_attack (and the commented-out carlini_wagner_l2)
# are assumed to be imported elsewhere in this module.


def get_calibrated_distances(params, model1, model2, ids):
    """
    Return the boundary distances of the samples in `ids` under `model1`,
    calibrated by subtracting the corresponding distances under `model2`.
    """
    dataset = get_dataset(params)
    # Gather the requested samples and their labels, and move them to the GPU.
    images = torch.stack([dataset[i][0] for i in ids])
    images = images.cuda()
    targets = torch.stack([torch.tensor(dataset[i][1]) for i in ids])
    targets = targets.cuda()
    # Clean predictions of both models on the unperturbed samples.
    outputs1 = model1(images)
    outputs2 = model2(images)
    # Decision-based boundary search (L2 norm) against each model.
    images_pert1 = hop_skip_jump_attack(model1, images, 2, verbose=False,
                                        clip_min=params.clip_min, clip_max=params.clip_max)
    images_pert2 = hop_skip_jump_attack(model2, images, 2, verbose=False,
                                        clip_min=params.clip_min, clip_max=params.clip_max)
    # Alternative: targeted Carlini-Wagner L2 attack.
    # images_pert1 = carlini_wagner_l2(model1, images, params.num_classes, targets)
    # images_pert2 = carlini_wagner_l2(model2, images, params.num_classes, targets)
    # Signed boundary distance under model1: positive if the clean sample is
    # classified correctly, negative otherwise.
    dists1 = []
    for i in range(len(ids)):
        _, pred = torch.topk(outputs1[i], 1)
        if pred.item() == targets[i].item():
            dists1.append(torch.norm(images_pert1[i] - images[i], p=2).item())
        else:
            dists1.append(-torch.norm(images_pert1[i] - images[i], p=2).item())
    # Signed boundary distance under model2 (the calibration model).
    dists2 = []
    for i in range(len(ids)):
        _, pred = torch.topk(outputs2[i], 1)
        if pred.item() == targets[i].item():
            dists2.append(torch.norm(images_pert2[i] - images[i], p=2).item())
        else:
            dists2.append(-torch.norm(images_pert2[i] - images[i], p=2).item())
    # Calibrated score: distance under model1 minus distance under model2.
    calibrated_dists = np.subtract(np.array(dists1), np.array(dists2))
    return calibrated_dists
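

# Usage sketch (not part of the original file): one way the calibrated scores
# could feed a threshold-based membership decision, where a larger calibrated
# distance is taken as evidence that the sample was in model1's training set.
# `target_model`, `reference_model`, and `threshold` are illustrative names;
# the threshold would be chosen separately, e.g. on held-out or shadow data.
def predict_membership(params, target_model, reference_model, ids, threshold=0.0):
    """Label each sample in `ids` as a member if its calibrated distance exceeds `threshold`."""
    dists = get_calibrated_distances(params, target_model, reference_model, ids)
    # Boolean array: True = predicted training member of `target_model`.
    return dists > threshold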