attacks/privacy_attacks.py [942:954]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Fragment from a membership-inference-style attack: collect the
            # private model's log-softmax outputs for member (train) vs.
            # non-member (held-out) examples.
            # Stack the raw input tensors (element 0 of each dataset item) for
            # the private-training member indices, then move them to CPU.
            train_images=torch.stack([dataset[i][0] for i in private_train_ids])
            train_images=train_images.cpu()
            
            # Same for the held-out (non-member) indices.
            heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
            heldout_images=heldout_images.cpu()

            # Log-softmax over the class dimension (dim=1 of the model output).
            log_softmax = torch.nn.LogSoftmax(dim=1)
        
            # Forward passes on CPU inputs — presumably private_model lives on
            # CPU here; verify against the enclosing function.
            # NOTE(review): no torch.no_grad() is visible in this fragment —
            # confirm the enclosing scope disables gradient tracking.
            train_output=private_model(train_images)
            heldout_output=private_model(heldout_images)
            
            # Per-class log-probabilities for members and non-members.
            log_train_output=log_softmax(train_output)
            log_heldout_output=log_softmax(heldout_output)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



attacks/privacy_attacks.py [992:1004]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Fragment duplicating the member/non-member feature-extraction logic:
        # compute the private model's log-softmax outputs on the private train
        # set and on the held-out set.
        # Stack input tensors (element 0 of each dataset item) for the member
        # indices and move them to CPU.
        train_images=torch.stack([dataset[i][0] for i in private_train_ids])
        train_images=train_images.cpu()
        
        # Same for the held-out (non-member) indices.
        heldout_images=torch.stack([dataset[i][0] for i in private_heldout_ids])
        heldout_images=heldout_images.cpu()
        
        # Log-softmax over the class dimension (dim=1 of the model output).
        log_softmax = torch.nn.LogSoftmax(dim=1)
        
        # Forward passes on CPU inputs — presumably private_model is on CPU;
        # verify against the enclosing function.
        # NOTE(review): no torch.no_grad() is visible in this fragment —
        # confirm gradient tracking is disabled by the enclosing scope.
        train_output=private_model(train_images)
        heldout_output=private_model(heldout_images)
        
        # Per-class log-probabilities for members and non-members.
        log_train_output=log_softmax(train_output)
        log_heldout_output=log_softmax(heldout_output)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



