def train_attack_models()

in privacy_lint/attacks/shadow.py [0:0]


    def train_attack_models(self):
        """Fit one membership-inference attack model per class label."""
        self.attack_models = {}
        for label in range(self.labels.max() + 1):
            print(f"Training shadow model on label {label}")
            # Shadow-model softmax outputs for examples of this label ...
            train_X = self.softmaxes[self.labels == label]
            # ... and their membership masks, used as binary targets.
            train_Y = self.masks[self.labels == label]
            # Fit the per-label attack classifier on (softmax, membership) pairs.
            self.attack_models[label] = train_shadow(
                train_X, train_Y.long(), verbose=self.verbose
            )
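
For reference, `train_shadow` (defined elsewhere in privacy_lint) is expected to return a binary classifier that predicts membership from a softmax vector. The following is a minimal, hypothetical stand-in written under that assumption, not the library's actual implementation: only the call signature mirrors the usage above, everything else (logistic-regression model, epochs, learning rate) is illustrative.

import torch
import torch.nn as nn

def train_shadow(train_X, train_Y, verbose=False, epochs=200, lr=1e-2):
    # Hypothetical stand-in: a logistic-regression attack model mapping a
    # softmax vector to two logits (non-member vs. member).
    model = nn.Linear(train_X.shape[1], 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for epoch in range(epochs):
        optimizer.zero_grad()
        loss = loss_fn(model(train_X.float()), train_Y)
        loss.backward()
        optimizer.step()
        if verbose and epoch % 50 == 0:
            print(f"epoch {epoch}: loss {loss.item():.4f}")
    return model

Under this sketch, an unseen softmax vector would later be scored by the model stored under its label, e.g. self.attack_models[label](softmax_vector); the real scoring path in privacy_lint may differ.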