LaNAS/Distributed_LaNAS/server/Classifier.py [68:154]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.training_counter += 1
        # in a rare case, one branch has no networks
        if len(self.nets) == 0:
            return
        for epoch in range(self.epochs):
            epoch += 1
            nets = self.nets
            acc  = self.acc
            #clear grads
            self.optimiser.zero_grad()
            #forward to get predicted values
            outputs = self.model.forward( nets )
            loss = nn.MSELoss()(outputs, acc)
            loss.backward()# back props
            nn.utils.clip_grad_norm_(self.model.parameters(), 5)
            self.optimiser.step()# update the parameters
#            if epoch % 1000 == 0:
#                print('@' + self.__class__.__name__ + ' epoch {}, loss {}'.format(epoch, loss.data))

    def predict(self, remaining):
        """Predict an accuracy for every candidate architecture.

        Args:
            remaining: dict whose keys are JSON-encoded architecture
                vectors (the values are ignored).

        Returns:
            dict mapping the same JSON-encoded architecture strings to the
            surrogate model's scalar prediction for each architecture.
        """
        assert type(remaining) == type({})
        # Decode the JSON keys into an (N, input_dim) float32 tensor.
        # (Values are unused, so iterate keys only.)
        remaining_archs = [json.loads(k) for k in remaining]
        remaining_archs = torch.from_numpy(
            np.asarray(remaining_archs, dtype=np.float32).reshape(-1, self.input_dim))
        if torch.cuda.is_available():
            remaining_archs = remaining_archs.cuda()
        # Call the module directly (not .forward) so forward hooks still run.
        outputs = self.model(remaining_archs)
        if torch.cuda.is_available():
            remaining_archs = remaining_archs.cpu()
            outputs         = outputs.cpu()
        result = {}
        for arch, pred in zip(remaining_archs, outputs):
            # Re-encode the arch so keys round-trip exactly with the input.
            arch_str = json.dumps(arch.detach().numpy().tolist())
            result[arch_str] = pred.detach().numpy().tolist()[0]
        assert len(result) == len(remaining)
        return result

    def split_predictions(self, remaining):
        """Partition candidate architectures around the mean prediction.

        Predicts an accuracy for every architecture in ``remaining`` and
        splits them on the mean prediction over this node's training set.
        Side effect: stores that mean in ``self.boundary``.

        Args:
            remaining: dict keyed by JSON-encoded architecture vectors.

        Returns:
            (goodies, badness): architectures predicted at/above the mean,
            and those predicted strictly below it.
        """
        assert type(remaining) == type({})
        samples_goodies = {}
        samples_badness = {}
        # BUG FIX: the original early return yielded (badness, goodies) —
        # the opposite order of the normal return below. It was harmless
        # only because both dicts are empty; return a uniform order here.
        if len(remaining) == 0:
            return samples_goodies, samples_badness
        predictions   = self.predict(remaining)
        avg_acc       = self.predict_mean()
        self.boundary = avg_acc
        for arch, pred in predictions.items():
            if pred < avg_acc:
                samples_badness[arch] = pred
            else:
                samples_goodies[arch] = pred
        assert len(samples_badness) + len(samples_goodies) == len(remaining)
        return samples_goodies, samples_badness


    def predict_mean(self):
        """Average surrogate prediction over this node's training networks.

        Returns:
            0 when ``self.nets`` is empty, otherwise the mean of the
            model's predictions on ``self.nets`` (a numpy scalar).
        """
        if not len(self.nets):
            return 0
        # can we use the actual acc?
        preds = self.model.forward(self.nets)
        # .cpu() is a no-op for tensors already on the CPU, so one code
        # path covers both the CUDA and CPU cases.
        return np.mean(preds.detach().cpu().numpy())
    
    def split_data(self):
        """Train the surrogate, then split ``self.samples`` on the mean prediction.

        Side effects: runs ``self.train()`` and stores the mean prediction
        in ``self.boundary``.

        Returns:
            (goodies, badness): samples whose recorded accuracy is at/above
            the mean prediction, and those strictly below it.
        """
        samples_goodies = {}
        samples_badness = {}
        # BUG FIX: the original early return yielded (badness, goodies) —
        # the opposite order of the normal return below. It was harmless
        # only because both dicts are empty; return a uniform order here.
        if len(self.nets) == 0:
            return samples_goodies, samples_badness
        self.train()
        avg_acc       = self.predict_mean()
        self.boundary = avg_acc
        for arch, acc in self.samples.items():
            if acc < avg_acc:
                samples_badness[arch] = acc
            else:
                samples_goodies[arch] = acc
        assert len(samples_badness) + len(samples_goodies) == len(self.samples)
        return samples_goodies, samples_badness
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



LaNAS/one-shot_LaNAS/LaNAS/Classifier.py [73:159]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.training_counter += 1
        # in a rare case, one branch has no networks
        if len(self.nets) == 0:
            return
        for epoch in range(self.epochs):
            epoch += 1
            nets = self.nets
            acc  = self.acc
            #clear grads
            self.optimiser.zero_grad()
            #forward to get predicted values
            outputs = self.model.forward( nets )
            loss = nn.MSELoss()(outputs, acc)
            loss.backward()# back props
            nn.utils.clip_grad_norm_(self.model.parameters(), 5)
            self.optimiser.step()# update the parameters
#            if epoch % 1000 == 0:
#                print('@' + self.__class__.__name__ + ' epoch {}, loss {}'.format(epoch, loss.data))

    def predict(self, remaining):
        """Predict an accuracy for every candidate architecture.

        Args:
            remaining: dict whose keys are JSON-encoded architecture
                vectors (the values are ignored).

        Returns:
            dict mapping the same JSON-encoded architecture strings to the
            surrogate model's scalar prediction for each architecture.
        """
        assert type(remaining) == type({})
        # Decode the JSON keys into an (N, input_dim) float32 tensor.
        # (Values are unused, so iterate keys only.)
        remaining_archs = [json.loads(k) for k in remaining]
        remaining_archs = torch.from_numpy(
            np.asarray(remaining_archs, dtype=np.float32).reshape(-1, self.input_dim))
        if torch.cuda.is_available():
            remaining_archs = remaining_archs.cuda()
        # Call the module directly (not .forward) so forward hooks still run.
        outputs = self.model(remaining_archs)
        if torch.cuda.is_available():
            remaining_archs = remaining_archs.cpu()
            outputs         = outputs.cpu()
        result = {}
        for arch, pred in zip(remaining_archs, outputs):
            # Re-encode the arch so keys round-trip exactly with the input.
            arch_str = json.dumps(arch.detach().numpy().tolist())
            result[arch_str] = pred.detach().numpy().tolist()[0]
        assert len(result) == len(remaining)
        return result

    def split_predictions(self, remaining):
        """Partition candidate architectures around the mean prediction.

        Predicts an accuracy for every architecture in ``remaining`` and
        splits them on the mean prediction over this node's training set.
        Side effect: stores that mean in ``self.boundary``.

        Args:
            remaining: dict keyed by JSON-encoded architecture vectors.

        Returns:
            (goodies, badness): architectures predicted at/above the mean,
            and those predicted strictly below it.
        """
        assert type(remaining) == type({})
        samples_goodies = {}
        samples_badness = {}
        # BUG FIX: the original early return yielded (badness, goodies) —
        # the opposite order of the normal return below. It was harmless
        # only because both dicts are empty; return a uniform order here.
        if len(remaining) == 0:
            return samples_goodies, samples_badness
        predictions   = self.predict(remaining)
        avg_acc       = self.predict_mean()
        self.boundary = avg_acc
        for arch, pred in predictions.items():
            if pred < avg_acc:
                samples_badness[arch] = pred
            else:
                samples_goodies[arch] = pred
        assert len(samples_badness) + len(samples_goodies) == len(remaining)
        return samples_goodies, samples_badness


    def predict_mean(self):
        """Average surrogate prediction over this node's training networks.

        Returns:
            0 when ``self.nets`` is empty, otherwise the mean of the
            model's predictions on ``self.nets`` (a numpy scalar).
        """
        if not len(self.nets):
            return 0
        # can we use the actual acc?
        preds = self.model.forward(self.nets)
        # .cpu() is a no-op for tensors already on the CPU, so one code
        # path covers both the CUDA and CPU cases.
        return np.mean(preds.detach().cpu().numpy())
    
    def split_data(self):
        """Train the surrogate, then split ``self.samples`` on the mean prediction.

        Side effects: runs ``self.train()`` and stores the mean prediction
        in ``self.boundary``.

        Returns:
            (goodies, badness): samples whose recorded accuracy is at/above
            the mean prediction, and those strictly below it.
        """
        samples_goodies = {}
        samples_badness = {}
        # BUG FIX: the original early return yielded (badness, goodies) —
        # the opposite order of the normal return below. It was harmless
        # only because both dicts are empty; return a uniform order here.
        if len(self.nets) == 0:
            return samples_goodies, samples_badness
        self.train()
        avg_acc       = self.predict_mean()
        self.boundary = avg_acc
        for arch, acc in self.samples.items():
            if acc < avg_acc:
                samples_badness[arch] = acc
            else:
                samples_goodies[arch] = acc
        assert len(samples_badness) + len(samples_goodies) == len(self.samples)
        return samples_goodies, samples_badness
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



