crlapi/sl/clmodels/bagging.py [128:193]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        device = self.config.device

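        # Move every model to the target device; a single optimizer steps
        # the chained parameters of all k models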
        models = [model.to(device) for model in models]
        optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))

        # Launch the training procedure
        logger.message(f"Start training for {self.config.max_epochs} epochs")
        iteration, n_fwd_samples = 0, 0
        for epoch in range(self.config.max_epochs):

            # Put every model in training mode
            for model in models:
                model.train()

            # Keep a single running total of these for now
            training_loss = 0.0
            training_accuracy = 0.0
            n = 0

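            # zip(*training_loaders) yields one batch per model per step and
            # stops once the shortest loader is exhausted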
            for i, items in enumerate(zip(*training_loaders)):
                xs, ys = [], []
                for x, y in items:
                    x, y = x.to(device), y.to(device)
                    xs.append(train_aug(x))
                    ys.append(y)

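                # Stacking assumes all k batches have the same shape
                # (e.g. loaders built with drop_last=True)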
                xs = torch.stack(xs)
                ys = torch.stack(ys)

                # Sum the per-model losses so that a single backward pass
                # updates all k models at once
                loss, acc = 0, 0
                for model_idx in range(self.config.k):
                    model, x, y = models[model_idx], xs[model_idx], ys[model_idx]

                    predicted = model(x)
                    loss += F.cross_entropy(predicted, y)
                    nb_ok = predicted.max(1)[1].eq(y).sum().item()
                    acc += nb_ok / x.size(0)

                # Metrics averaged over the k ensemble members
                accuracy = acc / self.config.k
                loss_ = loss.item() / self.config.k
                training_accuracy += accuracy
                training_loss += loss_

                n += xs.size(1)              # dim 0 indexes the k models, dim 1 the batch
                n_fwd_samples += xs.size(1)

                logger.add_scalar("train/loss",loss_,iteration)
                logger.add_scalar("train/accuracy",accuracy,iteration)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                iteration += 1

            # Validation: enumerate is zero-based, so i + 1 batches were processed
            training_accuracy /= (i + 1)
            training_loss /= (i + 1)
            # Validate each ensemble member separately, then average the metrics
            outs = [self._validation_loop(model, device, validation_loader) for model in models]
            validation_losses = [x['loss'] for x in outs]
            validation_accuracies = [x['accuracy'] for x in outs]

            validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)

            logger.add_scalar("validation/loss",validation_loss,epoch)
            logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
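
The k training_loaders zipped in the loop above are built outside this
excerpt. Below is a minimal sketch of one way to construct them for bagging,
where each ensemble member trains on its own bootstrap resample of the data.
The helper name make_bagging_loaders and the use of drop_last=True (which
keeps batch shapes equal so torch.stack succeeds) are illustrative
assumptions, not crlapi's actual API:

from torch.utils.data import DataLoader, RandomSampler

def make_bagging_loaders(dataset, k, batch_size):
    loaders = []
    for _ in range(k):
        # Sampling with replacement over len(dataset) draws gives each
        # member its own bootstrap view of the training data
        sampler = RandomSampler(dataset, replacement=True,
                                num_samples=len(dataset))
        loaders.append(DataLoader(dataset, batch_size=batch_size,
                                  sampler=sampler, drop_last=True))
    return loaders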



crlapi/sl/clmodels/k_ensemble.py [101:167]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        device = self.config.device

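        # Move every model to the target device; a single optimizer steps
        # the chained parameters of all k models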
        models = [model.to(device) for model in models]
        optimizer = self.get_optimizer(chain(*[model.parameters() for model in models]))

        # Launch the training procedure
        logger.message(f"Start training for {self.config.max_epochs} epochs")

        iteration, n_fwd_samples = 0, 0
        for epoch in range(self.config.max_epochs):

            # Put every model in training mode
            for model in models:
                model.train()

            # Keep a single running total of these for now
            training_loss = 0.0
            training_accuracy = 0.0
            n = 0

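            # zip(*training_loaders) yields one batch per model per step and
            # stops once the shortest loader is exhausted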
            for i, items in enumerate(zip(*training_loaders)):
                xs, ys = [], []
                for x, y in items:
                    x, y = x.to(device), y.to(device)
                    xs.append(train_aug(x))
                    ys.append(y)

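                # Stacking assumes all k batches have the same shape
                # (e.g. loaders built with drop_last=True)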
                xs = torch.stack(xs)
                ys = torch.stack(ys)

                # Sum the per-model losses so that a single backward pass
                # updates all k models at once
                loss, acc = 0, 0
                for model_idx in range(self.config.k):
                    model, x, y = models[model_idx], xs[model_idx], ys[model_idx]

                    predicted = model(x)
                    loss += F.cross_entropy(predicted, y)
                    nb_ok = predicted.max(1)[1].eq(y).sum().item()
                    acc += nb_ok / x.size(0)

                # Metrics averaged over the k ensemble members
                accuracy = acc / self.config.k
                loss_ = loss.item() / self.config.k
                training_accuracy += accuracy
                training_loss += loss_

                n += xs.size(1)              # dim 0 indexes the k models, dim 1 the batch
                n_fwd_samples += xs.size(1)

                logger.add_scalar("train/loss",loss_,iteration)
                logger.add_scalar("train/accuracy",accuracy,iteration)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                iteration += 1

            # Validation: enumerate is zero-based, so i + 1 batches were processed
            training_accuracy /= (i + 1)
            training_loss /= (i + 1)
            # Validate each ensemble member separately, then average the metrics
            outs = [self._validation_loop(model, device, validation_loader) for model in models]
            validation_losses = [x['loss'] for x in outs]
            validation_accuracies = [x['accuracy'] for x in outs]

            validation_loss, validation_accuracy = np.mean(validation_losses), np.mean(validation_accuracies)

            logger.add_scalar("validation/loss",validation_loss,epoch)
            logger.add_scalar("validation/accuracy",validation_accuracy,epoch)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
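
Both excerpts call self._validation_loop and average the per-model results.
A minimal sketch of such a helper, assuming it returns a dict with 'loss'
and 'accuracy' keys as consumed above (the actual crlapi implementation may
differ):

import torch
import torch.nn.functional as F

@torch.no_grad()
def _validation_loop(self, model, device, loader):
    model.eval()
    total_loss, n_ok, n = 0.0, 0, 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        logits = model(x)
        # Sum per-batch (not mean) so uneven batch sizes are weighted correctly
        total_loss += F.cross_entropy(logits, y, reduction='sum').item()
        n_ok += logits.max(1)[1].eq(y).sum().item()
        n += x.size(0)
    return {'loss': total_loss / n, 'accuracy': n_ok / n}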



