crlapi/sl/clmodels/debug_ensemble.py [92:133]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                model.train()

                # Training loop: accumulate per-epoch loss/accuracy sums;
                # n counts the total number of samples seen this epoch.
                training_loss=0.0
                training_accuracy=0.0
                n=0
                for i, (raw_x, y) in enumerate(training_loader):
                    raw_x, y = raw_x.to(device), y.to(device)
                    n+=raw_x.size()[0]

                    # apply transformations (data augmentation on the raw batch)
                    x = train_aug(raw_x)

                    predicted=model(x)
                    loss=F.cross_entropy(predicted,y)
                    # Number of correct top-1 predictions in this batch.
                    nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
                    accuracy=nb_ok/x.size()[0]
                    training_accuracy+=nb_ok
                    # NOTE(review): F.cross_entropy defaults to a per-batch *mean*,
                    # yet this running sum is later divided by the sample count n —
                    # that understates the epoch-average loss unless every batch has
                    # the same size; confirm whether this is intentional.
                    training_loss+=loss.item()

                    # Per-iteration scalars are logged against the global iteration counter.
                    logger.add_scalar("train/loss",loss.item(),iteration)
                    logger.add_scalar("train/accuracy",accuracy,iteration)

                    # Standard backprop + parameter update.
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    iteration += 1
                    n_fwd_samples += x.size(0)

                # Validation
                # Normalize the epoch aggregates by total sample count
                # (see NOTE above regarding the loss normalization).
                training_accuracy/=n
                training_loss/=n
                out=self._validation_loop(model,device,validation_loader)
                validation_loss,validation_accuracy=out["loss"],out["accuracy"]

                # Per-epoch validation scalars are logged against the epoch index.
                logger.add_scalar("validation/loss",validation_loss,epoch)
                logger.add_scalar("validation/accuracy",validation_accuracy,epoch)

                # Right now CV against accuracy
                # if best_loss is None or validation_loss < (best_loss - patience_delta):
                # Early-stopping / model-selection check: validation accuracy must
                # improve on the best seen by more than patience_delta.
                # (The if-body continues beyond this excerpt.)
                if best_acc is None or validation_accuracy > (best_acc + patience_delta):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



crlapi/sl/clmodels/ensemble.py [124:163]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            model.train()

            # Training loop: accumulate per-epoch loss/accuracy sums;
            # n counts the total number of samples seen this epoch.
            training_loss=0.0
            training_accuracy=0.0
            n=0
            for i, (raw_x, y) in enumerate(training_loader):
                raw_x, y = raw_x.to(device), y.to(device)
                n+=raw_x.size()[0]
                # apply transformations (data augmentation on the raw batch)
                x = train_aug(raw_x)

                predicted=model(x)
                loss=F.cross_entropy(predicted,y)
                # Number of correct top-1 predictions in this batch.
                nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
                accuracy=nb_ok/x.size()[0]
                training_accuracy+=nb_ok
                # NOTE(review): F.cross_entropy defaults to a per-batch *mean*,
                # yet this running sum is later divided by the sample count n —
                # that understates the epoch-average loss unless every batch has
                # the same size; confirm whether this is intentional.
                training_loss+=loss.item()
                # Per-iteration scalars are logged against the global iteration counter.
                logger.add_scalar("train/loss",loss.item(),iteration)
                logger.add_scalar("train/accuracy",accuracy,iteration)

                # Standard backprop + parameter update.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                iteration += 1
                n_fwd_samples += x.size(0)

            #Validation
            # Normalize the epoch aggregates by total sample count
            # (see NOTE above regarding the loss normalization).
            training_accuracy/=n
            training_loss/=n
            out=self._validation_loop(model,device,validation_loader)
            validation_loss,validation_accuracy=out["loss"],out["accuracy"]

            # Per-epoch validation scalars are logged against the epoch index.
            logger.add_scalar("validation/loss",validation_loss,epoch)
            logger.add_scalar("validation/accuracy",validation_accuracy,epoch)

            # Right now CV against accuracy
            # if best_loss is None or validation_loss < (best_loss - patience_delta):
            # Early-stopping / model-selection check: validation accuracy must
            # improve on the best seen by more than patience_delta.
            # (The if-body continues beyond this excerpt.)
            if best_acc is None or validation_accuracy > (best_acc + patience_delta):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



