crlapi/sl/clmodels/adaboost.py [154:192]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
                    accuracy=nb_ok/x.size(0)
                    training_accuracy+=nb_ok
                    training_loss+=loss.item()

                    logger.add_scalar("train/loss",loss.item(),iteration)
                    logger.add_scalar("train/accuracy",accuracy,iteration)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    iteration += 1
                    n_fwd_samples += x.size(0)

                # Validation
                training_accuracy/=n
                training_loss/=n
                out=self._validation_loop(model,device,validation_loader)
                validation_loss,validation_accuracy=out["loss"],out["accuracy"]

                logger.add_scalar("validation/loss",validation_loss,epoch)
                logger.add_scalar("validation/accuracy",validation_accuracy,epoch)

                # Model selection / early stopping currently uses validation accuracy (not loss)
                if best_acc is None or validation_accuracy > (best_acc + patience_delta):
                    print(f"\t Round {around}. Found best model at epoch {epoch}")
                    best_model.load_state_dict(_state_dict(model,"cpu"))
                    best_loss = validation_loss
                    best_acc  = validation_accuracy
                    patience_count = 0
                else:
                    patience_count += 1

                logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
                logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")

                if patience_count == patience or epoch == self.config.max_epochs:
                    break
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
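
The loop above relies on `self._validation_loop`, which is not shown in the excerpt; from the way `out["loss"]` and `out["accuracy"]` are read, it must return a dict with those two keys averaged over the validation set. A minimal standalone sketch of such a loop, assuming a standard PyTorch classifier and cross-entropy loss (the function name `validation_loop` and the loss choice are assumptions, not the repo's actual implementation):

import torch
import torch.nn.functional as F

def validation_loop(model, device, loader):
    # Hypothetical sketch of what self._validation_loop is expected to return:
    # average loss and accuracy over the validation loader, as a dict with the
    # "loss" and "accuracy" keys read by the training loop above.
    model.eval()
    total_loss, total_correct, total_seen = 0.0, 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            total_loss += F.cross_entropy(logits, y, reduction="sum").item()
            total_correct += logits.max(1)[1].eq(y).sum().item()
            total_seen += x.size(0)
    return {"loss": total_loss / total_seen, "accuracy": total_correct / total_seen}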



crlapi/sl/clmodels/debug_ensemble.py [107:146]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                    nb_ok=predicted.max(1)[1].eq(y).float().sum().item()
                    accuracy=nb_ok/x.size(0)
                    training_accuracy+=nb_ok
                    training_loss+=loss.item()

                    logger.add_scalar("train/loss",loss.item(),iteration)
                    logger.add_scalar("train/accuracy",accuracy,iteration)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    iteration += 1
                    n_fwd_samples += x.size(0)

                # Validation
                training_accuracy/=n
                training_loss/=n
                out=self._validation_loop(model,device,validation_loader)
                validation_loss,validation_accuracy=out["loss"],out["accuracy"]

                logger.add_scalar("validation/loss",validation_loss,epoch)
                logger.add_scalar("validation/accuracy",validation_accuracy,epoch)

                # Model selection / early stopping currently uses validation accuracy (not loss)
                # if best_loss is None or validation_loss < (best_loss - patience_delta):
                if best_acc is None or validation_accuracy > (best_acc + patience_delta):
                    print(f"\t Round {around}. Found best model at epoch {epoch}")
                    best_model.load_state_dict(_state_dict(model,"cpu"))
                    best_loss = validation_loss
                    best_acc  = validation_accuracy
                    patience_count = 0
                else:
                    patience_count += 1

                logger.message(f"Validation Acc {validation_accuracy:.4f}\t Validation Loss {validation_loss:.4f}")
                logger.message(f"Training Acc {training_accuracy:.4f}\t Training Loss {training_loss:.4f}")

                if patience_count == patience or epoch == self.config.max_epochs:
                    break
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
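
The two blocks above are line-for-line duplicates, so the most direct fix is to move the shared early-stopping bookkeeping into one helper used by both clmodels. A sketch of what that could look like (the `EarlyStopper` name and its interface are hypothetical, not existing crlapi code):

class EarlyStopper:
    """Tracks the best validation accuracy and counts epochs without
    improvement beyond `delta`; replaces the duplicated best_acc / best_loss /
    patience_count bookkeeping in adaboost.py and debug_ensemble.py."""

    def __init__(self, patience, delta=0.0):
        self.patience = patience
        self.delta = delta
        self.best_acc = None
        self.best_loss = None
        self.best_state = None
        self.count = 0

    def step(self, model, val_loss, val_acc):
        # Returns True once `patience` epochs have passed without improvement.
        if self.best_acc is None or val_acc > self.best_acc + self.delta:
            self.best_acc, self.best_loss = val_acc, val_loss
            # Keep a CPU copy of the best weights, as the original code does
            # via _state_dict(model, "cpu").
            self.best_state = {k: v.detach().cpu().clone()
                               for k, v in model.state_dict().items()}
            self.count = 0
            return False
        self.count += 1
        return self.count >= self.patience

Both epoch loops would then reduce to something like `if stopper.step(model, validation_loss, validation_accuracy) or epoch == self.config.max_epochs: break`, with the per-round logging kept in place.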



