trainers/simple.py [72:118]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Epoch loop: run one training pass per epoch, log timing/loss, and
    # optionally evaluate on a validation set when one was provided.
    for i_epoch in range(0, train_options.nepochs):
        start_time_epoch = time.time()
        # q1/q2 are returned by train_epoch but never used below.
        num_batches, avg_loss_epoch, q1, q2 = train_epoch(
            model=model,
            loss=loss,
            optimizer=optimizer,
            batch_processor=batch_processor,
            trainer_id=trainer_id,
            i_epoch=i_epoch,
            device=device,
            logging_options=logging_options,
            writer=writer,
            train_dataloader_batches=train_dataloader_batches,
            batch_size=batch_size,
            is_dataloader=is_train_dataloader,
        )

        # warning level is used (not info) so the timing survives default
        # log filtering — presumably intentional; confirm with log config.
        logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))


        # Reconstruct the summed loss from the per-epoch average so
        # log_train_info can re-derive its own average for printing.
        # NOTE(review): assumes every batch holds exactly batch_size samples;
        # a smaller final batch would make this total slightly off — confirm
        # against train_epoch's averaging convention.
        avg_loss_epoch = log_train_info(
            start_time=start_time_epoch,
            i_batch=num_batches,
            i_epoch=i_epoch,
            trainer_id=trainer_id,
            total_loss=avg_loss_epoch * num_batches * batch_size,
            num_batches=num_batches,
            batch_size=batch_size,
        )
        if writer is not None:
            # TensorBoard scalar keyed by epoch index.
            writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
        output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})


        # Validation is optional: only run evaluate() when a validation
        # dataloader exists, and attach its metrics to this epoch's record.
        if val_dataloader_batches is not None:
            avg_val_loss, _, _, avg_auc = evaluate(
                model=model,
                loss=loss,
                dataloader=val_dataloader_batches,
                batch_processor=batch_processor,
                device=device,
                batch_size=batch_size,
                is_dataloader=is_val_dataloader,
                i_epoch=i_epoch,
            )
            output[-1]["avg_val_loss"] = avg_val_loss
            output[-1]["roc_auc_score"] = avg_auc
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



trainers/simple_final.py [80:124]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Epoch loop: one training pass per epoch with timing/loss logging and
    # optional validation. Near-duplicate of the loop in trainers/simple.py
    # (per this clone report) — keep the two in sync when editing.
    for i_epoch in range(0, train_options.nepochs):
        start_time_epoch = time.time()
        # q1/q2 come back from train_epoch but are unused in this loop.
        num_batches, avg_loss_epoch, q1, q2 = train_epoch(
            model=model,
            loss=loss,
            optimizer=optimizer,
            batch_processor=batch_processor,
            trainer_id=trainer_id,
            i_epoch=i_epoch,
            device=device,
            logging_options=logging_options,
            writer=writer,
            train_dataloader_batches=train_dataloader_batches,
            batch_size=batch_size,
            is_dataloader=is_train_dataloader,
        )

        # warning level keeps the timing visible under default log filtering.
        logger.warning("Epoch:{}, Time for training: {}".format(i_epoch, time.time() - start_time_epoch))

        # Rebuild the summed loss from the average so log_train_info can
        # compute its own average.
        # NOTE(review): multiplying by batch_size assumes all batches are
        # full-sized; a short final batch makes the total inexact — confirm.
        avg_loss_epoch = log_train_info(
            start_time=start_time_epoch,
            i_batch=num_batches,
            i_epoch=i_epoch,
            trainer_id=trainer_id,
            total_loss=avg_loss_epoch * num_batches * batch_size,
            num_batches=num_batches,
            batch_size=batch_size,
        )
        if writer is not None:
            # TensorBoard scalar keyed by epoch index.
            writer.add_scalar("train_metric/loss_epoch", avg_loss_epoch, i_epoch)
        output.append({"i_epoch": i_epoch, "avg_train_loss": avg_loss_epoch})

        # Optional validation: run evaluate() and record its loss/AUC on the
        # epoch entry just appended to output.
        if val_dataloader_batches is not None:
            avg_val_loss, _, _, avg_auc = evaluate(
                model=model,
                loss=loss,
                dataloader=val_dataloader_batches,
                batch_processor=batch_processor,
                device=device,
                batch_size=batch_size,
                is_dataloader=is_val_dataloader,
                i_epoch=i_epoch,
            )
            output[-1]["avg_val_loss"] = avg_val_loss
            output[-1]["roc_auc_score"] = avg_auc
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



