trainers/simple.py [136:186]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if writer is not None:
        writer.close()
    if send_end:
        send_end.send(output)
    return output

def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
    # A local optimum is flagged when the run was improving up to the previous
    # epoch but the validation loss has now turned upward again.
    is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
    # The first epoch always counts as improving; afterwards, improving means
    # the validation loss decreased relative to the previous epoch.
    is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
    prev_avg_val_loss = avg_val_loss
    return is_local_optimal, is_improving, prev_avg_val_loss

def train_epoch(
    model,
    loss,
    optimizer,
    batch_processor,
    logging_options,
    device,
    trainer_id,
    i_epoch,
    lock=None,
    writer=None,
    train_dataloader_batches=None,
    batch_size=1024,
    is_dataloader=True,
):
    model.train()
    # Rolling accumulators for interval logging.
    start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
    # Parallel accumulators for the TensorBoard writer.
    start_time_tb, loss_val_tb, sample_weight_sum_tb = time.time(), 0.0, 0.0

    # Epoch-level accumulators.
    loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
        0.0,
        len(train_dataloader_batches),
        0.0,
    )

    # Profiling buffers and a high-resolution timer for the batch loop.
    q1, q2 = [], []
    qq3 = time.perf_counter()

    for i_batch, sample_batched in enumerate(train_dataloader_batches):
        if not is_dataloader and i_batch <= THRESHOLD:
            # Batches up to THRESHOLD (a module-level constant) arrive
            # already unpacked as (label, feats, weight) triples.
            label, feats, weight = sample_batched
        elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
            # After the first epoch, later batches are unpacked on demand
            # by the batch processor (reverse=1).
            label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
        else:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
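
A usage sketch for the shared _check_local_optimal helper above. The driver
loop, the example loss values, and the early-stopping policy are hypothetical
illustrations, not code from either file:

# Hypothetical driver loop for _check_local_optimal (not from the repo).
val_losses = [0.9, 0.7, 0.6, 0.65]  # example per-epoch validation losses
is_improving, prev_avg_val_loss = True, float("inf")
for i_epoch, avg_val_loss in enumerate(val_losses):
    is_local_optimal, is_improving, prev_avg_val_loss = _check_local_optimal(
        i_epoch, is_improving, avg_val_loss, prev_avg_val_loss
    )
    if is_local_optimal:
        # Fires at epoch 3: the loss rose (0.65 > 0.60) after improving.
        print(f"stopping early at epoch {i_epoch}")
        break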



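Both excerpts open with the tail of a training entry point that closes the
writer and, when a send_end connection is supplied, ships the result back over
a pipe. A minimal sketch of the parent-side wiring, assuming send_end is the
write end of a multiprocessing.Pipe and that the entry point (called train
here, a placeholder name) accepts it as a keyword argument:

# Hypothetical parent-side wiring for the send_end seen in both files.
import multiprocessing as mp

def train(send_end=None):
    output = {"avg_val_loss": 0.42}  # placeholder result
    if send_end:
        send_end.send(output)
    return output

if __name__ == "__main__":
    recv_end, send_end = mp.Pipe(duplex=False)
    p = mp.Process(target=train, kwargs={"send_end": send_end})
    p.start()
    output = recv_end.recv()  # blocks until the worker sends its result
    p.join()
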
trainers/simple_final.py [166:217]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if writer is not None:
        writer.close()
    if send_end:
        send_end.send(output)
    return output


def _check_local_optimal(i_epoch, is_improving, avg_val_loss, prev_avg_val_loss):
    # A local optimum is flagged when the run was improving up to the previous
    # epoch but the validation loss has now turned upward again.
    is_local_optimal = i_epoch > 0 and is_improving and avg_val_loss > prev_avg_val_loss
    # The first epoch always counts as improving; afterwards, improving means
    # the validation loss decreased relative to the previous epoch.
    is_improving = i_epoch == 0 or prev_avg_val_loss > avg_val_loss
    prev_avg_val_loss = avg_val_loss
    return is_local_optimal, is_improving, prev_avg_val_loss


def train_epoch(
        model,
        loss,
        optimizer,
        batch_processor,
        logging_options,
        device,
        trainer_id,
        i_epoch,
        lock=None,
        writer=None,
        train_dataloader_batches=None,
        batch_size=1024,
        is_dataloader=True,
):
    model.train()
    # Rolling accumulators for interval logging.
    start_time, loss_val, num_batches, sample_weight_sum = time.time(), 0.0, 0, 0.0
    # Parallel accumulators for the TensorBoard writer.
    start_time_tb, loss_val_tb, sample_weight_sum_tb = time.time(), 0.0, 0.0

    # Epoch-level accumulators.
    loss_val_epoch, total_num_batches, sample_weight_sum_epoch = (
        0.0,
        len(train_dataloader_batches),
        0.0,
    )

    # Profiling buffers and a high-resolution timer for the batch loop.
    q1, q2 = [], []
    qq3 = time.perf_counter()

    for i_batch, sample_batched in enumerate(train_dataloader_batches):
        if not is_dataloader and i_batch <= THRESHOLD:
            # Batches up to THRESHOLD (a module-level constant) arrive
            # already unpacked as (label, feats, weight) triples.
            label, feats, weight = sample_batched
        elif not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
            # After the first epoch, later batches are unpacked on demand
            # by the batch processor (reverse=1).
            label, feats, weight = batch_processor(mini_batch=sample_batched, reverse=1)
        else:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
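
Both files share the same three-way dispatch at the top of the batch loop. A
condensed sketch of that selection logic follows; THRESHOLD's value and the
batch_processor packing contract are assumptions here, and the dataloader
branch is left unimplemented because both excerpts truncate before it:

# Condensed sketch of the shared batch-dispatch logic (assumptions noted).
THRESHOLD = 100  # placeholder; the real constant lives in the modules

def unpack_batch(sample_batched, i_batch, i_epoch, is_dataloader, batch_processor):
    if not is_dataloader and i_batch <= THRESHOLD:
        # Early batches are kept as ready-made (label, feats, weight) triples.
        return sample_batched
    if not is_dataloader and i_batch > THRESHOLD and i_epoch > 0:
        # Later batches, after the first epoch, are unpacked on demand.
        return batch_processor(mini_batch=sample_batched, reverse=1)
    # Dataloader path: elided in both excerpts.
    raise NotImplementedError("dataloader branch truncated in the excerpts")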



