trainers/simple.py [190:261]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
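        # forward pass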
        z_pred = model(feats=feats)

        # backward pass
        E = apply_loss(loss, z_pred, label, weight)
        optimizer.zero_grad()
        E.backward()

        # Debug timing markers. With the synchronize() calls commented out,
        # perf_counter() captures host-side time only; GPU kernels still in
        # flight are not included in these intervals.
        qq1 = time.perf_counter()
        dd3 = qq1 - qq3  # time since qq3 was last recorded (in steady state:
        # data loading + forward + loss + backward)

        # torch.cuda.synchronize()  # wait for pending backward kernels to finish
        qq2 = time.perf_counter()

        optimizer.step()

        # torch.cuda.synchronize()  # wait for the optimizer step to finish
        qq3 = time.perf_counter()

        # E is assumed to be the mean loss over the batch; scaling by
        # batch_size recovers the batch sum so windows can be re-averaged.
        loss_val_batch = E.detach().cpu().numpy() * batch_size
        sample_weight_sum_batch = (
            batch_size if weight is None else torch.sum(weight).detach()
        )

        # Parallel accumulators for the three logging cadences: console
        # (loss_val / sample_weight_sum), TensorBoard (*_tb), and epoch
        # totals (*_epoch).
        num_batches += 1
        loss_val += loss_val_batch
        loss_val_tb += loss_val_batch
        loss_val_epoch += loss_val_batch
        sample_weight_sum += sample_weight_sum_batch
        sample_weight_sum_tb += sample_weight_sum_batch
        sample_weight_sum_epoch += sample_weight_sum_batch

        if need_to_log_batch(i_batch, logging_options, batch_size):
            log_train_info(
                i_batch=i_batch,
                i_epoch=i_epoch,
                trainer_id=trainer_id,
                start_time=start_time,
                total_loss=loss_val,
                num_batches=num_batches,
                sample_weight_sum=sample_weight_sum,
                batch_size=batch_size,
                lock=lock,
            )
            start_time, loss_val, num_batches, sample_weight_sum = (
                time.time(),
                0.0,
                0,
                0.0,
            )
        if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
            log_tb_info_batch(
                writer=writer,
                model=model,
                pred=z_pred,
                label=label,
                optimizer=optimizer,
                logging_options=logging_options,
                iter=total_num_batches * i_epoch + i_batch,
                start_time=start_time_tb,
                trainer_id=trainer_id,
                avg_loss=loss_val_tb / sample_weight_sum_tb,
                lock=lock,
            )
            start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
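Both excerpts bracket the backward/optimizer phases with time.perf_counter() and leave torch.cuda.synchronize() commented out, so the qq* timestamps capture host-side time only. If GPU execution time is what these markers are after, CUDA events are the usual alternative. A minimal sketch, assuming a CUDA device and a mean-reduced loss_fn; the helper time_training_step and its signature are illustrative, not part of these trainers:

import torch

def time_training_step(model, optimizer, loss_fn, feats, label):
    # Events are recorded on the GPU stream, so they measure kernel
    # execution time rather than host-side launch overhead.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    start.record()
    pred = model(feats=feats)
    loss = loss_fn(pred, label)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    end.record()

    torch.cuda.synchronize()  # a single sync at the end of the step suffices
    return start.elapsed_time(end) / 1e3  # elapsed_time() is in milliseconds

This replaces the per-marker synchronize calls with one synchronization per step, which keeps the measurement accurate without stalling the host at every timestamp.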



trainers/simple_final.py [225:289]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
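        # forward pass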
        z_pred = model(feats=feats)

        # backward pass
        E = apply_loss(loss, z_pred, label, weight)
        optimizer.zero_grad()
        E.backward()

        # Debug timing markers. With the synchronize() calls commented out,
        # perf_counter() captures host-side time only; GPU kernels still in
        # flight are not included in these intervals.
        qq1 = time.perf_counter()
        dd3 = qq1 - qq3  # time since qq3 was last recorded (in steady state:
        # data loading + forward + loss + backward)

        # torch.cuda.synchronize()  # wait for pending backward kernels to finish
        qq2 = time.perf_counter()

        optimizer.step()

        # torch.cuda.synchronize()  # wait for the optimizer step to finish
        qq3 = time.perf_counter()

        # E is assumed to be the mean loss over the batch; scaling by
        # batch_size recovers the batch sum so windows can be re-averaged.
        loss_val_batch = E.detach().cpu().numpy() * batch_size
        sample_weight_sum_batch = (
            batch_size if weight is None else torch.sum(weight).detach()
        )

        # Parallel accumulators for the three logging cadences: console
        # (loss_val / sample_weight_sum), TensorBoard (*_tb), and epoch
        # totals (*_epoch).
        num_batches += 1
        loss_val += loss_val_batch
        loss_val_tb += loss_val_batch
        loss_val_epoch += loss_val_batch
        sample_weight_sum += sample_weight_sum_batch
        sample_weight_sum_tb += sample_weight_sum_batch
        sample_weight_sum_epoch += sample_weight_sum_batch

        if need_to_log_batch(i_batch, logging_options, batch_size):
            log_train_info(
                i_batch=i_batch,
                i_epoch=i_epoch,
                trainer_id=trainer_id,
                start_time=start_time,
                total_loss=loss_val,
                num_batches=num_batches,
                sample_weight_sum=sample_weight_sum,
                batch_size=batch_size,
                lock=lock,
            )
            start_time, loss_val, num_batches, sample_weight_sum = (
                time.time(),
                0.0,
                0,
                0.0,
            )
        if writer is not None and need_to_log_tb(i_batch, logging_options, batch_size):
            log_tb_info_batch(
                writer=writer,
                model=model,
                pred=z_pred,
                label=label,
                optimizer=optimizer,
                logging_options=logging_options,
                iter=total_num_batches * i_epoch + i_batch,
                start_time=start_time_tb,
                trainer_id=trainer_id,
                avg_loss=loss_val_tb / sample_weight_sum_tb,
                lock=lock,
            )
            start_time_tb, loss_val_tb, sample_weight_sum_tb = (time.time(), 0.0, 0.0)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
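The loss_val* / sample_weight_sum* triples in both files implement the same pattern: accumulate a weighted loss sum and a weight sum per logging cadence, report sum / weight_sum, then reset at the logging boundary. A sketch of that bookkeeping factored into one helper; the class name RunningWeightedLoss is hypothetical, not part of either trainer:

class RunningWeightedLoss:
    """Running weighted average: sum(batch loss sums) / sum(batch weights)."""

    def __init__(self):
        self.loss_sum = 0.0
        self.weight_sum = 0.0

    def update(self, batch_loss_sum, batch_weight_sum):
        self.loss_sum += float(batch_loss_sum)
        self.weight_sum += float(batch_weight_sum)

    def average(self):
        # Mirrors loss_val_tb / sample_weight_sum_tb in the trainers above.
        return self.loss_sum / self.weight_sum if self.weight_sum else 0.0

    def reset(self):
        self.loss_sum, self.weight_sum = 0.0, 0.0

# Usage per batch: meter.update(loss_val_batch, sample_weight_sum_batch);
# at a logging boundary: meter.average(), then meter.reset().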



