src/baselines/dnn.py [650:765]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            }
            outputs = model(**inputs)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                # The loss of each batch is divided by gradient_accumulation_steps
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            epoch_iterator.set_description("loss {}".format(round(loss.item(), 5)))

            tr_loss += loss.item()
            end_time = time.time()
            use_time += (end_time - begin_time)
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # After accumulating gradient_accumulation_steps batches: clip gradients, step the optimizer and scheduler, then clear the gradients
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # evaluate every logging_steps steps
                update_flag = False
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        result = evaluate(args, model, dev_dataset, prefix="checkpoint-{}".format(global_step), log_fp=log_fp)
                        # update_flag = False
                        for key, value in result.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value
                            if key == max_metric_type:
                                if max_metric_value < value:
                                    max_metric_value = value
                                    update_flag = True
                                    last_max_metric_global_step = cur_max_metric_global_step
                                    cur_max_metric_global_step = global_step
                        logs["update_flag"] = update_flag
                        if update_flag:
                            max_metric_model_info.update({"epoch": epoch + 1, "global_step": global_step})
                            max_metric_model_info.update(logs)
                        _, _, test_result = predict(args, model, test_dataset, "checkpoint-{}".format(global_step), log_fp=log_fp)
                        for key, value in test_result.items():
                            eval_key = "test_{}".format(key)
                            logs[eval_key] = value
                    avg_iter_time = round(use_time / (args.gradient_accumulation_steps * args.logging_steps), 2)
                    logger.info("avg time per batch(s): %f\n" % avg_iter_time)
                    log_fp.write("avg time per batch (s): %f\n" % avg_iter_time)
                    use_time = 0
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logs["epoch"] = epoch + 1
                    logging_loss = tr_loss

                    for key, value in logs.items():
                        if isinstance(value, dict):
                            for key1, value1 in value.items():
                                tb_writer.add_scalar(key + "_" + key1, value1, global_step)
                        else:
                            tb_writer.add_scalar(key, value, global_step)

                    logger.info(json.dumps({**logs, **{"step": global_step}}, ensure_ascii=False))
                    log_fp.write(json.dumps({**logs, **{"step": global_step}}, ensure_ascii=False) + "\n")
                    log_fp.write("##############################\n")
                    log_fp.flush()
                # save a checkpoint every save_steps steps
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    print("save dir: ", output_dir)
                    if args.save_all:
                        save_check_point(args, model, output_dir)
                    elif update_flag:
                        if args.delete_old:
                            # delete the old checkpoints
                            filename_list = os.listdir(args.output_dir)
                            for filename in filename_list:
                                if "checkpoint-" in filename and filename != "checkpoint-{}".format(global_step):
                                    shutil.rmtree(os.path.join(args.output_dir, filename))
                        save_check_point(args, model, output_dir)
            if 0 < args.max_steps < global_step:
                epoch_iterator.close()
                break
        real_epoch = epoch + 1
        if 0 < args.max_steps < global_step:
            train_iterator.close()
            break
    run_end_time = time.time()
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    log_fp.write(json.dumps(max_metric_model_info, ensure_ascii=False) + "\n")
    log_fp.write("##############################\n")
    avg_time_per_epoch = round((run_end_time - run_begin_time)/real_epoch, 2)
    logger.info("Avg time per epoch(s, %d epoch): %f\n" %(real_epoch, avg_time_per_epoch))
    log_fp.write("Avg time per epoch(s, %d epoch): %f\n" %(real_epoch, avg_time_per_epoch))

    return global_step, tr_loss / global_step, max_metric_model_info


def save_check_point(args, model, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
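
A note on the pattern in the excerpt above (and its copy in src/deep_baselines/run.py below): the loop divides each batch loss by gradient_accumulation_steps, accumulates gradients for that many batches, then clips, steps the optimizer and LR scheduler, and zeros the gradients. A minimal standalone sketch of that pattern with a toy model and synthetic data (the model, dataset, and hyperparameter values here are illustrative, not taken from the repository):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# Toy model and data, purely for illustration.
model = nn.Linear(16, 2)
data = TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,)))
loader = DataLoader(data, batch_size=8)

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, total_iters=8)
criterion = nn.CrossEntropyLoss()

accumulation_steps = 4   # plays the role of args.gradient_accumulation_steps
max_grad_norm = 1.0      # plays the role of args.max_grad_norm

model.zero_grad()
for step, (x, y) in enumerate(loader):
    loss = criterion(model(x), y)
    # Scale the loss so the accumulated gradients match one large batch.
    (loss / accumulation_steps).backward()

    if (step + 1) % accumulation_steps == 0:
        # Clip, step the optimizer and LR schedule, then clear gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()
        model.zero_grad()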



src/deep_baselines/run.py [608:723]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            }
            outputs = model(**inputs)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                # The loss of each batch is divided by gradient_accumulation_steps
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            epoch_iterator.set_description("loss {}".format(round(loss.item(), 5)))

            tr_loss += loss.item()
            end_time = time.time()
            use_time += (end_time - begin_time)
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # After accumulating gradient_accumulation_steps batches: clip gradients, step the optimizer and scheduler, then clear the gradients
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # evaluate every logging_steps steps
                update_flag = False
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        result = evaluate(args, model, dev_dataset, prefix="checkpoint-{}".format(global_step), log_fp=log_fp)
                        # update_flag = False
                        for key, value in result.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value
                            if key == max_metric_type:
                                if max_metric_value < value:
                                    max_metric_value = value
                                    update_flag = True
                                    last_max_metric_global_step = cur_max_metric_global_step
                                    cur_max_metric_global_step = global_step
                        logs["update_flag"] = update_flag
                        if update_flag:
                            max_metric_model_info.update({"epoch": epoch + 1, "global_step": global_step})
                            max_metric_model_info.update(logs)
                        _, _, test_result = predict(args, model, test_dataset, "checkpoint-{}".format(global_step), log_fp=log_fp)
                        for key, value in test_result.items():
                            eval_key = "test_{}".format(key)
                            logs[eval_key] = value
                    avg_iter_time = round(use_time / (args.gradient_accumulation_steps * args.logging_steps), 2)
                    logger.info("avg time per batch(s): %f\n" % avg_iter_time)
                    log_fp.write("avg time per batch (s): %f\n" % avg_iter_time)
                    use_time = 0
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logs["epoch"] = epoch + 1
                    logging_loss = tr_loss

                    for key, value in logs.items():
                        if isinstance(value, dict):
                            for key1, value1 in value.items():
                                tb_writer.add_scalar(key + "_" + key1, value1, global_step)
                        else:
                            tb_writer.add_scalar(key, value, global_step)

                    logger.info(json.dumps({**logs, **{"step": global_step}}, ensure_ascii=False))
                    log_fp.write(json.dumps({**logs, **{"step": global_step}}, ensure_ascii=False) + "\n")
                    log_fp.write("##############################\n")
                    log_fp.flush()
                # save a checkpoint every save_steps steps
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    print("save dir: ", output_dir)
                    if args.save_all:
                        save_check_point(args, model, output_dir)
                    elif update_flag:
                        if args.delete_old:
                            # delete the old checkpoints
                            filename_list = os.listdir(args.output_dir)
                            for filename in filename_list:
                                if "checkpoint-" in filename and filename != "checkpoint-{}".format(global_step):
                                    shutil.rmtree(os.path.join(args.output_dir, filename))
                        save_check_point(args, model, output_dir)
            if 0 < args.max_steps < global_step:
                epoch_iterator.close()
                break
        real_epoch = epoch + 1
        if 0 < args.max_steps < global_step:
            train_iterator.close()
            break
    run_end_time = time.time()
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    log_fp.write(json.dumps(max_metric_model_info, ensure_ascii=False) + "\n")
    log_fp.write("##############################\n")
    avg_time_per_epoch = round((run_end_time - run_begin_time)/real_epoch, 2)
    logger.info("Avg time per epoch(s, %d epoch): %f\n" %(real_epoch, avg_time_per_epoch))
    log_fp.write("Avg time per epoch(s, %d epoch): %f\n" %(real_epoch, avg_time_per_epoch))

    return global_step, tr_loss / global_step, max_metric_model_info


def save_check_point(args, model, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    model_to_save = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
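
Both excerpts keep only the current best checkpoint when args.delete_old is set: before saving, every "checkpoint-*" directory other than the one being written is removed with shutil.rmtree. A hedged, standalone sketch of that cleanup step (the function name keep_only_checkpoint and its signature are illustrative, not part of either file):

import os
import shutil

def keep_only_checkpoint(output_dir, keep):
    # Remove every checkpoint-* subdirectory of output_dir except `keep`,
    # assuming checkpoints are directories named "checkpoint-<global_step>".
    for filename in os.listdir(output_dir):
        path = os.path.join(output_dir, filename)
        if filename.startswith("checkpoint-") and filename != keep and os.path.isdir(path):
            shutil.rmtree(path)

# e.g. keep_only_checkpoint(args.output_dir, "checkpoint-{}".format(global_step))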



