src/baselines/dnn.py [541:647]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        model.to(args.device)
        model.eval()
        pred, true, result = predict(args, model, test_dataset, prefix=prefix, log_fp=log_fp)
        result = dict(("evaluation_" + k + "_{}".format(global_step), v) for k, v in result.items())
        logger.info(json.dumps(result, ensure_ascii=False))
        log_fp.write(json.dumps(result, ensure_ascii=False) + "\n")
    log_fp.close()


def trainer(args, model, train_dataset, dev_dataset, test_dataset, log_fp=None):
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter(log_dir=args.tb_log_dir)
    if log_fp is None:
        log_fp = open(os.path.join(args.log_dir, "logs.txt"), "w")
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)

    # NOTE: multi-GPU (DataParallel) and distributed (DDP) wrapping is deferred
    # until after the optional apex fp16 initialization below, so the model is
    # wrapped exactly once.

    train_dataset_total_num = len(train_dataset)
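    # Shuffle with RandomSampler in single-process runs; under distributed
    # training, DistributedSampler gives each rank its own shard of the data.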
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    train_batch_total_num = len(train_dataloader)
    print("Train dataset len: %d, batch num: %d" % (train_dataset_total_num, train_batch_total_num))

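    # t_total is the total number of optimizer updates: args.max_steps (if > 0)
    # takes precedence and the epoch count is derived from it; otherwise it is
    # updates-per-epoch (batches // gradient_accumulation_steps) * num_train_epochs.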
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (train_batch_total_num // args.gradient_accumulation_steps) + 1
    else:
        t_total = train_batch_total_num // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
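    # Parameters whose names contain any of the strings above (biases and
    # LayerNorm weights) are exempt from weight decay.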
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train
    log_fp.write("***** Running training *****\n")
    logger.info("***** Running training *****")
    log_fp.write("Train Dataset Num examples = %d\n" % train_dataset_total_num)
    logger.info("Train Dataset  Num examples = %d", train_dataset_total_num)
    log_fp.write("Train Dataset Num Epochs = %d\n" % args.num_train_epochs)
    logger.info("Train Dataset Num Epochs = %d", args.num_train_epochs)
    log_fp.write("Train Dataset Instantaneous batch size per GPU = %d\n" % args.per_gpu_train_batch_size)
    logger.info("Train Dataset Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    log_fp.write("Train Dataset Total train batch size (w. parallel, distributed & accumulation) = %d\n" % (args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
    logger.info("Train Dataset Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    log_fp.write("Train Dataset Gradient Accumulation steps = %d\n" % args.gradient_accumulation_steps)
    logger.info("Train Dataset Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    log_fp.write("Train Dataset Total optimization steps = %d\n" % t_total)
    logger.info("Train Dataset Total optimization steps = %d", t_total)
    log_fp.write("#" * 50 + "\n")
    log_fp.flush()

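    # Training-loop bookkeeping: optimizer-update counter and running loss
    # totals used for periodic logging.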
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between Python 2 and 3)

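    # Track the best value of the validation metric named by args.max_metric_type,
    # the global step at which it was reached, and wall-clock timing.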
    max_metric_type = args.max_metric_type
    max_metric_value = 0
    max_metric_model_info = {}
    last_max_metric_global_step = None
    cur_max_metric_global_step = None
    use_time = 0
    run_begin_time = time.time()
    real_epoch = 0

    for epoch in train_iterator:
        if args.tfrecords:
            epoch_iterator = tqdm(train_dataloader, total=train_batch_total_num, desc="Iteration", disable=args.local_rank not in [-1, 0])
        else:
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            begin_time = time.time()
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



src/deep_baselines/run.py [498:604]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        model.to(args.device)
        model.eval()
        pred, true, result = predict(args, model, test_dataset, prefix=prefix, log_fp=log_fp)
        result = dict(("evaluation_" + k + "_{}".format(global_step), v) for k, v in result.items())
        logger.info(json.dumps(result, ensure_ascii=False))
        log_fp.write(json.dumps(result, ensure_ascii=False) + "\n")
    log_fp.close()


def trainer(args, model, train_dataset, dev_dataset, test_dataset, log_fp=None):
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter(log_dir=args.tb_log_dir)
    if log_fp is None:
        log_fp = open(os.path.join(args.log_dir, "logs.txt"), "w")
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)

    # NOTE: multi-GPU (DataParallel) and distributed (DDP) wrapping is deferred
    # until after the optional apex fp16 initialization below, so the model is
    # wrapped exactly once.

    train_dataset_total_num = len(train_dataset)
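    # Shuffle with RandomSampler in single-process runs; under distributed
    # training, DistributedSampler gives each rank its own shard of the data.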
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    train_batch_total_num = len(train_dataloader)
    print("Train dataset len: %d, batch num: %d" % (train_dataset_total_num, train_batch_total_num))

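    # t_total is the total number of optimizer updates: args.max_steps (if > 0)
    # takes precedence and the epoch count is derived from it; otherwise it is
    # updates-per-epoch (batches // gradient_accumulation_steps) * num_train_epochs.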
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (train_batch_total_num // args.gradient_accumulation_steps) + 1
    else:
        t_total = train_batch_total_num // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
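    # Parameters whose names contain any of the strings above (biases and
    # LayerNorm weights) are exempt from weight decay.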
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train
    log_fp.write("***** Running training *****\n")
    logger.info("***** Running training *****")
    log_fp.write("Train Dataset Num examples = %d\n" % train_dataset_total_num)
    logger.info("Train Dataset  Num examples = %d", train_dataset_total_num)
    log_fp.write("Train Dataset Num Epochs = %d\n" % args.num_train_epochs)
    logger.info("Train Dataset Num Epochs = %d", args.num_train_epochs)
    log_fp.write("Train Dataset Instantaneous batch size per GPU = %d\n" % args.per_gpu_train_batch_size)
    logger.info("Train Dataset Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    log_fp.write("Train Dataset Total train batch size (w. parallel, distributed & accumulation) = %d\n" % (args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
    logger.info("Train Dataset Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    log_fp.write("Train Dataset Gradient Accumulation steps = %d\n" % args.gradient_accumulation_steps)
    logger.info("Train Dataset Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    log_fp.write("Train Dataset Total optimization steps = %d\n" % t_total)
    logger.info("Train Dataset Total optimization steps = %d", t_total)
    log_fp.write("#" * 50 + "\n")
    log_fp.flush()

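    # Training-loop bookkeeping: optimizer-update counter and running loss
    # totals used for periodic logging.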
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between Python 2 and 3)

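    # Track the best value of the validation metric named by args.max_metric_type,
    # the global step at which it was reached, and wall-clock timing.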
    max_metric_type = args.max_metric_type
    max_metric_value = 0
    max_metric_model_info = {}
    last_max_metric_global_step = None
    cur_max_metric_global_step = None
    use_time = 0
    run_begin_time = time.time()
    real_epoch = 0

    for epoch in train_iterator:
        if args.tfrecords:
            epoch_iterator = tqdm(train_dataloader, total=train_batch_total_num, desc="Iteration", disable=args.local_rank not in [-1, 0])
        else:
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            begin_time = time.time()
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



