in code/run_mrqa.py [0:0]
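# Fine-tunes and/or evaluates a BERT-style span-extraction QA model on MRQA-format data,
# controlled by args.do_train / args.do_eval.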
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu: {}, 16-bits training: {}".format(
        device, n_gpu, args.fp16))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = \
        args.train_batch_size // args.gradient_accumulation_steps
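    # Validate the remaining arguments and set up the output directory and file logging.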
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if args.do_train:
        assert (args.train_file is not None) and (args.dev_file is not None)
    if args.eval_test:
        assert args.test_file is not None
    else:
        assert args.dev_file is not None
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if args.do_train:
        logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, "train.log"), 'w'))
    else:
        logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, "eval.log"), 'w'))
    logger.info(args)
    tokenizer = BertTokenizer.from_pretrained(
        args.model, do_lower_case=args.do_lower_case)
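    # Load the dev set up front whenever it will be needed, i.e. for periodic evaluation
    # during training or for a final dev-set evaluation when eval_test is off.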
    if args.do_train or (not args.eval_test):
        with gzip.GzipFile(args.dev_file, 'r') as reader:
            content = reader.read().decode('utf-8').strip().split('\n')[1:]
            eval_dataset = [json.loads(line) for line in content]
        eval_examples = read_mrqa_examples(
            input_file=args.dev_file, is_training=False)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=False)
        logger.info("***** Dev *****")
        logger.info(" Num orig examples = %d", len(eval_examples))
        logger.info(" Num split examples = %d", len(eval_features))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
        eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
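    # Build training features once and materialize every batch in memory so the same
    # batch order can be reused for each candidate learning rate below.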
    if args.do_train:
        train_examples = read_mrqa_examples(
            input_file=args.train_file, is_training=True)
        train_features = convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=True)
        if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':
            train_features = sorted(train_features, key=lambda f: np.sum(f.input_mask))
        else:
            random.shuffle(train_features)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                   all_start_positions, all_end_positions)
        train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size)
        train_batches = [batch for batch in train_dataloader]
        num_train_optimization_steps = \
            len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        logger.info("***** Train *****")
        logger.info(" Num orig examples = %d", len(train_examples))
        logger.info(" Num split examples = %d", len(train_features))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_optimization_steps)
        eval_step = max(1, len(train_batches) // args.eval_per_epoch)
        best_result = None
        lrs = [args.learning_rate] if args.learning_rate else [1e-6, 2e-6, 3e-6, 5e-6, 1e-5, 2e-5, 3e-5, 5e-5]
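        # Train one model per learning rate: if args.learning_rate is set, only that value
        # is used, otherwise a small grid of learning rates is searched.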
        for lr in lrs:
            model = BertForQuestionAnswering.from_pretrained(
                args.model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE)
            if args.fp16:
                model.half()
            model.to(device)
            if n_gpu > 1:
                model = torch.nn.DataParallel(model)
            param_optimizer = list(model.named_parameters())
            param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer
                            if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer
                            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
            if args.fp16:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
                                      "to use distributed and fp16 training.")
                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=lr,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if args.loss_scale == 0:
                    optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
                else:
                    optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            else:
                optimizer = BertAdam(optimizer_grouped_parameters,
                                     lr=lr,
                                     warmup=args.warmup_proportion,
                                     t_total=num_train_optimization_steps)
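            # Training loop: gradients are accumulated over gradient_accumulation_steps batches
            # before each optimizer step; in fp16 mode the learning rate warmup is applied manually.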
            tr_loss = 0
            nb_tr_examples = 0
            nb_tr_steps = 0
            global_step = 0
            start_time = time.time()
            for epoch in range(int(args.num_train_epochs)):
                model.train()
                logger.info("Start epoch #{} (lr = {})...".format(epoch, lr))
                for step, batch in enumerate(train_batches):
                    if n_gpu == 1:
                        batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, start_positions, end_positions = batch
                    loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
                    if n_gpu > 1:
                        loss = loss.mean()
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps
                    tr_loss += loss.item()
                    nb_tr_examples += input_ids.size(0)
                    nb_tr_steps += 1
                    if args.fp16:
                        optimizer.backward(loss)
                    else:
                        loss.backward()
                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        if args.fp16:
                            lr_this_step = lr * \
                                warmup_linear(global_step / num_train_optimization_steps, args.warmup_proportion)
                            for param_group in optimizer.param_groups:
                                param_group['lr'] = lr_this_step
                        optimizer.step()
                        optimizer.zero_grad()
                        global_step += 1
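                    # Every eval_step batches: log progress, optionally evaluate on the dev set,
                    # and checkpoint the model (always when do_eval is off, otherwise only on a new best score).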
                    if (step + 1) % eval_step == 0:
                        logger.info('Epoch: {}, Step: {} / {}, used_time = {:.2f}s, loss = {:.6f}'.format(
                            epoch, step + 1, len(train_dataloader), time.time() - start_time, tr_loss / nb_tr_steps))
                        save_model = False
                        if args.do_eval:
                            result, _, _ = \
                                evaluate(args, model, device, eval_dataset,
                                         eval_dataloader, eval_examples, eval_features)
                            model.train()
                            result['global_step'] = global_step
                            result['epoch'] = epoch
                            result['learning_rate'] = lr
                            result['batch_size'] = args.train_batch_size
                            if (best_result is None) or (result[args.eval_metric] > best_result[args.eval_metric]):
                                best_result = result
                                save_model = True
                                logger.info("!!! Best dev %s (lr=%s, epoch=%d): %.2f" %
                                            (args.eval_metric, str(lr), epoch, result[args.eval_metric]))
                        else:
                            save_model = True
                        if save_model:
                            model_to_save = model.module if hasattr(model, 'module') else model
                            output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
                            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                            torch.save(model_to_save.state_dict(), output_model_file)
                            model_to_save.config.to_json_file(output_config_file)
                            tokenizer.save_vocabulary(args.output_dir)
                            if best_result:
                                with open(os.path.join(args.output_dir, EVAL_FILE), "w") as writer:
                                    for key in sorted(best_result.keys()):
                                        writer.write("%s = %s\n" % (key, str(best_result[key])))
    if args.do_eval:
        if args.eval_test:
            with gzip.GzipFile(args.test_file, 'r') as reader:
                content = reader.read().decode('utf-8').strip().split('\n')[1:]
                eval_dataset = [json.loads(line) for line in content]
            eval_examples = read_mrqa_examples(
                input_file=args.test_file, is_training=False)
            eval_features = convert_examples_to_features(
                examples=eval_examples,
                tokenizer=tokenizer,
                max_seq_length=args.max_seq_length,
                doc_stride=args.doc_stride,
                max_query_length=args.max_query_length,
                is_training=False)
            logger.info("***** Test *****")
            logger.info(" Num orig examples = %d", len(eval_examples))
            logger.info(" Num split examples = %d", len(eval_features))
            logger.info(" Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
            eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)
        model = BertForQuestionAnswering.from_pretrained(args.output_dir)
        if args.fp16:
            model.half()
        model.to(device)
        result, preds, nbest_preds = \
            evaluate(args, model, device, eval_dataset,
                     eval_dataloader, eval_examples, eval_features)
        with open(os.path.join(args.output_dir, PRED_FILE), "w") as writer:
            writer.write(json.dumps(preds, indent=4) + "\n")
        with open(os.path.join(args.output_dir, TEST_FILE), "w") as writer:
            for key in sorted(result.keys()):
                writer.write("%s = %s\n" % (key, str(result[key])))