self-training-text-classification/finetuning.py
def finetune(accelerator, model_name_or_path, train_file, output_dir, **kwargs):
    """Fine-tuning a pre-trained model on a downstream task.

    Args:
        accelerator: An instance of an accelerator for distributed training (on
            multi-GPU, TPU) or mixed precision training.
        model_name_or_path: Path to a pretrained model or a model identifier from
            huggingface.co/models.
        train_file: A csv or a json file containing the training data.
        output_dir: The output directory where the model predictions and checkpoints
            will be written.
        **kwargs: Dictionary of key/value pairs with which to update the
            configuration object after loading. The values in kwargs for any keys
            that are configuration attributes will be used to override the loaded
            values.
    """
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Set up logging: we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    model_args = FTModelArguments(model_name_or_path=model_name_or_path)
    data_args = FTDataArguments(train_file=train_file)
    training_args = FTTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
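    # Only keys that already exist on the merged namespace (i.e. attributes of
    # FTModelArguments, FTDataArguments, or FTTrainingArguments) are overridden;
    # unrecognized kwargs are silently ignored.
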
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data as we always run training.
    args.do_train = True
    assert args.train_file is not None
    data_files[Split.TRAIN.value] = args.train_file

    if args.do_eval or args.eval_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files[Split.EVAL.value] = args.eval_file

    if args.do_eval and args.test_file is not None:
        data_files[Split.TEST.value] = args.test_file

    if args.do_predict:
        assert args.infer_file is not None
        data_files[Split.INFER.value] = args.infer_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
    # Handle the output directory creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # You need to provide your CSV/JSON data files.
    #
    # For CSV/JSON files, this script will use as labels the column called 'label'
    # and as pair of sentences the sentences in columns called 'sentence1' and
    # 'sentence2' if these columns exist or the first two columns not named
    # 'label' if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single
    # sentence classification on this single column.
    #
    # In distributed training, the load_dataset function guarantees that only one
    # local process can download the dataset.

    # Loading the dataset from local csv or json files.
    raw_datasets = load_dataset(args.data_file_extension, data_files=data_files)
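    # `load_dataset` returns a DatasetDict keyed by the split names used in
    # `data_files`, so each split declared above becomes its own Dataset.
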
    # Labels
    is_regression = raw_datasets[Split.TRAIN.value].features["label"].dtype in ["float32", "float64"]
    args.is_regression = is_regression

    if args.is_regression:
        label_list = None
        num_labels = 1
    else:
        label_list = args.label_list
        assert label_list is not None
        label_list.sort()  # Let's sort it for determinism
        num_labels = len(label_list)
    args.num_labels = num_labels

    # Load pre-trained model
    config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path)

    # Preprocessing the datasets
    non_label_column_names = [name for name in raw_datasets[Split.TRAIN.value].column_names if name != "label"]
    if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
        sentence1_key, sentence2_key = "sentence1", "sentence2"
    else:
        if len(non_label_column_names) >= 2:
            sentence1_key, sentence2_key = non_label_column_names[:2]
        else:
            sentence1_key, sentence2_key = non_label_column_names[0], None

    label_to_id = {v: i for i, v in enumerate(label_list)}
    config.label2id = label_to_id
    config.id2label = {id: label for label, id in config.label2id.items()}
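    # Storing label2id/id2label on the config means the label mapping is saved
    # alongside the model and restored whenever a checkpoint is reloaded.
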
    padding = "max_length" if args.pad_to_max_length else False

    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)

        if "label" in examples:
            if label_to_id is not None:
                # Map labels to IDs (not necessary for GLUE tasks)
                result["labels"] = [label_to_id[l] for l in examples["label"]]
            else:
                # In all cases, rename the column to labels because the model will
                # expect that.
                result["labels"] = examples["label"]
        return result
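    # Illustration (hypothetical batch): {"sentence1": ["a fine film"], "label": ["positive"]}
    # would yield the tokenizer outputs (input_ids, attention_mask, ...) plus a
    # "labels" list of integer ids obtained through `label_to_id`.
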
    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(
            preprocess_function,
            batched=True,
            remove_columns=raw_datasets[Split.TRAIN.value].column_names,
            desc="Running tokenizer on dataset",
        )

    num_examples = {}
    splits = [s.value for s in Split]
    for split in splits:
        if split in processed_datasets:
            num_examples[split] = len(processed_datasets[split])
    args.num_examples = num_examples

    train_dataset = processed_datasets[Split.TRAIN.value]
    eval_dataset = processed_datasets[Split.EVAL.value] if Split.EVAL.value in processed_datasets else None
    test_dataset = processed_datasets[Split.TEST.value] if Split.TEST.value in processed_datasets else None
    infer_dataset = processed_datasets[Split.INFER.value] if Split.INFER.value in processed_datasets else None

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info("Sample %d of the training set: %s.", index, train_dataset[index])

    # DataLoaders creation:
    if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data
        # collator that will just convert everything to tensors.
        data_collator = default_data_collator
    else:
        # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by
        # padding to the maximum length of the samples passed). When using mixed
        # precision, we add `pad_to_multiple_of=8` to pad all tensors to a multiple
        # of 8, which enables the use of Tensor Cores on NVIDIA hardware with
        # compute capability >= 7.5 (Volta).
        # For fp8, we pad to a multiple of 16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=pad_to_multiple_of)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=args.per_device_train_batch_size,
        shuffle=True,
        collate_fn=data_collator,
    )
    eval_dataloader, test_dataloader, infer_dataloader = None, None, None
    if eval_dataset is not None:
        eval_dataloader = DataLoader(
            eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator
        )
    if test_dataset is not None:
        test_dataloader = DataLoader(
            test_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator
        )
    if infer_dataset is not None:
        infer_dataloader = DataLoader(
            infer_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator
        )

    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
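    # Bias and LayerNorm weights are conventionally exempted from weight decay,
    # since decaying them provides no meaningful regularization benefit.
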
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader
    )

    # Note: the training dataloader needs to be prepared before we grab its
    # length below (because its length will be shorter in a multi-process setup).

    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_steps == -1:
        args.max_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_steps / num_update_steps_per_epoch)
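    # Example (illustrative numbers): 1,000 batches per process with
    # gradient_accumulation_steps=2 gives 500 update steps per epoch, so
    # 3 epochs correspond to max_steps = 1,500.
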
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=args.max_steps,
    )

    # Train
    completed_steps, avg_train_loss = train(
        args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader
    )
    accelerator.wait_for_everyone()
    logger.info("Training job completed: completed_steps = %d, avg_train_loss = %f", completed_steps, avg_train_loss)

    args.model_name_or_path = os.path.join(args.output_dir, "best-checkpoint")
    logger.info("Loading the best checkpoint: %s", args.model_name_or_path)
    config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path)
    model = accelerator.prepare(model)
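    # The reloaded best checkpoint is passed through `accelerator.prepare` again
    # so evaluation runs with the same device placement and precision setup as training.
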
    if args.do_eval:
        # Evaluate
        if eval_dataloader is not None:
            logger.info("***** Running evaluation on the eval data using the best checkpoint *****")
            eval_results = evaluate(args, accelerator, eval_dataloader, Split.EVAL.value, model, "best-checkpoint")
            avg_eval_loss = eval_results["avg_eval_loss"]
            eval_metric = eval_results[args.eval_metric]
            logger.info("Evaluation job completed: avg_eval_loss = %f", avg_eval_loss)
            logger.info("Evaluation result for the best checkpoint: %s = %f", args.eval_metric, eval_metric)

        if test_dataloader is not None:
            logger.info("***** Running evaluation on the test data using the best checkpoint *****")
            eval_results = evaluate(args, accelerator, test_dataloader, Split.TEST.value, model, "best-checkpoint")
            avg_eval_loss = eval_results["avg_eval_loss"]
            eval_metric = eval_results[args.eval_metric]
            logger.info("Test job completed: avg_test_loss = %f", avg_eval_loss)
            logger.info("Test result for the best checkpoint: %s = %f", args.eval_metric, eval_metric)

    if args.do_predict:
        # Predict
        if infer_dataloader is not None:
            logger.info("***** Running inference using the best checkpoint *****")
            evaluate(
                args, accelerator, infer_dataloader, Split.INFER.value, model, "best-checkpoint", has_labels=False
            )
            logger.info("Inference job completed.")

    # Release all references to the internal objects stored and call the garbage
    # collector. You should call this method between two trainings with different
    # models/optimizers.
    accelerator.free_memory()