in training/run_eval.py [0:0]
def main():
# 1. Parse input arguments
# See all possible arguments in the DataTrainingArguments dataclass,
# or by passing the --help flag to this script.
parser = HfArgumentParser([DataTrainingArguments])
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
else:
data_args = parser.parse_args_into_dataclasses()[0]
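# Illustrative invocations (flag names follow the DataTrainingArguments fields used below):
#   python run_eval.py --model_name_or_path openai/whisper-large-v3 --dataset_name librispeech_asr ...
#   python run_eval.py eval_args.json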
# 2. Set up logging
# Make one log on every process with the configuration for debugging.
logger.setLevel(logging.INFO)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
# 3. Set seed for reproducibility
set_seed(data_args.seed)
if data_args.use_pipeline and data_args.batch_size > 1:
raise ValueError("Make sure that `batch_size` is set to 1 when `use_pipeline=True`.")
has_wandb = is_wandb_available()
if has_wandb:
import wandb
wandb_logger = wandb  # single import; `wandb_logger` is the name used for logging calls below
# Store generation hyperparameters to log with the run
generation_arguments = {
"torch_version": str(torch.__version__),
"transformers_version": str(transformers.__version__),
"attn_implementation": data_args.attn_implementation,
"model_name_or_path": data_args.model_name_or_path,
"subfolder": data_args.subfolder,
"assistant_model_name_or_path": data_args.assistant_model_name_or_path,
"seed": data_args.seed,
"batch_size": data_args.batch_size,
"num_beams": data_args.num_beams,
"return_timestamps": data_args.return_timestamps,
"condition_on_prev_tokens": data_args.condition_on_prev_tokens,
"temperature_fallback": data_args.temperature_fallback,
"logprob_threshold": data_args.logprob_threshold,
"no_speech_threshold": data_args.no_speech_threshold,
"use_pipeline": data_args.use_pipeline,
"chunk_length_s": data_args.chunk_length_s,
}
# Set up wandb run
wandb_logger.init(
project=data_args.wandb_project,
name=data_args.wandb_name,
job_type=data_args.wandb_job_type,
dir=data_args.wandb_dir,
save_code=data_args.save_code_to_wandb,
config=generation_arguments,
)
else:
raise ValueError("Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.")
# 4. Load dataset
raw_datasets = IterableDatasetDict()
# Convert lists of dataset names/configs/splits to a list of dicts
# names: "librispeech_asr+gigaspeech", configs: "all+l", splits: "validation.clean+validation"
# -> [{"name": "librispeech_asr", "config": "all", "split": "validation.clean"},
#     {"name": "gigaspeech", "config": "l", "split": "validation"}]
dataset_names_dict = convert_dataset_str_to_list(
data_args.dataset_name,
data_args.dataset_config_name,
splits=data_args.dataset_split_name,
text_column_names=data_args.text_column_name,
)
# load multiple eval sets
for dataset_dict in tqdm(dataset_names_dict, desc="Loading datasets..."):
sub_dataset = load_dataset(
dataset_dict["name"],
dataset_dict["config"],
split=dataset_dict["split"],
cache_dir=data_args.dataset_cache_dir,
streaming=data_args.streaming,
num_proc=data_args.preprocessing_num_workers,
)
if data_args.only_short_form:
sub_dataset = sub_dataset.filter(lambda x: len(x["audio"]["array"]) / x["audio"]["sampling_rate"] <= 30)
if data_args.only_long_form:
sub_dataset = sub_dataset.filter(lambda x: len(x["audio"]["array"]) / x["audio"]["sampling_rate"] > 30)
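# 30 s is Whisper's fixed input window: clips at or under 30 s are "short-form"
# (one forward pass); longer clips require long-form decoding.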
if dataset_dict["text_column_name"] not in list(sub_dataset.features.keys()):
raise ValueError(
f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation "
f"dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column "
f"for the target text. Should be one of {' '.join(list(sub_dataset.features.keys()))}"
)
if dataset_dict["text_column_name"] != "text":
sub_dataset = sub_dataset.rename_column(dataset_dict["text_column_name"], "text")
if not data_args.streaming:
sub_dataset = sub_dataset.to_iterable_dataset()
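# Convert non-streaming datasets to iterable form so the lazy `.map`/`.take`
# pipeline below behaves identically in both modes.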
# Clean up the dataset name for pretty logging
# ("distil-whisper/librispeech_asr", "validation.clean") -> "librispeech_asr/validation-clean"
pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}"
raw_datasets[pretty_name] = sub_dataset
# 5. Load pretrained model, tokenizer, and feature extractor
processor = WhisperProcessor.from_pretrained(
data_args.model_name_or_path,
subfolder=data_args.subfolder,
cache_dir=data_args.cache_dir,
use_fast=data_args.use_fast_tokenizer,
)
dtype = getattr(torch, data_args.dtype)
model = WhisperForConditionalGeneration.from_pretrained(
data_args.model_name_or_path,
subfolder=data_args.subfolder,
torch_dtype=dtype,
attn_implementation=data_args.attn_implementation,
low_cpu_mem_usage=is_accelerate_available(),
cache_dir=data_args.cache_dir,
variant=data_args.model_variant,
)
model.to("cuda:0", dtype=dtype)
model_pipeline = None
if data_args.use_pipeline:
model_pipeline = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
torch_dtype=dtype,
device=model.device,
chunk_length_s=data_args.chunk_length_s,
)
model_pipeline_forward = model_pipeline._forward
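# Keep a handle on the original `_forward` so benchmark() below can wrap it
# with a timing hook while still calling through to the real implementation.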
assistant_model = None
if data_args.assistant_model_name_or_path is not None:
logger.info("Loading assistant model...")
if data_args.assistant_model_name_or_path.startswith("openai"):
assistant_model = WhisperForConditionalGeneration.from_pretrained(
data_args.assistant_model_name_or_path,
torch_dtype=dtype,
attn_implementation=data_args.attn_implementation,
low_cpu_mem_usage=is_accelerate_available(),
cache_dir=data_args.cache_dir,
)
else:
assistant_model = WhisperForCausalLM.from_pretrained(
data_args.assistant_model_name_or_path,
torch_dtype=dtype,
attn_implementation=data_args.attn_implementation,
low_cpu_mem_usage=is_accelerate_available(),
cache_dir=data_args.cache_dir,
)
assistant_model.cuda()
# 6. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name,
datasets.features.Audio(sampling_rate=processor.feature_extractor.sampling_rate),
)
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and keep the reference text for scoring.
audio_column_name = data_args.audio_column_name
language = language_to_id(data_args.language, model.generation_config) if data_args.language else None
if language is None or language == "<|en|>":
normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer)
else:
normalizer = BasicTextNormalizer()
sampling_rate = processor.feature_extractor.sampling_rate
if data_args.samples_per_dataset is not None:
for split in raw_datasets:
raw_datasets[split] = raw_datasets[split].take(data_args.samples_per_dataset)
def prepare_dataset(batch):
# process audio
audio = [sample["array"].astype(np.float32) for sample in batch[audio_column_name]]
if model_pipeline is None:
inputs = processor.feature_extractor(
audio,
sampling_rate=sampling_rate,
return_tensors="pt",
truncation=False,
padding="longest",
return_attention_mask=True,
)
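# Whisper's feature extractor emits 3000 mel frames for 30 s of audio; fewer
# frames under `padding="longest"` means the whole batch is short-form, so
# re-extract with the default pad-to-30 s window expected by the model.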
if inputs.input_features.shape[-1] < 3000:
inputs = processor.feature_extractor(
audio,
sampling_rate=sampling_rate,
return_tensors="pt",
return_attention_mask=True,
)
batch["input_features"] = inputs.input_features.to(dtype)
batch["attention_mask"] = inputs.attention_mask
else:
batch["input_features"] = audio
# process audio length
batch["length_in_s"] = [len(sample) / sampling_rate for sample in audio]
# process targets
batch["reference"] = batch["text"]
return batch
vectorized_datasets = IterableDatasetDict()
for split in raw_datasets:
raw_datasets_features = list(raw_datasets[split].features.keys())
vectorized_datasets[split] = raw_datasets[split].map(
function=prepare_dataset,
remove_columns=raw_datasets_features,
batch_size=data_args.batch_size,
batched=True,
)
# For large datasets it is advised to run the preprocessing on a
# single machine first with `args.preprocessing_only`, since there will
# most likely be a timeout when running the script in distributed mode.
# In a second step, `args.preprocessing_only` can then be set to `False`
# to load the cached dataset.
if data_args.preprocessing_only:
cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
logger.info(f"Data preprocessing finished. Files cached at {cache}.")
return
metric = evaluate.load("wer")
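# Word error rate: word-level edit distance between prediction and reference,
# computed here on Whisper-normalized text (lower-cased, punctuation stripped).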
def compute_metrics(pred_str, label_str):
# normalize everything and re-compute the WER
norm_pred_str = [normalizer(pred) for pred in pred_str]
norm_label_str = [normalizer(label) for label in label_str]
# filter out samples whose normalized reference is empty, so they don't skew the WER:
norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0]
norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0]
wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str)
return wer
gen_kwargs = {
"max_length": data_args.generation_max_length,
"return_timestamps": data_args.return_timestamps,
"num_beams": data_args.num_beams,
"top_k": 0,
}
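# `top_k=0` disables top-k filtering, so any temperature-fallback sampling draws
# from the full distribution, mirroring the original Whisper implementation.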
if hasattr(model.generation_config, "is_multilingual") and model.generation_config.is_multilingual:
gen_kwargs["language"] = data_args.language
gen_kwargs["task"] = data_args.task
elif data_args.language is not None:
raise ValueError(
"Setting language token for an English-only checkpoint is not permitted. The language argument should "
"only be set for multilingual checkpoints."
)
if assistant_model is not None:
gen_kwargs["assistant_model"] = assistant_model
if data_args.prompt_text is not None:
gen_kwargs["prompt_ids"] = processor.get_prompt_ids(data_args.prompt_text, return_tensors="pt").to("cuda:0")
long_form_gen_kwargs = {
"condition_on_prev_tokens": data_args.condition_on_prev_tokens,
"compression_ratio_threshold": data_args.compression_ratio_threshold,
"temperature": (0.0, 0.2, 0.4, 0.6, 0.8, 1.0) if data_args.temperature_fallback else 0,
"logprob_threshold": data_args.logprob_threshold,
"no_speech_threshold": data_args.no_speech_threshold,
}
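# Mirrors OpenAI Whisper's fallback scheme: decoding is retried at increasing
# temperatures whenever the compression-ratio or log-prob thresholds flag a
# low-quality transcription.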
forced_decoder_ids = processor.get_decoder_prompt_ids(
task=data_args.task,
language=data_args.language,
no_timestamps=not data_args.return_timestamps
)
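# Used below only to count the prompt tokens that generate() emits but does not
# actually generate, so they can be excluded from the tokens-per-second stats.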
def benchmark(batch):
if model_pipeline is None:
inputs = torch.stack(batch["input_features"], dim=0).cuda()
attention_mask = torch.stack(batch["attention_mask"], dim=0).cuda()
# automatically switch to the long-form generation args when the input exceeds Whisper's 30 s window
inner_batch_size, num_mels, seq_len = inputs.shape
if seq_len == 3000:
batch_gen_kwargs = gen_kwargs
else:
batch_gen_kwargs = {**gen_kwargs, **long_form_gen_kwargs}
set_seed(data_args.seed)
start_time = time.time()
output_ids = model.generate(inputs, attention_mask=attention_mask, **batch_gen_kwargs)
gen_time = time.time() - start_time
batch["time"] = inner_batch_size * [(gen_time) / inner_batch_size]
if not data_args.precise_tok_per_s:
n_generated_tokens = output_ids.numel() - inner_batch_size * len(forced_decoder_ids)
batch["tokens_per_sec"] = inner_batch_size * [(n_generated_tokens / gen_time) / inner_batch_size]
batch["transcription"] = processor.batch_decode(
output_ids, skip_special_tokens=True, decode_with_timestamps=data_args.return_timestamps
)
else:
inputs = batch["input_features"]
# Time the forward pass only, excluding pre- and post-processing
time_result = []
n_generated_tokens = []
def _forward_time(*args, **kwargs):
start_time = time.time()
result = model_pipeline_forward(*args, **kwargs)
end_time = time.time() - start_time
time_result.append(end_time)
for toks in result['tokens']:
n_generated_tokens.append(len(toks) - len(forced_decoder_ids))
return result
model_pipeline._forward = _forward_time
result = model_pipeline(
inputs,
batch_size=PIPELINE_BATCH_SIZE,
generate_kwargs={
**gen_kwargs
}
)[0]["text"]
if not data_args.precise_tok_per_s:
n_generated_tokens = sum(n_generated_tokens)
gen_time = sum(time_result)  # total forward time across all chunked forward calls
batch["tokens_per_sec"] = [n_generated_tokens / gen_time]
batch["transcription"] = [result]
batch["time"] = [sum(time_result)]
batch["num_words"] = [len(r.split()) for r in batch["reference"]]
return batch
result_datasets = DatasetDict()
for split in vectorized_datasets:
result_datasets[split] = vectorized_datasets[split].map(
function=benchmark,
remove_columns=["input_features"],
batch_size=data_args.batch_size,
batched=True,
)
stats_dataset = DatasetDict()
all_stats = {"rtf": 0, "wer": 0, "tokens_per_sec": 0}
rtf_stats = {
"times_audio_total": 0,
"times_transcription_total": 0,
}
def benchmark_gen(num_batches):
tokens_per_secs = []
for _ in range(num_batches):
dummy_encoder_outputs = BaseModelOutput(
torch.randn((data_args.batch_size, model.config.max_source_positions, model.config.d_model),
dtype=model.dtype,
device=model.device
)
)
n_tokens = data_args.num_tokens
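# Random encoder states stand in for real audio so only decoder throughput is
# measured; min_new_tokens == max_new_tokens pins the generation length exactly.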
if model_pipeline is None:
# benchmark time to generate fixed number of tokens
start_time = time.time()
_ = model.generate(
encoder_outputs=dummy_encoder_outputs,
min_new_tokens=n_tokens,
max_new_tokens=n_tokens,
**gen_kwargs
)
gen_time = time.time() - start_time
else:
# benchmark time to generate fixed number of tokens
start_time = time.time()
_ = model_pipeline.model.generate(
encoder_outputs=dummy_encoder_outputs,
min_new_tokens=n_tokens,
max_new_tokens=n_tokens,
**gen_kwargs
)
gen_time = time.time() - start_time
n_generated_tokens = n_tokens * data_args.batch_size
tokens_per_secs.append(n_generated_tokens / gen_time)
return tokens_per_secs
logger.info("***** Running Evaluation *****")
for key in generation_arguments:
logger.info(f" {key}: {generation_arguments[key]}")
datasets_evaluated_progress_bar = tqdm(result_datasets, desc="Datasets", position=0)
for split in datasets_evaluated_progress_bar:
transcriptions = []
references = []
stats = {}
times_audio_total = 0
times_transcription_total = 0
tokens_per_secs = []
if data_args.precise_tok_per_s:
# evaluate generation speed over a few batches of dummy inputs
tokens_per_secs = benchmark_gen(data_args.num_batches)
datasets_evaluated_progress_bar.write(f"Start benchmarking {split}...")
result_iter = iter(result_datasets[split])
for result in tqdm(result_iter, desc="Samples", position=1):
times_audio_total += result["length_in_s"]
times_transcription_total += result["time"]
# ensure prompt is removed from the transcription (awaiting fix in Transformers)
if data_args.prompt_text is not None:
result["transcription"] = result["transcription"].replace(data_args.prompt_text, "")
transcriptions.append(result["transcription"])
references.append(result["reference"])
if not data_args.precise_tok_per_s:
tokens_per_secs.append(result["tokens_per_sec"])
norm_transcriptions = [normalizer(pred) for pred in transcriptions]
norm_references = [normalizer(label) for label in references]
transcriptions = [transcriptions[i] for i in range(len(transcriptions)) if len(norm_references[i]) > 0]
references = [references[i] for i in range(len(references)) if len(norm_references[i]) > 0]
norm_transcriptions = [
norm_transcriptions[i] for i in range(len(norm_transcriptions)) if len(norm_references[i]) > 0
]
norm_references = [norm_references[i] for i in range(len(norm_references)) if len(norm_references[i]) > 0]
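# The same non-empty-reference filter is applied to all four lists so the raw
# and normalized predictions/references stay index-aligned for per-sample logging.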
stats["wer"] = compute_metrics(norm_transcriptions, norm_references)
wer_per_sample = []
for pred, ref in zip(norm_transcriptions, norm_references):
wer_per_sample.append(compute_metrics([pred], [ref]))
stats["rtf"] = times_audio_total / times_transcription_total
stats["tokens_per_sec"] = sum(tokens_per_secs) / len(tokens_per_secs)
stats_dataset[split] = stats
wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in stats.items()])
datasets_evaluated_progress_bar.write(wer_desc)
write_wandb_metric(wandb_logger, stats, prefix=split)
if data_args.log_predictions:
write_wandb_pred(
wandb_logger,
transcriptions,
references,
norm_transcriptions,
norm_references,
wer_per_sample,
prefix=split,
)
rtf_stats["times_audio_total"] += times_audio_total
rtf_stats["times_transcription_total"] += times_transcription_total
all_stats["wer"] += stats["wer"]
all_stats["tokens_per_sec"] += stats["tokens_per_sec"]
all_stats["wer"] = all_stats["wer"] / len(result_datasets)
# technically this is the reciprocal of the RTF, but it makes the scale easier to read on wandb
all_stats["rtf"] = rtf_stats["times_audio_total"] / rtf_stats["times_transcription_total"]
all_stats["tokens_per_sec"] = all_stats["tokens_per_sec"] / len(result_datasets)
stats_dataset["all"] = all_stats
write_wandb_metric(wandb_logger, all_stats, prefix="all")
benchmark_artifact = wandb.Artifact("Benchmark", type="datasets")
with tempfile.TemporaryDirectory() as temp_dir:
for split in stats_dataset:
file_name = os.path.join(temp_dir, f"{'_'.join(split.split('/'))}.json")
with open(file_name, "w") as json_file:
json.dump(stats_dataset[split], json_file)
benchmark_artifact.add_file(file_name, split)
wandb_logger.log_artifact(benchmark_artifact)