# training/flax/run_speed_pt.py
def main():
# 1. Parse input arguments
# See all possible arguments in the DataTrainingArguments dataclass defined in this script,
# or by passing the --help flag.
# We keep the benchmark arguments in their own dataclass for a cleaner separation of concerns.
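# Illustrative invocation (argument values are examples only; flag names mirror the
# DataTrainingArguments fields parsed by HfArgumentParser below):
#   python run_speed_pt.py \
#     --model_name_or_path openai/whisper-large-v2 \
#     --dataset_name librispeech_asr \
#     --dataset_config_name all \
#     --dataset_split_name validation.clean \
#     --text_column_name text \
#     --batch_size 1 \
#     --streaming True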
parser = HfArgumentParser([DataTrainingArguments])
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
else:
data_args = parser.parse_args_into_dataclasses()[0]
# 2. Setup logging
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if data_args.use_pipeline and data_args.batch_size > 1:
raise ValueError("Make sure that `batch_size` is set to 1 when `use_pipeline=True`.")
has_wandb = is_wandb_available()
if has_wandb:
import wandb
import wandb as wandb_logger
# Set up wandb run
wandb_logger.init(
project=data_args.wandb_project,
name=data_args.wandb_name,
job_type=data_args.wandb_job_type,
dir=data_args.wandb_dir,
save_code=data_args.save_code_to_wandb,
)
wandb_logger.log({"torch_version": str(torch.__version__)})
wandb_logger.log({"transformers_version": str(transformers.__version__)})
wandb_logger.log({"batch_size": data_args.batch_size})
if data_args.use_pipeline:
wandb_logger.log({"chunk_length_s": data_args.chunk_length_s})
else:
raise ValueError("Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.")
# 3. Load dataset
raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
# Convert lists of dataset names/configs/splits to a list of dicts, e.g.
# names: "librispeech_asr+gigaspeech", configs: "all+l", splits: "validation.clean+validation"
# -> [{"name": "librispeech_asr", "config": "all", "split": "validation.clean"},
#     {"name": "gigaspeech", "config": "l", "split": "validation"}]
dataset_names_dict = convert_dataset_str_to_list(
data_args.dataset_name,
data_args.dataset_config_name,
splits=data_args.dataset_split_name,
text_column_names=data_args.text_column_name,
)
if len(dataset_names_dict) == 1:
# load a single eval set
dataset_dict = dataset_names_dict[0]
raw_datasets["eval"] = load_dataset(
dataset_dict["name"],
dataset_dict["config"],
split=dataset_dict["split"],
cache_dir=data_args.dataset_cache_dir,
use_auth_token=True,
streaming=data_args.streaming,
)
if dataset_dict["text_column_name"] not in list(raw_datasets["eval"].features.keys()):
raise ValueError(
f"--text column name {dataset_dict['text_column_name']} not found in the evaluation "
f"dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column "
f"for the target text. Should be one of {' '.join(list(raw_datasets['eval'].features.keys()))}"
)
if dataset_dict["text_column_name"] != "text":
raw_datasets["eval"] = raw_datasets["eval"].rename_column(dataset_dict["text_column_name"], "text")
else:
# load multiple eval sets
for dataset_dict in tqdm(dataset_names_dict, desc="Loading datasets..."):
# Clean-up the dataset name for pretty logging
# ("distil-whisper/librispeech_asr", "validation.clean") -> "librispeech_asr/validation-clean"
pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}"
raw_datasets[pretty_name] = load_dataset(
dataset_dict["name"],
dataset_dict["config"],
split=dataset_dict["split"],
cache_dir=data_args.dataset_cache_dir,
use_auth_token=True,
streaming=data_args.streaming,
)
if dataset_dict["text_column_name"] not in list(raw_datasets[pretty_name].features.keys()):
raise ValueError(
f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation "
f"dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column "
f"for the target text. Should be one of {' '.join(list(raw_datasets[pretty_name].features.keys()))}"
)
if dataset_dict["text_column_name"] != "text":
raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(
dataset_dict["text_column_name"], "text"
)
# 5. Load pretrained model, tokenizer, and feature extractor
processor = WhisperProcessor.from_pretrained(data_args.model_name_or_path)
dtype = torch.float16 if data_args.use_fp16 else torch.float32
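# note: if both use_fp16 and use_bf16 are set, bf16 takes precedence since it is applied last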
if data_args.use_bf16:
dtype = torch.bfloat16
use_flash_attention_2 = data_args.attn_type is not None and "flash2" in data_args.attn_type
# Flash Attention 2 requires an Ampere-or-newer GPU, so fall back to SDPA on older Tesla-series cards (e.g. T4)
result = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
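# crude parse of the nvidia-smi table (assumes a standard layout and that GPU 0 is the relevant device):
# take the text between the GPU index "0" and the next "0", whose first token is typically the
# series/vendor name, e.g. ["Tesla", "T4", ...] on a T4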
gpu_type = [x for x in result.stdout.split("=") if len(x) > 1][1].split("0")[1].split()
use_sdpa = False
if gpu_type[0] == "Tesla" and use_flash_attention_2:
use_flash_attention_2 = False
use_sdpa = True
use_orig_whisper = False
if data_args.use_orig_whisper:
use_orig_whisper = True
model_name = data_args.model_name_or_path.split("/")[-1].split("whisper-")[-1]
model = whisper.load_model(model_name)
model.cuda()
else:
model = WhisperForConditionalGeneration.from_pretrained(
data_args.model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2
)
model.cuda()
if use_sdpa:
logger.info("Use SDPA via BetterTransformers...")
model.to_bettertransformer()
if data_args.use_torch_compile:
logger.info("Enabling torch compile for the encoder.")
# let's compile the encoder forward path
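# "reduce-overhead" mode wraps the compiled graph in CUDA graphs to cut kernel-launch overhead,
# and fullgraph=True errors out early if the encoder forward cannot be captured as a single graph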
model.model.encoder.forward = torch.compile(
model.model.encoder.forward, mode="reduce-overhead", fullgraph=True
)
# warm up torch.compile on dummy inputs so tracing/compilation happens before benchmarking
input_values = np.random.randn(data_args.batch_size, 16_000)
input_features = processor(input_values, return_tensors="pt", sampling_rate=16_000).input_features
input_features = input_features.to(dtype=dtype, device=model.device)
# run generation three times so that the model is fully compiled
for _ in range(3):
_ = model.generate(input_features)
model_pipeline = None
if data_args.use_pipeline:
model_pipeline = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
torch_dtype=dtype,
device=model.device,
chunk_length_s=data_args.chunk_length_s,
)
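# keep a handle on the original _forward so benchmark() can temporarily wrap it with a timing shim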
model_pipeline_forward = model_pipeline._forward
assistant_model = None
if data_args.assistant_model_name_or_path is not None:
logger.info("Loading assistant model...")
if data_args.assistant_model_name_or_path.startswith("openai"):
assistant_model = WhisperForConditionalGeneration.from_pretrained(
data_args.assistant_model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2
)
else:
assistant_model = WhisperForCausalLM.from_pretrained(
data_args.assistant_model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2
)
assistant_model.cuda()
# 6. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name,
datasets.features.Audio(sampling_rate=processor.feature_extractor.sampling_rate),
)
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
max_label_length = (
data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length
)
audio_column_name = data_args.audio_column_name
num_workers = data_args.preprocessing_num_workers
model_input_name = processor.feature_extractor.model_input_names[0]
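# for Whisper feature extractors this is typically "input_features"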
normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer)
if data_args.max_eval_samples is not None:
for split in raw_datasets:
raw_datasets[split] = (
raw_datasets[split].take(data_args.max_eval_samples)
if data_args.streaming
else raw_datasets[split].select(range(data_args.max_eval_samples))
)
def prepare_dataset(batch):
# process audio
sample = batch[audio_column_name]
if model_pipeline is None and not use_orig_whisper:
inputs = processor.feature_extractor(
sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
)
batch[model_input_name] = inputs.get(model_input_name)
else:
batch[model_input_name] = sample["array"]
# process audio length
batch["length_in_s"] = len(sample["array"]) / sample["sampling_rate"]
# process targets
input_str = batch["text"]
batch["labels"] = processor.tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids
return batch
vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
for split in raw_datasets:
raw_datasets_features = list(raw_datasets[split].features.keys())
map_fn = partial(
raw_datasets[split].map,
function=prepare_dataset,
remove_columns=raw_datasets_features,
)
vectorized_datasets[split] = (
map_fn(num_proc=num_workers, desc="preprocess eval dataset")
if not data_args.streaming
else map_fn() # In streaming, we can't run multiproc - errors out if we try to
)
# For large datasets it is advised to run the preprocessing on a
# single machine first with `data_args.preprocessing_only`, since there will most likely
# be a timeout when running the script in distributed mode.
# In a second step, `data_args.preprocessing_only` can then be set to `False` to load the
# cached dataset.
if data_args.preprocessing_only:
cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
logger.info(f"Data preprocessing finished. Files cached at {cache}.")
return
# 8. Load Metric
metric = evaluate.load("wer")
# convention is that we space all punctuation *except* apostrophes
list(string.punctuation.replace("'", ""))
def compute_metrics(pred_str, label_str):
# normalize everything and re-compute the WER
norm_pred_str = [normalizer(pred) for pred in pred_str]
norm_label_str = [normalizer(label) for label in label_str]
# for logging, we need the pred/labels to match the norm_pred/norm_labels, so discard any filtered samples here
pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0]
label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0]
# filtering step to only evaluate the samples that correspond to non-zero normalized references:
norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0]
norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0]
# if either list is empty after filtering, return a WER of 0
if len(norm_pred_str) == 0 or len(norm_label_str) == 0:
return 0.0
wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str)
return wer
result_datasets = DatasetDict()
def benchmark(batch):
if model_pipeline is None and not use_orig_whisper:
inputs = torch.cat(batch[model_input_name], dim=0).cuda()
if data_args.use_fp16:
inputs = inputs.to(torch.float16)
if data_args.use_bf16:
inputs = inputs.to(torch.bfloat16)
inner_batch_size = inputs.shape[0]
else:
inner_batch_size = 1
inputs = batch[model_input_name]
gen_kwargs = {
"return_timestamps": data_args.return_timestamps,
"max_length": data_args.max_gen_length,
}
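# multilingual checkpoints need explicit language/task prompt tokens, while English-only (".en") checkpoints do not use them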
if not data_args.model_name_or_path.endswith(".en") and not data_args.model_name_or_path.endswith("24-2"):
gen_kwargs["language"] = "<|en|>"
gen_kwargs["task"] = "transcribe"
gen_kwargs["num_beams"] = data_args.num_beams
# Time the forward pass / generation
if use_orig_whisper:
raw_audio = inputs[0].astype(np.float32)
out_dict = model.transcribe(raw_audio)
batch["transcription"] = [out_dict["text"]]
batch["time"] = [out_dict["all_time"]]
elif model_pipeline is not None:
# if the model is a pipeline, make sure that only the forward pass is timed, not pre- and post-processing
time_result = []
def _forward_time(*args, **kwargs):
start_time = time.time()
result = model_pipeline_forward(*args, **kwargs)
end_time = time.time() - start_time
time_result.append(end_time)
return result
model_pipeline._forward = _forward_time
result = model_pipeline(inputs, batch_size=PIPELINE_BATCH_SIZE, generate_kwargs=gen_kwargs)[0]["text"]
batch["transcription"] = [result]
batch["time"] = [sum(time_result)]
elif assistant_model is not None:
gen_kwargs["assistant_model"] = assistant_model
start_time = time.time()
with torch.no_grad():
encoder_outputs = model.get_encoder()(inputs)
gen_kwargs["encoder_outputs"] = encoder_outputs
if data_args.assistant_model_name_or_path.startswith("openai"):
with torch.no_grad():
assistant_encoder_outputs = assistant_model.get_encoder()(inputs)
gen_kwargs["assistant_encoder_outputs"] = assistant_encoder_outputs
else:
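# a decoder-only assistant has no encoder of its own, so it reuses the main model's encoder outputs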
gen_kwargs["assistant_encoder_outputs"] = encoder_outputs
output_ids = model.generate(**gen_kwargs)
batch["time"] = inner_batch_size * [(time.time() - start_time) / inner_batch_size]
batch["transcription"] = processor.batch_decode(output_ids, skip_special_tokens=True)
else:
start_time = time.time()
output_ids = model.generate(inputs, **gen_kwargs)
batch["time"] = inner_batch_size * [(time.time() - start_time) / inner_batch_size]
batch["transcription"] = processor.batch_decode(output_ids, skip_special_tokens=True)
batch["length_in_s"] = batch["length_in_s"]
batch["reference"] = processor.batch_decode(batch["labels"], skip_special_tokens=True)
batch["num_words"] = [len(r.split()) for r in batch["reference"]]
return batch
for split in vectorized_datasets:
vectorized_datasets_features = [model_input_name]
map_fn = partial(
vectorized_datasets[split].map,
function=benchmark,
remove_columns=vectorized_datasets_features,
batch_size=data_args.batch_size,
batched=True,
)
result_datasets[split] = (
map_fn(num_proc=1, desc="benchmark eval dataset") if not data_args.streaming else map_fn()
)
stats_dataset = DatasetDict()
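# aggregate statistics over all eval splits; "rtf" here is computed as audio seconds / transcription
# seconds (so higher is faster) and "words_per_s" is decoding throughput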
all_stats = {
"times_audio_total": 0,
"times_transcription_total": 0,
"num_words_total": 0,
"num_samples": 0,
"time_per_sample": 0,
"rtf": 0,
"words_per_s": 0,
"wer": 0,
}
count = 0
for split in result_datasets:
transcriptions = []
references = []
stats = {k: 0 for k in all_stats.keys()}
print(f"Start benchmarking {split}...")
if data_args.streaming:
result_iter = iter(result_datasets[split])
for result in result_iter:
stats["times_audio_total"] += result["length_in_s"]
stats["times_transcription_total"] += result["time"]
stats["num_words_total"] += result["num_words"]
stats["num_samples"] += 1
transcriptions.append(result["transcription"])
references.append(result["reference"])
count += 1
print(f"Processed {count} samples...")
if data_args.samples_per_dataset is not None and stats["num_samples"] == data_args.samples_per_dataset:
break
stats["time_per_sample"] = stats["times_transcription_total"] / stats["num_samples"]
stats["avg_length_sample"] = stats["times_audio_total"] / stats["num_samples"]
stats["wer"] = compute_metrics(transcriptions, references)
stats["rtf"] = stats["times_audio_total"] / stats["times_transcription_total"]
stats["words_per_s"] = stats["num_words_total"] / stats["times_transcription_total"]
stats_dataset[split] = stats
log_stats = {f"{split}_{k}": v for k, v in stats.items()}
wandb_logger.log(log_stats)
all_stats["times_audio_total"] += stats["times_audio_total"]
all_stats["times_transcription_total"] += stats["times_transcription_total"]
all_stats["wer"] += stats["wer"]
all_stats["num_samples"] += stats["num_samples"]
all_stats["num_words_total"] += stats["num_words_total"]
all_stats["time_per_sample"] = all_stats["times_transcription_total"] / all_stats["num_samples"]
all_stats["avg_length_sample"] = all_stats["times_audio_total"] / all_stats["num_samples"]
all_stats["wer"] = all_stats["wer"] / len(result_datasets)
all_stats["rtf"] = all_stats["times_audio_total"] / all_stats["times_transcription_total"]
all_stats["words_per_s"] = all_stats["num_words_total"] / all_stats["times_transcription_total"]
stats_dataset["all"] = all_stats
log_all_stats = {f"all_{k}": v for k, v in all_stats.items()}
wandb_logger.log(log_all_stats)
benchmark_artifact = wandb.Artifact("Benchmark", type="datasets")
with tempfile.TemporaryDirectory() as temp_dir:
for split in stats_dataset:
file_name = os.path.join(temp_dir, f"{'_'.join(split.split('/'))}.json")
with open(file_name, "w") as json_file:
json.dump(stats_dataset[split], json_file)
benchmark_artifact.add_file(file_name, split)
wandb_logger.log_artifact(benchmark_artifact)
print("Done!")