granite/run_eval.py

import argparse
import os
import torch
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq, models
import evaluate
from normalizer import data_utils
import time
from tqdm import tqdm

# ensure installed transformers supports granite_speech
assert hasattr(models, "granite_speech")

wer_metric = evaluate.load("wer")
torch.set_float32_matmul_precision("high")


def main(args):
    # Map the integer --device flag (-1 => CPU, N => the N-th GPU) to a torch device
    # string so it can be passed to `.to()` and to the processor.
    args.device = f"cuda:{args.device}" if args.device >= 0 else "cpu"

    processor = AutoProcessor.from_pretrained(args.model_id)
    tokenizer = processor.tokenizer
    model = AutoModelForSpeechSeq2Seq.from_pretrained(args.model_id).to(args.device)

    # create text prompt
    chat = [
        {
            "role": "system",
            "content": "Knowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. You are a helpful AI assistant",
        },
        {
            "role": "user",
            "content": "<|audio|>can you transcribe the speech into a written format?",
        },
    ]
    text = tokenizer.apply_chat_template(
        chat, tokenize=False, add_generation_prompt=True
    )

    gen_kwargs = {"max_new_tokens": args.max_new_tokens, "num_beams": args.num_beams}

    def benchmark(batch, min_new_tokens=None):
        # Load audio inputs
        audios = [audio["array"] for audio in batch["audio"]]
        minibatch_size = len(audios)
        texts = [text] * minibatch_size

        # START TIMING
        start_time = time.time()

        with torch.autocast(model.device.type, enabled=True):
            model_inputs = processor(
                texts,
                audios,
                device=args.device,  # computation device; returned tensors are put on CPU
                return_tensors="pt",
            ).to(args.device)

            # Model inference
            model_outputs = model.generate(
                **model_inputs,
                bos_token_id=tokenizer.bos_token_id,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                repetition_penalty=1.0,
                **gen_kwargs,
                min_new_tokens=min_new_tokens,
            )

        # Transformers includes the input IDs in the response; strip them off so
        # only the newly generated tokens are decoded.
        num_input_tokens = model_inputs["input_ids"].shape[-1]
        new_tokens = model_outputs[:, num_input_tokens:]

        output_text = tokenizer.batch_decode(
            new_tokens, add_special_tokens=False, skip_special_tokens=True
        )

        # END TIMING
        runtime = time.time() - start_time

        # normalize by minibatch size since we want the per-sample time
        batch["transcription_time_s"] = minibatch_size * [runtime / minibatch_size]

        # normalize transcriptions with English normalizer
        batch["predictions"] = [data_utils.normalizer(pred) for pred in output_text]
        batch["references"] = batch["norm_text"]
        return batch

    if args.warmup_steps is not None:
        dataset = data_utils.load_data(args)
        dataset = data_utils.prepare_data(dataset)

        num_warmup_samples = args.warmup_steps * args.batch_size
        if args.streaming:
            warmup_dataset = dataset.take(num_warmup_samples)
        else:
            warmup_dataset = dataset.select(range(min(num_warmup_samples, len(dataset))))
        warmup_dataset = iter(
            warmup_dataset.map(
                benchmark,
                batch_size=args.batch_size,
                batched=True,
                fn_kwargs={"min_new_tokens": args.max_new_tokens},
            )
        )

        for _ in tqdm(warmup_dataset, desc="Warming up..."):
            continue

    dataset = data_utils.load_data(args)
    if args.max_eval_samples is not None and args.max_eval_samples > 0:
        print(f"Subsampling dataset to first {args.max_eval_samples} samples!")
        if args.streaming:
            dataset = dataset.take(args.max_eval_samples)
        else:
            dataset = dataset.select(range(min(args.max_eval_samples, len(dataset))))
    dataset = data_utils.prepare_data(dataset)

    dataset = dataset.map(
        benchmark,
        batch_size=args.batch_size,
        batched=True,
        remove_columns=["audio"],
    )

    all_results = {
        "audio_length_s": [],
        "transcription_time_s": [],
        "predictions": [],
        "references": [],
    }
    result_iter = iter(dataset)
    for result in tqdm(result_iter, desc="Samples..."):
        for key in all_results:
            all_results[key].append(result[key])

    # Write manifest results (WER and RTFX)
    manifest_path = data_utils.write_manifest(
        all_results["references"],
        all_results["predictions"],
        args.model_id,
        args.dataset_path,
        args.dataset,
        args.split,
        audio_length=all_results["audio_length_s"],
        transcription_time=all_results["transcription_time_s"],
    )
    print("Results saved at path:", os.path.abspath(manifest_path))

    wer = wer_metric.compute(
        references=all_results["references"], predictions=all_results["predictions"]
    )
    wer = round(100 * wer, 2)

    rtfx = round(sum(all_results["audio_length_s"]) / sum(all_results["transcription_time_s"]), 2)
    print("WER:", wer, "%", "RTFx:", rtfx)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id",
        type=str,
        required=True,
        help="Model identifier. Should be loadable with 🤗 Transformers",
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="esb/datasets",
        help="Dataset path. By default, it is `esb/datasets`",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name. *E.g.* `'librispeech_asr'` for the LibriSpeech ASR dataset, or `'common_voice'` for Common Voice. The full list of dataset names "
        "can be found at `https://huggingface.co/datasets/esb/datasets`",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="test",
        help="Split of the dataset. *E.g.* `'validation'` for the dev split, or `'test'` for the test split.",
    )
    parser.add_argument(
        "--device",
        type=int,
        default=-1,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=16,
        help="Number of samples to go through each streamed batch.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=1,
        help="Number of beams for beam search.",
    )
    parser.add_argument(
        "--max_eval_samples",
        type=int,
        default=None,
        help="Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.",
    )
    parser.add_argument(
        "--no-streaming",
        dest="streaming",
        action="store_false",
        help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=None,
        help="Maximum number of tokens to generate (for auto-regressive models).",
    )
    parser.add_argument(
        "--warmup_steps",
        type=int,
        default=2,
        help="Number of warm-up steps to run before launching the timed runs.",
    )
    # Default to non-streaming mode; set_defaults must run before parse_args() to take effect.
    parser.set_defaults(streaming=False)

    args = parser.parse_args()

    main(args)
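
# Example invocation (a sketch, not part of the original file): the model ID,
# dataset path, dataset name and split below are illustrative and assume the
# open_asr_leaderboard layout, with the `normalizer` package importable from
# the working directory.
#
#   python granite/run_eval.py \
#       --model_id ibm-granite/granite-speech-3.3-8b \
#       --dataset_path hf-audio/esb-datasets-test-only-sorted \
#       --dataset librispeech \
#       --split test.clean \
#       --device 0 \
#       --batch_size 16 \
#       --max_new_tokens 256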