| Path | Lines of Code |
| --- | --- |
| CMakeLists.txt | 112 |
| CODE_OF_CONDUCT.md | 57 |
| CONTRIBUTING.md | 180 |
| README.md | 115 |
| examples/asr/emformer_rnnt/README.md | 62 |
| examples/asr/emformer_rnnt/common.py | 73 |
| examples/asr/emformer_rnnt/eval.py | 113 |
| examples/asr/emformer_rnnt/global_stats.py | 75 |
| examples/asr/emformer_rnnt/librispeech/global_stats.json | 166 |
| examples/asr/emformer_rnnt/librispeech/lightning.py | 193 |
| examples/asr/emformer_rnnt/librispeech/train_spm.py | 56 |
| examples/asr/emformer_rnnt/mustc/dataset.py | 43 |
| examples/asr/emformer_rnnt/mustc/lightning.py | 160 |
| examples/asr/emformer_rnnt/mustc/train_spm.py | 52 |
| examples/asr/emformer_rnnt/pipeline_demo.py | 82 |
| examples/asr/emformer_rnnt/tedlium3/eval_pipeline.py | 71 |
| examples/asr/emformer_rnnt/tedlium3/global_stats.json | 166 |
| examples/asr/emformer_rnnt/tedlium3/lightning.py | 185 |
| examples/asr/emformer_rnnt/tedlium3/train_spm.py | 67 |
| examples/asr/emformer_rnnt/train.py | 127 |
| examples/asr/librispeech_ctc_decoder/README.md | 22 |
| examples/asr/librispeech_ctc_decoder/inference.py | 110 |
| examples/hubert/dataset/__init__.py | 10 |
| examples/hubert/dataset/hubert_dataset.py | 189 |
| examples/hubert/preprocess.py | 115 |
| examples/hubert/train.py | 1 |
| examples/hubert/utils/__init__.py | 9 |
| examples/hubert/utils/common_utils.py | 46 |
| examples/hubert/utils/feature_utils.py | 104 |
| examples/hubert/utils/kmeans.py | 110 |
| examples/interactive_asr/README.md | 56 |
| examples/interactive_asr/__init__.py | 2 |
| examples/interactive_asr/asr.py | 23 |
| examples/interactive_asr/utils.py | 119 |
| examples/interactive_asr/vad.py | 154 |
| examples/libtorchaudio/CMakeLists.txt | 12 |
| examples/libtorchaudio/README.md | 23 |
| examples/libtorchaudio/augmentation/CMakeLists.txt | 3 |
| examples/libtorchaudio/augmentation/README.md | 23 |
| examples/libtorchaudio/augmentation/create_jittable_pipeline.py | 46 |
| examples/libtorchaudio/augmentation/main.cpp | 19 |
| examples/libtorchaudio/build.sh | 12 |
| examples/libtorchaudio/data/README.md | 3 |
| examples/libtorchaudio/speech_recognition/CMakeLists.txt | 6 |
| examples/libtorchaudio/speech_recognition/README.md | 127 |
| examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py | 137 |
| examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py | 94 |
| examples/libtorchaudio/speech_recognition/greedy_decoder.py | 17 |
| examples/libtorchaudio/speech_recognition/parse_librispeech.py | 27 |
| examples/libtorchaudio/speech_recognition/parse_voxforge.py | 37 |
| examples/libtorchaudio/speech_recognition/transcribe.cpp | 36 |
| examples/libtorchaudio/speech_recognition/transcribe_list.cpp | 61 |
| examples/pipeline_tacotron2/README.md | 212 |
| examples/pipeline_tacotron2/datasets.py | 82 |
| examples/pipeline_tacotron2/inference.py | 260 |
| examples/pipeline_tacotron2/loss.py | 21 |
| examples/pipeline_tacotron2/text/__init__.py | 1 |
| examples/pipeline_tacotron2/text/numbers.py | 67 |
| examples/pipeline_tacotron2/text/text_preprocessing.py | 91 |
| examples/pipeline_tacotron2/train.py | 399 |
| examples/pipeline_tacotron2/utils.py | 31 |
| examples/pipeline_wav2letter/README.md | 43 |
| examples/pipeline_wav2letter/ctc_decoders.py | 5 |
| examples/pipeline_wav2letter/datasets.py | 85 |
| examples/pipeline_wav2letter/languagemodels.py | 28 |
| examples/pipeline_wav2letter/main.py | 496 |
| examples/pipeline_wav2letter/transforms.py | 7 |
| examples/pipeline_wav2letter/utils.py | 35 |
| examples/pipeline_wavernn/README.md | 37 |
| examples/pipeline_wavernn/datasets.py | 71 |
| examples/pipeline_wavernn/inference.py | 80 |
| examples/pipeline_wavernn/losses.py | 61 |
| examples/pipeline_wavernn/main.py | 364 |
| examples/pipeline_wavernn/processing.py | 21 |
| examples/pipeline_wavernn/utils.py | 38 |
| examples/pipeline_wavernn/wavernn_inference_wrapper.py | 63 |
| examples/source_separation/README.md | 55 |
| examples/source_separation/conv_tasnet/README.md | 33 |
| examples/source_separation/conv_tasnet/__init__.py | 2 |
| examples/source_separation/conv_tasnet/train.py | 281 |
| examples/source_separation/conv_tasnet/trainer.py | 98 |
| examples/source_separation/eval.py | 82 |
| examples/source_separation/lightning_train.py | 300 |
| examples/source_separation/train.py | 126 |
| examples/source_separation/utils/__init__.py | 6 |
| examples/source_separation/utils/dataset/__init__.py | 2 |
| examples/source_separation/utils/dataset/utils.py | 68 |
| examples/source_separation/utils/dataset/wsj0mix.py | 41 |
| examples/source_separation/utils/dist_utils.py | 64 |
| examples/source_separation/utils/metrics.py | 56 |
| examples/test/__init__.py | 1 |
| examples/test/test_interactive_asr.py | 94 |
| examples/tutorials/README.rst | 2 |
| examples/tutorials/asr_inference_with_ctc_decoder_tutorial.py | 137 |
| examples/tutorials/audio_data_augmentation_tutorial.py | 229 |
| examples/tutorials/audio_datasets_tutorial.py | 39 |
| examples/tutorials/audio_feature_augmentation_tutorial.py | 80 |
| examples/tutorials/audio_feature_extractions_tutorial.py | 265 |
| examples/tutorials/audio_io_tutorial.py | 186 |
| examples/tutorials/audio_resampling_tutorial.py | 263 |
| examples/tutorials/device_asr.py | 76 |
| examples/tutorials/forced_alignment_tutorial.py | 229 |
| examples/tutorials/mvdr_tutorial.py | 86 |
| examples/tutorials/online_asr_tutorial.py | 57 |
| examples/tutorials/speech_recognition_pipeline_tutorial.py | 61 |
| examples/tutorials/streaming_api_tutorial.py | 161 |
| examples/tutorials/tacotron2_pipeline_tutorial.py | 108 |
| mypy.ini | 3 |
| packaging/README.md | 77 |
| requirements.txt | 14 |
| sokrates_conventions.json | 72 |
| test/torchaudio_unittest/README.md | 109 |
| test/torchaudio_unittest/assets/VCTK-Corpus/txt/p224/p224_002.txt | 1 |
| test/torchaudio_unittest/assets/decoder/lexicon.txt | 3 |
| test/torchaudio_unittest/assets/decoder/tokens.txt | 7 |
| test/torchaudio_unittest/assets/sox_effect_test_fir_coeffs.txt | 1 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/hubert_base_ls960.json | 69 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/hubert_large_ll60k.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/hubert_large_ll60k_finetune_ls960.json | 89 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/hubert_xtralarge_ll60k.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/hubert_xtralarge_ll60k_finetune_ls960.json | 89 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/libri960_big.json | 54 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_large_960h.json | 146 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_large_lv60k_960h.json | 146 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_large_lv60k_self_960h.json | 146 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_small.json | 54 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_small_960h.json | 146 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/wav2vec_vox_new.json | 54 |
| test/torchaudio_unittest/assets/wav2vec2/fairseq/xlsr_53_56k.json | 51 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-base-10k-voxpopuli.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-base-960h.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-base.json | 77 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-960h-lv60-self.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-960h-lv60.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-960h.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-lv60.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-xlsr-53-german.json | 68 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large-xlsr-53.json | 75 |
| test/torchaudio_unittest/assets/wav2vec2/huggingface/wav2vec2-large.json | 68 |
| torchaudio/csrc/CMakeLists.txt | 253 |
| torchaudio/csrc/ffmpeg/README.md | 102 |