| id | Unit | File | Start line | End line | Size (LOC) | McCabe index | Parameters |
|----|------|------|------------|----------|------------|--------------|------------|
| 1 | `public void run()` | `ImageSegmentation/app/src/main/java/org/pytorch/imagesegmentation/MainActivity.java` | 118 | 170 | 48 | 8 | 0 |
| 2 | `protected void onCreate()` | `ImageSegmentation/app/src/main/java/org/pytorch/imagesegmentation/MainActivity.java` | 64 | 115 | 45 | 5 | 1 |
| 3 | `def test_export_torchvision_format()` | `D2Go/create_d2go.py` | 0 | 0 | 38 | 1 | 0 |
| 4 | `static Matrixf melfilter()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 101 | 143 | 37 | 4 | 5 |
| 5 | `def train()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 34 | 7 | 8 |
| 6 | `def trainIters()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 30 | 5 | 6 |
| 7 | `protected void onCreate()` | `HelloWorldApp/app/src/main/java/org/pytorch/helloworld/MainActivity.java` | 29 | 76 | 30 | 4 | 1 |
| 8 | `static Vectorf pad()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 45 | 76 | 29 | 10 | 5 |
| 9 | `def create_fake_detection_data_loader()` | `D2Go/create_d2go.py` | 0 | 0 | 24 | 2 | 3 |
| 10 | `public static String assetFilePath()` | `NativeApp/app/src/main/java/org/pytorch/nativeapp/MainActivity.java` | 17 | 37 | 20 | 5 | 2 |
| 11 | `JNIEXPORT jint JNI_OnLoad()` | `NativeApp/app/src/main/cpp/pytorch_nativeapp.cpp` | 75 | 98 | 20 | 4 | 2 |
| 12 | `def fit()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 19 | 2 | 2 |
| 13 | `def evaluate()` | `ViT4MNIST/mnist_vit.py` | 0 | 0 | 18 | 2 | 3 |
| 14 | `public static String assetFilePath()` | `ImageSegmentation/app/src/main/java/org/pytorch/imagesegmentation/MainActivity.java` | 44 | 61 | 17 | 4 | 2 |
| 15 | `public static String assetFilePath()` | `HelloWorldApp/app/src/main/java/org/pytorch/helloworld/MainActivity.java` | 83 | 100 | 17 | 4 | 2 |
| 16 | `def validate()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 17 | 2 | 2 |
| 17 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 16 | 2 | 3 |
| 18 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 16 | 1 | 10 |
| 19 | `torch::Tensor warp_perspective()` | `NativeApp/app/src/main/cpp/pytorch_nativeapp.cpp` | 19 | 35 | 15 | 1 | 2 |
| 20 | `def forward()` | `SpeechRecognition/create_wav2vec2.py` | 0 | 0 | 15 | 4 | 2 |
| 21 | `static Matrixcf stft()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 78 | 95 | 14 | 3 | 6 |
| 22 | `static void loadAndForwardModel()` | `NativeApp/app/src/main/cpp/pytorch_nativeapp.cpp` | 53 | 72 | 14 | 1 | 3 |
| 23 | `def train_epoch()` | `ViT4MNIST/mnist_vit.py` | 0 | 0 | 14 | 3 | 4 |
| 24 | `static Matrixf dct()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 161 | 175 | 13 | 2 | 3 |
| 25 | `def process()` | `StreamingASR/run_sasr.py` | 0 | 0 | 13 | 4 | 1 |
| 26 | `def readLangs()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 13 | 7 | 3 |
| 27 | `def prepareData()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 13 | 2 | 3 |
| 28 | `def forward()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 13 | 1 | 4 |
| 29 | `def __init__()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 12 | 1 | 5 |
| 30 | `static Matrixf melspectrogram()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 145 | 154 | 10 | 1 | 11 |
| 31 | `def transcribe()` | `StreamingASR/run_sasr.py` | 0 | 0 | 10 | 3 | 2 |
| 32 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 10 | 1 | 3 |
| 33 | `def forward()` | `ASLRecognition/scripts/cnn_models.py` | 0 | 0 | 10 | 1 | 2 |
| 34 | `def __init__()` | `ASLRecognition/scripts/cnn_models.py` | 0 | 0 | 9 | 1 | 1 |
| 35 | `def addWord()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 8 | 2 | 2 |
| 36 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 8 | 2 | 5 |
| 37 | `def compute()` | `NativeApp/make_warp_perspective_pt.py` | 0 | 0 | 7 | 2 | 2 |
| 38 | `def __init__()` | `SpeechRecognition/create_wav2vec2.py` | 0 | 0 | 7 | 1 | 2 |
| 39 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 7 | 1 | 3 |
| 40 | `def seed_everything()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 7 | 1 | 1 |
| 41 | `protected void onCreate()` | `NativeApp/app/src/main/java/org/pytorch/nativeapp/MainActivity.java` | 40 | 45 | 6 | 1 | 1 |
| 42 | `def __init__()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 6 | 2 | 2 |
| 43 | `def timeSince()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 6 | 1 | 2 |
| 44 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 6 | 1 | 3 |
| 45 | `def __init__()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 6 | 1 | 3 |
| 46 | `def __getitem__()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 6 | 1 | 2 |
| 47 | `template void log()` | `NativeApp/app/src/main/cpp/pytorch_nativeapp.cpp` | 40 | 44 | 5 | 1 | 2 |
| 48 | `def unicodeToAscii()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 5 | 3 | 1 |
| 49 | `def normalizeString()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 5 | 1 | 1 |
| 50 | `def __init__()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 5 | 1 | 3 |
| 51 | `def forward()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 5 | 1 | 3 |
| 52 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 5 | 2 | 3 |
| 53 | `static Matrixf power2db()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 156 | 159 | 4 | 1 | 1 |
| 54 | `def callback()` | `StreamingASR/run_sasr.py` | 0 | 0 | 4 | 1 | 4 |
| 55 | `def _piecewise_linear_log()` | `StreamingASR/run_sasr.py` | 0 | 0 | 4 | 1 | 1 |
| 56 | `def tensorFromSentence()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 4 | 1 | 2 |
| 57 | `def tensorsFromPair()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 4 | 1 | 1 |
| 58 | `def asMinutes()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 4 | 1 | 1 |
| 59 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 4 | 1 | 3 |
| 60 | `static Matrixf spectrogram()` | `StreamingASR/StreamingASR/app/src/main/cpp/librosa/librosa.h` | 97 | 99 | 3 | 1 | 2 |
| 61 | `def get_demo_wrapper()` | `StreamingASR/save_model_for_mobile.py` | 0 | 0 | 3 | 1 | 0 |
| 62 | `def get_demo_wrapper()` | `StreamingASR/run_sasr.py` | 0 | 0 | 3 | 1 | 0 |
| 63 | `public static void loadAndForwardModel()` | `NativeApp/app/src/main/java/org/pytorch/nativeapp/NativeClient.java` | 5 | 7 | 3 | 1 | 1 |
| 64 | `static` (initializer) | `NativeApp/app/src/main/java/org/pytorch/nativeapp/NativeClient.java` | 10 | 12 | 3 | 1 | 0 |
| 65 | `def addSentence()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 3 | 2 | 2 |
| 66 | `def filterPair()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 3 | 2 | 1 |
| 67 | `def __init__()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 3 | 1 | 2 |
| 68 | `def filterPairs()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 2 | 3 | 1 |
| 69 | `def initHidden()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 2 | 1 | 1 |
| 70 | `def initHidden()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 2 | 1 | 1 |
| 71 | `def indexesFromSentence()` | `Seq2SeqNMT/seq2seq_nmt.py` | 0 | 0 | 2 | 2 | 2 |
| 72 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 2 | 1 | 3 |
| 73 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 2 | 1 | 3 |
| 74 | `def forward()` | `ViT4MNIST/vit_pytorch.py` | 0 | 0 | 2 | 1 | 2 |
| 75 | `def __len__()` | `ASLRecognition/scripts/train.py` | 0 | 0 | 2 | 1 | 1 |
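The three `assetFilePath()` entries (ids 10, 14, and 15) have the same signature, parameter count, and similar size and complexity, which suggests they are copies of one recurring helper: copy a bundled asset into the app's internal files directory and return its absolute path so the model file can be loaded from disk. Below is a minimal sketch of that pattern, assuming only the standard Android `Context`/`AssetManager` API; the `AssetUtils` class name is illustrative and the body is not copied verbatim from any of the listed files.

```java
import android.content.Context;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Illustrative sketch of the asset-copy helper pattern shared by the
// assetFilePath() units listed above; names and error handling are assumptions.
public final class AssetUtils {

  // Copies assets/<assetName> into the app's files directory (only if not
  // already present) and returns the absolute path on internal storage.
  public static String assetFilePath(Context context, String assetName) throws IOException {
    File file = new File(context.getFilesDir(), assetName);
    if (file.exists() && file.length() > 0) {
      return file.getAbsolutePath();
    }

    try (InputStream is = context.getAssets().open(assetName);
         OutputStream os = new FileOutputStream(file)) {
      byte[] buffer = new byte[4 * 1024];
      int bytesRead;
      while ((bytesRead = is.read(buffer)) != -1) {
        os.write(buffer, 0, bytesRead);
      }
      os.flush();
    }
    return file.getAbsolutePath();
  }
}
```

In the demo apps, a path produced this way is typically handed to PyTorch Mobile's `Module.load(...)` inside the listed `onCreate()` units, which is why the helper returns a filesystem path rather than a stream.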