id: 1 unit: def main() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 278 LOC McCabe index: 67 number of parameters: 0
id: 2 unit: def main() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 244 LOC McCabe index: 47 number of parameters: 0
id: 3 unit: def main() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 130 LOC McCabe index: 23 number of parameters: 1
id: 4 unit: def convert_examples_to_features() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 121 LOC McCabe index: 32 number of parameters: 6
id: 5 unit: def write_predictions() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 101 LOC McCabe index: 20 number of parameters: 9
id: 6 unit: def run_evaluation() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 86 LOC McCabe index: 28 number of parameters: 3
id: 7 unit: def train() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 86 LOC McCabe index: 6 number of parameters: 1
id: 8 unit: def model_fn_builder() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 64 LOC McCabe index: 8 number of parameters: 8
id: 9 unit: def create_hooks() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 59 LOC McCabe index: 23 number of parameters: 1
id: 10 unit: def __init__() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 57 LOC McCabe index: 13 number of parameters: 11
id: 11 unit: def convert_examples_to_features() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 56 LOC McCabe index: 27 number of parameters: 4
id: 12 unit: def read_squad_examples() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 56 LOC McCabe index: 16 number of parameters: 2
id: 13 unit: def convert_single_example() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 54 LOC McCabe index: 26 number of parameters: 5
id: 14 unit: def create_training_instance() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 53 LOC McCabe index: 17 number of parameters: 2
id: 15 unit: def get_final_text() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 50 LOC McCabe index: 21 number of parameters: 4
id: 16 unit: def convert_examples_to_features() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 41 LOC McCabe index: 26 number of parameters: 5
id: 17 unit: def input_fn_builder() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 37 LOC McCabe index: 5 number of parameters: 4
id: 18 unit: def create_masked_lm_predictions() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 35 LOC McCabe index: 10 number of parameters: 2
id: 19 unit: def forward() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 30 LOC McCabe index: 16 number of parameters: 3
id: 20 unit: def synchronize() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 29 LOC McCabe index: 9 number of parameters: 1
id: 21 unit: def file_based_input_fn_builder() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 29 LOC McCabe index: 6 number of parameters: 4
id: 22 unit: def comm_ready_buckets() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 29 LOC McCabe index: 6 number of parameters: 2
id: 23 unit: def __init__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 29 LOC McCabe index: 10 number of parameters: 7
id: 24 unit: def create_model() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 27 LOC McCabe index: 2 number of parameters: 8
id: 25 unit: def get_perf_metrics() file: pretrain/PyTorch/benchmark.py start line: 0 end line: 0 size: 27 LOC McCabe index: 3 number of parameters: 1
id: 26 unit: def __init__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 27 LOC McCabe index: 8 number of parameters: 7
id: 27 unit: def __init__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 27 LOC McCabe index: 11 number of parameters: 7
id: 28 unit: def sync_bucket_structure() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 26 LOC McCabe index: 11 number of parameters: 1
id: 29 unit: def create_training_instance() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 2
id: 30 unit: def pretrain_validation() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 1
id: 31 unit: def compute_metrics() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 24 LOC McCabe index: 11 number of parameters: 3
id: 32 unit: def __init__() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 12
id: 33 unit: def register_model() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 20 LOC McCabe index: 8 number of parameters: 3
id: 34 unit: def evaluate() file: finetune/evaluate_squad.py start line: 0 end line: 0 size: 20 LOC McCabe index: 5 number of parameters: 2
id: 35 unit: def file_based_convert_examples_to_features() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 18 LOC McCabe index: 3 number of parameters: 5
id: 36 unit: def set_environment_variables_for_nccl_backend() file: pretrain/PyTorch/azureml_adapter.py start line: 0 end line: 0 size: 18 LOC McCabe index: 3 number of parameters: 2
id: 37 unit: def get_train_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 2
id: 38 unit: def _compute_softmax() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 17 LOC McCabe index: 7 number of parameters: 1
id: 39 unit: def __init__() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 17 LOC McCabe index: 3 number of parameters: 9
id: 40 unit: def get_dev_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 2
id: 41 unit: def _check_is_max_context() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 16 LOC McCabe index: 16 number of parameters: 3
id: 42 unit: def set_optimizer_params_grad() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 16 LOC McCabe index: 7 number of parameters: 3
id: 43 unit: def set_optimizer_params_grad() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 15 LOC McCabe index: 7 number of parameters: 3
id: 44 unit: def _create_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3
id: 45 unit: def _create_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3
id: 46 unit: def _create_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 3
id: 47 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 15 LOC McCabe index: 8 number of parameters: 3
id: 48 unit: def forward() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 6
id: 49 unit: def allreduce_bucket() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 14 LOC McCabe index: 6 number of parameters: 2
id: 50 unit: def __init__() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 2
id: 51 unit: def __init__() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 7
id: 52 unit: def forward() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 3
id: 53 unit: def __init__() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 8
id: 54 unit: def _create_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3
id: 55 unit: def _create_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3
id: 56 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 57 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 58 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 59 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 60 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 61 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 62 unit: def latest_checkpoint_file() file: pretrain/PyTorch/checkpoint.py start line: 0 end line: 0 size: 12 LOC McCabe index: 7 number of parameters: 2
id: 63 unit: def encode_sequence() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4
id: 64 unit: def _allreduce_tensor() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2
id: 65 unit: def normalize_answer() file: finetune/evaluate_squad.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 66 unit: def f1_score() file: finetune/evaluate_squad.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2
id: 67 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 11 LOC McCabe index: 7 number of parameters: 3
id: 68 unit: def _create_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 3
id: 69 unit: def __repr__() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 70 unit: def truncate_input_sequence() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 11 LOC McCabe index: 7 number of parameters: 3
id: 71 unit: def truncate_input_sequence() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 11 LOC McCabe index: 7 number of parameters: 3
id: 72 unit: def convert_examples_to_features() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 73 unit: def apply_flat_dist_call() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 3
id: 74 unit: def allreduce_maybe_retain() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 3
id: 75 unit: def __init__() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 76 unit: def _create_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3
id: 77 unit: def _truncate_seq_pair() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 3
id: 78 unit: def _truncate_seq_pair() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 3
id: 79 unit: def _read_tsv() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 3
id: 80 unit: def _truncate_seq_pair() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 3
id: 81 unit: def _improve_answer_span() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 5
id: 82 unit: def load_checkpoint() file: pretrain/PyTorch/checkpoint.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3
id: 83 unit: def split_half_float_double() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 1
id: 84 unit: def extract_tensors() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 2
id: 85 unit: def __init__() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2
id: 86 unit: def log_summary_writer() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 4
id: 87 unit: def acc_and_f1() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2
id: 88 unit: def pearson_and_spearman() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2
id: 89 unit: def _get_best_indexes() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 2
id: 90 unit: def checkpoint_model() file: pretrain/PyTorch/checkpoint.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 6
id: 91 unit: def parse_data() file: pretrain/PyTorch/dataprep/create_pretraining.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2
id: 92 unit: def split_by_type() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 1
id: 93 unit: def allreduce_fallback() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 8 LOC McCabe index: 8 number of parameters: 1
id: 94 unit: def warmup_linear_decay_exp() file: pretrain/PyTorch/optimization.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 5
id: 95 unit: def _read_tsv() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3
id: 96 unit: def _read_tsv() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3
id: 97 unit: def before_run() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2
id: 98 unit: def after_run() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3
id: 99 unit: def reduce() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 1
id: 100 unit: def get_lr() file: pretrain/PyTorch/optimization.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2
id: 101 unit: def get_dataloader() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 2
id: 102 unit: def copy_optimizer_params_to_model() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2
id: 103 unit: def metric_max_over_ground_truths() file: finetune/evaluate_squad.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3
id: 104 unit: def copy_optimizer_params_to_model() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2
id: 105 unit: def get_random_partition() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2
id: 106 unit: def __getstate__() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 1
id: 107 unit: def __init__() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3
id: 108 unit: def _get_batch_type_error() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2
id: 109 unit: def warmup_linear() file: pretrain/PyTorch/optimization.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2
id: 110 unit: def __init__() file: pretrain/PyTorch/optimization.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 6
id: 111 unit: def _make_hook() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2
id: 112 unit: def __init__() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 113 unit: def __init__() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 114 unit: def __init__() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 115 unit: def __init__() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 116 unit: def __init__() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 117 unit: def __init__() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5
id: 118 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2
id: 119 unit: def flat_dist_call() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3
id: 120 unit: def __setstate__() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2
id: 121 unit: def register_batch() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4
id: 122 unit: def get_effective_batch() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1
id: 123 unit: def warmup_linear() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2
id: 124 unit: def get_train_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 125 unit: def get_dev_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 126 unit: def get_dev_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 127 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 128 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 129 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 130 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 131 unit: def warmup_linear() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2
id: 132 unit: def get_timestamp() file: pretrain/PyTorch/benchmark.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1
id: 133 unit: def replace_path_placeholders() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2
id: 134 unit: def map_to_torch() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1
id: 135 unit: def map_to_torch_float() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1
id: 136 unit: def map_to_torch_half() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1
id: 137 unit: def __getitem__() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2
id: 138 unit: def info() file: pretrain/PyTorch/logger.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 4
id: 139 unit: def __init__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 4
id: 140 unit: def load() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1
id: 141 unit: def set_accumulation_step() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 142 unit: def get_dev_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 143 unit: def get_train_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 144 unit: def get_train_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 145 unit: def get_dev_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 146 unit: def accuracy() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 147 unit: def get_train_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 148 unit: def get_test_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 149 unit: def get_train_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 150 unit: def get_dev_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 151 unit: def get_test_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 152 unit: def get_train_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 153 unit: def get_dev_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 154 unit: def get_test_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 155 unit: def begin() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1
id: 156 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 157 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 158 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 159 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 160 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 161 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 162 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 163 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 164 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 165 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 166 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 167 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 168 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 169 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 170 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 171 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 172 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 173 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 174 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 175 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 176 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 177 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 178 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 179 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 180 unit: def __init__() file: pretrain/PyTorch/logger.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 181 unit: def save() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 182 unit: def to() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 183 unit: def __getstate__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1
id: 184 unit: def save() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2
id: 185 unit: def adjust_gradient_accumulation_steps() file: finetune/PyTorch/azureml_bert_util.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 186 unit: def get_train_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 187 unit: def get_dev_examples() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 188 unit: def get_labels() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 189 unit: def get_labels() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 190 unit: def get_labels() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 191 unit: def get_labels() file: finetune/PyTorch/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 192 unit: def get_train_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 193 unit: def get_dev_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 194 unit: def get_test_examples() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 195 unit: def get_labels() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 196 unit: def __init__() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 197 unit: def get_labels() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 198 unit: def get_labels() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 199 unit: def get_labels() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 200 unit: def get_labels() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 201 unit: def __init__() file: finetune/TensorFlow/run_classifier.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 202 unit: def exact_match_score() file: finetune/evaluate_squad.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 203 unit: def get_train_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 204 unit: def get_dev_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 205 unit: def get_test_examples() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 206 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 207 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 208 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 209 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 210 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 211 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 212 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 213 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 214 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 215 unit: def get_labels() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 216 unit: def simple_accuracy() file: finetune/run_classifier_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 217 unit: def __str__() file: finetune/run_squad_azureml.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 218 unit: def get_local_rank() file: pretrain/PyTorch/azureml_adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 219 unit: def get_global_size() file: pretrain/PyTorch/azureml_adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 220 unit: def get_local_size() file: pretrain/PyTorch/azureml_adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 221 unit: def get_world_size() file: pretrain/PyTorch/azureml_adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 222 unit: def __init__() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 223 unit: def get_name() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 224 unit: def get_token_file_type() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 225 unit: def get_model_file_type() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 226 unit: def get_learning_rate() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 227 unit: def get_warmup_proportion() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 228 unit: def get_total_training_steps() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 229 unit: def get_total_epoch_count() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 230 unit: def get_num_workers() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 231 unit: def get_validation_folder_path() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 232 unit: def get_wiki_pretrain_dataset_path() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 233 unit: def get_decay_rate() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 234 unit: def get_decay_step() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 235 unit: def get_model_config() file: pretrain/PyTorch/configuration.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 236 unit: def __len__() file: pretrain/PyTorch/dataset.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 237 unit: def enable_need_reduction() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 238 unit: def disable_need_reduction() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 239 unit: def needs_refresh() file: pretrain/PyTorch/distributed_apex.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 240 unit: def error() file: pretrain/PyTorch/logger.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 241 unit: def load() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 242 unit: def move_batch() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 243 unit: def eval() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 244 unit: def train() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 245 unit: def save_bert() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 246 unit: def half() file: pretrain/PyTorch/models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 247 unit: def get_values() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 248 unit: def __len__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 249 unit: def __setstate__() file: pretrain/PyTorch/sources.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 250 unit: def mask() file: pretrain/PyTorch/text.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 251 unit: def torch_long() file: pretrain/PyTorch/text.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 252 unit: def str2bool() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 1
id: 253 unit: def check_write_log() file: pretrain/PyTorch/train.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 0
id: 254 unit: def get_sample_writer() file: pretrain/PyTorch/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
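
The size (LOC), McCabe index, and parameter-count columns above can be approximated for any of the listed Python files with the standard-library ast module. The sketch below is a minimal illustration under simplified counting rules, not the tool that produced this listing; the helper name unit_metrics and the chosen set of decision-point node types are assumptions, and the file path in the usage example is simply taken from the table.

    # Minimal sketch: approximate per-unit size (LOC), a McCabe-style
    # complexity estimate, and parameter count for one Python source file.
    # Counting rules are simplified assumptions (requires Python 3.8+ for
    # end_lineno) and will not match the report's tool exactly.
    import ast

    # Node types counted as decision points in this rough estimate.
    DECISION_NODES = (ast.If, ast.For, ast.While, ast.IfExp,
                      ast.ExceptHandler, ast.BoolOp, ast.comprehension)

    def unit_metrics(path):
        """Yield (name, loc, complexity, n_params) for each function in a file."""
        with open(path, encoding="utf-8") as handle:
            tree = ast.parse(handle.read())
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                loc = node.end_lineno - node.lineno + 1  # lines spanned by the unit
                complexity = 1 + sum(                    # 1 + number of decision points
                    isinstance(child, DECISION_NODES)
                    for child in ast.walk(node))
                yield node.name, loc, complexity, len(node.args.args)

    if __name__ == "__main__":
        # Usage example against one file from the listing above.
        for name, loc, cc, params in unit_metrics("finetune/run_squad_azureml.py"):
            print(f"unit: def {name}() size: {loc} LOC "
                  f"McCabe index: {cc} number of parameters: {params}")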