| id | unit | file | start line | end line | size (LOC) | McCabe index | parameters |
|---:|---|---|---:|---:|---:|---:|---:|
| 1 | fn main() | launcher/src/main.rs | 2034 | 2358 | 275 | 30 | 0 |
| 2 | fn shard_manager() | launcher/src/main.rs | 923 | 1249 | 246 | 42 | 33 |
| 3 | def concatenate() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 220 | 21 | 2 |
| 4 | def prepare_for_prefill() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 184 | 32 | 1 |
| 5 | def concatenate() | backends/gaudi/server/text_generation_server/models/seq2seq_lm.py | 0 | 0 | 178 | 32 | 2 |
| 6 | def concatenate() | server/text_generation_server/models/seq2seq_lm.py | 0 | 0 | 178 | 32 | 2 |
| 7 | def generate_token() | server/text_generation_server/models/mamba.py | 0 | 0 | 165 | 16 | 2 |
| 8 | fn spawn_webserver() | launcher/src/main.rs | 1827 | 2009 | 164 | 21 | 7 |
| 9 | def filter() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 157 | 12 | 2 |
| 10 | def concatenate() | server/text_generation_server/models/causal_lm.py | 0 | 0 | 153 | 30 | 2 |
| 11 | def filter() | backends/gaudi/server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 145 | 9 | 2 |
| 12 | fn image_tokens() | router/src/validation.rs | 609 | 775 | 144 | 15 | 4 |
| 13 | def cuda_graph_warmup() | server/text_generation_server/models/vlm_causal_lm.py | 0 | 0 | 135 | 11 | 4 |
| 14 | def cuda_graph_warmup() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 130 | 11 | 4 |
| 15 | def static() | backends/gaudi/server/text_generation_server/layers/rotary.py | 0 | 0 | 119 | 18 | 5 |
| 16 | fn next_batch() | backends/v2/src/queue.rs | 188 | 346 | 114 | 17 | 5 |
| 17 | def static() | server/text_generation_server/layers/rotary.py | 0 | 0 | 111 | 15 | 5 |
| 18 | fn download_convert_model() | launcher/src/main.rs | 1430 | 1572 | 108 | 17 | 7 |
| 19 | fn spawn_shards() | launcher/src/main.rs | 1575 | 1685 | 108 | 5 | 14 |
| 20 | def warmup_hpu_graph() | backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py | 0 | 0 | 101 | 14 | 2 |
| 21 | def warmup_hpu_graph() | backends/gaudi/server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 100 | 14 | 2 |
| 22 | def concatenate() | server/text_generation_server/models/mamba.py | 0 | 0 | 95 | 12 | 2 |
| 23 | fn test_chat_stream_tool_get_weather() | router/src/chat.rs | 596 | 699 | 94 | 8 | 0 |
| 24 | def filter() | server/text_generation_server/models/idefics_causal_lm.py | 0 | 0 | 92 | 14 | 2 |
| 25 | fn executor_status_looper() | backends/trtllm/src/looper.rs | 65 | 168 | 90 | 13 | 4 |
| 26 | def filter() | backends/gaudi/server/text_generation_server/models/seq2seq_lm.py | 0 | 0 | 90 | 10 | 2 |
| 27 | def filter() | server/text_generation_server/models/seq2seq_lm.py | 0 | 0 | 90 | 10 | 2 |
| 28 | fn try_into_generate() | router/src/lib.rs | 941 | 1036 | 89 | 4 | 2 |
| 29 | def prefill() | backends/neuron/server/text_generation_server/generator.py | 0 | 0 | 88 | 27 | 2 |
| 30 | def get_weights_row() | server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 86 | 22 | 3 |
| 31 | def filter() | server/text_generation_server/models/causal_lm.py | 0 | 0 | 82 | 11 | 2 |
| 32 | fn main() | benchmark/src/main.rs | 108 | 208 | 81 | 4 | 0 |
| 33 | def find_params() | backends/gaudi/server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 78 | 21 | 3 |
| 34 | def find_params() | server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 78 | 21 | 3 |
| 35 | fn send_responses() | backends/v2/src/backend.rs | 386 | 474 | 76 | 5 | 2 |
| 36 | fn send_responses() | backends/v3/src/backend.rs | 448 | 536 | 76 | 5 | 2 |
| 37 | def main() | load_tests/benchmarks.py | 0 | 0 | 75 | 10 | 2 |
| 38 | def forward() | server/text_generation_server/models/custom_modeling/idefics_modeling.py | 0 | 0 | 75 | 15 | 3 |
| 39 | fn build_backend() | backends/trtllm/build.rs | 87 | 176 | 74 | 16 | 3 |
| 40 | def get_weights_row() | backends/gaudi/server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 74 | 19 | 3 |
| 41 | def forward() | server/text_generation_server/layers/layernorm.py | 0 | 0 | 70 | 12 | 3 |
| 42 | fn invariants_hold_on_many_insertions() | backends/v3/src/radix.rs | 928 | 1012 | 68 | 7 | 1 |
| 43 | fn create_event_from_stream_token() | router/src/chat.rs | 67 | 138 | 68 | 4 | 7 |
| 44 | def _get_quantizer_config() | backends/gaudi/server/text_generation_server/utils/quantization.py | 0 | 0 | 67 | 14 | 2 |
| 45 | def _get_quantizer_config() | server/text_generation_server/utils/quantization.py | 0 | 0 | 67 | 14 | 2 |
| 46 | def __init__() | server/text_generation_server/models/custom_modeling/mpt_modeling.py | 0 | 0 | 67 | 18 | 4 |
| 47 | def get_weights() | server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 67 | 15 | 3 |
| 48 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 66 | 6 | 4 |
| 49 | fn test_chat_stream_tool_no_tool_simple() | router/src/chat.rs | 457 | 527 | 66 | 4 | 0 |
| 50 | fn test_chat_stream_usage() | router/src/chat.rs | 388 | 454 | 65 | 2 | 0 |
| 51 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py | 0 | 0 | 64 | 1 | 5 |
| 52 | def image_text_replacement() | backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py | 0 | 0 | 64 | 11 | 3 |
| 53 | fn resolve_attention() | launcher/src/main.rs | 131 | 200 | 64 | 20 | 2 |
| 54 | def __init__() | server/text_generation_server/models/custom_modeling/mllama.py | 0 | 0 | 64 | 1 | 5 |
| 55 | def image_text_replacement() | server/text_generation_server/models/vlm_causal_lm.py | 0 | 0 | 64 | 11 | 3 |
| 56 | def filter() | server/text_generation_server/models/mamba.py | 0 | 0 | 64 | 5 | 2 |
| 57 | fn new() | backends/llamacpp/src/backend.rs | 219 | 282 | 62 | 5 | 1 |
| 58 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 62 | 5 | 5 |
| 59 | fn allocate() | backends/v3/src/radix.rs | 82 | 155 | 62 | 6 | 3 |
| 60 | def __init__() | server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 62 | 4 | 5 |
| 61 | def __init__() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 62 | 5 | 5 |
| 62 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py | 0 | 0 | 61 | 6 | 6 |
| 63 | def _rope_scaling_validation() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py | 0 | 0 | 60 | 17 | 1 |
| 64 | def _rope_scaling_validation() | server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py | 0 | 0 | 60 | 17 | 1 |
| 65 | def get_multi_weights_col() | server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 60 | 16 | 4 |
| 66 | def make_tokenizer_optional() | backends/gaudi/server/text_generation_server/utils/tokens.py | 0 | 0 | 59 | 12 | 1 |
| 67 | def forward() | backends/gaudi/server/text_generation_server/layers/moe/fp8.py | 0 | 0 | 59 | 8 | 4 |
| 68 | fn test_chat_stream_tool_no_tool_empty() | router/src/chat.rs | 530 | 593 | 59 | 4 | 0 |
| 69 | def get_position_ids() | server/text_generation_server/models/transformers_flash_vlm.py | 0 | 0 | 59 | 9 | 3 |
| 70 | def forward() | server/text_generation_server/layers/fp8.py | 0 | 0 | 58 | 18 | 2 |
| 71 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 57 | 4 | 6 |
| 72 | def prepare_for_decode() | backends/gaudi/server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 57 | 7 | 5 |
| 73 | def get_c4() | backends/gaudi/server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 57 | 8 | 5 |
| 74 | def __init__() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 57 | 6 | 4 |
| 75 | def get_c4() | server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 57 | 8 | 5 |
| 76 | def warmup_hpu_graph() | backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py | 0 | 0 | 56 | 8 | 2 |
| 77 | def get_multi_weights_col() | server/text_generation_server/layers/fp8.py | 0 | 0 | 56 | 15 | 4 |
| 78 | fn tokenizer_worker() | router/src/validation.rs | 478 | 533 | 55 | 5 | 4 |
| 79 | def __init__() | server/text_generation_server/layers/marlin/marlin.py | 0 | 0 | 55 | 10 | 4 |
| 80 | def get_multi_weights_col() | backends/gaudi/server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 54 | 15 | 4 |
| 81 | def prepare_for_prefill() | server/text_generation_server/models/vlm_causal_lm.py | 0 | 0 | 54 | 13 | 1 |
| 82 | fn allocate() | backends/v3/src/block_allocator.rs | 160 | 216 | 53 | 8 | 3 |
| 83 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py | 0 | 0 | 51 | 5 | 6 |
| 84 | def get_multi_weights_col() | backends/gaudi/server/text_generation_server/layers/fp8.py | 0 | 0 | 51 | 13 | 4 |
| 85 | def get_multi_weights() | backends/gaudi/server/text_generation_server/layers/fp8.py | 0 | 0 | 51 | 13 | 4 |
| 86 | def get_weights() | backends/gaudi/server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 51 | 12 | 3 |
| 87 | def pack() | backends/gaudi/server/text_generation_server/layers/gptq/hpu.py | 0 | 0 | 51 | 10 | 5 |
| 88 | def pack() | server/text_generation_server/layers/gptq/ipex.py | 0 | 0 | 51 | 10 | 5 |
| 89 | def pack() | server/text_generation_server/layers/gptq/triton.py | 0 | 0 | 51 | 10 | 5 |
| 90 | def gather_vision_embeds() | backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py | 0 | 0 | 50 | 12 | 1 |
| 91 | def gather_vision_embeds() | server/text_generation_server/models/vlm_causal_lm.py | 0 | 0 | 50 | 12 | 1 |
| 92 | def get_multi_weights() | backends/gaudi/server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 49 | 15 | 4 |
| 93 | fn new() | backends/llamacpp/src/backend.rs | 331 | 379 | 48 | 5 | 1 |
| 94 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 48 | 4 | 5 |
| 95 | def warmup_decode() | backends/gaudi/server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 48 | 4 | 4 |
| 96 | fn from() | launcher/src/main.rs | 355 | 402 | 48 | 3 | 1 |
| 97 | def initialize_torch_distributed() | server/text_generation_server/utils/dist.py | 0 | 0 | 48 | 7 | 0 |
| 98 | def __init__() | server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 48 | 4 | 5 |
| 99 | def attn_fwd() | server/text_generation_server/layers/attention/flash_attn_triton.py | 0 | 0 | 48 | 1 | 0 |
| 100 | def load_attention() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 47 | 3 | 4 |
| 101 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py | 0 | 0 | 47 | 4 | 5 |
| 102 | fn test_chat_stream() | router/src/chat.rs | 338 | 385 | 47 | 2 | 0 |
| 103 | def __init__() | server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py | 0 | 0 | 47 | 4 | 5 |
| 104 | def load_attention() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 47 | 3 | 4 |
| 105 | def __init__() | server/text_generation_server/models/custom_modeling/opt_modeling.py | 0 | 0 | 47 | 7 | 4 |
| 106 | def __init__() | server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py | 0 | 0 | 47 | 4 | 5 |
| 107 | def __init__() | server/text_generation_server/models/custom_modeling/mpt_modeling.py | 0 | 0 | 46 | 9 | 5 |
| 108 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py | 0 | 0 | 45 | 4 | 5 |
| 109 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py | 0 | 0 | 45 | 4 | 5 |
| 110 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py | 0 | 0 | 45 | 1 | 6 |
| 111 | def __init__() | backends/gaudi/server/text_generation_server/layers/mlp.py | 0 | 0 | 45 | 7 | 4 |
| 112 | def check_cli() | update_doc.py | 0 | 0 | 45 | 8 | 1 |
| 113 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py | 0 | 0 | 45 | 4 | 5 |
| 114 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py | 0 | 0 | 45 | 4 | 5 |
| 115 | def __init__() | server/text_generation_server/models/custom_modeling/idefics_modeling.py | 0 | 0 | 45 | 4 | 3 |
| 116 | def __init__() | server/text_generation_server/models/custom_modeling/mllama.py | 0 | 0 | 45 | 1 | 6 |
| 117 | def __init__() | server/text_generation_server/layers/mlp.py | 0 | 0 | 45 | 7 | 4 |
| 118 | def __init__() | server/text_generation_server/layers/gptq/exllama.py | 0 | 0 | 45 | 12 | 3 |
| 119 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 44 | 5 | 6 |
| 120 | def get_window_index() | backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 44 | 2 | 2 |
| 121 | def get_c4_new() | backends/gaudi/server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 44 | 5 | 5 |
| 122 | fn prepare_input() | router/src/validation.rs | 788 | 836 | 44 | 5 | 6 |
| 123 | def get_window_index() | server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 44 | 2 | 2 |
| 124 | def get_c4_new() | server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 44 | 5 | 5 |
| 125 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py | 0 | 0 | 43 | 1 | 0 |
| 126 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py | 0 | 0 | 43 | 1 | 0 |
| 127 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 43 | 4 | 4 |
| 128 | fn default_entry() | backends/v3/src/queue.rs | 566 | 609 | 43 | 1 | 0 |
| 129 | def check_openapi() | update_doc.py | 0 | 0 | 43 | 6 | 1 |
| 130 | def __init__() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 43 | 5 | 5 |
| 131 | def __init__() | server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py | 0 | 0 | 43 | 1 | 0 |
| 132 | def __init__() | server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py | 0 | 0 | 43 | 1 | 0 |
| 133 | def __init__() | server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 43 | 4 | 4 |
| 134 | fn default_entry() | backends/v2/src/queue.rs | 407 | 449 | 42 | 1 | 0 |
| 135 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 42 | 2 | 5 |
| 136 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 42 | 5 | 4 |
| 137 | def get_multi_weights() | backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py | 0 | 0 | 42 | 11 | 4 |
| 138 | def load() | backends/gaudi/server/text_generation_server/layers/rotary.py | 0 | 0 | 42 | 5 | 4 |
| 139 | fn new() | router/src/usage_stats.rs | 188 | 235 | 42 | 3 | 0 |
| 140 | def load_col() | server/text_generation_server/models/custom_modeling/mpt_modeling.py | 0 | 0 | 42 | 4 | 4 |
| 141 | def __init__() | server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 42 | 2 | 5 |
| 142 | def __init__() | server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 42 | 5 | 4 |
| 143 | def get_multi_weights_col() | server/text_generation_server/layers/compressed_tensors/w8an_fp.py | 0 | 0 | 42 | 14 | 4 |
| 144 | def load() | server/text_generation_server/layers/rotary.py | 0 | 0 | 42 | 5 | 4 |
| 145 | fn post_process_decoded_token() | backends/trtllm/src/looper.rs | 170 | 216 | 41 | 2 | 3 |
| 146 | fn get_best_fit() | router/src/config.rs | 173 | 218 | 41 | 5 | 4 |
| 147 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py | 0 | 0 | 40 | 5 | 4 |
| 148 | def get_multi_weights_col() | backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py | 0 | 0 | 40 | 11 | 4 |
| 149 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py | 0 | 0 | 40 | 5 | 4 |
| 150 | def forward() | server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py | 0 | 0 | 40 | 3 | 2 |
| 151 | def get_attention_mask() | server/text_generation_server/models/transformers_flash_vlm.py | 0 | 0 | 40 | 2 | 3 |
| 152 | def get_multi_weights_col() | server/text_generation_server/layers/compressed_tensors/wna16_int.py | 0 | 0 | 40 | 9 | 4 |
| 153 | def parse_cmdline_and_set_env() | backends/neuron/server/text_generation_server/tgi_env.py | 0 | 0 | 39 | 10 | 1 |
| 154 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 39 | 11 | 5 |
| 155 | def forward() | backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py | 0 | 0 | 39 | 3 | 3 |
| 156 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py | 0 | 0 | 39 | 7 | 4 |
| 157 | def __init__() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | 0 | 39 | 11 | 5 |
| 158 | def forward() | server/text_generation_server/models/custom_modeling/mamba_modeling.py | 0 | 0 | 39 | 3 | 3 |
| 159 | def __init__() | server/text_generation_server/models/custom_modeling/qwen2_vl.py | 0 | 0 | 39 | 7 | 4 |
| 160 | def get_linear() | server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 39 | 6 | 2 |
| 161 | def matmul248_kernel_config_pruner() | server/text_generation_server/layers/gptq/custom_autotune.py | 0 | 0 | 39 | 3 | 2 |
| 162 | def _load_qkv() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py | 0 | 0 | 38 | 3 | 5 |
| 163 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py | 0 | 0 | 38 | 2 | 5 |
| 164 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py | 0 | 0 | 38 | 2 | 4 |
| 165 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py | 0 | 0 | 38 | 2 | 5 |
| 166 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py | 0 | 0 | 38 | 4 | 5 |
| 167 | def get_weights_row() | backends/gaudi/server/text_generation_server/layers/fp8.py | 0 | 0 | 38 | 5 | 3 |
| 168 | def image_attention_mask_for_packed_input_ids() | server/text_generation_server/models/custom_modeling/idefics_processing.py | 0 | 0 | 38 | 11 | 2 |
| 169 | def _load_qkv() | server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py | 0 | 0 | 38 | 3 | 5 |
| 170 | def __init__() | server/text_generation_server/models/custom_modeling/qwen2_vl.py | 0 | 0 | 38 | 2 | 5 |
| 171 | def __init__() | server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py | 0 | 0 | 38 | 4 | 5 |
| 172 | fn fetch_image() | router/src/validation.rs | 565 | 607 | 37 | 7 | 1 |
| 173 | fn fmt() | launcher/src/main.rs | 450 | 486 | 37 | 1 | 2 |
| 174 | def get_multi_weights_col() | server/text_generation_server/layers/marlin/gptq.py | 0 | 0 | 37 | 9 | 4 |
| 175 | def forward() | server/text_generation_server/layers/moe/unquantized.py | 0 | 0 | 37 | 3 | 4 |
| 176 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py | 0 | 0 | 36 | 6 | 6 |
| 177 | def check_supported_models() | update_doc.py | 0 | 0 | 36 | 8 | 1 |
| 178 | fn new() | router/src/usage_stats.rs | 249 | 290 | 36 | 4 | 0 |
| 179 | fn find_num_shards() | launcher/src/main.rs | 1370 | 1408 | 36 | 4 | 2 |
| 180 | def __init__() | server/text_generation_server/models/custom_modeling/flash_neox_modeling.py | 0 | 0 | 36 | 2 | 4 |
| 181 | def get_weights_row() | server/text_generation_server/layers/marlin/gptq.py | 0 | 0 | 36 | 8 | 3 |
| 182 | def get_weights_row() | server/text_generation_server/layers/compressed_tensors/wna16_int.py | 0 | 0 | 36 | 8 | 3 |
| 183 | def forward() | server/text_generation_server/layers/tensor_parallel.py | 0 | 0 | 36 | 9 | 2 |
| 184 | fn to_ggml_type() | backends/llamacpp/src/backend.rs | 89 | 123 | 35 | 1 | 1 |
| 185 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py | 0 | 0 | 35 | 5 | 6 |
| 186 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py | 0 | 0 | 35 | 5 | 6 |
| 187 | def get_weights() | backends/gaudi/server/text_generation_server/layers/fp8.py | 0 | 0 | 35 | 5 | 3 |
| 188 | fn free() | backends/v3/src/radix.rs | 157 | 208 | 35 | 5 | 3 |
| 189 | fn insert_() | backends/v3/src/radix.rs | 423 | 471 | 35 | 5 | 4 |
| 190 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 34 | 3 | 4 |
| 191 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py | 0 | 0 | 34 | 4 | 4 |
| 192 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py | 0 | 0 | 34 | 3 | 4 |
| 193 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py | 0 | 0 | 34 | 5 | 4 |
| 194 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 34 | 6 | 4 |
| 195 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 34 | 7 | 4 |
| 196 | fn get_config() | launcher/src/main.rs | 91 | 129 | 34 | 5 | 2 |
| 197 | def _attn() | server/text_generation_server/models/custom_modeling/neox_modeling.py | 0 | 0 | 34 | 5 | 6 |
| 198 | def __init__() | server/text_generation_server/models/custom_modeling/bloom_modeling.py | 0 | 0 | 34 | 3 | 4 |
| 199 | def apply_rotary_emb_qkv() | server/text_generation_server/models/custom_modeling/phi_modeling.py | 0 | 0 | 34 | 2 | 3 |
| 200 | def __init__() | server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py | 0 | 0 | 34 | 5 | 5 |
| 201 | def __init__() | server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 34 | 6 | 4 |
| 202 | def __init__() | server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py | 0 | 0 | 34 | 5 | 5 |
| 203 | def __init__() | server/text_generation_server/models/custom_modeling/qwen2_5_vl.py | 0 | 0 | 34 | 7 | 4 |
| 204 | def __init__() | server/text_generation_server/models/custom_modeling/idefics_modeling.py | 0 | 0 | 34 | 3 | 4 |
| 205 | def cuda_graph_warmup() | server/text_generation_server/models/mamba.py | 0 | 0 | 34 | 1 | 2 |
| 206 | def from_pretrained() | backends/neuron/server/text_generation_server/generator.py | 0 | 0 | 33 | 3 | 3 |
| 207 | def load_text_model() | backends/gaudi/server/text_generation_server/models/custom_modeling/vlm.py | 0 | 0 | 33 | 8 | 4 |
| 208 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py | 0 | 0 | 33 | 2 | 4 |
| 209 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py | 0 | 0 | 33 | 7 | 4 |
| 210 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py | 0 | 0 | 33 | 4 | 4 |
| 211 | def encode_images() | backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py | 0 | 0 | 33 | 6 | 2 |
| 212 | def __init__() | backends/gaudi/server/text_generation_server/layers/mlp.py | 0 | 0 | 33 | 3 | 4 |
| 213 | fn vertex_deserialization() | router/src/vertex.rs | 159 | 193 | 33 | 1 | 0 |
| 214 | def load_text_model() | server/text_generation_server/models/custom_modeling/vlm.py | 0 | 0 | 33 | 8 | 4 |
| 215 | def __init__() | server/text_generation_server/models/custom_modeling/idefics3.py | 0 | 0 | 33 | 7 | 4 |
| 216 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py | 0 | 0 | 33 | 2 | 5 |
| 217 | def __init__() | server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py | 0 | 0 | 33 | 4 | 4 |
| 218 | def tunableop_warmup() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 33 | 1 | 3 |
| 219 | def encode_images() | server/text_generation_server/models/vlm_causal_lm.py | 0 | 0 | 33 | 6 | 2 |
| 220 | def get_multi_weights_col() | server/text_generation_server/layers/marlin/marlin.py | 0 | 0 | 33 | 9 | 4 |
| 221 | def forward() | server/text_generation_server/layers/compressed_tensors/w8a8_int.py | 0 | 0 | 33 | 5 | 2 |
| 222 | def __init__() | server/text_generation_server/layers/mlp.py | 0 | 0 | 33 | 3 | 4 |
| 223 | def download_and_unload_peft() | backends/gaudi/server/text_generation_server/utils/peft.py | 0 | 0 | 32 | 2 | 3 |
| 224 | def filter() | backends/gaudi/server/text_generation_server/utils/tokens.py | 0 | 0 | 32 | 11 | 2 |
| 225 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py | 0 | 0 | 32 | 1 | 4 |
| 226 | def _load_experts_quantized() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 32 | 4 | 4 |
| 227 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py | 0 | 0 | 32 | 2 | 4 |
| 228 | def __init__() | backends/gaudi/server/text_generation_server/layers/medusa.py | 0 | 0 | 32 | 5 | 4 |
| 229 | def _update_cos_sin_cache() | backends/gaudi/server/text_generation_server/layers/rotary.py | 0 | 0 | 32 | 4 | 4 |
| 230 | fn allocator_frees_partially_overlapping_prefills() | backends/v3/src/radix.rs | 727 | 766 | 32 | 1 | 0 |
| 231 | fn new() | router/src/usage_stats.rs | 307 | 344 | 32 | 3 | 0 |
| 232 | fn f16_flop() | launcher/src/main.rs | 1750 | 1781 | 32 | 1 | 1 |
| 233 | def get_mlp_weights() | server/text_generation_server/utils/adapter.py | 0 | 0 | 32 | 11 | 2 |
| 234 | def download_and_unload_peft() | server/text_generation_server/utils/peft.py | 0 | 0 | 32 | 2 | 3 |
| 235 | def filter() | server/text_generation_server/utils/tokens.py | 0 | 0 | 32 | 11 | 2 |
| 236 | def __init__() | server/text_generation_server/models/custom_modeling/idefics_vision.py | 0 | 0 | 32 | 3 | 4 |
| 237 | def __init__() | server/text_generation_server/models/custom_modeling/neox_modeling.py | 0 | 0 | 32 | 2 | 4 |
| 238 | def __init__() | server/text_generation_server/models/custom_modeling/t5_modeling.py | 0 | 0 | 32 | 2 | 3 |
| 239 | def _load_experts_quantized() | server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 32 | 4 | 4 |
| 240 | def forward() | server/text_generation_server/layers/linear.py | 0 | 0 | 32 | 12 | 2 |
| 241 | def get_weights() | server/text_generation_server/layers/fp8.py | 0 | 0 | 32 | 6 | 3 |
| 242 | def get_weights_row() | server/text_generation_server/layers/fp8.py | 0 | 0 | 32 | 6 | 3 |
| 243 | def __init__() | server/text_generation_server/layers/medusa.py | 0 | 0 | 32 | 5 | 4 |
| 244 | def _update_cos_sin_cache() | server/text_generation_server/layers/rotary.py | 0 | 0 | 32 | 4 | 4 |
| 245 | def matmul248() | server/text_generation_server/layers/gptq/triton.py | 0 | 0 | 32 | 1 | 7 |
| 246 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py | 0 | 0 | 31 | 4 | 4 |
| 247 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py | 0 | 0 | 31 | 3 | 4 |
| 248 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py | 0 | 0 | 31 | 2 | 5 |
| 249 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py | 0 | 0 | 31 | 2 | 5 |
| 250 | fn encoding_to_tokens() | router/src/server.rs | 71 | 101 | 31 | 2 | 2 |
| 251 | def __init__() | server/text_generation_server/models/custom_modeling/idefics_config.py | 0 | 0 | 31 | 1 | 0 |
| 252 | def fetch_images() | server/text_generation_server/models/custom_modeling/idefics_image_processing.py | 0 | 0 | 31 | 8 | 3 |
| 253 | def get_weights_row() | server/text_generation_server/layers/marlin/marlin.py | 0 | 0 | 31 | 6 | 3 |
| 254 | def __init__() | server/text_generation_server/layers/gptq/ipex.py | 0 | 0 | 31 | 3 | 8 |
| 255 | def run() | server/text_generation_server/layers/gptq/custom_autotune.py | 0 | 0 | 31 | 9 | 3 |
| 256 | def forward() | server/text_generation_server/layers/layernorm.py | 0 | 0 | 31 | 4 | 3 |
| 257 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 30 | 8 | 5 |
| 258 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py | 0 | 0 | 30 | 2 | 4 |
| 259 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py | 0 | 0 | 30 | 2 | 4 |
| 260 | def build_alibi_tensor() | backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py | 0 | 0 | 30 | 3 | 2 |
| 261 | def forward() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py | 0 | 0 | 30 | 6 | 3 |
| 262 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py | 0 | 0 | 30 | 1 | 0 |
| 263 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 30 | 1 | 4 |
| 264 | def __init__() | backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py | 0 | 0 | 30 | 8 | 3 |
| 265 | def forward() | backends/gaudi/server/text_generation_server/layers/tensor_parallel.py | 0 | 0 | 30 | 7 | 2 |
| 266 | def add_batch() | backends/gaudi/server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 30 | 7 | 3 |
| 267 | fn select_best_resolution() | router/src/config.rs | 26 | 59 | 30 | 6 | 4 |
| 268 | def __init__() | server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py | 0 | 0 | 30 | 8 | 5 |
| 269 | def __init__() | server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py | 0 | 0 | 30 | 2 | 4 |
| 270 | def __init__() | server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py | 0 | 0 | 30 | 2 | 4 |
| 271 | def build_alibi_tensor() | server/text_generation_server/models/custom_modeling/bloom_modeling.py | 0 | 0 | 30 | 3 | 2 |
| 272 | def __init__() | server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py | 0 | 0 | 30 | 1 | 0 |
| 273 | def __init__() | server/text_generation_server/models/custom_modeling/gemma3/configuration_gemma3.py | 0 | 0 | 30 | 1 | 0 |
| 274 | def __init__() | server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 30 | 1 | 4 |
| 275 | def init_cpu_threads_env() | server/text_generation_server/models/flash_causal_lm.py | 0 | 0 | 30 | 5 | 2 |
| 276 | def __init__() | server/text_generation_server/layers/compressed_tensors/loader.py | 0 | 0 | 30 | 8 | 3 |
| 277 | def add_batch() | server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 30 | 7 | 3 |
| 278 | def forward() | server/text_generation_server/layers/moe/gptq_marlin.py | 0 | 0 | 30 | 4 | 4 |
| 279 | def _attn_fwd_inner() | server/text_generation_server/layers/attention/flash_attn_triton.py | 0 | 0 | 30 | 1 | 0 |
| 280 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py | 0 | 0 | 29 | 6 | 4 |
| 281 | def forward() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py | 0 | 0 | 29 | 8 | 2 |
| 282 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py | 0 | 0 | 29 | 2 | 4 |
| 283 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py | 0 | 0 | 29 | 4 | 4 |
| 284 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py | 0 | 0 | 29 | 4 | 4 |
| 285 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py | 0 | 0 | 29 | 1 | 4 |
| 286 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py | 0 | 0 | 29 | 2 | 4 |
| 287 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py | 0 | 0 | 29 | 4 | 4 |
| 288 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py | 0 | 0 | 29 | 2 | 5 |
| 289 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py | 0 | 0 | 29 | 2 | 4 |
| 290 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py | 0 | 0 | 29 | 2 | 4 |
| 291 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 29 | 2 | 4 |
| 292 | def __init__() | server/text_generation_server/models/custom_modeling/llava_next.py | 0 | 0 | 29 | 6 | 4 |
| 293 | def __init__() | server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py | 0 | 0 | 29 | 4 | 4 |
| 294 | def __init__() | server/text_generation_server/models/custom_modeling/siglip.py | 0 | 0 | 29 | 1 | 4 |
| 295 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py | 0 | 0 | 29 | 4 | 4 |
| 296 | def get_weights() | server/text_generation_server/layers/marlin/gptq.py | 0 | 0 | 29 | 4 | 3 |
| 297 | fn statis_spans Vec>() | benchmark/src/app.rs | 485 | 513 | 29 | 1 | 2 |
| 298 | fn ensure_paths_exist, PP: AsRef>() | backends/trtllm/src/looper.rs | 218 | 255 | 28 | 3 | 2 |
| 299 | fn main() | backends/trtllm/build.rs | 206 | 250 | 28 | 1 | 0 |
| 300 | fn main() | backends/client/build.rs | 3 | 35 | 28 | 1 | 0 |
| 301 | def get_mlp_weights() | backends/gaudi/server/text_generation_server/utils/adapter.py | 0 | 0 | 28 | 9 | 2 |
| 302 | def filter() | backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py | 0 | 0 | 28 | 6 | 2 |
| 303 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py | 0 | 0 | 28 | 2 | 4 |
| 304 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py | 0 | 0 | 28 | 5 | 3 |
| 305 | def step() | backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py | 0 | 0 | 28 | 1 | 4 |
| 306 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py | 0 | 0 | 28 | 2 | 4 |
| 307 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py | 0 | 0 | 28 | 2 | 4 |
| 308 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py | 0 | 0 | 28 | 2 | 6 |
| 309 | def get_weights() | backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py | 0 | 0 | 28 | 3 | 3 |
| 310 | fn new() | backends/v3/src/queue.rs | 195 | 223 | 28 | 2 | 7 |
| 311 | fn test_llava_next_features() | router/src/config.rs | 436 | 464 | 28 | 1 | 0 |
| 312 | fn vram_maximum() | launcher/src/main.rs | 62 | 89 | 28 | 2 | 3 |
| 313 | fn log_lines() | launcher/src/main.rs | 1340 | 1368 | 28 | 7 | 1 |
| 314 | def __init__() | server/text_generation_server/models/custom_modeling/flash_rw_modeling.py | 0 | 0 | 28 | 4 | 4 |
| 315 | def __init__() | server/text_generation_server/models/custom_modeling/clip.py | 0 | 0 | 28 | 2 | 4 |
| 316 | def step() | server/text_generation_server/models/custom_modeling/mamba_modeling.py | 0 | 0 | 28 | 1 | 4 |
| 317 | def __init__() | server/text_generation_server/models/custom_modeling/flash_phi_modeling.py | 0 | 0 | 28 | 2 | 4 |
| 318 | def __init__() | server/text_generation_server/models/custom_modeling/siglip.py | 0 | 0 | 28 | 2 | 4 |
| 319 | fn get_library_architecture() | backends/trtllm/build.rs | 56 | 85 | 27 | 3 | 0 |
| 320 | def main() | backends/gaudi/server/text_generation_server/tgi_service.py | 0 | 0 | 27 | 1 | 1 |
| 321 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py | 0 | 0 | 27 | 1 | 4 |
| 322 | def _load_experts() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py | 0 | 0 | 27 | 4 | 4 |
| 323 | def forward() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 27 | 7 | 2 |
| 324 | def load_weights_pre_hook() | backends/gaudi/server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 27 | 11 | 3 |
| 325 | def forward() | backends/gaudi/server/text_generation_server/layers/moe/__init__.py | 0 | 0 | 27 | 4 | 4 |
| 326 | fn get_tool_call_content() | router/src/chat.rs | 309 | 335 | 27 | 2 | 1 |
| 327 | def filter() | server/text_generation_server/models/mllama_causal_lm.py | 0 | 0 | 27 | 6 | 2 |
| 328 | def __init__() | server/text_generation_server/models/custom_modeling/clip.py | 0 | 0 | 27 | 1 | 4 |
| 329 | def _load_experts() | server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py | 0 | 0 | 27 | 4 | 4 |
| 330 | def __init__() | server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py | 0 | 0 | 27 | 5 | 4 |
| 331 | def update_model_kwargs_for_generation() | server/text_generation_server/models/custom_modeling/idefics_modeling.py | 0 | 0 | 27 | 8 | 3 |
| 332 | def forward() | server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py | 0 | 0 | 27 | 7 | 2 |
| 333 | def get_weights() | server/text_generation_server/layers/compressed_tensors/wna16_int.py | 0 | 0 | 27 | 4 | 3 |
| 334 | def load_weights_pre_hook() | server/text_generation_server/layers/gptq/quantize.py | 0 | 0 | 27 | 11 | 3 |
| 335 | def forward() | server/text_generation_server/layers/moe/__init__.py | 0 | 0 | 27 | 4 | 4 |
| 336 | fn create_backend_from_engine_folder() | backends/trtllm/src/lib.rs | 64 | 95 | 26 | 2 | 2 |
| 337 | fn build_ffi_layer() | backends/trtllm/build.rs | 178 | 204 | 26 | 1 | 2 |
| 338 | fn test_next_batch_token_budget() | backends/v2/src/queue.rs | 528 | 558 | 26 | 1 | 0 |
| 339 | def download_file() | backends/gaudi/server/text_generation_server/utils/hub.py | 0 | 0 | 26 | 5 | 3 |
| 340 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py | 0 | 0 | 26 | 4 | 4 |
| 341 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py | 0 | 0 | 26 | 2 | 4 |
| 342 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py | 0 | 0 | 26 | 3 | 5 |
| 343 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py | 0 | 0 | 26 | 2 | 4 |
| 344 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 26 | 2 | 4 |
| 345 | def __init__() | backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py | 0 | 0 | 26 | 3 | 4 |
| 346 | def concatenate() | backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py | 0 | 0 | 26 | 5 | 3 |
| 347 | def get_linear() | backends/gaudi/server/text_generation_server/layers/gptq/__init__.py | 0 | 0 | 26 | 3 | 2 |
| 348 | fn trie_get_returns_correct_blocks() | backends/v3/src/radix.rs | 816 | 847 | 26 | 1 | 0 |
| 349 | fn from() | router/src/lib.rs | 632 | 661 | 26 | 1 | 2 |
| 350 | def download_file() | server/text_generation_server/utils/hub.py | 0 | 0 | 26 | 5 | 3 |
| 351 | def __init__() | server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py | 0 | 0 | 26 | 4 | 4 |
| 352 | def __init__() | server/text_generation_server/models/custom_modeling/mpt_modeling.py | 0 | 0 | 26 | 3 | 4 |
| 353 | def forward() | server/text_generation_server/models/custom_modeling/flash_llama_modeling.py | 0 | | | | |
end line: 0 size: 26 LOC McCabe index: 8 number of parameters: 3 id: 354 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 26 LOC McCabe index: 2 number of parameters: 4 id: 355 unit: def __init__() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 26 LOC McCabe index: 1 number of parameters: 5 id: 356 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 26 LOC McCabe index: 2 number of parameters: 4 id: 357 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 26 LOC McCabe index: 2 number of parameters: 4 id: 358 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 26 LOC McCabe index: 3 number of parameters: 4 id: 359 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 26 LOC McCabe index: 1 number of parameters: 4 id: 360 unit: def concatenate() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 26 LOC McCabe index: 5 number of parameters: 2 id: 361 unit: def __init__() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 26 LOC McCabe index: 3 number of parameters: 4 id: 362 unit: fn test_next_batch_min_size() file: backends/v2/src/queue.rs start line: 476 end line: 505 size: 25 LOC McCabe index: 1 number of parameters: 0 id: 363 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 4 id: 364 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 5 number of parameters: 4 id: 365 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 3 id: 366 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 6 id: 367 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 6 id: 368 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 25 LOC McCabe index: 1 number of parameters: 6 id: 369 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 25 LOC McCabe index: 1 number of parameters: 4 id: 370 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 2 number of parameters: 4 id: 371 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 3 id: 372 unit: def __init__() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 4 id: 373 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 25 LOC McCabe index: 5 number of parameters: 2 id: 374 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 3 id: 375 unit: def print_loss() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 376 unit: def get_wikitext2() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 377 unit: def get_ptb() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 378 unit: def get_ptb_new() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 379 unit: fn encode_trait() file: router/src/lib.rs start line: 85 end line: 109 size: 25 LOC McCabe index: 1 number of parameters: 3 id: 380 unit: fn num_cuda_devices() file: launcher/src/main.rs start line: 1262 end line: 1288 size: 25 LOC McCabe index: 2 number of parameters: 0 id: 381 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 25 LOC McCabe index: 1 number of parameters: 4 id: 382 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 4 id: 383 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 2 number of parameters: 4 id: 384 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 5 number of parameters: 4 id: 385 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 3 id: 386 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 6 id: 387 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 25 LOC McCabe index: 1 number of parameters: 4 id: 388 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 11 number of parameters: 3 id: 389 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 6 id: 390 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 25 LOC McCabe index: 1 number of parameters: 6 id: 391 unit: def compute_bias() file: 
server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 2 number of parameters: 4 id: 392 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 2 id: 393 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 2 number of parameters: 4 id: 394 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 3 id: 395 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 2 number of parameters: 4 id: 396 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 25 LOC McCabe index: 4 number of parameters: 4 id: 397 unit: def forward() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 25 LOC McCabe index: 5 number of parameters: 2 id: 398 unit: def print_loss() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 399 unit: def get_wikitext2() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 400 unit: def get_ptb() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 401 unit: def get_ptb_new() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 25 LOC McCabe index: 3 number of parameters: 5 id: 402 unit: request_id_t submit() file: backends/trtllm/csrc/ffi.hpp start line: 88 end line: 116 size: 24 LOC McCabe index: 2 number of parameters: 8 id: 403 unit: def neuron_config_to_env() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 24 LOC McCabe index: 6 number of parameters: 1 id: 404 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 24 LOC McCabe index: 6 number of parameters: 3 id: 405 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 4 id: 406 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 3 id: 407 unit: def _load_qkv_gptq() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 3 id: 408 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 4 id: 409 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 3 number of parameters: 6 id: 410 unit: def __init__() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 4 id: 411 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 4 id: 412 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 0 id: 413 unit: def load() file: backends/gaudi/server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 3 id: 414 unit: def _update_cos_sin_cache() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 4 id: 415 unit: fn trie_evict_removes_correct_blocks() file: backends/v3/src/radix.rs start line: 850 end line: 885 size: 24 LOC McCabe index: 1 number of parameters: 0 id: 416 unit: fn get_unpadded_features() file: router/src/config.rs start line: 61 end line: 86 size: 24 LOC McCabe index: 2 number of parameters: 5 id: 417 unit: fn from() file: router/src/lib.rs start line: 1295 end line: 1318 size: 24 LOC McCabe index: 1 number of parameters: 1 id: 418 unit: fn vram() file: launcher/src/main.rs start line: 1783 end line: 1806 size: 24 LOC McCabe index: 2 number of parameters: 2 id: 419 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 24 LOC McCabe index: 6 number of parameters: 3 id: 420 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 6 number of parameters: 3 id: 421 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 4 number of parameters: 4 id: 422 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 4 id: 423 unit: def __init__() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 3 id: 424 unit: def _load_qkv_gptq() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 3 id: 425 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 5 id: 426 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 3 number of parameters: 6 id: 427 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 3 number of parameters: 4 id: 428 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 4 id: 429 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 
2 number of parameters: 5 id: 430 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 4 id: 431 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 4 number of parameters: 4 id: 432 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 2 number of parameters: 5 id: 433 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 0 id: 434 unit: def get_weights() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 24 LOC McCabe index: 4 number of parameters: 3 id: 435 unit: def load() file: server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 3 id: 436 unit: def _update_cos_sin_cache() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 24 LOC McCabe index: 5 number of parameters: 4 id: 437 unit: def _bench() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 24 LOC McCabe index: 7 number of parameters: 4 id: 438 unit: def matmul_248_kernel() file: server/text_generation_server/layers/gptq/triton.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 0 id: 439 unit: def fused_marlin_moe() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 24 LOC McCabe index: 1 number of parameters: 0 id: 440 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 441 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 442 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 6 id: 443 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 5 id: 444 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 445 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 446 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 3 number of parameters: 6 id: 447 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 5 id: 448 unit: def _load_experts() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 3 id: 449 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 5 id: 450 unit: def filter() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 23 LOC McCabe index: 3 number of parameters: 2 id: 451 unit: def _start_span() file: backends/gaudi/server/text_generation_server/tracing.py start line: 0 end line: 0 size: 23 LOC McCabe index: 5 number of parameters: 4 id: 452 unit: def make_quant_linear() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 23 LOC McCabe index: 7 number of parameters: 5 id: 453 unit: fn from_py() file: router/src/lib.rs start line: 41 end line: 63 size: 23 LOC McCabe index: 2 number of parameters: 4 id: 454 unit: fn default_parameters() file: router/src/lib.rs start line: 436 end line: 458 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 455 unit: fn flop() file: launcher/src/main.rs start line: 281 end line: 308 size: 23 LOC McCabe index: 2 number of parameters: 1 id: 456 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 4 id: 457 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 458 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 6 number of parameters: 4 id: 459 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 460 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 5 id: 461 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 462 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 1 number of parameters: 0 id: 463 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 3 number of parameters: 6 id: 464 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 5 id: 465 unit: def _shift_right() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 3 number of parameters: 2 id: 466 unit: def _load_experts() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 3 id: 467 unit: def filter() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end 
line: 0 size: 23 LOC McCabe index: 3 number of parameters: 2 id: 468 unit: def _start_span() file: server/text_generation_server/tracing.py start line: 0 end line: 0 size: 23 LOC McCabe index: 5 number of parameters: 4 id: 469 unit: def forward() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 23 LOC McCabe index: 2 number of parameters: 2 id: 470 unit: def prune_configs() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 23 LOC McCabe index: 7 number of parameters: 2 id: 471 unit: def make_quant_linear() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 23 LOC McCabe index: 7 number of parameters: 5 id: 472 unit: fn new() file: benchmark/src/app.rs start line: 379 end line: 404 size: 23 LOC McCabe index: 1 number of parameters: 2 id: 473 unit: def parse_error() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 23 LOC McCabe index: 12 number of parameters: 3 id: 474 unit: def run() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 22 LOC McCabe index: 4 number of parameters: 3 id: 475 unit: fn from() file: backends/v2/src/queue.rs start line: 365 end line: 388 size: 22 LOC McCabe index: 1 number of parameters: 1 id: 476 unit: fn sample() file: backends/llamacpp/src/backend.rs start line: 381 end line: 402 size: 22 LOC McCabe index: 2 number of parameters: 3 id: 477 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 3 number of parameters: 4 id: 478 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 479 unit: def load_vision_model() file: backends/gaudi/server/text_generation_server/models/custom_modeling/vlm.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 3 id: 480 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 2 number of parameters: 5 id: 481 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 482 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 4 id: 483 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 0 id: 484 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 485 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 486 unit: def load() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 3 id: 487 unit: def load() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end 
line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 488 unit: def load() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 489 unit: fn check_allocation_invariants() file: backends/v3/src/radix.rs start line: 1014 end line: 1040 size: 22 LOC McCabe index: 3 number of parameters: 1 id: 490 unit: fn from() file: backends/v3/src/queue.rs start line: 523 end line: 546 size: 22 LOC McCabe index: 1 number of parameters: 1 id: 491 unit: fn prefix_cache_benchmark() file: backends/v3/benches/prefix_cache.rs start line: 9 end line: 44 size: 22 LOC McCabe index: 3 number of parameters: 1 id: 492 unit: fn find_supported_resolutions() file: router/src/config.rs start line: 143 end line: 171 size: 22 LOC McCabe index: 5 number of parameters: 2 id: 493 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 3 number of parameters: 4 id: 494 unit: def __init__() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 495 unit: def load_vision_model() file: server/text_generation_server/models/custom_modeling/vlm.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 3 id: 496 unit: def _apply_prefix_mask() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 4 number of parameters: 3 id: 497 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 4 id: 498 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 499 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 2 number of parameters: 4 id: 500 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 2 number of parameters: 5 id: 501 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 502 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 2 number of parameters: 4 id: 503 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 0 id: 504 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 505 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 4 id: 506 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 22 LOC McCabe index: 2 number of parameters: 4 id: 507 unit: def get_inputs_embeds() file: 
server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 22 LOC McCabe index: 3 number of parameters: 3 id: 508 unit: def load() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 22 LOC McCabe index: 5 number of parameters: 3 id: 509 unit: def load() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 510 unit: def load() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 22 LOC McCabe index: 6 number of parameters: 3 id: 511 unit: def fused_moe() file: server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 22 LOC McCabe index: 1 number of parameters: 0 id: 512 unit: fn add_latencies() file: benchmark/src/table.rs start line: 107 end line: 130 size: 22 LOC McCabe index: 2 number of parameters: 4 id: 513 unit: fn validate() file: backends/trtllm/src/looper.rs start line: 286 end line: 310 size: 21 LOC McCabe index: 4 number of parameters: 1 id: 514 unit: fn new() file: backends/llamacpp/src/backend.rs start line: 170 end line: 190 size: 21 LOC McCabe index: 1 number of parameters: 2 id: 515 unit: def build_layer_weight_lookup() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 21 LOC McCabe index: 7 number of parameters: 1 id: 516 unit: def convert_file() file: backends/gaudi/server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 21 LOC McCabe index: 8 number of parameters: 3 id: 517 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 0 id: 518 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 4 id: 519 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 4 number of parameters: 3 id: 520 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 521 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 522 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 21 LOC McCabe index: 3 number of parameters: 4 id: 523 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 524 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 525 unit: def load_weights_post_hook() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 21 LOC McCabe index: 8 number of parameters: 3 id: 526 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 21 LOC McCabe index: 3 
number of parameters: 8 id: 527 unit: fn split_node() file: backends/v3/src/radix.rs start line: 473 end line: 506 size: 21 LOC McCabe index: 1 number of parameters: 3 id: 528 unit: fn new() file: router/src/usage_stats.rs start line: 357 end line: 378 size: 21 LOC McCabe index: 1 number of parameters: 0 id: 529 unit: def build_layer_weight_lookup() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 21 LOC McCabe index: 7 number of parameters: 1 id: 530 unit: def convert_file() file: server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 21 LOC McCabe index: 8 number of parameters: 3 id: 531 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 0 id: 532 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 4 number of parameters: 3 id: 533 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 21 LOC McCabe index: 3 number of parameters: 4 id: 534 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 535 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 536 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 21 LOC McCabe index: 1 number of parameters: 5 id: 537 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 4 number of parameters: 4 id: 538 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 2 number of parameters: 5 id: 539 unit: def _reorder_cache() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 21 LOC McCabe index: 7 number of parameters: 3 id: 540 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 21 LOC McCabe index: 4 number of parameters: 3 id: 541 unit: def load_weights_post_hook() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 21 LOC McCabe index: 8 number of parameters: 3 id: 542 unit: fn schedule() file: backends/trtllm/src/looper.rs start line: 315 end line: 338 size: 20 LOC McCabe index: 1 number of parameters: 2 id: 543 unit: def concatenate() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 20 LOC McCabe index: 5 number of parameters: 3 id: 544 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 6 id: 545 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 5 id: 546 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end 
line: 0 size: 20 LOC McCabe index: 3 number of parameters: 4 id: 547 unit: def variance_scaling_() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 20 LOC McCabe index: 7 number of parameters: 4 id: 548 unit: def load_qkv() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 3 number of parameters: 6 id: 549 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 4 id: 550 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 20 LOC McCabe index: 3 number of parameters: 2 id: 551 unit: def _update_cos_sin_cache() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 20 LOC McCabe index: 9 number of parameters: 4 id: 552 unit: fn alloc_or_reclaim() file: backends/v3/src/radix.rs start line: 52 end line: 77 size: 20 LOC McCabe index: 3 number of parameters: 2 id: 553 unit: fn find_() file: backends/v3/src/radix.rs start line: 286 end line: 310 size: 20 LOC McCabe index: 5 number of parameters: 4 id: 554 unit: fn from() file: launcher/src/main.rs start line: 1709 end line: 1728 size: 20 LOC McCabe index: 1 number of parameters: 1 id: 555 unit: def concatenate() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 20 LOC McCabe index: 5 number of parameters: 2 id: 556 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 5 id: 557 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 2 number of parameters: 5 id: 558 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 3 number of parameters: 4 id: 559 unit: def variance_scaling_() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 20 LOC McCabe index: 7 number of parameters: 4 id: 560 unit: def load_qkv() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 3 number of parameters: 6 id: 561 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 20 LOC McCabe index: 2 number of parameters: 4 id: 562 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 4 id: 563 unit: def _update_cos_sin_cache() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 20 LOC McCabe index: 9 number of parameters: 4 id: 564 unit: def forward() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 4 id: 565 unit: def _completion_stream_response() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 20 LOC McCabe index: 6 number of parameters: 2 id: 566 unit: def _chat_stream_response() file: clients/python/text_generation/client.py start line: 0 end 
line: 0 size: 20 LOC McCabe index: 6 number of parameters: 2 id: 567 unit: def generate() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 20 LOC McCabe index: 1 number of parameters: 0 id: 568 unit: def build_df() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 3 id: 569 unit: def main() file: backends/neuron/tgi_entry_point.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 0 id: 570 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 4 id: 571 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 5 id: 572 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 19 LOC McCabe index: 2 number of parameters: 4 id: 573 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 3 id: 574 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 4 id: 575 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 6 id: 576 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 0 id: 577 unit: def _update_cos_sin_cache() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 4 id: 578 unit: def new() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 6 id: 579 unit: fn parse_traceparent() file: router/src/logging.rs start line: 22 end line: 43 size: 19 LOC McCabe index: 3 number of parameters: 1 id: 580 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 19 LOC McCabe index: 5 number of parameters: 2 id: 581 unit: def __init__() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 5 id: 582 unit: def __init__() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 2 number of parameters: 4 id: 583 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 5 id: 584 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 4 id: 585 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 5 id: 586 
unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 19 LOC McCabe index: 2 number of parameters: 4 id: 587 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 4 id: 588 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 2 id: 589 unit: def preprocess() file: server/text_generation_server/models/custom_modeling/gemma3/image_processing_gemma3.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 0 id: 590 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 4 id: 591 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 4 id: 592 unit: def tgi_flash_attention_forward() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 0 id: 593 unit: def __init__() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 19 LOC McCabe index: 3 number of parameters: 0 id: 594 unit: def get_weights() file: server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 3 id: 595 unit: def get_weights_row() file: server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 3 id: 596 unit: def _update_cos_sin_cache() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 4 id: 597 unit: def new() file: server/text_generation_server/layers/gptq/ipex.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 6 id: 598 unit: def new() file: server/text_generation_server/layers/gptq/triton.py start line: 0 end line: 0 size: 19 LOC McCabe index: 4 number of parameters: 6 id: 599 unit: fn add_throuhgputs() file: benchmark/src/table.rs start line: 132 end line: 152 size: 19 LOC McCabe index: 2 number of parameters: 4 id: 600 unit: def chat() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 19 LOC McCabe index: 1 number of parameters: 0 id: 601 unit: constexpr explicit operator tle::SamplingConfig() file: backends/trtllm/csrc/backend.hpp start line: 41 end line: 58 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 602 unit: fn batch_push() file: backends/llamacpp/src/backend.rs start line: 294 end line: 311 size: 18 LOC McCabe index: 1 number of parameters: 5 id: 603 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 604 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 4 number of parameters: 5 id: 605 unit: def pixel_shuffle() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 
1 number of parameters: 2 id: 606 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 4 id: 607 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 6 id: 608 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 5 id: 609 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 6 id: 610 unit: def get_weights() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 18 LOC McCabe index: 2 number of parameters: 3 id: 611 unit: def _preprocessing() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 18 LOC McCabe index: 3 number of parameters: 1 id: 612 unit: fn terminate() file: launcher/src/main.rs start line: 2011 end line: 2032 size: 18 LOC McCabe index: 3 number of parameters: 3 id: 613 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 4 id: 614 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 4 id: 615 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 5 id: 616 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 617 unit: def _model_forward() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 618 unit: def tgi_flash_attention_forward() file: server/text_generation_server/models/transformers_flash_causal_lm.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 619 unit: def tunableop_warmup() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 3 id: 620 unit: def get_fp8_linear() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 18 LOC McCabe index: 10 number of parameters: 1 id: 621 unit: def get_multi_weights_col() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 18 LOC McCabe index: 5 number of parameters: 4 id: 622 unit: def get_weights() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 18 LOC McCabe index: 2 number of parameters: 3 id: 623 unit: fn latency_paragraph() file: benchmark/src/app.rs start line: 459 end line: 482 size: 18 LOC McCabe index: 2 number of parameters: 2 id: 624 unit: def generate_stream() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 18 LOC McCabe index: 1 number of parameters: 0 id: 625 unit: def run() file: load_tests/benchmarks.py start line: 0 end
line: 0 size: 17 LOC McCabe index: 3 number of parameters: 3 id: 626 unit: def get_export_kwargs_from_env() file: backends/neuron/server/text_generation_server/model.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 0 id: 627 unit: def parse_lora_adapters() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 1 id: 628 unit: def pad_rank() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 3 id: 629 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 17 LOC McCabe index: 3 number of parameters: 2 id: 630 unit: def get_driver_version() file: backends/gaudi/server/text_generation_server/utils/version.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 0 id: 631 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 4 id: 632 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 4 id: 633 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 4 id: 634 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 3 id: 635 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 0 id: 636 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 3 id: 637 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 5 id: 638 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 0 id: 639 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 4 id: 640 unit: def load_multi() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 6 id: 641 unit: fn print_debug_() file: backends/v3/src/radix.rs start line: 579 end line: 595 size: 17 LOC McCabe index: 2 number of parameters: 3 id: 642 unit: fn schedule() file: backends/v3/src/backend.rs start line: 79 end line: 103 size: 17 LOC McCabe index: 1 number of parameters: 2 id: 643 unit: fn compute_optimal() file: launcher/src/main.rs start line: 28 end line: 46 size: 17 LOC McCabe index: 2 number of parameters: 2 id: 644 unit: fn main() file: launcher/build.rs start line: 4 end line: 29 size: 17 LOC McCabe index: 4 number of 
parameters: 0 id: 645 unit: void check_cuda() file: server/exllama_kernels/exllama_kernels/exllama_ext.cpp start line: 21 end line: 39 size: 17 LOC McCabe index: 3 number of parameters: 1 id: 646 unit: def parse_lora_adapters() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 1 id: 647 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 0 id: 648 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 4 id: 649 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 0 id: 650 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 5 id: 651 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 3 id: 652 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 0 id: 653 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 3 id: 654 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 0 id: 655 unit: def freeze_model() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 2 id: 656 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 17 LOC McCabe index: 1 number of parameters: 5 id: 657 unit: def forward() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 2 id: 658 unit: def pack_fp8_as_int32() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 17 LOC McCabe index: 4 number of parameters: 1 id: 659 unit: def forward() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 2 id: 660 unit: def get_linear() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 17 LOC McCabe index: 2 number of parameters: 2 id: 661 unit: def load_multi() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 17 LOC McCabe index: 6 number of parameters: 6 id: 662 unit: def __init__() file: server/text_generation_server/layers/gptq/triton.py start line: 0 end line: 0 size: 17 LOC McCabe index: 3 number of parameters: 8 id: 663 unit: def main() file: load_tests/orca.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 0 id: 664 unit: void initialize_logging() file: backends/trtllm/csrc/ffi.hpp start line: 145 end line: 161 size: 16 LOC McCabe index: 3 number of parameters: 0 id: 665 unit: fn new() file: 
backends/v2/src/queue.rs start line: 159 end line: 174 size: 16 LOC McCabe index: 1 number of parameters: 4 id: 666 unit: fn test_next_batch_max_size() file: backends/v2/src/queue.rs start line: 508 end line: 525 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 667 unit: fn schedule() file: backends/v2/src/backend.rs start line: 73 end line: 96 size: 16 LOC McCabe index: 1 number of parameters: 2 id: 668 unit: fn filter_send_generations() file: backends/v2/src/backend.rs start line: 361 end line: 383 size: 16 LOC McCabe index: 2 number of parameters: 2 id: 669 unit: def initialize_torch_distributed() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 0 id: 670 unit: def get_attn_weights() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 2 id: 671 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 672 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 16 LOC McCabe index: 9 number of parameters: 3 id: 673 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 16 LOC McCabe index: 5 number of parameters: 1 id: 674 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 4 id: 675 unit: def unpad_image() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 2 id: 676 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 677 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 6 id: 678 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 6 id: 679 unit: def _trunc_normal_() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 5 id: 680 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 681 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 682 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 683 unit: def get_number_of_features() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 3 id: 684 unit: def forward() file: 
backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 16 LOC McCabe index: 6 number of parameters: 2 id: 685 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 4 id: 686 unit: def unpack_awq() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 687 unit: def pack_tensor() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 2 id: 688 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 689 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 690 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 4 id: 691 unit: fn filter_send_generations() file: backends/v3/src/backend.rs start line: 423 end line: 445 size: 16 LOC McCabe index: 2 number of parameters: 2 id: 692 unit: fn from() file: router/src/lib.rs start line: 612 end line: 628 size: 16 LOC McCabe index: 1 number of parameters: 2 id: 693 unit: fn main() file: router/build.rs start line: 4 end line: 26 size: 16 LOC McCabe index: 4 number of parameters: 0 id: 694 unit: def get_attn_weights() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 2 id: 695 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 696 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 16 LOC McCabe index: 9 number of parameters: 3 id: 697 unit: def to_pb() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 16 LOC McCabe index: 5 number of parameters: 1 id: 698 unit: def unpad_image() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 2 id: 699 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 4 id: 700 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 4 id: 701 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 702 unit: def __init__() file: server/text_generation_server/models/custom_modeling/gemma3/image_processing_gemma3.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 703 unit: def _trunc_normal_() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 5 id: 704 unit: def _load_gqa() file: 
server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 705 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 3 id: 706 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 707 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 2 number of parameters: 4 id: 708 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 709 unit: def __init__() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 710 unit: def get_number_of_features() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 3 id: 711 unit: def forward() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 16 LOC McCabe index: 6 number of parameters: 2 id: 712 unit: def get_multi_weights_col() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 4 id: 713 unit: def __init__() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 4 id: 714 unit: def __init__() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 715 unit: def __init__() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 716 unit: def is_supported() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 16 LOC McCabe index: 5 number of parameters: 1 id: 717 unit: def can_scale() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 16 LOC McCabe index: 9 number of parameters: 2 id: 718 unit: def valid_best_of() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 16 LOC McCabe index: 7 number of parameters: 3 id: 719 unit: export function get_options() file: load_tests/long.js start line: 18 end line: 58 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 720 unit: export function get_options() file: load_tests/common.js start line: 18 end line: 58 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 721 unit: def clear() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 1 id: 722 unit: def dbg_trace() file: backends/gaudi/server/text_generation_server/utils/debug.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 2 id: 723 unit: def get_filename() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 15 LOC McCabe index: 6 number of parameters: 2 id: 724 unit: def get_tensor_shard() file: 
backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 3 id: 725 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 15 LOC McCabe index: 6 number of parameters: 3 id: 726 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 727 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 2 id: 728 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 729 unit: def pixel_shuffle() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 3 id: 730 unit: def vision_forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 4 id: 731 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 732 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 5 id: 733 unit: def log_warmup() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 6 id: 734 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 2 id: 735 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 736 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 737 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 738 unit: def serve() file: backends/gaudi/server/text_generation_server/cli.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 739 unit: fn add_node() file: backends/v3/src/radix.rs start line: 509 end line: 526 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 740 unit: fn remove_node() file: backends/v3/src/radix.rs start line: 540 end line: 556 size: 15 LOC McCabe index: 1 number of parameters: 2 id: 741 unit: fn try_from() file: router/src/lib.rs start line: 474 end line: 488 size: 15 LOC McCabe index: 2 number of parameters: 1 id: 742 unit: fn fmt() file: launcher/src/main.rs start line: 1732 end line: 1746 size: 15 LOC McCabe index: 1 number of parameters: 2 id: 743 unit: def get_filename() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 15 LOC McCabe index: 6 number of parameters: 2 id: 744 unit: def get_tensor_shard() file: 
server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 3 id: 745 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 15 LOC McCabe index: 6 number of parameters: 3 id: 746 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 747 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_config.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 748 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 2 id: 749 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 750 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 751 unit: def pixel_shuffle() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 3 id: 752 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 753 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 4 id: 754 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 5 id: 755 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 756 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 757 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 758 unit: def vision_forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 4 id: 759 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 760 unit: def get_linear() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 2 id: 761 unit: def forward() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 2 id: 762 unit: def create_exllama_buffers() file: server/text_generation_server/layers/gptq/exllama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 1 id: 763 unit: def make_group_map() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 2 id: 764 unit: def __init__() 
file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 765 unit: def __init__() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 766 unit: def __init__() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 767 unit: def __init__() file: server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 768 unit: def serve() file: server/text_generation_server/cli.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 769 unit: def main() file: load_tests/filter.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 0 id: 770 unit: tle::ParallelConfig backend_workspace_t::parallel_config() file: backends/trtllm/csrc/backend.cpp start line: 9 end line: 26 size: 14 LOC McCabe index: 2 number of parameters: 0 id: 771 unit: constexpr finish_reason_t as_finish_reason_t() file: backends/trtllm/csrc/ffi.hpp start line: 35 end line: 48 size: 14 LOC McCabe index: 5 number of parameters: 1 id: 772 unit: fn from() file: backends/v2/src/backend.rs start line: 495 end line: 509 size: 14 LOC McCabe index: 1 number of parameters: 1 id: 773 unit: fn main() file: backends/v2/build.rs start line: 3 end line: 19 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 774 unit: fn schedule() file: backends/llamacpp/src/backend.rs start line: 644 end line: 657 size: 14 LOC McCabe index: 1 number of parameters: 2 id: 775 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 14 LOC McCabe index: 2 number of parameters: 3 id: 776 unit: def convert_files() file: backends/gaudi/server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 3 id: 777 unit: def concat_text_chunks() file: backends/gaudi/server/text_generation_server/utils/chunks.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 1 id: 778 unit: def _seed_rng() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 14 LOC McCabe index: 2 number of parameters: 3 id: 779 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 780 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 781 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 782 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 4 id: 783 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 784 unit: def forward() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 785 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 786 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 787 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 788 unit: def prompt_split_image_llama4() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 2 id: 789 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 790 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 791 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 3 id: 792 unit: fn allocator_collects_older_prefixes_first() file: backends/v3/src/radix.rs start line: 691 end line: 708 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 793 unit: fn from() file: backends/v3/src/backend.rs start line: 557 end line: 571 size: 14 LOC McCabe index: 1 number of parameters: 1 id: 794 unit: fn main() file: backends/v3/build.rs start line: 3 end line: 19 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 795 unit: fn from() file: router/src/lib.rs start line: 1127 end line: 1140 size: 14 LOC McCabe index: 1 number of parameters: 1 id: 796 unit: fn fmt() file: launcher/src/main.rs start line: 563 end line: 576 size: 14 LOC McCabe index: 1 number of parameters: 2 id: 797 unit: def convert_files() file: server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 3 id: 798 unit: def concat_text_chunks() file: server/text_generation_server/utils/chunks.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 1 id: 799 unit: def _seed_rng() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 14 LOC McCabe index: 2 number of parameters: 3 id: 800 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 801 unit: def forward() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 14 LOC McCabe index: 2 number of parameters: 0 id: 802 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 803 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 804 unit: def scaled_multihead_dot_product_attention() file: 
server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 805 unit: def flash_attn_fn() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 806 unit: def triton_flash_attn_fn() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 807 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 808 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 809 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 810 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 811 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 812 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 4 id: 813 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 814 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 815 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 816 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 817 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 818 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 819 unit: def vblock() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 820 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 821 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 822 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 5 id: 823 unit: def __init__() file: 
server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 4 id: 824 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 825 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 826 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 827 unit: def prompt_split_image_llama4() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 2 id: 828 unit: def __init__() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 829 unit: def info() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 1 id: 830 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 831 unit: def __init__() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 3 id: 832 unit: fn end_batch() file: benchmark/src/app.rs start line: 420 end line: 433 size: 14 LOC McCabe index: 1 number of parameters: 2 id: 833 unit: def concat() file: backends/gaudi/server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 13 LOC McCabe index: 6 number of parameters: 3 id: 834 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 3 id: 835 unit: def _cached_adapt_tokenizer() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 1 id: 836 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 6 id: 837 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 838 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 4 id: 839 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 840 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 841 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 842 unit: def forward() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 843 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 2 id: 844 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 4 id: 845 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 846 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 847 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 848 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 849 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 850 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 4 id: 851 unit: def _update_cos_sin_cache() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 13 LOC McCabe index: 4 number of parameters: 4 id: 852 unit: def reverse_awq_order() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 3 id: 853 unit: def load_conv2d() file: backends/gaudi/server/text_generation_server/layers/conv.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 7 id: 854 unit: def set_block_mapping() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 2 id: 855 unit: def trim_attn_metadata() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 1 id: 856 unit: fn human_size() file: launcher/src/main.rs start line: 48 end line: 60 size: 13 LOC McCabe index: 3 number of parameters: 2 id: 857 unit: fn get_head_dim() file: launcher/src/main.rs start line: 262 end line: 279 size: 13 LOC McCabe index: 3 number of parameters: 1 id: 858 unit: fn model_vram() file: launcher/src/main.rs start line: 337 end line: 351 size: 13 LOC McCabe index: 2 number of parameters: 1 id: 859 unit: def concat() file: server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 13 LOC McCabe index: 6 number of parameters: 3 id: 860 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 3 id: 861 unit: def _cached_adapt_tokenizer() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 13 LOC McCabe 
index: 3 number of parameters: 1 id: 862 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 863 unit: def __init__() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 864 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 865 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 866 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 867 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 868 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 869 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 870 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 871 unit: def forward() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 872 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 4 id: 873 unit: def load_qkv() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 5 id: 874 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 875 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 876 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 877 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 878 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 879 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 880 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 
size: 13 LOC McCabe index: 1 number of parameters: 0 id: 881 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 882 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 883 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 884 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 5 id: 885 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 886 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 5 id: 887 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 888 unit: def __init__() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 4 id: 889 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 890 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 891 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 892 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 893 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 894 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 895 unit: def project() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 6 number of parameters: 4 id: 896 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 897 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 898 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 899 unit: def forward() file: 
server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 900 unit: def _model_forward() file: server/text_generation_server/models/transformers_flash_causal_lm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 901 unit: def __init__() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 902 unit: def __init__() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 903 unit: def get_weights_row() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 3 id: 904 unit: def forward() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 2 id: 905 unit: def _update_cos_sin_cache() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 13 LOC McCabe index: 4 number of parameters: 4 id: 906 unit: def forward() file: server/text_generation_server/layers/gptq/triton.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 2 id: 907 unit: def load_conv2d() file: server/text_generation_server/layers/conv.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 7 id: 908 unit: def attention() file: server/text_generation_server/layers/attention/ipex.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 909 unit: def attention() file: server/text_generation_server/layers/attention/rocm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 910 unit: def use_prefill_with_paged_kv_state() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 911 unit: def forward() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 912 unit: def attention() file: server/text_generation_server/layers/attention/cuda.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 913 unit: fn latency_histogram() file: benchmark/src/app.rs start line: 529 end line: 541 size: 13 LOC McCabe index: 1 number of parameters: 2 id: 914 unit: fn try_from() file: backends/trtllm/src/looper.rs start line: 51 end line: 62 size: 12 LOC McCabe index: 2 number of parameters: 1 id: 915 unit: void initialize_tensorrt_llm_backend() file: backends/trtllm/csrc/ffi.hpp start line: 163 end line: 178 size: 12 LOC McCabe index: 2 number of parameters: 0 id: 916 unit: fn send_errors() file: backends/v2/src/backend.rs start line: 478 end line: 492 size: 12 LOC McCabe index: 1 number of parameters: 2 id: 917 unit: def warmup() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 2 id: 918 unit: fn chunks_to_string() file: backends/client/src/lib.rs start line: 74 end line: 86 size: 12 LOC McCabe index: 1 number of parameters: 1 id: 919 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3 id: 920 
unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 6 number of parameters: 2 id: 921 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 5 id: 922 unit: def get_sharded() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 5 id: 923 unit: def _blocks_to_block_sizes() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3 id: 924 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 925 unit: def load_row() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 4 id: 926 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 5 id: 927 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4 id: 928 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4 id: 929 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 930 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 2 id: 931 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4 id: 932 unit: def load_row() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 4 id: 933 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 934 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 935 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 936 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4 id: 937 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of 
parameters: 0 id: 938 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 939 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 940 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4 id: 941 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2 id: 942 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 943 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 7 id: 944 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 945 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2 id: 946 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 947 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4 id: 948 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 949 unit: def get_model_with_lora_adapters() file: backends/gaudi/server/text_generation_server/models/__init__.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 950 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 1 id: 951 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 5 id: 952 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 12 LOC McCabe index: 7 number of parameters: 2 id: 953 unit: def unpack() file: backends/gaudi/server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 2 id: 954 unit: def sequential() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 955 unit: def pack() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 4 
id: 956 unit: def pack_tensor() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 2
id: 957 unit: def get_kv_scales() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2
id: 958 unit: def attention() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 959 unit: def load() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 3
id: 960 unit: def serve() file: backends/gaudi/server/text_generation_server/server.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 961 unit: fn allocator_block_size() file: backends/v3/src/radix.rs start line: 647 end line: 659 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 962 unit: fn allocator_block_size_non_aligned() file: backends/v3/src/radix.rs start line: 662 end line: 674 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 963 unit: fn trie_insertions_have_correct_prefix_len() file: backends/v3/src/radix.rs start line: 769 end line: 789 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 964 unit: fn trie_insertions_block_size() file: backends/v3/src/radix.rs start line: 792 end line: 813 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 965 unit: fn send_errors() file: backends/v3/src/backend.rs start line: 540 end line: 554 size: 12 LOC McCabe index: 1 number of parameters: 2
id: 966 unit: fn next_int_id() file: router/src/lib.rs start line: 1038 end line: 1049 size: 12 LOC McCabe index: 4 number of parameters: 1
id: 967 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3
id: 968 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 6 number of parameters: 2
id: 969 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 5
id: 970 unit: def get_sharded() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 5
id: 971 unit: def _blocks_to_block_sizes() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3
id: 972 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 5
id: 973 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 974 unit: def load_row() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 4
id: 975 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 976 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4
id: 977 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 978 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 5
id: 979 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 980 unit: def __init__() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4
id: 981 unit: def forward() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 982 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 2
id: 983 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4
id: 984 unit: def load_row() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 4
id: 985 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 986 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 4
id: 987 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 988 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 989 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 990 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 991 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 992 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4
id: 993 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 994 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2
id: 995 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 996 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 997 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 998 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 999 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1000 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2
id: 1001 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1002 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 7
id: 1003 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1004 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1005 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1006 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4
id: 1007 unit: def get_model_with_lora_adapters() file: server/text_generation_server/models/__init__.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1008 unit: def fallback() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1009 unit: def pre_process_inputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 4
id: 1010 unit: def to_pb() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 1
id: 1011 unit: def __init__() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 5
id: 1012 unit: def repack_gptq_for_marlin() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1013 unit: def forward() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 12 LOC McCabe index: 7 number of parameters: 2
id: 1014 unit: def get_weights() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 3
id: 1015 unit: def get_weights_row() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 3
id: 1016 unit: def unpack() file: server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 2
id: 1017 unit: def sequential() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1018 unit: def pack() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 4
id: 1019 unit: def paged_attention() file: server/text_generation_server/layers/attention/ipex.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1020 unit: def get_kv_scales() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2
id: 1021 unit: def paged_attention() file: server/text_generation_server/layers/attention/rocm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1022 unit: def paged_attention() file: server/text_generation_server/layers/attention/cuda.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1023 unit: def load() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 3
id: 1024 unit: def serve() file: server/text_generation_server/server.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0
id: 1025 unit: fn avg_min_max() file: benchmark/src/table.rs start line: 154 end line: 165 size: 12 LOC McCabe index: 1 number of parameters: 1
id: 1026 unit: def valid_best_of_stream() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3
id: 1027 unit: def check_model_support() file: clients/python/text_generation/inference_api.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 2
id: 1028 unit: fn test_append() file: backends/v2/src/queue.rs start line: 452 end line: 465 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1029 unit: def serve() file: backends/neuron/server/text_generation_server/cli.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1030 unit: def _merge_lora_configs() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 1031 unit: def _get_config_json() file: backends/gaudi/server/text_generation_server/utils/quantization.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1032 unit: def _weight_files_from_dir() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 11 LOC McCabe index: 7 number of parameters: 2
id: 1033 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 2
id: 1034 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2
id: 1035 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1036 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1037 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1038 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1039 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1040 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1041 unit: def load_row() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 4
id: 1042 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1043 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1044 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1045 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 2
id: 1046 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1047 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1048 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2
id: 1049 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1050 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4
id: 1051 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1052 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1053 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1054 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1055 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1056 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1057 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1058 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1059 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1060 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1061 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1062 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1063 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1064 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1065 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1066 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1067 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1068 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1069 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1070 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1071 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1072 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1073 unit: def load_row() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 4
id: 1074 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1075 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1076 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1077 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1078 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1079 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1080 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 5
id: 1081 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1082 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1083 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1084 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1085 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1086 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1087 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1088 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1089 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1090 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1091 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 6
id: 1092 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1093 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1094 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1095 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1096 unit: def get_model() file: backends/gaudi/server/text_generation_server/models/__init__.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1097 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1098 unit: def set_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2
id: 1099 unit: def info() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 1100 unit: def pad_weight() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 2
id: 1101 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4
id: 1102 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 2
id: 1103 unit: def pack() file: backends/gaudi/server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2
id: 1104 unit: def _get_gptq_params() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 2
id: 1105 unit: def find_layers() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 3
id: 1106 unit: def quantize() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1107 unit: def unpack_zeros_from_cuda_old_format() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 1
id: 1108 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 3
id: 1109 unit: def paged_attention_mla() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1110 unit: def quantize() file: backends/gaudi/server/text_generation_server/cli.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1111 unit: def prepare_weights() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1112 unit: fn allocator_reuses_prefixes() file: backends/v3/src/radix.rs start line: 677 end line: 688 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1113 unit: fn format_from_mimetype() file: router/src/validation.rs start line: 535 end line: 551 size: 11 LOC McCabe index: 1 number of parameters: 1
id: 1114 unit: fn format_to_mimetype() file: router/src/validation.rs start line: 553 end line: 563 size: 11 LOC McCabe index: 1 number of parameters: 1
id: 1115 unit: fn chunks_to_string() file: router/src/validation.rs start line: 864 end line: 874 size: 11 LOC McCabe index: 1 number of parameters: 1
id: 1116 unit: fn fmt() file: launcher/src/env_runtime.rs start line: 33 end line: 45 size: 11 LOC McCabe index: 1 number of parameters: 2
id: 1117 unit: fn trace() file: launcher/src/main.rs start line: 1319 end line: 1329 size: 11 LOC McCabe index: 1 number of parameters: 1
id: 1118 unit: fn compute_type() file: launcher/src/main.rs start line: 1815 end line: 1825 size: 11 LOC McCabe index: 2 number of parameters: 1
id: 1119 unit: def _merge_lora_configs() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 1120 unit: def _get_config_json() file: server/text_generation_server/utils/quantization.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1121 unit: def _weight_files_from_dir() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 11 LOC McCabe index: 7 number of parameters: 2
id: 1122 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 2
id: 1123 unit: def filter() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2
id: 1124 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 2
id: 1125 unit: def rotary_forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 5
id: 1126 unit: def _reorder_cache() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 3
id: 1127 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1128 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1129 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1130 unit: def load_row() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 4
id: 1131 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1132 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1133 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 2
id: 1134 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1135 unit: def __call__() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1136 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1137 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1138 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1139 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1140 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1141 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1142 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1143 unit: def __init__() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1144 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1145 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1146 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1147 unit: def load_row() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 4
id: 1148 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1149 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1150 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1151 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1152 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1153 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1154 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 4
id: 1155 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1156 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2
id: 1157 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 5
id: 1158 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1159 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1160 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1161 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1162 unit: def get_model() file: server/text_generation_server/models/__init__.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1163 unit: def __init__() file: server/text_generation_server/models/transformers_flash_causal_lm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1164 unit: def __init__() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1165 unit: def set_inputs_embeds() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2
id: 1166 unit: def _get_gptq_params() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 2
id: 1167 unit: def __init__() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4
id: 1168 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4
id: 1169 unit: def pack() file: server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2
id: 1170 unit: def _get_gptq_params() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 2
id: 1171 unit: def warmup() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3
id: 1172 unit: def find_layers() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 3
id: 1173 unit: def quantize() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1174 unit: def create_exllama_buffers() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 1
id: 1175 unit: def use_decode_state() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1176 unit: def quantize() file: server/text_generation_server/cli.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1177 unit: def prepare_weights() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1178 unit: fn init_logging() file: benchmark/src/main.rs start line: 211 end line: 225 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1179 unit: def completion() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0
id: 1180 unit: def deployed_models() file: clients/python/text_generation/inference_api.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 1
id: 1181 unit: def run_docker() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1182 unit: constexpr explicit generation_config_t() file: backends/trtllm/csrc/backend.hpp start line: 70 end line: 80 size: 10 LOC McCabe index: 3 number of parameters: 1
id: 1183 unit: fn get_compiler_flag() file: backends/trtllm/build.rs start line: 45 end line: 54 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1184 unit: def is_cached() file: backends/neuron/server/text_generation_server/model.py start line: 0 end line: 0 size: 10 LOC McCabe index: 6 number of parameters: 1
id: 1185 unit: def log_cache_size() file: backends/neuron/server/text_generation_server/model.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 0
id: 1186 unit: fn from_str() file: backends/llamacpp/src/backend.rs start line: 30 end line: 39 size: 10 LOC McCabe index: 2 number of parameters: 1
id: 1187 unit: def _validate_lora_configs() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 1
id: 1188 unit: def advance_grammar_single() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3
id: 1189 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1190 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1191 unit: def load_col() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1192 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1193 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1194 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1195 unit: def compute_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1196 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1197 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1198 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1199 unit: def load_col() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1200 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3
id: 1201 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1202 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1203 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1204 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1205 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1206 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1207 unit: def _load_gqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1208 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1209 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1210 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1211 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1212 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1213 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1214 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1215 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1216 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1217 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1218 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1219 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1220 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 5
id: 1221 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1222 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1223 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1224 unit: def _q_proj_and_k_up_proj() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 2
id: 1225 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1226 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1227 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1228 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1229 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1230 unit: def dynamic_quant() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 2
id: 1231 unit: def from_unquant() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1232 unit: def load() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 5
id: 1233 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1234 unit: def configure() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1235 unit: def select_experts() file: backends/gaudi/server/text_generation_server/layers/moe/fused_moe.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1236 unit: def paged_attention() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1237 unit: fn hash() file: backends/v3/src/radix.rs start line: 9 end line: 18 size: 10 LOC McCabe index: 2 number of parameters: 1
id: 1238 unit: fn new() file: backends/v3/src/radix.rs start line: 614 end line: 623 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1239 unit: fn allocator_frees_fully_overlapping_prefills() file: backends/v3/src/radix.rs start line: 711 end line: 724 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1240 unit: fn partial_match_does_not_recurse() file: backends/v3/src/radix.rs start line: 899 end line: 908 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1241 unit: def get_openapi_schema() file: update_doc.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 0
id: 1242 unit: fn get_factors() file: router/src/config.rs start line: 130 end line: 141 size: 10 LOC McCabe index: 3 number of parameters: 1
id: 1243 unit: fn fmt() file: launcher/src/main.rs start line: 497 end line: 507 size: 10 LOC McCabe index: 1 number of parameters: 2
id: 1244 unit: fn fmt() file: launcher/src/main.rs start line: 520 end line: 529 size: 10 LOC McCabe index: 1 number of parameters: 2
id: 1245 unit: fn fmt() file: launcher/src/main.rs start line: 539 end line: 549 size: 10 LOC McCabe index: 1 number of parameters: 2
id: 1246 unit: def _validate_lora_configs() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 1
id: 1247 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3
id: 1248 unit: def advance_grammar_single() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3
id: 1249 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1250 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1251 unit: def __init__() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1252 unit: def load_col() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1253 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1254 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1255 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1256 unit: def compute_attention() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1257 unit: def forward() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1258 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1259 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1260 unit: def load_col() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1261 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1262 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1263 unit: def check_valid_inputs() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 3
id: 1264 unit: def _cast_if_autocast_enabled() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 1
id: 1265 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1266 unit: def _load_gqa() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1267 unit: def __init__() file: server/text_generation_server/models/custom_modeling/gemma3/configuration_gemma3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1268 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1269 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1270 unit: def __init__() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1271 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1272 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1273 unit: def __init__() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 4
id: 1274 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1275 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1276 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1277 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1278 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1279 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1280 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1281 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 4
id: 1282 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1283 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 5
id: 1284 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1285 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1286 unit: def from_unquant() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 4
id: 1287 unit: def __init__() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 2
id: 1288 unit: def get_weights() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1289 unit: def load() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 5
id: 1290 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 3
id: 1291 unit: def __init__() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1292 unit: def decorator() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 1
id: 1293 unit: def configure() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0
id: 1294 unit: def forward() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3
id: 1295 unit: def load_fn() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 4
id: 1296 unit: fn throughput_paragraph Paragraph() file: benchmark/src/app.rs start line: 446 end line: 456 size: 10 LOC McCabe index: 1 number of parameters: 2
id: 1297 unit: fn latency_histogram_data() file: benchmark/src/app.rs start line: 516 end line: 526 size: 10 LOC McCabe index: 1 number of parameters: 2
id: 1298 unit: def __init__() file: clients/python/text_generation/inference_api.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1299 unit: def __init__() file: clients/python/text_generation/inference_api.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4
id: 1300 unit: tle::ExecutorConfig backend_workspace_t::executor_config() file: backends/trtllm/csrc/backend.cpp start line: 29 end line: 44 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1301 unit: fn from() file: backends/v2/src/client/sharded_client.rs start line: 198 end line: 206 size: 9 LOC McCabe index: 1 number of parameters: 1
id: 1302 unit: def download_weights() file: backends/neuron/server/text_generation_server/cli.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1303 unit: fn from() file: backends/client/src/v2/sharded_client.rs start line: 197 end line: 205 size: 9 LOC McCabe index: 1 number of parameters: 1
id: 1304 unit: fn from() file: backends/client/src/v3/sharded_client.rs start line: 206 end line: 214 size: 9 LOC McCabe index: 1 number of parameters: 1
id: 1305 unit: fn drop() file: backends/llamacpp/src/backend.rs start line: 315 end line: 323 size: 9 LOC McCabe index: 3 number of parameters: 1
id: 1306 unit: def add_lora_sgmv_cutlass() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1307 unit: def _add_lora_sgmv_cutlass_legacy() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1308 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 2
id: 1309 unit: def _cached_compile_fsm() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 3
id: 1310 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1311 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1312 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1313 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1314 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4
id: 1315 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1316 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1317 unit: def load_qkv() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 5
id: 1318 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1319 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1320 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4
id: 1321 unit: def _update_causal_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1322 unit: def _prepare_4d_causal_attention_mask_with_cache_position() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1323 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 4
id: 1324 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1325 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3
id: 1326 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1327 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1328 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1329 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1330 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4
id: 1331 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1332 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1333 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3
id: 1334 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1335 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1336 unit: def _prepare_4d_causal_attention_mask_with_cache_position() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1337 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3
id: 1338 unit: def get_and_maybe_dequant_weights() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 1
id: 1339 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1340 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 5
id: 1341 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1342 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3
id: 1343 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1344 unit: def preprocess_image() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 6 number of parameters: 2
id: 1345 unit: def check_initialized() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 1
id: 1346 unit: def pad_block_fp8_weight_naive() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3
id: 1347 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0
id: 1348 unit: def _lookup_loader() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2
id: 1349 unit: def fast_awq_to_gptq() file: backends/gaudi/server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 7 number of parameters: 2
id: 1350 unit: def grouped_topk() file: backends/gaudi/server/text_generation_server/layers/moe/fused_moe.py start
line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1351 unit: def _load_expert_multi_weights_col() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1352 unit: def _async_h2d_tensor_copy() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2 id: 1353 unit: def trim_seqlen_metadata() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 1 id: 1354 unit: def download_weights() file: backends/gaudi/server/text_generation_server/cli.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1355 unit: fn get_anyres_image_grid_shape() file: router/src/config.rs start line: 13 end line: 21 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 1356 unit: fn serialize_function() file: router/src/lib.rs start line: 1168 end line: 1176 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 1357 unit: fn image_tokens_fixup() file: router/src/validation.rs start line: 777 end line: 785 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 1358 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 2 id: 1359 unit: def __init__() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1360 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1361 unit: def __init__() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 5 id: 1362 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1363 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1364 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 1365 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_config.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1366 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 1367 unit: def forward() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1368 unit: def preprocess() file: server/text_generation_server/models/custom_modeling/idefics_image_processing.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1369 unit: def __init__() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 1370 unit: def forward() file: 
server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1371 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3 id: 1372 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3 id: 1373 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 2 id: 1374 unit: def _reorder_cache() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2 id: 1375 unit: def __call__() file: server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1376 unit: def _process_images_for_pas() file: server/text_generation_server/models/custom_modeling/gemma3/image_processing_gemma3.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1377 unit: def __init__() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 1378 unit: def _reorder_cache() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2 id: 1379 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3 id: 1380 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3 id: 1381 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 5 id: 1382 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1383 unit: def _reorder_cache() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2 id: 1384 unit: def _prepare_4d_causal_attention_mask_with_cache_position() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1385 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1386 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 1387 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1388 unit: def triton_copy_next_input_ids_inplace() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 9 
LOC McCabe index: 1 number of parameters: 0 id: 1389 unit: def forward() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1390 unit: def _forward_context() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1391 unit: def preprocess_image() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 6 number of parameters: 2 id: 1392 unit: def new_inference_params() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1393 unit: def check_initialized() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 1 id: 1394 unit: def __init__() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1395 unit: def _check_marlin_kernels() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 0 id: 1396 unit: def repack_fp8_for_marlin() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 1397 unit: def __init__() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1398 unit: def _lookup_loader() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 2 id: 1399 unit: def fast_awq_to_gptq() file: server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 7 number of parameters: 2 id: 1400 unit: def __init__() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1401 unit: def check_args() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1402 unit: def download_weights() file: server/text_generation_server/cli.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 1403 unit: def setup_sccache_locally() file: backends/trtllm/scripts/setup_sccache.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 0 id: 1404 unit: auto format() file: backends/trtllm/csrc/backend.hpp start line: 221 end line: 228 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1405 unit: fn new() file: backends/v2/src/client/grpc_client.rs start line: 249 end line: 256 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1406 unit: def append() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1407 unit: fn new() file: backends/client/src/v2/client.rs start line: 250 end line: 257 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1408 unit: fn new() file: backends/client/src/v3/client.rs start line: 296 end line: 303 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1409 unit: fn inject() file: backends/grpc-metadata/src/lib.rs start line: 23 end line: 30 size: 8 LOC McCabe index: 1 number of parameters: 1 
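
The McCabe index reported for each unit above is its cyclomatic complexity: roughly one plus the number of branch points (conditionals, loops, and similar decision constructs) inside the unit, independent of its LOC. The snippet below is a minimal, hypothetical Python illustration (not taken from the repository; the helper name and fields are invented) of why a short filter()-style helper of under ten lines can still report an index of 4. Counting rules vary slightly between tools (some also count boolean short-circuits and exception handlers), so the indices in this listing are best read as relative rankings rather than exact figures.

```python
# Hypothetical example, not from text-generation-inference: a small
# filter()-style helper whose cyclomatic complexity (McCabe index) is 4
# even though it is only ~10 LOC.
# Complexity = 1 (single entry path) + 3 decision points (two `if`s, one loop).

from typing import List, Optional


def filter_ids(ids: List[int], keep: Optional[List[int]]) -> List[int]:
    # Decision point 1: early return when there is nothing to filter against.
    if keep is None:
        return ids
    kept = []
    # Decision point 2: the loop over candidate ids.
    for i in ids:
        # Decision point 3: membership test deciding whether to keep this id.
        if i in keep:
            kept.append(i)
    return kept
```
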
id: 1410 unit: def lora_a_sgmv_cutlass() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1411 unit: def lora_b_sgmv_cutlass() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1412 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1413 unit: def from_pb() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1414 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 1415 unit: def from_pb_processor() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1416 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1417 unit: def load_attention() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 1418 unit: def load_row() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 4 id: 1419 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 1420 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1421 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1422 unit: def repeat_kv() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1423 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1424 unit: def load() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1425 unit: def no_fp8() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 1 id: 1426 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 5 id: 1427 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1428 unit: def 
forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1429 unit: def repeat_kv() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1430 unit: def load() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1431 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1432 unit: def load() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1433 unit: def repeat_kv() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1434 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1435 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1436 unit: def __call__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1437 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 6 id: 1438 unit: def _prompt_split_image() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1439 unit: def from_pb_processor() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1440 unit: def init_kv_cache() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1441 unit: def fallback() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1442 unit: def forward() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1443 unit: def dequant_block_fp8_weight_naive() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1444 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 1445 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1446 unit: def forward_layer_type() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 
end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1447 unit: def load_gate_up() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 1448 unit: def load_qkv() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1449 unit: def load() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 1450 unit: def _get_rope_config() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1451 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1452 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1453 unit: def unpack_weight_from_cuda_old_format() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1454 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1455 unit: def load_layer_norm() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1456 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 1457 unit: def _load_expert_weights() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1458 unit: def _load_expert_weights_row() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1459 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1460 unit: def paged_reshape_and_cache() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1461 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 3 id: 1462 unit: def ranks() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 1 id: 1463 unit: fn new() file: backends/v3/src/client/grpc_client.rs start line: 285 end line: 292 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1464 unit: fn new() file: backends/v3/src/client/grpc_client.rs start line: 303 end line: 310 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1465 unit: fn allocate() file: backends/v3/src/block_allocator.rs start line: 132 end line: 140 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 1466 unit: fn new() file: 
backends/v3/src/block_allocator.rs start line: 148 end line: 156 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 1467 unit: fn schedule() file: router/src/infer/mod.rs start line: 30 end line: 42 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1468 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1469 unit: def from_pb_processor() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1470 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1471 unit: def load_attention() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 1472 unit: def load_row() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 4 id: 1473 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 1474 unit: def __init__() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1475 unit: def convert_to_rgb() file: server/text_generation_server/models/custom_modeling/idefics_image_processing.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 1 id: 1476 unit: def normalize() file: server/text_generation_server/models/custom_modeling/idefics_image_processing.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1477 unit: def incremental_to_binary_attention_mask() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1478 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1479 unit: def __init__() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 1480 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1481 unit: def load() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1482 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1483 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1484 unit: def gen_slopes() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 1485 unit: def pan_and_scan() file: 
server/text_generation_server/models/custom_modeling/gemma3/image_processing_gemma3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1486 unit: def no_fp8() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 1 id: 1487 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 5 id: 1488 unit: def __init__() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1489 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1490 unit: def _expand_mask() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 1491 unit: def __init__() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 1492 unit: def __init__() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1493 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1494 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1495 unit: def repeat_kv() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1496 unit: def load() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1497 unit: def load() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1498 unit: def repeat_kv() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1499 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1500 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1501 unit: def __call__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1502 unit: def extra_repr() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 1 id: 1503 unit: def _expand_mask() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 1504 unit: def forward() file: 
server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1505 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1506 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1507 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 1508 unit: def block_tables_to_ragged() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1509 unit: def copy_next_input_ids_inplace() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1510 unit: def to_pb() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1511 unit: def from_pb_processor() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1512 unit: def __init__() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1513 unit: def fallback() file: server/text_generation_server/models/transformers_flash_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1514 unit: def init_kv_cache() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1515 unit: def to_pb() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1516 unit: def fallback() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1517 unit: def _prompt_split_image() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1518 unit: def from_pb_processor() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1519 unit: def to_pb() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1520 unit: def fallback() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1521 unit: def forward() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1522 unit: def to_pb() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1523 unit: def __init__() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1524 unit: def get_linear() file: server/text_generation_server/layers/eetq.py start line: 0 end line: 0 
size: 8 LOC McCabe index: 2 number of parameters: 2 id: 1525 unit: def __init__() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 1526 unit: def get_perms() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 8 LOC McCabe index: 5 number of parameters: 0 id: 1527 unit: def permute_scales() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1528 unit: def _check_valid_shape() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 8 LOC McCabe index: 5 number of parameters: 2 id: 1529 unit: def __init__() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1530 unit: def forward_layer_type() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1531 unit: def load_gate_up() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 1532 unit: def load_qkv() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1533 unit: def load() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 1534 unit: def forward() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 3 id: 1535 unit: def _get_rope_config() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 1536 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1537 unit: def load_layer_norm() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 1538 unit: def __init__() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1539 unit: def paged_reshape_and_cache() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1540 unit: def create_decode_state_cuda_graphs() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 1541 unit: def ranks() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 1 id: 1542 unit: fn create_sequence() file: benchmark/src/generation.rs start line: 227 end line: 238 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 1543 unit: inline std::optional get_device_count() file: backends/trtllm/csrc/hardware.hpp start line: 19 end line: 25 size: 7 LOC McCabe index: 2 number of parameters: 0 id: 1544 unit: fn new() file: backends/v2/src/client/grpc_client.rs start line: 232 end line: 238 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 1545 unit: fn from() file: backends/v2/src/queue.rs start line: 392 end line: 398 size: 7 LOC McCabe index: 1 number 
of parameters: 1 id: 1546 unit: def info() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1547 unit: fn new() file: backends/client/src/v2/client.rs start line: 233 end line: 239 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 1548 unit: fn new() file: backends/client/src/v3/client.rs start line: 279 end line: 285 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 1549 unit: fn set() file: backends/grpc-metadata/src/lib.rs start line: 13 end line: 19 size: 7 LOC McCabe index: 3 number of parameters: 3 id: 1550 unit: def allgather() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 4 id: 1551 unit: def load_kernel() file: backends/gaudi/server/text_generation_server/utils/kernels.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1552 unit: def load_module_map() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1553 unit: def segmented_matmul() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1554 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1555 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1556 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1557 unit: def advance_batch() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1558 unit: def use_loader() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1559 unit: def advance_grammar() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1560 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 2 id: 1561 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1562 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1563 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1564 unit: def prepare_inputs_for_generation() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1565 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe 
index: 1 number of parameters: 0 id: 1566 unit: def select_experts() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2 id: 1567 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1568 unit: def model_input_names() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1569 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1570 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1571 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1572 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 1573 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1574 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1575 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1576 unit: def get_image_positions() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1577 unit: def from_tokenized() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1578 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 1 id: 1579 unit: def load() file: backends/gaudi/server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 5 id: 1580 unit: def unpad_weight() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 4 id: 1581 unit: def from_fp8() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1582 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1583 unit: def load() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1584 unit: def forward() file: 
backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1585 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1586 unit: def apply_llama3_scaling() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1587 unit: def _preprocessing() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1588 unit: def free() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1589 unit: def load_layer_norm_no_bias() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 1590 unit: def _load_expert_multi_weights_col() file: backends/gaudi/server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1591 unit: def store() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1592 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1593 unit: def store() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1594 unit: fn add_node_to_parent() file: backends/v3/src/radix.rs start line: 529 end line: 537 size: 7 LOC McCabe index: 2 number of parameters: 4 id: 1595 unit: fn update_access_time() file: backends/v3/src/radix.rs start line: 558 end line: 568 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1596 unit: fn from() file: backends/v3/src/queue.rs start line: 550 end line: 556 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1597 unit: def main() file: update_doc.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1598 unit: fn gcd() file: router/src/config.rs start line: 122 end line: 128 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1599 unit: fn encode_trait() file: router/src/lib.rs start line: 67 end line: 74 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1600 unit: fn encode_trait() file: router/src/lib.rs start line: 75 end line: 81 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 1601 unit: fn from() file: router/src/lib.rs start line: 1366 end line: 1372 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1602 unit: fn find_tool_by_name() file: router/src/infer/tool_grammar.rs start line: 12 end line: 18 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1603 unit: def allgather() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 4 id: 1604 unit: def load_kernel() file: server/text_generation_server/utils/kernels.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1605 unit: def load_module_map() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 7 LOC McCabe 
index: 1 number of parameters: 0 id: 1606 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1607 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1608 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1609 unit: def advance_batch() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 1610 unit: def use_loader() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1611 unit: def get_xpu_free_memory() file: server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1612 unit: def advance_grammar() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1613 unit: def from_pb() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1614 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 2 id: 1615 unit: def to_pb() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1616 unit: def to_pb() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1617 unit: def __add__() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1618 unit: def _create_cos_sin() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 1619 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1620 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1621 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_image_processing.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1622 unit: def rescale() file: server/text_generation_server/models/custom_modeling/idefics_image_processing.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1623 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1624 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1625 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 
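
Every record in this listing follows the same flat key-value layout (id, unit, file, start line, end line, size in LOC, McCabe index, number of parameters), so it can be post-processed mechanically, for example to rank files by average complexity. The sketch below is a rough illustration under that assumption: the regular expression, function name, and aggregation are mine, not part of any tool's documented output, and it expects the listing to be fed in as one plain-text string.

```python
# Rough sketch (assumed record layout, not an official parser): pull the
# per-unit fields out of a plain-text copy of this listing and compute a
# simple aggregate such as the mean McCabe index per file.

import re
from collections import defaultdict
from statistics import mean

RECORD = re.compile(
    r"id:\s*(?P<id>\d+)\s+unit:\s*(?P<unit>.+?)\s+file:\s*(?P<file>\S+)\s+"
    r"start line:\s*(?P<start>\d+)\s+end line:\s*(?P<end>\d+)\s+"
    r"size:\s*(?P<loc>\d+)\s+LOC\s+McCabe index:\s*(?P<mccabe>\d+)\s+"
    r"number of parameters:\s*(?P<params>\d+)"
)


def mean_mccabe_per_file(report_text: str) -> dict:
    """Group units by file and average their McCabe indices."""
    by_file = defaultdict(list)
    # Collapse line wrapping so records split across lines still match.
    flattened = " ".join(report_text.split())
    for m in RECORD.finditer(flattened):
        by_file[m.group("file")].append(int(m.group("mccabe")))
    return {path: mean(vals) for path, vals in by_file.items()}
```
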
id: 1626 unit: def _attn_bias() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1627 unit: def __init__() file: server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1628 unit: def select_experts() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2 id: 1629 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1630 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1631 unit: def model_input_names() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1632 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1633 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1634 unit: def expand_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1635 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1636 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1637 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1638 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1639 unit: def has_triton() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 0 id: 1640 unit: def prepare_position_slot_ids() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1641 unit: def triton_prepare_position_slot_ids() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1642 unit: def from_tokenized() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1643 unit: def get_image_positions() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1644 unit: def load() file: server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 5 id: 1645 unit: def load() 
file: server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 5 id: 1646 unit: def get_linear() file: server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2 id: 1647 unit: def from_fp8() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1648 unit: def _load_scalar_or_matrix_scale() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 3 id: 1649 unit: def from_fp8() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1650 unit: def __init__() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1651 unit: def load() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1652 unit: def __init__() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1653 unit: def forward() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 1654 unit: def get_cos_sin() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 7 LOC McCabe index: 5 number of parameters: 4 id: 1655 unit: def apply_llama3_scaling() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1656 unit: def forward() file: server/text_generation_server/layers/awq/quantize/cuda.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1657 unit: def free() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 1658 unit: def post_init() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1659 unit: def get_scratch_slice() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 1660 unit: def load_layer_norm_no_bias() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 1661 unit: def _load_expert_weights() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1662 unit: def _load_expert_multi_weights_col() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1663 unit: def _load_expert_multi_weights_col() file: server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1664 unit: def grouped_topk() file: server/text_generation_server/layers/moe/fused_moe_ipex.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1665 unit: def store() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1666 
unit: def __init__() file: server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 1667 unit: fn push_decode() file: benchmark/src/app.rs start line: 412 end line: 418 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 1668 unit: fn progress_gauge() file: benchmark/src/app.rs start line: 437 end line: 443 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 1669 unit: def valid_grammar() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 7 LOC McCabe index: 6 number of parameters: 2 id: 1670 unit: def setup_sccache_for_s3() file: backends/trtllm/scripts/setup_sccache.py start line: 0 end line: 0 size: 6 LOC McCabe index: 5 number of parameters: 0 id: 1671 unit: explicit compute_capabilities_t() file: backends/trtllm/csrc/hardware.hpp start line: 35 end line: 40 size: 6 LOC McCabe index: 2 number of parameters: 1 id: 1672 unit: fn append() file: backends/v2/src/queue.rs start line: 177 end line: 185 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1673 unit: def _generate_token() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1674 unit: def _cached_batch() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1675 unit: def filter() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 3 id: 1676 unit: def magnitude_based_pruning() file: backends/gaudi/server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1677 unit: def random_pruning() file: backends/gaudi/server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1678 unit: def load_and_merge_adapters() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1679 unit: def add_lora_a_bgmv() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1680 unit: def add_lora_b_bgmv() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1681 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1682 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1683 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1684 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1685 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1686 unit: def __init__() file: 
backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1687 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 5 id: 1688 unit: def advance_at_index() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 4 id: 1689 unit: def filter() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1690 unit: def has_tensor() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1691 unit: def get_packed_sharded() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1692 unit: def is_driver_compatible() file: backends/gaudi/server/text_generation_server/utils/version.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 0 id: 1693 unit: def advance_grammar() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1694 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1695 unit: def create_n_gram_speculation() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1696 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1697 unit: def from_pb() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1698 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1699 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1700 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1701 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1702 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1703 unit: def _merge_heads() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1704 unit: def _shape() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of 
parameters: 4 id: 1705 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1706 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1707 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1708 unit: def get_image_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1709 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1710 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1711 unit: def apply_rotary_pos_emb() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 6 id: 1712 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1713 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1714 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1715 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1716 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1717 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1718 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1719 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1720 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1721 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of 
parameters: 0 id: 1722 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1723 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1724 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1725 unit: def _shape() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1726 unit: def trunc_normal_tf_() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1727 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1728 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1729 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1730 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1731 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1732 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1733 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1734 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1735 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1736 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1737 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1738 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1739 unit: def __init__() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1740 unit: def post_process_image_text_to_text() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1741 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1742 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1743 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1744 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1745 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1746 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1747 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1748 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1749 unit: def image_text_replacement_fixup() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1750 unit: def get_unpadded_features() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1751 unit: def get_vision_embeds() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1752 unit: def from_pb() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1753 unit: def use_graphs() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 4 id: 1754 unit: def align_workers() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1755 unit: def from_pb() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1756 unit: def decode_token() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number 
of parameters: 0 id: 1757 unit: def setup_tracing() file: backends/gaudi/server/text_generation_server/tracing.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1758 unit: def delete() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2 id: 1759 unit: def apply_block_fp8_linear_hpu_dynamic() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1760 unit: def fp8_quantize() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1761 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1762 unit: def forward_lora() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1763 unit: def collect_lora_a() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 7 number of parameters: 2 id: 1764 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 6 LOC McCabe index: 5 number of parameters: 3 id: 1765 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1766 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1767 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 6 id: 1768 unit: def linear_ramp_mask() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1769 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1770 unit: def unpack_weight_and_zeros() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 3 id: 1771 unit: def setdeepattr() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1772 unit: def getdeepattr() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1773 unit: def unload() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1774 unit: def _load_expert_weights_row() file: backends/gaudi/server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1775 unit: def load() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1776 unit: def load() file: 
backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1777 unit: fn full_match_returns_correct_node() file: backends/v3/src/radix.rs start line: 888 end line: 896 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1778 unit: fn append() file: backends/v3/src/queue.rs start line: 226 end line: 234 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1779 unit: fn next_tool_call_id() file: router/src/lib.rs start line: 1054 end line: 1059 size: 6 LOC McCabe index: 1 number of parameters: 1 id: 1780 unit: fn serialize_as_string() file: router/src/lib.rs start line: 1187 end line: 1192 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1781 unit: fn nvidia_smi() file: launcher/src/env_runtime.rs start line: 48 end line: 53 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1782 unit: fn xpu_smi() file: launcher/src/env_runtime.rs start line: 55 end line: 60 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1783 unit: fn hl_smi() file: launcher/src/env_runtime.rs start line: 62 end line: 67 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1784 unit: fn kv_vram_per_tok() file: launcher/src/main.rs start line: 310 end line: 318 size: 6 LOC McCabe index: 2 number of parameters: 1 id: 1785 unit: fn token_vram() file: launcher/src/main.rs start line: 330 end line: 335 size: 6 LOC McCabe index: 1 number of parameters: 1 id: 1786 unit: int get_groupsize() file: server/exllama_kernels/exllama_kernels/exllama_ext.cpp start line: 68 end line: 73 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1787 unit: def magnitude_based_pruning() file: server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1788 unit: def random_pruning() file: server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1789 unit: def load_and_merge_adapters() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1790 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1791 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1792 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1793 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1794 unit: def filter() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1795 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1796 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1797 unit: def advance_at_index() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 4 id: 1798 unit: def filter() file: 
server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1799 unit: def get_linear() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1800 unit: def has_tensor() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1801 unit: def get_packed_sharded() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1802 unit: def get_cpu_free_memory() file: server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1803 unit: def advance_grammar() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1804 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1805 unit: def create_n_gram_speculation() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1806 unit: def __init__() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1807 unit: def from_pb() file: server/text_generation_server/models/bloom.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1808 unit: def from_pb() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1809 unit: def _shape() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1810 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1811 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1812 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1813 unit: def _merge_heads() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1814 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1815 unit: def to_dict() file: server/text_generation_server/models/custom_modeling/idefics_config.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 1 id: 1816 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1817 unit: def _merge_heads() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1818 unit: def get_vision_embeds() file: 
server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1819 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1820 unit: def _shape() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1821 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1822 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1823 unit: def get_attention_mask() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1824 unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1825 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1826 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1827 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1828 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1829 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1830 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1831 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1832 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1833 unit: def _shape() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1834 unit: def trunc_normal_tf_() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1835 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1836 unit: def _shape() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1837 
unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1838 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1839 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1840 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1841 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1842 unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1843 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1844 unit: def post_process_image_text_to_text() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1845 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1846 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1847 unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1848 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1849 unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1850 unit: def _shape() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1851 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 4 id: 1852 unit: def get_vision_embeds() file: server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1853 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1854 unit: def layer_norm() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 6 number of parameters: 3 id: 1855 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 
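Since every record carries the same three metrics (size in LOC, McCabe index, parameter count), the parsed listing can also be re-ranked or filtered locally. A short sketch, reusing the hypothetical Unit and parse_units helpers above with an arbitrary complexity threshold:

    def most_complex(units: list[Unit], min_mccabe: int = 10) -> list[Unit]:
        """Units at or above a chosen McCabe threshold, most complex first."""
        return sorted(
            (u for u in units if u.mccabe >= min_mccabe),
            key=lambda u: (u.mccabe, u.loc),
            reverse=True,
        )

    # e.g. most_complex(parse_units(report_text))[:10] lists the ten most
    # complex units found anywhere in the report text.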
id: 1856 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1857 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1858 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1859 unit: def get_vision_embeds() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1860 unit: def slots_filtering() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1861 unit: def triton_slots_filtering() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1862 unit: def triton_block_tables_to_padded() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1863 unit: def triton_block_tables_to_ragged() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1864 unit: def from_pb() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1865 unit: def from_pb() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1866 unit: def from_pb() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1867 unit: def image_text_replacement_fixup() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1868 unit: def get_unpadded_features() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1869 unit: def get_vision_embeds() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1870 unit: def from_pb() file: server/text_generation_server/models/galactica.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1871 unit: def from_pb() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1872 unit: def from_pb() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1873 unit: def decode_token() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1874 unit: def setup_tracing() file: server/text_generation_server/tracing.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1875 unit: def delete() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2 id: 1876 unit: def 
fp8_quantize() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1877 unit: def __init__() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1878 unit: def forward_lora() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1879 unit: def collect_lora_a() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 7 number of parameters: 2 id: 1880 unit: def __init__() file: server/text_generation_server/layers/compressed_tensors/wna16_int.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1881 unit: def __str__() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 1 id: 1882 unit: def __init__() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1883 unit: def forward() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1884 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1885 unit: def __init__() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 6 id: 1886 unit: def linear_ramp_mask() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1887 unit: def setdeepattr() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1888 unit: def getdeepattr() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1889 unit: def unload() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1890 unit: def ext_q4_matmul() file: server/text_generation_server/layers/gptq/exllama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1891 unit: def ext_gemm_half_q_half() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1892 unit: def _load_expert_weights_row() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1893 unit: def _load_expert_multi_weights_col() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1894 unit: def _load_expert_weights_row() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1895 unit: def _pack_weight() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1896 unit: def _load_expert_weights_row() file: server/text_generation_server/layers/moe/unquantized.py start line: 0 end line: 0 size: 6 LOC McCabe 
index: 1 number of parameters: 0 id: 1897 unit: def _use_rocm_custom_paged_attention() file: server/text_generation_server/layers/attention/rocm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1898 unit: def load() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1899 unit: def load() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1900 unit: def __init__() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1901 unit: def __init__() file: clients/python/text_generation/client.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1902 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1903 unit: def __init__() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1904 unit: def get_gpu_names() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1905 unit: def get_gpu_name() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1906 unit: static void trycatch() file: backends/trtllm/csrc/ffi.hpp start line: 18 end line: 22 size: 5 LOC McCabe index: 4 number of parameters: 2 id: 1907 unit: fn from() file: backends/v2/src/client/mod.rs start line: 51 end line: 55 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1908 unit: fn from() file: backends/v2/src/client/mod.rs start line: 59 end line: 63 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1909 unit: fn test_next_batch_empty() file: backends/v2/src/queue.rs start line: 468 end line: 473 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1910 unit: def get_env_dict() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1911 unit: def reset() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1912 unit: def clear() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 5 LOC McCabe index: 4 number of parameters: 2 id: 1913 unit: def _clear() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 5 LOC McCabe index: 4 number of parameters: 2 id: 1914 unit: fn from() file: backends/client/src/lib.rs start line: 44 end line: 48 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1915 unit: fn from() file: backends/client/src/lib.rs start line: 52 end line: 56 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1916 unit: fn clear_kv_cache() file: backends/llamacpp/src/backend.rs start line: 288 end line: 292 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1917 unit: fn drop() file: backends/llamacpp/src/backend.rs start line: 406 end line: 410 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1918 unit: def log_once() file: backends/gaudi/server/text_generation_server/utils/log.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1919 unit: def prune() file: 
backends/gaudi/server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1920 unit: def _load_and_merge() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1921 unit: def check_architectures() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1922 unit: def static_warper() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1923 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1924 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1925 unit: def _remove_duplicate_names() file: backends/gaudi/server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1926 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1927 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1928 unit: def _get_handle() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1929 unit: def _get_slice() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1930 unit: def get_weights_col_packed_qkv() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1931 unit: def from_pb() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1932 unit: def batch_top_tokens() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1933 unit: def _get_greenlist_ids() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1934 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1935 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1936 unit: def _expand_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1937 unit: def _prepare_attn_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of 
parameters: 0 id: 1938 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1939 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1940 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1941 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1942 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1943 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1944 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1945 unit: def get_text_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1946 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1947 unit: def get_anyres_image_grid_shape() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1948 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1949 unit: def _merge_input_ids_with_image_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1950 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1951 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1952 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5 id: 1953 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1954 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1955 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py 
start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1956 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1957 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1958 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1959 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1960 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1961 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1962 unit: def _merge_input_ids_with_image_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1963 unit: def _prepare_aspect_ratio_attention_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1964 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1965 unit: def apply_class_embedding() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1966 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1967 unit: def load() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5 id: 1968 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1969 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1970 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1971 unit: def _merge_input_ids_with_image_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1972 unit: def _v_up_proj_and_o_proj() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 
1973 unit: def apply_class_embedding() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1974 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1975 unit: def apply_class_embedding() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1976 unit: def get_anyres_image_grid_shape() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1977 unit: def pad_list() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1978 unit: def warmup() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1979 unit: def requantize_with_max_scale() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1980 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1981 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1982 unit: def _load_scalar_or_matrix_scale() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1983 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 2 id: 1984 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1985 unit: def init_8bit_state() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1986 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1987 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1988 unit: def __str__() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1989 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1990 unit: def simple_norm() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1991 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end 
line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1992 unit: def _create_inv_freq() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1993 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1994 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1995 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1996 unit: def _quantize() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 5 id: 1997 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1998 unit: def add_batch() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1999 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2000 unit: def fused_topk() file: backends/gaudi/server/text_generation_server/layers/moe/fused_moe.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2001 unit: def is_supported() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 2002 unit: def __post_init__() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 2003 unit: def subtuple() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2004 unit: def get_start_stop_idxs_for_rank() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2005 unit: def shard_lora_weights() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2006 unit: def __init__() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2007 unit: def _transpose_weights() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 2008 unit: def can_vectorize() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2009 unit: def get_data() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2010 unit: def from_meta() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 
number of parameters: 0 id: 2011 unit: def adapter_keys() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2012 unit: def __init__() file: backends/gaudi/server/text_generation_server/server.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2013 unit: fn shared_prefix() file: backends/v3/src/radix.rs start line: 626 end line: 632 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 2014 unit: fn from() file: backends/v3/src/client/mod.rs start line: 43 end line: 47 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2015 unit: fn from() file: backends/v3/src/client/mod.rs start line: 51 end line: 55 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2016 unit: fn drop() file: backends/v3/src/block_allocator.rs start line: 20 end line: 24 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2017 unit: fn name() file: router/src/infer/mod.rs start line: 44 end line: 49 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2018 unit: fn mlp_vram_per_tok() file: launcher/src/main.rs start line: 320 end line: 328 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2019 unit: fn shutdown_shards() file: launcher/src/main.rs start line: 1251 end line: 1260 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2020 unit: void cleanup() file: server/exllama_kernels/exllama_kernels/exllama_ext.cpp start line: 95 end line: 99 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2021 unit: def log_once() file: server/text_generation_server/utils/log.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2022 unit: def prune() file: server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2023 unit: def _load_and_merge() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2024 unit: def check_architectures() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2025 unit: def static_warper() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2026 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2027 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2028 unit: def _remove_duplicate_names() file: server/text_generation_server/utils/convert.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2029 unit: def get_weights_col_packed() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2030 unit: def get_weights_col_packed() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2031 unit: def _get_handle() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2032 unit: def _get_slice() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number 
of parameters: 2 id: 2033 unit: def get_weights_col_packed_qkv() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2034 unit: def get_cuda_free_memory() file: server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2035 unit: def from_pb() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2036 unit: def batch_top_tokens() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2037 unit: def _get_greenlist_ids() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2038 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_vision.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2039 unit: def expand_mask() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2040 unit: def _split_heads() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2041 unit: def forward() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2042 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2043 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2044 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2045 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2046 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2047 unit: def _expand_mask() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2048 unit: def _prepare_attn_mask() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2049 unit: def get_anyres_image_grid_shape() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2050 unit: def forward() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2051 unit: def _merge_input_ids_with_image_features() file: server/text_generation_server/models/custom_modeling/llava_next.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2052 unit: 
def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2053 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2054 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2055 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2056 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2057 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2058 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2059 unit: def get_text_features() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2060 unit: def is_url() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2061 unit: def image_tokens() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2062 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2063 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2064 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2065 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2066 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2067 unit: def rms_norm() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2068 unit: def forward() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2069 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2070 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2071 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py start 
line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2072 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2073 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2074 unit: def _make_causal_mask() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2075 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_neox_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2076 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2077 unit: def _merge_input_ids_with_image_features() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2078 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2079 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2080 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2081 unit: def _merge_input_ids_with_image_features() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2082 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_deepseek_v3_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2083 unit: def apply_class_embedding() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2084 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2085 unit: def apply_class_embedding() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2086 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2087 unit: def _make_causal_mask() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2088 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2089 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2090 unit: def _prepare_aspect_ratio_attention_mask() file: 
server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2091 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2092 unit: def apply_class_embedding() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2093 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2094 unit: def load() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 5 id: 2095 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2096 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2097 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2098 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2099 unit: def pre_process_inputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2100 unit: def pre_process_inputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2101 unit: def block_tables_to_padded() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2102 unit: def grid() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2103 unit: def grid() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2104 unit: def grid() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2105 unit: def grid() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2106 unit: def grid() file: server/text_generation_server/models/metadata_kernels.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2107 unit: def warmup() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2108 unit: def get_anyres_image_grid_shape() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2109 unit: def requantize_with_max_scale() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2110 unit: def __init__() 
file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2111 unit: def get_weights_col_packed() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2112 unit: def forward() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 2 id: 2113 unit: def forward() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2114 unit: def pack_cols() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2115 unit: def unpack_cols() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2116 unit: def __init__() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2117 unit: def get_weights_col_packed() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2118 unit: def __post_init__() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2119 unit: def get_linear() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2120 unit: def __init__() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2121 unit: def get_weights_col_packed() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2122 unit: def get_linear() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2123 unit: def init_8bit_state() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2124 unit: def get_weights_col_packed() file: server/text_generation_server/layers/compressed_tensors/wna16_int.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2125 unit: def get_weights_col_packed() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2126 unit: def get_weights_col_packed() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2127 unit: def __init__() file: server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2128 unit: def __str__() file: server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2129 unit: def get_weights_col_packed() file: server/text_generation_server/layers/compressed_tensors/w8an_fp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2130 unit: def __init__() file: 
server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2131 unit: def get_weights_col_packed() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2132 unit: def simple_norm() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2133 unit: def __init__() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2134 unit: def _create_inv_freq() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 2135 unit: def get_cos_sin() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2136 unit: def get_weights_col_packed() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2137 unit: def get_weights_col_packed() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2138 unit: def __init__() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 2139 unit: def _quantize() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 5 id: 2140 unit: def forward() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 2141 unit: def add_batch() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 2142 unit: def forward() file: server/text_generation_server/layers/gptq/exllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2143 unit: def ext_make_q_matrix() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2144 unit: def forward() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2145 unit: def forward() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 2146 unit: def can_use_marlin_moe_gemm() file: server/text_generation_server/layers/moe/gptq_marlin.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2147 unit: def fused_topk() file: server/text_generation_server/layers/moe/fused_moe_ipex.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2148 unit: def __post_init__() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 2149 unit: def get_workspace() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2150 unit: def create_decode_state() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 
5 LOC McCabe index: 1 number of parameters: 0 id: 2151 unit: def dropout_rng() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 6 id: 2152 unit: def get_start_stop_idxs_for_rank() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 2153 unit: def shard_lora_weights() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2154 unit: def __init__() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2155 unit: def _transpose_weights() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 2156 unit: def can_vectorize() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 2157 unit: def get_data() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2158 unit: def from_meta() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2159 unit: def adapter_keys() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 2160 unit: def __init__() file: server/text_generation_server/server.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 2161 unit: fn push_prefill() file: benchmark/src/app.rs start line: 406 end line: 410 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 2162 unit: def __init__() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2163 unit: function generate_payload() file: load_tests/long.js start line: 60 end line: 63 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2164 unit: function generate_payload() file: load_tests/common.js start line: 60 end line: 63 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2165 unit: std::vector backend_t::pull_tokens() file: backends/trtllm/csrc/backend.cpp start line: 71 end line: 74 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2166 unit: void backend_t::cancel() file: backends/trtllm/csrc/backend.cpp start line: 76 end line: 79 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2167 unit: auto format() file: backends/trtllm/csrc/backend.hpp start line: 213 end line: 216 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2168 unit: void cancel() file: backends/trtllm/csrc/ffi.hpp start line: 139 end line: 142 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2169 unit: def __init__() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2170 unit: def __init__() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2171 unit: def max_prefill_length() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2172 unit: def serve() file: backends/neuron/server/text_generation_server/server.py 
start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2173 unit: fn inject_context() file: backends/grpc-metadata/src/lib.rs start line: 37 end line: 40 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2174 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2175 unit: def disjoint_merge() file: backends/gaudi/server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2176 unit: def orient_for_rank() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2177 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2178 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2179 unit: def advance() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2180 unit: def _advance() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2181 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2182 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2183 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2184 unit: def from_pb() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2185 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2186 unit: def forward() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2187 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2188 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2189 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2190 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2191 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of 
parameters: 2 id: 2192 unit: def reshape_for_broadcast() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 2193 unit: def apply_rotary_emb() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2194 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2195 unit: def vision_reshape_for_broadcast() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 2196 unit: def rotate_half() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2197 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_moe_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2198 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2199 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2200 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2201 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2202 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2203 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2204 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2205 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2206 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2207 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2208 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2209 unit: def get_inputs_embeds() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2210 unit: def _prepare_cross_attention_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2211 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2212 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2213 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2214 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2215 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2216 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2217 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2218 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2219 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2220 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2221 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2222 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2223 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2224 unit: def get_position_ids() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2225 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2226 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2227 unit: def 
get_position_ids() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2228 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2229 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2230 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2231 unit: def preprocess_text() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2232 unit: def update_encoder_cache() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2233 unit: def free_encoder_cache() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2234 unit: def get_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2235 unit: def forward() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2236 unit: def clear() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2237 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2238 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2239 unit: def normalize_e4m3fn_to_native_float8() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2240 unit: def per_tensor_dequantize() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2241 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2242 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2243 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2244 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2245 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 
4 LOC McCabe index: 1 number of parameters: 0 id: 2246 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2247 unit: def get_cos_sin() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2248 unit: def find_correction_dim() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2249 unit: def get_mscale() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2250 unit: def apply_order() file: backends/gaudi/server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2251 unit: def error_raiser_hpu() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2252 unit: def quantize() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2253 unit: def load() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2254 unit: def error_raiser_hpu() file: backends/gaudi/server/text_generation_server/layers/gptq/hpu.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2255 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2256 unit: def get_weight_fn_sharded() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2257 unit: def get_weight_fn() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2258 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2259 unit: def quant_input() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2260 unit: def map_weights_for_model() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2261 unit: def weights_a() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2262 unit: def weights_b() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2263 unit: def weights_a_t() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2264 unit: def weights_b_t() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2265 unit: def 
get_scaling_factor() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2266 unit: def _convert_lora() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2267 unit: def map_weights_for_model() file: backends/gaudi/server/text_generation_server/adapters/config.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2268 unit: def remove_adapter() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2269 unit: def __init__() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2270 unit: def disjoint_merge() file: server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2271 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2272 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2273 unit: def advance() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2274 unit: def _advance() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2275 unit: def _cached_compile_fsm() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2276 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2277 unit: def get_weights_row() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2278 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2279 unit: def from_pb() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2280 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2281 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2282 unit: def forward() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2283 unit: def prepare_attn_mask() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2284 unit: def rotate_half() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2285 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py start line: 0 
end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2286 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_rw_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2287 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 2288 unit: def __init__() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2289 unit: def model_input_names() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2290 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gpt2_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2291 unit: def forward() file: server/text_generation_server/models/custom_modeling/phi_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2292 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2293 unit: def model_input_names() file: server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2294 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2295 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2296 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2297 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2298 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2299 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2300 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2301 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2302 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2303 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2304 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_gemma_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 
2305 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2306 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2307 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2308 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2309 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2310 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2311 unit: def rotate_half() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2312 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2313 unit: def get_position_ids() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2314 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2315 unit: def rotate_half() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2316 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2317 unit: def get_position_ids() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2318 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2319 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2320 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2321 unit: def __init__() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2322 unit: def get_inputs_embeds() file: server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2323 unit: def _prepare_cross_attention_mask() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2324 unit: def forward() file: 
server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2325 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2326 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2327 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2328 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2329 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2330 unit: def shape() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2331 unit: def unshape() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2332 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2333 unit: def pre_process_inputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2334 unit: def preprocess_text() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2335 unit: def update_encoder_cache() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2336 unit: def free_encoder_cache() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2337 unit: def get_inputs_embeds() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2338 unit: def forward() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2339 unit: def _insert_split_marker() file: server/text_generation_server/models/galactica.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2340 unit: def clear() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2341 unit: def __init__() file: server/text_generation_server/layers/eetq.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2342 unit: def forward() file: server/text_generation_server/layers/eetq.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2343 unit: def __init__() file: server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2344 unit: def __init__() file: server/text_generation_server/layers/linear.py start line: 0 end 
line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2345 unit: def normalize_e4m3fn_to_native_float8() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2346 unit: def per_tensor_dequantize() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2347 unit: def get_shared_device_identity() file: server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2348 unit: def __init__() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2349 unit: def get_pack_factor() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2350 unit: def __post_init__() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2351 unit: def __init__() file: server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2352 unit: def __init__() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2353 unit: def __init__() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2354 unit: def forward() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2355 unit: def forward() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2356 unit: def find_correction_dim() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2357 unit: def get_mscale() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2358 unit: def apply_order() file: server/text_generation_server/layers/awq/conversion_utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2359 unit: def forward() file: server/text_generation_server/layers/awq/quantize/ipex.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2360 unit: def forward() file: server/text_generation_server/layers/gptq/ipex.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2361 unit: def quantize() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2362 unit: def load() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2363 unit: def ext_make_q4() file: server/text_generation_server/layers/gptq/exllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 5 id: 2364 unit: def __init__() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2365 unit: def prepare() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 
size: 4 LOC McCabe index: 1 number of parameters: 1 id: 2366 unit: def __init__() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 2367 unit: def get_weight_fn() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 2368 unit: def dropout_offsets() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 6 id: 2369 unit: def dropout_mask() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 6 id: 2370 unit: def map_weights_for_model() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2371 unit: def weights_a() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2372 unit: def weights_b() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2373 unit: def weights_a_t() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2374 unit: def weights_b_t() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2375 unit: def get_scaling_factor() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2376 unit: def _convert_lora() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 2377 unit: def map_weights_for_model() file: server/text_generation_server/adapters/config.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 2378 unit: def remove_adapter() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2379 unit: fn px() file: benchmark/src/table.rs start line: 167 end line: 170 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 2380 unit: def valid_repetition_penalty() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2381 unit: def valid_frequency_penalty() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2382 unit: def valid_seed() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2383 unit: def valid_temp() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2384 unit: def valid_top_k() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2385 unit: def valid_top_p() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 2386 unit: def valid_truncate() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2387 unit: def 
valid_typical_p() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 2388 unit: def valid_top_n_tokens() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 2 id: 2389 unit: def valid_input() file: clients/python/text_generation/types.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 2390 unit: def stop() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2391 unit: def stop() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2392 unit: size_t backend_t::num_tokens_ready() file: backends/trtllm/csrc/backend.cpp start line: 49 end line: 51 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2393 unit: fn new() file: backends/v2/src/client/sharded_client.rs start line: 23 end line: 25 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2394 unit: fn start_health() file: backends/v2/src/backend.rs start line: 108 end line: 110 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2395 unit: fn name() file: backends/v2/src/backend.rs start line: 112 end line: 114 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2396 unit: def stopped() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2397 unit: def __init__() file: backends/neuron/server/text_generation_server/server.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2398 unit: def fetch_model() file: backends/neuron/server/text_generation_server/model.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2399 unit: fn new() file: backends/client/src/v2/sharded_client.rs start line: 23 end line: 25 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2400 unit: fn from() file: backends/client/src/lib.rs start line: 61 end line: 63 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2401 unit: fn chunks_to_string() file: backends/client/src/lib.rs start line: 70 end line: 73 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2402 unit: fn new() file: backends/client/src/v3/sharded_client.rs start line: 23 end line: 25 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2403 unit: fn inject_context() file: backends/grpc-metadata/src/lib.rs start line: 33 end line: 36 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2404 unit: fn decode() file: backends/llamacpp/src/backend.rs start line: 284 end line: 286 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2405 unit: fn generated_name_override() file: backends/llamacpp/build.rs start line: 9 end line: 11 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2406 unit: def log_master() file: backends/gaudi/server/text_generation_server/utils/log.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2407 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2408 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2409 unit: def merge_adapters() file: 
backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2410 unit: def set_support_chunking() file: backends/gaudi/server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2411 unit: def get_support_chunking() file: backends/gaudi/server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2412 unit: def set_max_prefill_tokens() file: backends/gaudi/server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2413 unit: def get_max_prefill_tokens() file: backends/gaudi/server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2414 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2415 unit: def get_tmp_tensor_for_size() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2416 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2417 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2418 unit: def get_linear() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2419 unit: def get_multi_weights_col() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 2420 unit: def get_multi_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 2421 unit: def get_hpu_free_memory() file: backends/gaudi/server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2422 unit: def get_speculate() file: backends/gaudi/server/text_generation_server/utils/speculate.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2423 unit: def set_speculate() file: backends/gaudi/server/text_generation_server/utils/speculate.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2424 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2425 unit: def pad_next_token_chooser_parameters() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2426 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2427 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2428 unit: def 
get_image_features() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2429 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2430 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2431 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2432 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2433 unit: def set_model_id() file: backends/gaudi/server/text_generation_server/models/globals.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2434 unit: def set_adapter_to_index() file: backends/gaudi/server/text_generation_server/models/globals.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2435 unit: def get_adapter_to_index() file: backends/gaudi/server/text_generation_server/models/globals.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2436 unit: def set() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2437 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 2438 unit: def collect_lora_a() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 3 LOC McCabe index: 6 number of parameters: 2 id: 2439 unit: def get_weights() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2440 unit: def get_multi_weights_col() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2441 unit: def get_multi_weights() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2442 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2443 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2444 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2445 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2446 unit: def get_cos_sin() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end 
line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2447 unit: def __post_init__() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2448 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2449 unit: def __post_init__() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2450 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2451 unit: def load() file: backends/gaudi/server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2452 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2453 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2454 unit: def max_rank() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2455 unit: def __init__() file: backends/gaudi/server/text_generation_server/server.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2456 unit: def exit_gracefully() file: backends/gaudi/server/text_generation_server/server.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2457 unit: fn invariants_hold_on_many_operations_remove_all() file: backends/v3/src/radix.rs start line: 919 end line: 921 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2458 unit: fn invariants_hold_on_many_operations_remove_subset() file: backends/v3/src/radix.rs start line: 924 end line: 926 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2459 unit: fn new() file: backends/v3/src/client/sharded_client.rs start line: 23 end line: 25 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2460 unit: fn from() file: backends/v3/src/client/mod.rs start line: 60 end line: 62 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2461 unit: fn start_health() file: backends/v3/src/backend.rs start line: 115 end line: 117 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2462 unit: fn name() file: backends/v3/src/backend.rs start line: 119 end line: 121 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2463 unit: fn free() file: backends/v3/src/block_allocator.rs start line: 218 end line: 220 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2464 unit: fn default_true() file: router/src/lib.rs start line: 1350 end line: 1352 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2465 unit: fn chunks_to_string() file: router/src/validation.rs start line: 860 end line: 863 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2466 unit: fn default() file: router/src/usage_stats.rs start line: 382 end line: 384 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2467 unit: fn try_from() file: launcher/src/main.rs start line: 1335 end line: 1337 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2468 unit: fn from() file: 
launcher/src/main.rs start line: 1810 end line: 1812 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2469 unit: def log_master() file: server/text_generation_server/utils/log.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2470 unit: def __init__() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2471 unit: def __init__() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2472 unit: def merge_adapters() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2473 unit: def set_support_chunking() file: server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2474 unit: def get_support_chunking() file: server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2475 unit: def set_max_prefill_tokens() file: server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2476 unit: def get_max_prefill_tokens() file: server/text_generation_server/utils/prefill_chunking.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2477 unit: def __init__() file: server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2478 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2479 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2480 unit: def get_multi_weights_col() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 2481 unit: def get_speculate() file: server/text_generation_server/utils/speculate.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2482 unit: def set_speculate() file: server/text_generation_server/utils/speculate.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2483 unit: def __init__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2484 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2485 unit: def forward() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2486 unit: def get_image_features() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2487 unit: def forward() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2488 unit: def __init__() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 
number of parameters: 4 id: 2489 unit: def _expand_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2490 unit: def set_adapter_to_index() file: server/text_generation_server/models/globals.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2491 unit: def get_adapter_to_index() file: server/text_generation_server/models/globals.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2492 unit: def set() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2493 unit: def from_unquant() file: server/text_generation_server/layers/marlin/fp8.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2494 unit: def __init__() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2495 unit: def __post_init__() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2496 unit: def __init__() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 2497 unit: def collect_lora_a() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 3 LOC McCabe index: 6 number of parameters: 2 id: 2498 unit: def __str__() file: server/text_generation_server/layers/compressed_tensors/wna16_int.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2499 unit: def __str__() file: server/text_generation_server/layers/compressed_tensors/wna16_int_24.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2500 unit: def get_weights() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2501 unit: def get_multi_weights_col() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2502 unit: def get_weights_row() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2503 unit: def forward() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 2504 unit: def __init__() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2505 unit: def __init__() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2506 unit: def __post_init__() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2507 unit: def get_linear() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2508 unit: def __post_init__() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2509 unit: def _hook() file: 
server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2510 unit: def __init__() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 2511 unit: def forward() file: server/text_generation_server/layers/gptq/triton.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 8 id: 2512 unit: def set_device() file: server/text_generation_server/layers/gptq/exllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2513 unit: def set_device() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2514 unit: def __init__() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2515 unit: def load() file: server/text_generation_server/layers/layernorm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 2516 unit: def create_prefill_with_paged_kv_state() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2517 unit: def create_prefill_state() file: server/text_generation_server/layers/attention/flashinfer.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 2518 unit: def max_rank() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 2519 unit: def __init__() file: server/text_generation_server/server.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 2520 unit: def exit_gracefully() file: server/text_generation_server/server.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 2521 unit: def __init__() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2522 unit: def run() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2523 unit: def stop() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2524 unit: def get_num_gpus() file: load_tests/benchmarks.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2525 unit: def sort_neuron_configs() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2526 unit: def lookup_compatible_cached_model() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2527 unit: def check_env_and_neuron_config_compatibility() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2528 unit: def get_neuron_config_for_model() file: backends/neuron/server/text_generation_server/tgi_env.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2529 unit: def info() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2530 unit: def warmup() file: 
backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2531 unit: def prefill() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2532 unit: def decode() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2533 unit: def filter() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2534 unit: def clear() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2535 unit: def from_pretrained() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2536 unit: def id() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2537 unit: def state() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2538 unit: def batch_id() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2539 unit: def request_id() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2540 unit: def cached_text() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2541 unit: def generation_config() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2542 unit: def generated_tokens() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2543 unit: def assign() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2544 unit: def pause() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2545 unit: def resume() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2546 unit: def _decode_next_tokens() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2547 unit: def select() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2548 unit: def generated_text() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2549 unit: def next_token() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2550 unit: def attention_mask() file: backends/neuron/server/text_generation_server/generator.py start 
line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2551 unit: def max_token() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2552 unit: def max_new_tokens() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 1 id: 2553 unit: def truncate() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2554 unit: def on_device_sampling() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2555 unit: def decode() file: backends/neuron/server/text_generation_server/generator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2556 unit: def to_gb_rounded() file: backends/gaudi/server/text_generation_server/utils/debug.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2557 unit: def count_hpu_graphs() file: backends/gaudi/server/text_generation_server/utils/debug.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2558 unit: def wait() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2559 unit: def allreduce() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2560 unit: def barrier() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2561 unit: def size() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2562 unit: def rank() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2563 unit: def _get_backend_name() file: backends/gaudi/server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2564 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2565 unit: def _apply_weights() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2566 unit: def merge() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2567 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2568 unit: def merge() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2569 unit: def merge() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2570 unit: def __init__() file: 
backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2571 unit: def merge() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2572 unit: def merge() file: backends/gaudi/server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2573 unit: def calculate_majority_sign_mask() file: backends/gaudi/server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2574 unit: def find_segments() file: backends/gaudi/server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2575 unit: def build() file: backends/gaudi/server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2576 unit: def get_loader() file: backends/gaudi/server/text_generation_server/utils/quantization.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2577 unit: def __hash__() file: backends/gaudi/server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2578 unit: def has_sgmv() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2579 unit: def use_cutlass_shrink() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2580 unit: def get_tmp_tensor() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2581 unit: def get_tmp_tensor_for_size_no_kernels() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2582 unit: def get_tmp_expand_size() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2583 unit: def get_tmp_tensors() file: backends/gaudi/server/text_generation_server/utils/sgmv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2584 unit: def _cached_weight_files() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2585 unit: def _weight_hub_files_from_model_info() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2586 unit: def _get_cached_revision_directory() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2587 unit: def weight_hub_files() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2588 unit: def try_to_load_from_cache() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2589 unit: def weight_files() file: 
backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2590 unit: def download_weights() file: backends/gaudi/server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2591 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2592 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2593 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2594 unit: def get_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2595 unit: def get_weights_col() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2596 unit: def get_multi_weights_col() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2597 unit: def get_multi_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2598 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2599 unit: def get_linear() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2600 unit: def __init__() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2601 unit: def get_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2602 unit: def get_shape() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2603 unit: def get_tensor() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2604 unit: def get_partial_sharded() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2605 unit: def get_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2606 unit: def get_weights_col_packed_gate_up() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2607 unit: def get_weights_col_packed() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2608 unit: def get_weights_col() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 
0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2609 unit: def get_multi_weights_col() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2610 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2611 unit: def get_multi_weights() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2612 unit: def loader() file: backends/gaudi/server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2613 unit: def download_peft() file: backends/gaudi/server/text_generation_server/utils/peft.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2614 unit: def synchronize_hpu() file: backends/gaudi/server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2615 unit: def noop() file: backends/gaudi/server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2616 unit: def advance_grammar_single_with_past_state() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2617 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2618 unit: def is_tokenizer_transparent() file: backends/gaudi/server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2619 unit: def _calc_greenlist_mask() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2620 unit: def _bias_greenlist_logits() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2621 unit: def __call__() file: backends/gaudi/server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2622 unit: def prepare_for_prefill() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2623 unit: def batch_tokenized_inputs() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2624 unit: def generate_cross_attention_states() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2625 unit: def set_inputs_embeds() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 2626 unit: def warmup_decode() file: backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2627 unit: def warmup_prefill() file: 
backends/gaudi/server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2628 unit: def to_pb() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2629 unit: def filter() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2630 unit: def concatenate() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2631 unit: def __len__() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2632 unit: def __len__() file: backends/gaudi/server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2633 unit: def load_multi_mqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2634 unit: def _load_multi_mqa_gptq() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2635 unit: def _load_multi_mqa() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2636 unit: def _make_causal_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2637 unit: def dropout_add() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2638 unit: def _split_heads() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2639 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2640 unit: def _convert_to_standard_cache() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2641 unit: def _convert_to_bloom_cache() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2642 unit: def set_input_embeddings() file: backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2643 unit: def get_input_embeddings() file: backends/gaudi/server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2644 unit: def _norm() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2645 
unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2646 unit: def extra_repr() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2647 unit: def create_chunked_attention_mask() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2648 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama4_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2649 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2650 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2651 unit: def promote_scalar() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2652 unit: def round_up() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2653 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2654 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 2655 unit: def lecun_normal_() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2656 unit: def default_flax_embed_init() file: backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2657 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2658 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2659 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2660 unit: def forward() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2661 unit: def extra_repr() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2662 unit: def forward() file: 
backends/gaudi/server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2663 unit: def __init__() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2664 unit: def batch_decode() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2665 unit: def decode() file: backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2666 unit: def num_key_value_heads() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2667 unit: def promote_scalar() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2668 unit: def select_experts() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2669 unit: def round_up() file: backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2670 unit: def scatter_image_embeds() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2671 unit: def gather_image_embeds() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2672 unit: def batch_tokenized_inputs() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2673 unit: def prepare_for_prefill() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2674 unit: def batch_type() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2675 unit: def max_past() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2676 unit: def warmup_decode() file: backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2677 unit: def prepare_for_decode() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2678 unit: def flatten() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2679 unit: def gather_list() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 3 id: 2680 unit: def 
batch_tokenized_inputs() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2681 unit: def concatenate() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2682 unit: def prepare_for_prefill() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2683 unit: def __len__() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2684 unit: def batch_type() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2685 unit: def max_past() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2686 unit: def warmup_prefill() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2687 unit: def forward() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2688 unit: def generate_token() file: backends/gaudi/server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2689 unit: def __len__() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2690 unit: def batch_type() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2691 unit: def generate_token() file: backends/gaudi/server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2692 unit: def batch_type() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2693 unit: def generate_token() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2694 unit: def warmup() file: backends/gaudi/server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2695 unit: def __init__() file: backends/gaudi/server/text_generation_server/tracing.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2696 unit: def __init__() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2697 unit: def pop() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2698 unit: def __len__() file: backends/gaudi/server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2699 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/linear.py start line: 0 end line: 0 
size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2700 unit: def get_fp8_linear() file: backends/gaudi/server/text_generation_server/layers/fp8.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2701 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2702 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2703 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2704 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2705 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2706 unit: def get_linear() file: backends/gaudi/server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2707 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2708 unit: def collect_lora_a() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2709 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2710 unit: def load() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 5 id: 2711 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2712 unit: def _get_target_loaders() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2713 unit: def _create_loader_for_group() file: backends/gaudi/server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2714 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2715 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2716 unit: def find_correction_range() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2717 unit: def _update_cos_sin_cache() file: backends/gaudi/server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2718 unit: def __init__() file: backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2719 unit: def device() file: 
backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2720 unit: def get_weights_col() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2721 unit: def get_multi_weights_col() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2722 unit: def get_weights_row() file: backends/gaudi/server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2723 unit: def device() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2724 unit: def is_layer_skipped_quantization() file: backends/gaudi/server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2725 unit: def enabled() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2726 unit: def ready() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2727 unit: def fasterquant() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2728 unit: def get_loaders() file: backends/gaudi/server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2729 unit: def torch_snr_error() file: backends/gaudi/server/text_generation_server/layers/gptq/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2730 unit: def load_conv2d_no_bias() file: backends/gaudi/server/text_generation_server/layers/conv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2731 unit: def get_weight_fn_sharded() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2732 unit: def get_weight_fn() file: backends/gaudi/server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2733 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2734 unit: def forward() file: backends/gaudi/server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2735 unit: def dtype() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2736 unit: def key() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2737 unit: def value() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2738 unit: def dtype() file: 
backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2739 unit: def key() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2740 unit: def value() file: backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2741 unit: def matmul_fp8() file: backends/gaudi/server/text_generation_server/layers/attention/hpu.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2742 unit: def clamp() file: backends/gaudi/server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2743 unit: def shard_on_dim() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2744 unit: def get_batch_types() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2745 unit: def has_adapter() file: backends/gaudi/server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2746 unit: def get_batch_types() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2747 unit: def speculative_tokens() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2748 unit: def has_adapter() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2749 unit: def __init__() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2750 unit: def add_adapter() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2751 unit: def is_empty() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2752 unit: def layer_names() file: backends/gaudi/server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2753 unit: def __init__() file: server/text_generation_server/interceptor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2754 unit: def wait() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2755 unit: def allreduce() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2756 unit: def barrier() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2757 unit: def size() file: server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2758 unit: def rank() file: 
server/text_generation_server/utils/dist.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2759 unit: def __init__() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2760 unit: def _apply_weights() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2761 unit: def merge() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2762 unit: def __init__() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2763 unit: def merge() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2764 unit: def merge() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2765 unit: def __init__() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2766 unit: def merge() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2767 unit: def merge() file: server/text_generation_server/utils/merges/strategies.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2768 unit: def calculate_majority_sign_mask() file: server/text_generation_server/utils/merges/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2769 unit: def find_segments() file: server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2770 unit: def build() file: server/text_generation_server/utils/segments.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2771 unit: def get_loader() file: server/text_generation_server/utils/quantization.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2772 unit: def __hash__() file: server/text_generation_server/utils/adapter.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2773 unit: def _cached_weight_files() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2774 unit: def _weight_hub_files_from_model_info() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2775 unit: def _get_cached_revision_directory() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2776 unit: def weight_hub_files() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2777 unit: def try_to_load_from_cache() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2778 unit: def weight_files() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2779 unit: def 
download_weights() file: server/text_generation_server/utils/hub.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2780 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2781 unit: def __call__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2782 unit: def __init__() file: server/text_generation_server/utils/logits_process.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2783 unit: def get_weights() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2784 unit: def get_weights_col() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2785 unit: def get_multi_weights_col() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2786 unit: def get_weights_row() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2787 unit: def get_linear() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2788 unit: def __init__() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2789 unit: def get_weights() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2790 unit: def get_shape() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2791 unit: def get_tensor() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2792 unit: def get_partial_sharded() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2793 unit: def get_weights() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2794 unit: def get_weights_col_packed_gate_up() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2795 unit: def get_weights_col_packed() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2796 unit: def get_weights_col() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2797 unit: def get_multi_weights_col() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2798 unit: def get_weights_row() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2799 unit: def loader() file: server/text_generation_server/utils/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2800 unit: def download_peft() file: 
server/text_generation_server/utils/peft.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2801 unit: def is_ipex_available() file: server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2802 unit: def noop() file: server/text_generation_server/utils/import_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2803 unit: def __call__() file: server/text_generation_server/utils/tokens.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2804 unit: def _calc_greenlist_mask() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2805 unit: def _bias_greenlist_logits() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2806 unit: def __call__() file: server/text_generation_server/utils/watermark.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2807 unit: def batch_type() file: server/text_generation_server/models/bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2808 unit: def forward() file: server/text_generation_server/models/bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2809 unit: def prepare_for_prefill() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2810 unit: def batch_tokenized_inputs() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2811 unit: def set_inputs_embeds() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 2812 unit: def cuda_graph_warmup() file: server/text_generation_server/models/mllama_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2813 unit: def to_pb() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2814 unit: def filter() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2815 unit: def concatenate() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2816 unit: def __len__() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2817 unit: def __len__() file: server/text_generation_server/models/types.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2818 unit: def make_causal_mask() file: server/text_generation_server/models/custom_modeling/neox_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2819 unit: def load_multi_mqa() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2820 unit: def _load_multi_mqa_gptq() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 
1 number of parameters: 0 id: 2821 unit: def _load_multi_mqa() file: server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2822 unit: def _make_causal_mask() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2823 unit: def dropout_add() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2824 unit: def _split_heads() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2825 unit: def forward() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2826 unit: def _convert_to_standard_cache() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2827 unit: def _convert_to_bloom_cache() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2828 unit: def set_input_embeddings() file: server/text_generation_server/models/custom_modeling/bloom_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2829 unit: def get_input_embeddings() file: server/text_generation_server/models/custom_modeling/clip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2830 unit: def is_image() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2831 unit: def batch_decode() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2832 unit: def decode() file: server/text_generation_server/models/custom_modeling/idefics_processing.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2833 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2834 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2835 unit: def _reset_is_causal() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2836 unit: def attn_bias_shape() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2837 unit: def build_attn_bias() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2838 unit: def build_alibi_bias() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2839 unit: def forward() file: 
server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2840 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2841 unit: def forward() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2842 unit: def __init__() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2843 unit: def _apply_sequence_id() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2844 unit: def prepare_inputs_for_generation() file: server/text_generation_server/models/custom_modeling/mpt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2845 unit: def batch_decode() file: server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2846 unit: def decode() file: server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2847 unit: def is_valid_list_of_images() file: server/text_generation_server/models/custom_modeling/gemma3/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 1 id: 2848 unit: def make_nested_list_of_images() file: server/text_generation_server/models/custom_modeling/gemma3/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2849 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_llama_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2850 unit: def forward() file: server/text_generation_server/models/custom_modeling/mamba_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2851 unit: def promote_scalar() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2852 unit: def round_up() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2853 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2854 unit: def forward() file: server/text_generation_server/models/custom_modeling/flash_phi_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 2855 unit: def lecun_normal_() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2856 unit: def default_flax_embed_init() file: server/text_generation_server/models/custom_modeling/siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2857 unit: def forward() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe 
index: 1 number of parameters: 0 id: 2858 unit: def _prepare_decoder_attention_mask() file: server/text_generation_server/models/custom_modeling/opt_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2859 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2860 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2861 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2862 unit: def __init__() file: server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2863 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2864 unit: def __init__() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2865 unit: def batch_decode() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2866 unit: def decode() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2867 unit: def apply_rotary_pos_emb_vision() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2868 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_5_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2869 unit: def apply_rotary_pos_emb_vision() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2870 unit: def forward() file: server/text_generation_server/models/custom_modeling/qwen2_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2871 unit: def _prepare_decoder_attention_mask() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2872 unit: def _update_model_kwargs_for_generation() file: server/text_generation_server/models/custom_modeling/idefics_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2873 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2874 unit: def forward() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2875 unit: def extra_repr() file: server/text_generation_server/models/custom_modeling/mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2876 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 2 LOC 
McCabe index: 1 number of parameters: 2 id: 2877 unit: def forward() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2878 unit: def __init__() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2879 unit: def _relative_position_bucket() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2880 unit: def prepare_decoder_input_ids_from_labels() file: server/text_generation_server/models/custom_modeling/t5_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2881 unit: def num_key_value_heads() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2882 unit: def promote_scalar() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1 id: 2883 unit: def select_experts() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2884 unit: def round_up() file: server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2885 unit: def forward() file: server/text_generation_server/models/custom_modeling/idefics_perceiver.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2886 unit: def get_position_ids() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2887 unit: def post_process_outputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2888 unit: def post_process_outputs() file: server/text_generation_server/models/transformers_flash_vlm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2889 unit: def concatenate() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2890 unit: def __len__() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2891 unit: def batch_type() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2892 unit: def generate_token() file: server/text_generation_server/models/idefics_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2893 unit: def small_power_of_2() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2894 unit: def batch_tokenized_inputs() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2895 unit: def __len__() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 
number of parameters: 1 id: 2896 unit: def batch_type() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2897 unit: def forward() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2898 unit: def generate_token() file: server/text_generation_server/models/flash_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2899 unit: def __len__() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2900 unit: def batch_type() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2901 unit: def forward() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2902 unit: def generate_token() file: server/text_generation_server/models/causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2903 unit: def scatter_image_embeds() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2904 unit: def gather_image_embeds() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2905 unit: def batch_tokenized_inputs() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2906 unit: def batch_type() file: server/text_generation_server/models/vlm_causal_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2907 unit: def escape_custom_split_sequence() file: server/text_generation_server/models/galactica.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2908 unit: def __len__() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2909 unit: def batch_type() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2910 unit: def generate_token() file: server/text_generation_server/models/seq2seq_lm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2911 unit: def __len__() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2912 unit: def batch_type() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2913 unit: def warmup() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2914 unit: def forward() file: server/text_generation_server/models/mamba.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2915 unit: def batch_type() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2916 unit: def generate_token() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of 
parameters: 0 id: 2917 unit: def warmup() file: server/text_generation_server/models/model.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2918 unit: def __init__() file: server/text_generation_server/tracing.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2919 unit: def __init__() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2920 unit: def pop() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2921 unit: def __len__() file: server/text_generation_server/cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2922 unit: def forward() file: server/text_generation_server/layers/linear.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2923 unit: def forward() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2924 unit: def forward() file: server/text_generation_server/layers/medusa.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2925 unit: def marlin_zero_points() file: server/text_generation_server/layers/marlin/util.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2926 unit: def can_use_gptq_marlin() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2927 unit: def awq_to_marlin_zero_points() file: server/text_generation_server/layers/marlin/gptq.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2928 unit: def get_linear() file: server/text_generation_server/layers/marlin/marlin.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2929 unit: def forward() file: server/text_generation_server/layers/speculative.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2930 unit: def get_linear() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2931 unit: def get_linear() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2932 unit: def get_linear() file: server/text_generation_server/layers/bnb.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2933 unit: def __init__() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2934 unit: def collect_lora_a() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2935 unit: def forward() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2936 unit: def load() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 5 id: 2937 unit: def forward() file: server/text_generation_server/layers/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2938 unit: def _get_target_loaders() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 2 
LOC McCabe index: 1 number of parameters: 0 id: 2939 unit: def _create_loader_for_group() file: server/text_generation_server/layers/compressed_tensors/loader.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2940 unit: def _get_tensor_or_else() file: server/text_generation_server/layers/compressed_tensors/w8a8_int.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2941 unit: def forward() file: server/text_generation_server/layers/tensor_parallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2942 unit: def forward() file: server/text_generation_server/layers/mlp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2943 unit: def find_correction_range() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2944 unit: def _update_cos_sin_cache() file: server/text_generation_server/layers/rotary.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2945 unit: def __init__() file: server/text_generation_server/layers/awq/quantize/ipex.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2946 unit: def __init__() file: server/text_generation_server/layers/awq/quantize/cuda.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2947 unit: def device() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2948 unit: def get_weights_col() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2949 unit: def get_multi_weights_col() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2950 unit: def get_weights_row() file: server/text_generation_server/layers/exl2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2951 unit: def device() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2952 unit: def is_layer_skipped_quantization() file: server/text_generation_server/layers/gptq/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2953 unit: def autotune() file: server/text_generation_server/layers/gptq/custom_autotune.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2954 unit: def enabled() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2955 unit: def ready() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2956 unit: def fasterquant() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2957 unit: def get_loaders() file: server/text_generation_server/layers/gptq/quantize.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2958 unit: def torch_snr_error() file: server/text_generation_server/layers/gptq/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2959 unit: def temp_dq_size() file: 
server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2960 unit: def temp_fwd_size() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2961 unit: def scratch_space_fixed() file: server/text_generation_server/layers/gptq/exllamav2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2962 unit: def load_conv2d_no_bias() file: server/text_generation_server/layers/conv.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2963 unit: def get_weight_fn() file: server/text_generation_server/layers/moe/fp8.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2964 unit: def forward() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2965 unit: def forward() file: server/text_generation_server/layers/moe/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 2966 unit: def dtype() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2967 unit: def key() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2968 unit: def value() file: server/text_generation_server/layers/attention/kv_cache.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2969 unit: def cdiv_fn() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2970 unit: def max_fn() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2971 unit: def grid() file: server/text_generation_server/layers/attention/flash_attn_triton.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2972 unit: def clamp() file: server/text_generation_server/layers/attention/common.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2973 unit: def shard_on_dim() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 2974 unit: def get_batch_types() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2975 unit: def has_adapter() file: server/text_generation_server/adapters/lora.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2976 unit: def get_batch_types() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2977 unit: def speculative_tokens() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2978 unit: def has_adapter() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2979 unit: def __init__() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2980 
unit: def add_adapter() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 2981 unit: def is_empty() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2982 unit: def layer_names() file: server/text_generation_server/adapters/weights.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 2983 unit: def set_keep_processing() file: server/text_generation_server/server.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2984 unit: def is_optional() file: server/bounds-from-nix.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 3 id: 2985 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2986 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2987 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2988 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2989 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2990 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2991 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2992 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2993 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2994 unit: def __init__() file: clients/python/text_generation/errors.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 2995 unit: size_t num_tokens_ready() file: backends/trtllm/csrc/ffi.hpp start line: 86 end line: 86 size: 1 LOC McCabe index: 1 number of parameters: 0
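Each entry in the listing above follows the same flat field layout: id, unit, file, start line, end line, size (LOC), McCabe index, number of parameters, with records running together and wrapping wherever the line happens to end. A minimal parsing sketch in Python is shown below; it assumes only what is visible in the records themselves (field labels in this order, file paths without spaces), and the RECORD_RE pattern and Unit dataclass are illustrative helpers of my own, not part of any tooling that produced this report.

    import re
    from dataclasses import dataclass
    from typing import List

    # Field labels exactly as they appear in the records above.
    RECORD_RE = re.compile(
        r"id:\s*(?P<id>\d+)\s+"
        r"unit:\s*(?P<unit>.+?)\s+"
        r"file:\s*(?P<file>\S+)\s+"
        r"start line:\s*(?P<start>\d+)\s+"
        r"end line:\s*(?P<end>\d+)\s+"
        r"size:\s*(?P<loc>\d+)\s+LOC\s+"
        r"McCabe index:\s*(?P<mccabe>\d+)\s+"
        r"number of parameters:\s*(?P<params>\d+)"
    )

    @dataclass
    class Unit:
        id: int
        unit: str
        file: str
        start: int
        end: int
        loc: int
        mccabe: int
        params: int

    def parse_units(text: str) -> List[Unit]:
        # Collapse all whitespace first, so it does not matter where the
        # report wrapped its lines (records and fields split across lines).
        flat = " ".join(text.split())
        return [
            Unit(
                id=int(m["id"]),
                unit=m["unit"],
                file=m["file"],
                start=int(m["start"]),
                end=int(m["end"]),
                loc=int(m["loc"]),
                mccabe=int(m["mccabe"]),
                params=int(m["params"]),
            )
            for m in RECORD_RE.finditer(flat)
        ]

    # Example with one record copied from the listing above:
    sample = (
        "id: 2680 unit: def gather_list() file: "
        "backends/gaudi/server/text_generation_server/models/flash_causal_lm.py "
        "start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 3"
    )
    print(parse_units(sample)[0])

Collapsing whitespace before matching makes the parser indifferent to where the report wraps its lines, which is exactly how the records above are laid out.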
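Nearly every unit in this tail of the report has the same profile: size 2 LOC, McCabe index 1, and zero to a handful of parameters, i.e. __len__ implementations, batch_type and max_past style properties, and thin delegating wrappers, with the occasional two-liner at McCabe index 2 or 3 (promote_scalar, gather_list) where the single statement carries a conditional. The report records metrics only, not bodies, so the Python below is a hypothetical sketch of that shape rather than code from the repository; the class and function names are stand-ins. It illustrates why such a unit plausibly scores size 2 (the def line plus a single statement) and McCabe index 1 (cyclomatic complexity is 1 plus the number of decision points), and how one conditional expression is enough to raise that index to 2.

    from typing import List, Type

    class BatchSketch:
        """Hypothetical stand-in for a batch container; not code from the repository."""

        def __init__(self, requests: List[str]) -> None:
            self.requests = requests

        def __len__(self) -> int:
            # The profile of most entries above: 2 LOC (def + return),
            # McCabe index 1 (no decision points), 1 parameter (self).
            return len(self.requests)

    class ModelSketch:
        @property
        def batch_type(self) -> Type[BatchSketch]:
            # Another common two-line shape: a property returning a class handle.
            return BatchSketch

    def first_or_default_sketch(values: List[int]) -> int:
        # Still 2 LOC, but the conditional expression adds one decision point,
        # which is how a two-line unit shows up with McCabe index 2 in the report.
        return values[0] if values else 0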