id | unit | file | start line | end line | size (LOC) | McCabe index | parameters
1 | def __post_init__() | optimum/habana/transformers/training_args.py | 0 | 0 | 414 | 132 | 1
2 | def adapt_transformers_to_gaudi() | optimum/habana/transformers/modeling_utils.py | 0 | 0 | 403 | 54 | 0
3 | def _prepare_deepspeed() | optimum/habana/accelerate/accelerator.py | 0 | 0 | 224 | 55 | 2
4 | def prepare_model() | optimum/habana/accelerate/accelerator.py | 0 | 0 | 107 | 26 | 4
5 | def _setup_devices() | optimum/habana/transformers/training_args.py | 0 | 0 | 103 | 41 | 1
6 | def _load_best_model() | optimum/habana/transformers/trainer.py | 0 | 0 | 84 | 25 | 1
7 | def _convert_model() | optimum/habana/accelerate/utils/transformer_engine.py | 0 | 0 | 79 | 27 | 3
8 | def create_accelerator_and_postprocess() | optimum/habana/transformers/trainer.py | 0 | 0 | 78 | 12 | 1
9 | def finalize_beams() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 73 | 21 | 4
10 | def __init__() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 70 | 6 | 3
11 | def __init__() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 70 | 8 | 3
12 | def step() | optimum/habana/trl/trainer/ddpo_trainer.py | 0 | 0 | 69 | 19 | 3
13 | def get_train_dataloader() | optimum/habana/sentence_transformers/st_gaudi_trainer.py | 0 | 0 | 68 | 11 | 1
14 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 67 | 11 | 2
15 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 66 | 14 | 2
16 | def handle_single_conversation() | optimum/habana/transformers/models/glm4v/tokenization_chatglm.py | 0 | 0 | 65 | 19 | 1
17 | def get_test_dataloader() | optimum/habana/sentence_transformers/st_gaudi_trainer.py | 0 | 0 | 62 | 9 | 4
18 | def _train_batched_samples() | optimum/habana/trl/trainer/ddpo_trainer.py | 0 | 0 | 60 | 12 | 5
19 | def gaudi_prepare_inputs_for_generation() | optimum/habana/peft/peft_model.py | 0 | 0 | 58 | 27 | 4
20 | def gaudi_qwen2moe_block_sparse_moe_forward() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 55 | 12 | 2
21 | def _push_from_checkpoint() | optimum/habana/transformers/trainer.py | 0 | 0 | 53 | 20 | 2
22 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 51 | 9 | 2
23 | def tiled_decode_gaudi() | optimum/habana/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py | 0 | 0 | 51 | 14 | 3
24 | def _attn() | optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 0 | 0 | 49 | 19 | 6
25 | def __init__() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 49 | 10 | 3
26 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py | 0 | 0 | 49 | 15 | 8
27 | def _merge_input_ids_with_image_features() | optimum/habana/transformers/models/llava_next/modeling_llava_next.py | 0 | 0 | 48 | 13 | 6
28 | def _merge_input_ids_with_image_features() | optimum/habana/transformers/models/llava_onevision/modeling_llava_onevision.py | 0 | 0 | 48 | 13 | 6
29 | def prepare_model_inputs() | optimum/habana/trl/trainer/ppo_trainer.py | 0 | 0 | 48 | 8 | 3
30 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py | 0 | 0 | 48 | 1 | 41
31 | def __init__() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 46 | 8 | 4
32 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py | 0 | 0 | 46 | 12 | 8
33 | def _init_rope() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 45 | 7 | 1
34 | def gaudi_esmfolding_trunk_forward() | optimum/habana/transformers/models/esm/modeling_esmfold.py | 0 | 0 | 45 | 8 | 7
35 | def _init_rope() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 45 | 7 | 1
36 | def _load_optimizer_and_scheduler() | optimum/habana/transformers/trainer.py | 0 | 0 | 45 | 16 | 2
37 | def __init__() | optimum/habana/transformers/models/deepseek_v3/configuration_deepseek_v3.py | 0 | 0 | 44 | 1 | 0
38 | def create_optimizer() | optimum/habana/transformers/trainer.py | 0 | 0 | 44 | 17 | 1
39 | def _generate_samples() | optimum/habana/trl/trainer/ddpo_trainer.py | 0 | 0 | 44 | 3 | 3
40 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py | 0 | 0 | 44 | 1 | 42
41 | def __init__() | optimum/habana/transformers/models/deepseek_v2/configuration_deepseek_v2.py | 0 | 0 | 43 | 1 | 0
42 | def __init__() | optimum/habana/transformers/models/minicpm/modeling_minicpm.py | 0 | 0 | 43 | 2 | 3
43 | def backward() | optimum/habana/transformers/gradient_checkpointing.py | 0 | 0 | 42 | 15 | 2
44 | def gaudi_awq_config_post_init() | optimum/habana/transformers/integrations/awq.py | 0 | 0 | 41 | 13 | 1
45 | def __init__() | optimum/habana/AutoAWQ/gemm_hpu.py | 0 | 0 | 41 | 3 | 8
46 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py | 0 | 0 | 41 | 1 | 37
47 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py | 0 | 0 | 41 | 13 | 7
48 | def GaudiAdaptedAttentionPreAttnForward() | optimum/habana/peft/layer.py | 0 | 0 | 40 | 7 | 3
49 | def st_gaudi_data_collator_call() | optimum/habana/sentence_transformers/st_gaudi_data_collator.py | 0 | 0 | 40 | 17 | 3
50 | def prepare_inputs_for_generation() | optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py | 0 | 0 | 40 | 15 | 5
51 | def build_chat_input() | optimum/habana/transformers/models/baichuan/generation_utils.py | 0 | 0 | 40 | 14 | 4
52 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/flux/pipeline_flux_img2img.py | 0 | 0 | 40 | 13 | 6
53 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/flux/pipeline_flux.py | 0 | 0 | 40 | 13 | 6
54 | def gaudi_rot_vec_mul() | optimum/habana/transformers/models/esm/modeling_esmfold.py | 0 | 0 | 38 | 5 | 2
55 | def __call__() | optimum/habana/trl/trainer/reward_trainer.py | 0 | 0 | 38 | 2 | 3
56 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py | 0 | 0 | 38 | 1 | 36
57 | def prepare_depth_map() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py | 0 | 0 | 38 | 11 | 7
58 | def import_weights() | optimum/habana/distributed/tp.py | 0 | 0 | 37 | 7 | 2
59 | def compute_query_states() | optimum/habana/peft/layer.py | 0 | 0 | 37 | 13 | 2
60 | def __init__() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 37 | 7 | 4
61 | def _save() | optimum/habana/transformers/trainer.py | 0 | 0 | 37 | 15 | 3
62 | def trainer_config_process() | optimum/habana/transformers/integrations/deepspeed.py | 0 | 0 | 37 | 3 | 3
63 | def calculate_loss() | optimum/habana/trl/trainer/ddpo_trainer.py | 0 | 0 | 37 | 2 | 7
64 | def __init__() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 36 | 8 | 2
65 | def _load_rng_state() | optimum/habana/transformers/trainer.py | 0 | 0 | 36 | 8 | 2
66 | def __init__() | optimum/habana/trl/trainer/dpo_trainer.py | 0 | 0 | 36 | 1 | 33
67 | def gaudi_validate_environment() | optimum/habana/quantizers/bitsandbytes.py | 0 | 0 | 36 | 14 | 3
68 | def get_repo_root() | optimum/habana/checkpoint_utils.py | 0 | 0 | 35 | 13 | 3
69 | def __init__() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 35 | 9 | 2
70 | def _save_optimizer_and_scheduler() | optimum/habana/transformers/trainer.py | 0 | 0 | 35 | 15 | 2
71 | def adjust_multimodal_inputs() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 34 | 2 | 2
72 | def _init_rope() | optimum/habana/transformers/models/minicpm/modeling_minicpm.py | 0 | 0 | 34 | 5 | 1
73 | def _upad_input() | optimum/habana/transformers/models/minicpm/modeling_minicpm.py | 0 | 0 | 34 | 3 | 6
74 | def __init__() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 33 | 8 | 3
75 | def __init__() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 33 | 4 | 4
76 | def __init__() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 32 | 4 | 4
77 | def _upcast_and_reordered_attn() | optimum/habana/transformers/models/gpt2/modeling_gpt2.py | 0 | 0 | 32 | 13 | 6
78 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 32 | 5 | 2
79 | def __init__() | optimum/habana/transformers/models/minicpm/configuration_minicpm.py | 0 | 0 | 32 | 1 | 0
80 | def _get_train_sampler() | optimum/habana/transformers/trainer.py | 0 | 0 | 32 | 12 | 1
81 | def save_model() | optimum/habana/transformers/trainer.py | 0 | 0 | 32 | 14 | 3
82 | def _gaudi_validate_bnb_multi_backend_availability() | optimum/habana/quantizers/bitsandbytes.py | 0 | 0 | 32 | 9 | 1
83 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py | 0 | 0 | 32 | 1 | 0
84 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py | 0 | 0 | 32 | 10 | 5
85 | def __init__() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 31 | 7 | 2
86 | def __post_init__() | optimum/habana/trl/trainer/ppo_config.py | 0 | 0 | 31 | 10 | 1
87 | def __call__() | optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py | 0 | 0 | 31 | 1 | 0
88 | def gaudi_rot_matmul() | optimum/habana/transformers/models/esm/modeling_esmfold.py | 0 | 0 | 30 | 6 | 2
89 | def __init__() | optimum/habana/transformers/models/glm4v/configuration_chatglm.py | 0 | 0 | 30 | 1 | 0
90 | def forward() | optimum/habana/transformers/gradient_checkpointing.py | 0 | 0 | 30 | 9 | 4
91 | def __init__() | optimum/habana/trl/trainer/sft_trainer.py | 0 | 0 | 30 | 1 | 20
92 | def run() | optimum/habana/distributed/distributed_runner.py | 0 | 0 | 29 | 10 | 1
93 | def __init__() | optimum/habana/transformers/models/chatglm/configuration_chatglm.py | 0 | 0 | 29 | 1 | 0
94 | def _get_input_update_settings() | optimum/habana/transformers/trainer.py | 0 | 0 | 29 | 12 | 2
95 | def _wrap_model() | optimum/habana/transformers/trainer.py | 0 | 0 | 29 | 13 | 4
96 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py | 0 | 0 | 29 | 1 | 0
97 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py | 0 | 0 | 29 | 11 | 5
98 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py | 0 | 0 | 29 | 11 | 5
99 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py | 0 | 0 | 29 | 1 | 0
100 | def _split_inputs_into_batches() | optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py | 0 | 0 | 29 | 11 | 5
101 | def from_pretrained() | optimum/habana/diffusers/pipelines/pipeline_utils.py | 0 | 0 | 29 | 14 | 4
102 | def forward() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 28 | 8 | 2
103 | def __init__() | optimum/habana/transformers/models/mixtral/configuration_mixtral.py | 0 | 0 | 28 | 1 | 0
104 | def _set_cos_sin_cache() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 28 | 1 | 4
105 | def __init__() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 28 | 5 | 2
106 | def __init__() | optimum/habana/transformers/models/llama/configuration_llama.py | 0 | 0 | 28 | 1 | 0
107 | def gaudi_DetrLoss_forward() | optimum/habana/transformers/models/detr/modeling_detr.py | 0 | 0 | 28 | 15 | 3
108 | def _wrap_model() | optimum/habana/sentence_transformers/st_gaudi_trainer.py | 0 | 0 | 27 | 11 | 4
109 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 27 | 1 | 0
110 | def forward() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 27 | 1 | 0
111 | def forward() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 27 | 1 | 0
112 | def gaudi_RTDetrHungarianMatcher_forward() | optimum/habana/transformers/loss/loss_rt_detr.py | 0 | 0 | 27 | 8 | 3
113 | def __init__() | optimum/habana/diffusers/models/controlnet_sdv.py | 0 | 0 | 27 | 1 | 8
114 | def get_ds_injection_policy() | optimum/habana/checkpoint_utils.py | 0 | 0 | 26 | 9 | 1
115 | def forward() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 26 | 1 | 0
116 | def _set_cos_sin_cache() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 26 | 1 | 4
117 | def get_batch_samples_transformers() | optimum/habana/transformers/trainer.py | 0 | 0 | 26 | 13 | 4
118 | def __init__() | optimum/habana/accelerate/accelerator.py | 0 | 0 | 26 | 2 | 0
119 | def __call__() | optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py | 0 | 0 | 26 | 1 | 0
120 | def __call__() | optimum/habana/diffusers/pipelines/flux/pipeline_flux_img2img.py | 0 | 0 | 26 | 1 | 0
121 | def get_peval() | optimum/habana/distributed/distributed_runner.py | 0 | 0 | 25 | 3 | 1
122 | def forward() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 25 | 1 | 0
123 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 25 | 1 | 0
124 | def gaudi_SeamlessM4TCodeHifiGan_get_output_hifigan_lengths() | optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py | 0 | 0 | 25 | 6 | 3
125 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 25 | 1 | 0
126 | def load_generation_config() | optimum/habana/transformers/trainer_seq2seq.py | 0 | 0 | 25 | 14 | 2
127 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py | 0 | 0 | 25 | 1 | 0
128 | def capture_replay() | optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py | 0 | 0 | 25 | 3 | 5
129 | def forward() | optimum/habana/diffusers/models/attention_processor.py | 0 | 0 | 25 | 6 | 8
130 | def __init__() | optimum/habana/diffusers/models/unet_spatio_temporal_condition_controlnet.py | 0 | 0 | 25 | 1 | 8
131 | def pytest_configure() | conftest.py | 0 | 0 | 24 | 7 | 1
132 | def process_hostfile() | optimum/habana/distributed/distributed_runner.py | 0 | 0 | 24 | 7 | 1
133 | def forward() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 24 | 1 | 0
134 | def apply_customized_rope_module() | optimum/habana/transformers/models/modeling_all_models.py | 0 | 0 | 24 | 4 | 6
135 | def forward() | optimum/habana/transformers/models/llava/modeling_llava.py | 0 | 0 | 24 | 1 | 0
136 | def forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 24 | 1 | 0
137 | def __init__() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 24 | 6 | 3
138 | def forward() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 24 | 1 | 0
139 | def forward() | optimum/habana/transformers/models/llava_onevision/modeling_llava_onevision.py | 0 | 0 | 24 | 1 | 0
140 | def check_synapse_version() | optimum/habana/utils.py | 0 | 0 | 24 | 7 | 0
141 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py | 0 | 0 | 24 | 8 | 9
142 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py | 0 | 0 | 24 | 8 | 9
143 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py | 0 | 0 | 24 | 8 | 9
144 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py | 0 | 0 | 24 | 3 | 5
145 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py | 0 | 0 | 24 | 8 | 9
146 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py | 0 | 0 | 24 | 8 | 9
147 | def __call__() | optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py | 0 | 0 | 24 | 1 | 0
148 | def __call__() | optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py | 0 | 0 | 24 | 1 | 0
149 | def prepare_latents() | optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py | 0 | 0 | 24 | 8 | 9
150 | def __call__() | optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py | 0 | 0 | 24 | 1 | 0
151 | def __call__() | optimum/habana/diffusers/pipelines/flux/pipeline_flux.py | 0 | 0 | 24 | 1 | 0
152 | def __init__() | optimum/habana/sentence_transformers/st_gaudi_trainer.py | 0 | 0 | 23 | 1 | 36
153 | def incrementor() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 23 | 5 | 2
154 | def forward() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 23 | 1 | 0
155 | def apply_rotary_pos_emb() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 23 | 3 | 2
156 | def __init__() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 23 | 5 | 3
157 | def forward() | optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 0 | 0 | 23 | 1 | 0
158 | def __init__() | optimum/habana/transformers/models/mistral/configuration_mistral.py | 0 | 0 | 23 | 1 | 0
159 | def forward() | optimum/habana/transformers/models/mistral/modeling_mistral.py | 0 | 0 | 23 | 1 | 0
160 | def forward() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 23 | 1 | 0
161 | def forward() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 23 | 2 | 0
162 | def apply_rotary_pos_emb() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 23 | 3 | 2
163 | def __init__() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 23 | 5 | 3
164 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 23 | 1 | 0
165 | def forward() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 23 | 1 | 0
166 | def forward() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 23 | 1 | 0
167 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 23 | 1 | 0
168 | def forward() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 23 | 1 | 0
169 | def _prepare_input() | optimum/habana/transformers/trainer.py | 0 | 0 | 23 | 14 | 3
170 | def _infer_device_type() | optimum/habana/transformers/gradient_checkpointing.py | 0 | 0 | 23 | 6 | 1
171 | def _split_and_cat_tensors() | optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py | 0 | 0 | 23 | 11 | 5
172 | def _split_and_cat_tensors() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py | 0 | 0 | 23 | 11 | 5
173 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py | 0 | 0 | 23 | 1 | 0
174 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py | 0 | 0 | 23 | 1 | 0
175 | def set_attn_processor_hpu() | optimum/habana/diffusers/models/unet_2d_condition.py | 0 | 0 | 23 | 9 | 4
176 | def _pad_past_key_values() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 22 | 20 | 2
177 | def forward() | optimum/habana/transformers/models/llava_next/modeling_llava_next.py | 0 | 0 | 22 | 1 | 0
178 | def _update_model_kwargs_for_generation() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 22 | 4 | 5
179 | def forward() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 22 | 1 | 0
180 | def gaudi_flash_attn_v1() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 22 | 5 | 6
181 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 22 | 1 | 0
182 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 22 | 1 | 0
183 | def gaudi_check_and_enable_sdpa() | optimum/habana/transformers/models/modeling_all_models.py | 0 | 0 | 22 | 7 | 3
184 | def forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 22 | 1 | 0
185 | def __init__() | optimum/habana/transformers/models/decilm/modeling_decilm.py | 0 | 0 | 22 | 2 | 3
186 | def gaudi_DetrLoss_loss_labels() | optimum/habana/transformers/models/detr/modeling_detr.py | 0 | 0 | 22 | 4 | 5
187 | def forward() | optimum/habana/transformers/models/idefics2/modeling_idefics2.py | 0 | 0 | 22 | 2 | 3
188 | def gaudi_flash_attn_v1() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 22 | 5 | 6
189 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 22 | 1 | 0
190 | def forward() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 22 | 2 | 0
191 | def deepspeed_init() | optimum/habana/transformers/integrations/deepspeed.py | 0 | 0 | 22 | 8 | 3
192 | def torch_call() | optimum/habana/trl/trainer/sft_trainer.py | 0 | 0 | 22 | 6 | 5
193 | def run_unet() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py | 0 | 0 | 22 | 1 | 0
194 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py | 0 | 0 | 22 | 1 | 0
195 | def __call__() | optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py | 0 | 0 | 22 | 1 | 0
196 | def gaudi_generate() | optimum/habana/peft/peft_model.py | 0 | 0 | 21 | 5 | 3
197 | def GaudiAdaloraLayerSVDLinearForward() | optimum/habana/peft/layer.py | 0 | 0 | 21 | 6 | 4
198 | def gaudi_T5ForConditionalGeneration_forward() | optimum/habana/transformers/models/t5/modeling_t5.py | 0 | 0 | 21 | 1 | 0
199 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 21 | 1 | 0
200 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 21 | 1 | 0
201 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 21 | 1 | 0
202 | def gaudi_gpt_bigcode_model_forward() | optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 0 | 0 | 21 | 1 | 0
203 | def forward() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 21 | 2 | 0
204 | def pre_attn() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 21 | 1 | 0
205 | def pre_attn_forward() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 21 | 1 | 0
206 | def pre_attn_forward() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 21 | 2 | 0
207 | def pre_attn() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 21 | 2 | 0
208 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py | 0 | 0 | 21 | 1 | 0
209 | def generate() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 20 | 1 | 0
210 | def __init__() | optimum/habana/transformers/generation/configuration_utils.py | 0 | 0 | 20 | 1 | 2
211 | def forward() | optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py | 0 | 0 | 20 | 1 | 0
212 | def forward() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 20 | 1 | 0
213 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 20 | 1 | 0
214 | def process_response() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 20 | 6 | 3
215 | def forward() | optimum/habana/transformers/models/mistral/modeling_mistral.py | 0 | 0 | 20 | 1 | 0
216 | def forward() | optimum/habana/transformers/models/whisper/modeling_whisper.py | 0 | 0 | 20 | 1 | 0
217 | def gaudi_flash_attn_v1() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 20 | 5 | 7
218 | def __init__() | optimum/habana/transformers/models/baichuan/configuration_baichuan.py | 0 | 0 | 20 | 1 | 0
219 | def pre_attn() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 20 | 1 | 0
220 | def forward() | optimum/habana/transformers/models/video_llava/modeling_video_llava.py | 0 | 0 | 20 | 1 | 0
221 | def forward() | optimum/habana/transformers/models/gemma/modeling_gemma.py | 0 | 0 | 20 | 2 | 0
222 | def forward() | optimum/habana/transformers/models/gemma/modeling_gemma.py | 0 | 0 | 20 | 1 | 0
223 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 20 | 1 | 0
224 | def forward() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 20 | 4 | 8
225 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 20 | 1 | 0
226 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 20 | 1 | 0
227 | def __init__() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 20 | 2 | 3
228 | def pipeline_step() | optimum/habana/trl/models/modeling_sd_base.py | 0 | 0 | 20 | 1 | 0
229 | def gaudi_bitsandbytesconfig_post_init() | optimum/habana/quantizers/bitsandbytes.py | 0 | 0 | 20 | 12 | 1
230 | def _encode_image() | optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py | 0 | 0 | 20 | 4 | 4
231 | def capture_replay() | optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py | 0 | 0 | 20 | 3 | 5
232 | def get_params() | optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py | 0 | 0 | 20 | 4 | 3
233 | def __post_init__() | optimum/habana/sentence_transformers/st_gaudi_training_args.py | 0 | 0 | 19 | 4 | 1
234 | def forward() | optimum/habana/transformers/models/gptj/modeling_gptj.py | 0 | 0 | 19 | 1 | 0
235 | def pre_attn() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 19 | 1 | 0
236 | def forward() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 19 | 1 | 0
237 | def __init__() | optimum/habana/transformers/models/chatglm/tokenization_chatglm.py | 0 | 0 | 19 | 4 | 2
238 | def forward() | optimum/habana/transformers/models/whisper/modeling_whisper.py | 0 | 0 | 19 | 1 | 0
239 | def pre_attn_forward() | optimum/habana/transformers/models/qwen2/modeling_qwen2.py | 0 | 0 | 19 | 1 | 0
240 | def pre_attn_forward() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 19 | 1 | 0
241 | def gaudi_BartForConditionalGeneration_forward() | optimum/habana/transformers/models/bart/modeling_bart.py | 0 | 0 | 19 | 1 | 0
242 | def gaudi_BlipTextLMHead_forward() | optimum/habana/transformers/models/blip/modeling_blip_text.py | 0 | 0 | 19 | 1 | 0
243 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 19 | 1 | 0
244 | def forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 19 | 1 | 0
245 | def get_masks() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 19 | 6 | 4
246 | def forward() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 19 | 1 | 0
247 | def forward() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 19 | 1 | 0
248 | def forward() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 19 | 1 | 0
249 | def compile_regions() | optimum/habana/accelerate/utils/other.py | 0 | 0 | 19 | 6 | 2
250 | def __init__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py | 0 | 0 | 19 | 1 | 0
251 | def set_attn_processor_hpu() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py | 0 | 0 | 19 | 7 | 4
252 | def __init__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py | 0 | 0 | 19 | 1 | 0
253 | def __init__() | optimum/habana/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py | 0 | 0 | 19 | 1 | 0
254 | def __call__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py | 0 | 0 | 19 | 1 | 0
255 | def prepare_latents() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py | 0 | 0 | 19 | 6 | 9
256 | def _load_best_model() | optimum/habana/sentence_transformers/st_gaudi_trainer.py | 0 | 0 | 18 | 8 | 1
257 | def forward() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 18 | 1 | 0
258 | def gaudi_flash_attn_v1() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 18 | 5 | 7
259 | def apply_customized_rope() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 18 | 4 | 5
260 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 18 | 1 | 0
261 | def forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 18 | 1 | 0
262 | def create_custom_forward() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 18 | 2 | 1
263 | def get_masks() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 18 | 6 | 4
264 | def forward() | optimum/habana/transformers/models/phi/modeling_phi.py | 0 | 0 | 18 | 1 | 0
265 | def forward() | optimum/habana/transformers/models/mistral/modeling_mistral.py | 0 | 0 | 18 | 2 | 0
266 | def forward() | optimum/habana/transformers/models/clip/modeling_clip.py | 0 | 0 | 18 | 8 | 3
267 | def pre_attn_forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 18 | 1 | 0
268 | def forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 18 | 1 | 0
269 | def forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 18 | 1 | 0
270 | def pre_attn() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 18 | 1 | 0
271 | def gaudi_BartModel_forward() | optimum/habana/transformers/models/bart/modeling_bart.py | 0 | 0 | 18 | 1 | 0
272 | def forward() | optimum/habana/transformers/models/idefics2/modeling_idefics2.py | 0 | 0 | 18 | 1 | 0
273 | def forward() | optimum/habana/transformers/models/gpt2/modeling_gpt2.py | 0 | 0 | 18 | 1 | 0
274 | def forward() | optimum/habana/transformers/models/gpt2/modeling_gpt2.py | 0 | 0 | 18 | 1 | 0
275 | def forward() | optimum/habana/transformers/models/xglm/modeling_xglm.py | 0 | 0 | 18 | 1 | 0
276 | def gaudi_flash_attn_v1() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 18 | 5 | 7
277 | def pre_attn_forward() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 18 | 1 | 0
278 | def pre_attn() | optimum/habana/transformers/models/falcon/modeling_falcon.py | 0 | 0 | 18 | 1 | 0
279 | def apply_customized_rope() | optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py | 0 | 0 | 18 | 4 | 5
280 | def forward() | optimum/habana/transformers/models/paligemma/modeling_paligemma.py | 0 | 0 | 18 | 1 | 0
281 | def forward() | optimum/habana/transformers/models/minicpm/modeling_minicpm.py | 0 | 0 | 18 | 5 | 2
282 | def __init__() | optimum/habana/transformers/trainer.py | 0 | 0 | 18 | 1 | 27
283 | def tokenize() | optimum/habana/trl/trainer/sft_trainer.py | 0 | 0 | 18 | 6 | 1
284 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py | 0 | 0 | 18 | 3 | 4
285 | def image_capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py | 0 | 0 | 18 | 3 | 2
286 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py | 0 | 0 | 18 | 3 | 4
287 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py | 0 | 0 | 18 | 3 | 4
288 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py | 0 | 0 | 18 | 3 | 4
289 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py | 0 | 0 | 18 | 3 | 4
290 | def capture_replay() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py | 0 | 0 | 18 | 3 | 4
291 | def __init__() | optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py | 0 | 0 | 18 | 2 | 0
292 | def _remove_past_key_values() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 17 | 8 | 2
293 | def __init__() | optimum/habana/transformers/models/gptj/modeling_gptj.py | 0 | 0 | 17 | 1 | 3
294 | def forward() | optimum/habana/transformers/models/gptj/modeling_gptj.py | 0 | 0 | 17 | 1 | 0
295 | def pre_attn_forward() | optimum/habana/transformers/models/gemma2/modeling_gemma2.py | 0 | 0 | 17 | 1 | 0
296 | def forward() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 17 | 1 | 0
297 | def __init__() | optimum/habana/transformers/models/chatglm/modeling_chatglm.py | 0 | 0 | 17 | 1 | 3
298 | def forward() | optimum/habana/transformers/models/codegen/modeling_codegen.py | 0 | 0 | 17 | 1 | 0
299 | def forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 17 | 5 | 2
300 | def train_forward() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 17 | 1 | 0
301 | def forward() | optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py | 0 | 0 | 17 | 1 | 0
302 | def gaudi_gpt_bigcode_block_forward() | optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 0 | 0 | 17 | 1 | 0
303 | def forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 17 | 5 | 2
304 | def create_custom_forward() | optimum/habana/transformers/models/baichuan/modeling_baichuan.py | 0 | 0 | 17 | 2 | 1
305 | def pre_attn() | optimum/habana/transformers/models/gemma/modeling_gemma.py | 0 | 0 | 17 | 2 | 0
306 | def gaudi_BlipTextModel_forward() | optimum/habana/transformers/models/blip/modeling_blip_text.py | 0 | 0 | 17 | 1 | 0
307 | def __init__() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 17 | 1 | 3
308 | def create_custom_forward() | optimum/habana/transformers/models/glm4v/modeling_chatglm.py | 0 | 0 | 17 | 2 | 1
309 | def forward() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 17 | 2 | 0
310 | def pre_attn() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 17 | 2 | 0
311 | def forward() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 17 | 5 | 2
312 | def __init__() | optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py | 0 | 0 | 17 | 2 | 2
313 | def __init__() | optimum/habana/transformers/trainer_seq2seq.py | 0 | 0 | 17 | 1 | 25
314 | def _pad_tensors_to_max_len() | optimum/habana/transformers/trainer_seq2seq.py | 0 | 0 | 17 | 5 | 3
315 | def gaudi_save_pretrained() | optimum/habana/trl/models/modeling_base.py | 0 | 0 | 17 | 7 | 3
316 | def wrap_generation_for_hpu_graph_mode() | optimum/habana/trl/trainer/ppo_trainer.py | 0 | 0 | 17 | 4 | 2
317 | def __init__() | optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py | 0 | 0 | 17 | 1 | 0
318 | def __init__() | optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py | 0 | 0 | 17 | 1 | 0
319 | def __init__() | optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py | 0 | 0 | 17 | 1 | 0
320 | def __init__() | optimum/habana/diffusers/pipelines/flux/pipeline_flux.py | 0 | 0 | 17 | 1 | 0
321 | def __init__() | optimum/habana/diffusers/schedulers/scheduling_ddim.py | 0 | 0 | 17 | 1 | 0
322 | def get_params() | optimum/habana/diffusers/schedulers/scheduling_ddim.py | 0 | 0 | 17 | 4 | 2
323 | def _assisted_decoding() | optimum/habana/transformers/generation/utils.py | 0 | 0 | 16 | 1 | 0
324 | def forward() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 16 | 1 | 0
325 | def forward() | optimum/habana/transformers/models/mllama/modeling_mllama.py | 0 | 0 | 16 | 1 | 0
326 | def project() | optimum/habana/transformers/models/t5/modeling_t5.py | 0 | 0 | 16 | 7 | 4
327 | def gaudi_T5Block_forward() | optimum/habana/transformers/models/t5/modeling_t5.py | 0 | 0 | 16 | 1 | 0
328 | def gaudi_T5Stack_forward() | optimum/habana/transformers/models/t5/modeling_t5.py | 0 | 0 | 16 | 1 | 0
329 | def forward() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 16 | 5 | 7
330 | def forward() | optimum/habana/transformers/models/mixtral/modeling_mixtral.py | 0 | 0 | 16 | 2 | 0
331 | def gaudi_codegen_model_forward() | optimum/habana/transformers/models/codegen/modeling_codegen.py | 0 | 0 | 16 | 2 | 0
332 | def __init__() | optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py | 0 | 0 | 16 | 1 | 2
333 | def forward() | optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | 0 | 0 | 16 | 1 | 0
334 | def forward() | optimum/habana/transformers/models/mistral/modeling_mistral.py | 0 | 0 | 16 | 1 | 0
335 | def forward() | optimum/habana/transformers/models/whisper/modeling_whisper.py | 0 | 0 | 16 | 1 | 0
336 | def forward() | optimum/habana/transformers/models/mpt/modeling_mpt.py | 0 | 0 | 16 | 1 | 0
337 | def convert_tokens_to_string() | optimum/habana/transformers/models/baichuan/tokenization_baichuan.py | 0 | 0 | 16 | 5 | 2
338 | def update() | optimum/habana/transformers/models/bloom/modeling_bloom.py | 0 | 0 | 16 | 4 | 4
339 | def __init__() | optimum/habana/transformers/models/llama/modeling_llama.py | 0 | 0 | 16 | 5 | 2
340 | def pre_attn_forward() | optimum/habana/transformers/models/gemma/modeling_gemma.py | 0 | 0 | 16 | 1 | 0
341 | def forward() | optimum/habana/transformers/models/gemma/modeling_gemma.py | 0 | 0 | 16 | 1 | 0
342 | def forward() | optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py | 0 | 0 | 16 | 1 | 0
343 | def gaudi_DetrHungarianMatcher_forward() | optimum/habana/transformers/models/detr/modeling_detr.py | 0 | 0 | 16 | 7 | 3
344 | def gaudi_SeamlessM4TTextToUnitForConditionalGeneration_forward() | optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py | 0 | 0 | 16 | 1 | 0
345 | def gaudi_SeamlessM4TForTextToSpeech_forward() | optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py | 0 | 0 | 16 | 1 | 0
346 | def gaudi_gpt2_forward() | optimum/habana/transformers/models/gpt2/modeling_gpt2.py | 0 | 0 | 16 | 1 | 0
347 | def gaudi_xglm_model_forward() | optimum/habana/transformers/models/xglm/modeling_xglm.py | 0 | 0 | 16 | 1 | 0
348 | def convert_tokens_to_string() | optimum/habana/transformers/models/glm4v/tokenization_chatglm.py | 0 | 0 | 16 | 7 | 4
349 | def pre_attn_forward() | optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py | 0 | 0 | 16 | 1 | 0
350 | def _save_rng_state() | optimum/habana/transformers/trainer.py | 0 | 0 | 16 | 4 |
number of parameters: 2 id: 351 unit: def declare_autocast_bf16_fp32_ops() file: optimum/habana/transformers/gaudi_configuration.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 1 id: 352 unit: def gaudi_awq_quantizer_validate_environment() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 16 LOC McCabe index: 8 number of parameters: 3 id: 353 unit: def pack_tensor() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 16 LOC McCabe index: 3 number of parameters: 2 id: 354 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 355 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 356 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 357 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 358 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 359 unit: def _fetch_class_library_tuple() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 16 LOC McCabe index: 9 number of parameters: 1 id: 360 unit: def _encode_image() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 16 LOC McCabe index: 5 number of parameters: 5 id: 361 unit: def __init__() file: optimum/habana/diffusers/pipelines/flux/pipeline_flux_img2img.py start line: 0 end line: 0 size: 16 LOC McCabe index: 1 number of parameters: 0 id: 362 unit: def attn_processors() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 16 LOC McCabe index: 4 number of parameters: 1 id: 363 unit: def pytest_report_header() file: conftest.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 1 id: 364 unit: def __init__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 4 id: 365 unit: def get_checkpoint_files() file: optimum/habana/checkpoint_utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 9 number of parameters: 3 id: 366 unit: def call_model_init() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 15 LOC McCabe index: 8 number of parameters: 2 id: 367 unit: def _contrastive_search() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 368 unit: def _sample() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 369 unit: def forward() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 370 unit: def forward() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 
0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 371 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 372 unit: def forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 373 unit: def gaudi_mixtral_rmsnorm_forward() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 374 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 375 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 4 id: 376 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 2 id: 377 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 378 unit: def forward() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 379 unit: def forward() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 380 unit: def gaudi_mistral_rmsnorm_forward() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 381 unit: def forward() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 0 id: 382 unit: def gaudi_qwen2_rmsnorm_forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 383 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 2 id: 384 unit: def forward() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 385 unit: def forward() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 386 unit: def gaudi_llama_rmsnorm_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 387 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 4 id: 388 unit: def forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 389 unit: def forward() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 390 unit: def gaudi_BartDecoder_forward() 
file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 391 unit: def gaudi_DetrLoss_loss_boxes() file: optimum/habana/transformers/models/detr/modeling_detr.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 5 id: 392 unit: def forward() file: optimum/habana/transformers/models/idefics2/modeling_idefics2.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 393 unit: def gaudi_SeamlessM4TTextToUnitModel_forward() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 394 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 2 id: 395 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 396 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 15 LOC McCabe index: 2 number of parameters: 4 id: 397 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 2 id: 398 unit: def gaudi_BertModel_forward() file: optimum/habana/transformers/models/bert/modeling_bert.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 399 unit: def __init__() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 3 id: 400 unit: def forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 401 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 402 unit: def gaudi_qwen2moe_rmsnorm_forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 15 LOC McCabe index: 5 number of parameters: 2 id: 403 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 15 LOC McCabe index: 3 number of parameters: 3 id: 404 unit: def forward() file: optimum/habana/transformers/modeling_rope_utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 405 unit: def _prepare_dataset() file: optimum/habana/trl/trainer/sft_trainer.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 406 unit: def to_device_dtype() file: optimum/habana/utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 10 number of parameters: 3 id: 407 unit: def gaudi_fourier_filter() file: optimum/habana/diffusers/utils/torch_utils.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 3 id: 408 unit: def gaudi_unet_2d_condition_model_forward() file: optimum/habana/diffusers/models/unet_2d_condition.py start line: 0 end line: 0 size: 15 LOC McCabe index: 1 number of parameters: 0 id: 409 unit: def get_params() file: optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py start line: 0 end line: 0 size: 15 LOC McCabe index: 4 number of parameters: 3 id: 410 unit: 
def st_gaudi_encode() file: optimum/habana/sentence_transformers/st_gaudi_encoder.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 411 unit: def adapt_sentence_transformers_to_gaudi() file: optimum/habana/sentence_transformers/modeling_utils.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 412 unit: def _beam_search() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 413 unit: def _group_beam_search() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 414 unit: def _constrained_beam_search() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 415 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 416 unit: def update() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 417 unit: def gaudi_SpeechT5Decoder_forward() file: optimum/habana/transformers/models/speecht5/modeling_speecht5.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 418 unit: def update() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 419 unit: def update() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 420 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 4 number of parameters: 3 id: 421 unit: def tokenize() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 3 id: 422 unit: def update() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 423 unit: def update() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 5 id: 424 unit: def gaudi_gpt_neo_model_forward() file: optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 425 unit: def forward() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 14 LOC McCabe index: 2 number of parameters: 0 id: 426 unit: def update() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 427 unit: def save_vocabulary() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 14 LOC McCabe index: 6 number of parameters: 3 id: 428 unit: def gaudi_bloom_model_forward() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 429 unit: def update() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 14 
LOC McCabe index: 7 number of parameters: 5 id: 430 unit: def forward() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 431 unit: def update() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 432 unit: def gaudi_gpt_neox_model_forward() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 433 unit: def update() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 434 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 14 LOC McCabe index: 4 number of parameters: 3 id: 435 unit: def update() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 6 id: 436 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 437 unit: def gaudi_awq_quantizer_process_model_before_weight_loading() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 14 LOC McCabe index: 3 number of parameters: 3 id: 438 unit: def __init__() file: optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 439 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 440 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 441 unit: def _encode_image() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py start line: 0 end line: 0 size: 14 LOC McCabe index: 5 number of parameters: 5 id: 442 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py start line: 0 end line: 0 size: 14 LOC McCabe index: 1 number of parameters: 0 id: 443 unit: def _gaudi_get_task_class() file: optimum/habana/diffusers/pipelines/auto_pipeline.py start line: 0 end line: 0 size: 14 LOC McCabe index: 7 number of parameters: 3 id: 444 unit: def pytest_addoption() file: conftest.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 1 id: 445 unit: def _copy_rowwise() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 5 id: 446 unit: def create_pad_arg() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 3 id: 447 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 448 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 449 unit: def 
__init__() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 3 id: 450 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 3 id: 451 unit: def gaudi_T5Attention_forward() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 452 unit: def gaudi_T5ForConditionalGeneration_prepare_inputs_for_generation() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 453 unit: def forward() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 454 unit: def forward() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 455 unit: def stream_chat() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 456 unit: def decode() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 2 id: 457 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 5 id: 458 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 459 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 460 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 2 id: 461 unit: def from_pretrained() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 462 unit: def forward() file: optimum/habana/transformers/models/whisper/modeling_whisper.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 463 unit: def forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 464 unit: def forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 465 unit: def apply_rotary_pos_emb() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 6 id: 466 unit: def from_pretrained() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 467 unit: def gaudi_cohere_model_forward() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 468 unit: def forward() file: 
optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 469 unit: def forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 470 unit: def forward() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 471 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/llava_onevision/modeling_llava_onevision.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 472 unit: def forward() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 0 id: 473 unit: def gaudi_BartForConditionalGeneration_prepare_inputs_for_generation() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 474 unit: def gaudi_BlipTextEncoder_forward() file: optimum/habana/transformers/models/blip/modeling_blip_text.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 475 unit: def gaudi_SeamlessM4TDecoder_forward() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 476 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 477 unit: def build_single_message() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 6 id: 478 unit: def apply_chat_template() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 479 unit: def gaudi_opt_decoder_forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 480 unit: def gaudi_opt_model_forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 481 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 5 id: 482 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 483 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 484 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 2 id: 485 unit: def update() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 13 LOC McCabe index: 6 number of parameters: 5 id: 486 unit: def forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 487 
unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 488 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 2 number of parameters: 4 id: 489 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 3 number of parameters: 2 id: 490 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 491 unit: def chat() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 492 unit: def _zero_model_grad() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 13 LOC McCabe index: 5 number of parameters: 2 id: 493 unit: def _setup_optimizer() file: optimum/habana/trl/trainer/ddpo_trainer.py start line: 0 end line: 0 size: 13 LOC McCabe index: 4 number of parameters: 2 id: 494 unit: def from_pretrained() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py start line: 0 end line: 0 size: 13 LOC McCabe index: 8 number of parameters: 4 id: 495 unit: def __init__() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 13 LOC McCabe index: 1 number of parameters: 0 id: 496 unit: def pytest_sessionstart() file: conftest.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 1 id: 497 unit: def get_volatile_reads_fixed() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 1 id: 498 unit: def pack_grads() file: optimum/habana/distributed/fast_ddp.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 0 id: 499 unit: def _save() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 12 LOC McCabe index: 6 number of parameters: 3 id: 500 unit: def create_model_card() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 501 unit: def _expand_dict_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 12 LOC McCabe index: 8 number of parameters: 1 id: 502 unit: def get_dtype() file: optimum/habana/transformers/trainer_utils.py start line: 0 end line: 0 size: 12 LOC McCabe index: 6 number of parameters: 2 id: 503 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 504 unit: def gaudi_SpeechT5DecoderLayer_forward() file: optimum/habana/transformers/models/speecht5/modeling_speecht5.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 505 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 506 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of 
parameters: 0 id: 507 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 508 unit: def chat() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 509 unit: def build_chat_input() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 4 id: 510 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/codegen/modeling_codegen.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 511 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 512 unit: def apply_rotary_pos_emb() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 5 id: 513 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 514 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 515 unit: def gaudi_invert_attention_mask() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2 id: 516 unit: def gaudi_flash_attn_v1() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 517 unit: def forward() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 518 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 519 unit: def gaudi_persimmon_model_forward() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 520 unit: def forward() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 521 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 522 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 2 id: 523 unit: def __init__() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 524 unit: def gaudi_bloom_attention_forward() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 525 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 12 LOC McCabe index: 
1 number of parameters: 0 id: 526 unit: def gaudi_stablelm_model_forward() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 527 unit: def forward() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 528 unit: def gaudi_wav2vec2_tdnnlayer_forward() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 2 id: 529 unit: def __init__() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3 id: 530 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 3 id: 531 unit: def gaudi_BartDecoderLayer_forward() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 532 unit: def gaudi_DetrLoss_get_targets_without_no_objects() file: optimum/habana/transformers/models/detr/modeling_detr.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 2 id: 533 unit: def __init__() file: optimum/habana/transformers/models/gpt2/modeling_gpt2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 3 id: 534 unit: def gaudi_xglm_decoder_layer_forward() file: optimum/habana/transformers/models/xglm/modeling_xglm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 535 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 2 id: 536 unit: def __init__() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 2 id: 537 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 538 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 539 unit: def apply_rotary_pos_emb() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 5 id: 540 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 541 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 542 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 5 id: 543 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 544 unit: def log() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 12 LOC McCabe index: 4 number of parameters: 4 id: 545 unit: def 
_get_autocast_kwargs() file: optimum/habana/transformers/gradient_checkpointing.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 546 unit: def from_str() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 12 LOC McCabe index: 5 number of parameters: 1 id: 547 unit: def __init__() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 548 unit: def from_linear() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 7 id: 549 unit: def check_optimum_habana_min_version() file: optimum/habana/utils.py start line: 0 end line: 0 size: 12 LOC McCabe index: 3 number of parameters: 1 id: 550 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_ldm3d.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 6 id: 551 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 6 id: 552 unit: def from_pretrained() file: optimum/habana/diffusers/pipelines/auto_pipeline.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 3 id: 553 unit: def from_pipe() file: optimum/habana/diffusers/pipelines/auto_pipeline.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 3 id: 554 unit: def from_pretrained() file: optimum/habana/diffusers/pipelines/auto_pipeline.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 3 id: 555 unit: def from_pipe() file: optimum/habana/diffusers/pipelines/auto_pipeline.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 3 id: 556 unit: def __init__() file: optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 557 unit: def __init__() file: optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 558 unit: def transformer_hpu() file: optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py start line: 0 end line: 0 size: 12 LOC McCabe index: 2 number of parameters: 5 id: 559 unit: def __init__() file: optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py start line: 0 end line: 0 size: 12 LOC McCabe index: 1 number of parameters: 0 id: 560 unit: def finalize() file: conftest.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 1 id: 561 unit: def __init__() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 562 unit: def map_tensors() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 11 LOC McCabe index: 9 number of parameters: 2 id: 563 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 564 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 565 unit: def gaudi_generate_speech() file: 
optimum/habana/transformers/models/speecht5/modeling_speecht5.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 566 unit: def gaudi_FalconMambaModel_forward() file: optimum/habana/transformers/models/falcon_mamba/modeling_falcon_mamba.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 567 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/llava_next/modeling_llava_next.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 568 unit: def _dynamic_frequency_update() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 3 id: 569 unit: def gaudi_t5_layernorm_forward() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 2 id: 570 unit: def gaudi_T5LayerSelfAttention_forward() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 571 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4 id: 572 unit: def build_stream_inputs() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 5 id: 573 unit: def forward() file: optimum/habana/transformers/models/codegen/modeling_codegen.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 574 unit: def gaudi_codegen_block_forward() file: optimum/habana/transformers/models/codegen/modeling_codegen.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 575 unit: def forward() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 576 unit: def forward() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 577 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 578 unit: def __init__() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 579 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 580 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 581 unit: def __init__() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2 id: 582 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 583 unit: def forward() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 584 unit: def 
gaudi_bloom_block_forward() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 585 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 586 unit: def forward() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 587 unit: def __init__() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 588 unit: def forward() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 0 id: 589 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 590 unit: def __init__() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2 id: 591 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/video_llava/modeling_video_llava.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 592 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 593 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 594 unit: def forward() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 595 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 596 unit: def forward() file: optimum/habana/transformers/models/gpt2/modeling_gpt2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 597 unit: def forward() file: optimum/habana/transformers/models/gpt2/modeling_gpt2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 598 unit: def forward_impl() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 6 id: 599 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 4 id: 600 unit: def post_attn_pre_mlp() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 601 unit: def __init__() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 3 id: 602 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 603 unit: def gaudi_albert_forward() file: 
optimum/habana/transformers/models/albert/modeling_albert.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 604 unit: def __init__() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 11 LOC McCabe index: 3 number of parameters: 2 id: 605 unit: def _dynamic_frequency_update() file: optimum/habana/transformers/modeling_rope_utils.py start line: 0 end line: 0 size: 11 LOC McCabe index: 5 number of parameters: 3 id: 606 unit: def __init__() file: optimum/habana/trl/models/modeling_sd_base.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 607 unit: def gradient_checkpointing_wrap() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 1 id: 608 unit: def forward() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 2 id: 609 unit: def get_device_name() file: optimum/habana/utils.py start line: 0 end line: 0 size: 11 LOC McCabe index: 4 number of parameters: 0 id: 610 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 5 id: 611 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py start line: 0 end line: 0 size: 11 LOC McCabe index: 2 number of parameters: 5 id: 612 unit: def enable_forward_chunking() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 11 LOC McCabe index: 6 number of parameters: 3 id: 613 unit: def forward() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 614 unit: def step() file: optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 8 id: 615 unit: def __init__() file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py start line: 0 end line: 0 size: 11 LOC McCabe index: 1 number of parameters: 0 id: 616 unit: def rank_and_world() file: optimum/habana/distributed/__init__.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 1 id: 617 unit: def load_state_dict_into_model() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 8 id: 618 unit: def _copy_colwise() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 5 id: 619 unit: def unpack_grads() file: optimum/habana/distributed/fast_ddp.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 0 id: 620 unit: def override_model_in_loss() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 6 number of parameters: 3 id: 621 unit: def validate_column_names() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 3 id: 622 unit: def _get_candidate_generator() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 623 unit: def _dola_decoding() file: optimum/habana/transformers/generation/utils.py 
start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 624 unit: def gaudi_SpeechT5Attention_forward() file: optimum/habana/transformers/models/speecht5/modeling_speecht5.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 625 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 2 id: 626 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 627 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 628 unit: def __init__() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3 id: 629 unit: def build_chat_inputs() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 5 id: 630 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4 id: 631 unit: def save_vocabulary() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3 id: 632 unit: def _get_unpad_data() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 1 id: 633 unit: def decode_forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 634 unit: def _init_weights() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 2 id: 635 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/llava/modeling_llava.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 636 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 637 unit: def forward() file: optimum/habana/transformers/models/whisper/modeling_whisper.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 638 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/whisper/modeling_whisper.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 639 unit: def forward() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 640 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 641 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 5 id: 642 unit: def _init_weights() file: 
optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 2 id: 643 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 4 id: 644 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 645 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 646 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 647 unit: def forward() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 648 unit: def __init__() file: optimum/habana/transformers/models/video_llava/processing_video_llava.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 0 id: 649 unit: def gaudi_unconstrained_rational_quadratic_spline() file: optimum/habana/transformers/models/vits/modeling_vits.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 650 unit: def forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 651 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 652 unit: def gaudi_BartEncoder_forward() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 653 unit: def gaudi_BlipTextSelfAttention_forward() file: optimum/habana/transformers/models/blip/modeling_blip_text.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 654 unit: def gaudi_BlipTextAttention_forward() file: optimum/habana/transformers/models/blip/modeling_blip_text.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 655 unit: def gaudi_BlipTextLayer_forward() file: optimum/habana/transformers/models/blip/modeling_blip_text.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 656 unit: def gaudi_SeamlessM4TDecoderLayer_forward() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 657 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 658 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 4 id: 659 unit: def save_vocabulary() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 3 id: 660 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 
size: 10 LOC McCabe index: 1 number of parameters: 0 id: 661 unit: def gaudi_opt_attention_forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 662 unit: def forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 663 unit: def _get_unpad_data() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 1 id: 664 unit: def _init_weights() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 2 id: 665 unit: def __init__() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 3 id: 666 unit: def _get_unpad_data() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 1 id: 667 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 668 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 669 unit: def _init_weights() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 10 LOC McCabe index: 5 number of parameters: 2 id: 670 unit: def hpu_deepspeed_checkpointing() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 3 number of parameters: 3 id: 671 unit: def autocast_smart_context_manager() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 8 number of parameters: 2 id: 672 unit: def _generate_batched() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 673 unit: def train_minibatch() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 674 unit: def __init__() file: optimum/habana/trl/trainer/ddpo_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 675 unit: def cross_entropy_loss() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 2 number of parameters: 2 id: 676 unit: def _prepare_non_packed_dataloader() file: optimum/habana/trl/trainer/sft_trainer.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 677 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 678 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 679 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 680 unit: 
def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 681 unit: def controlnet_hpu() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 682 unit: def controlnet_capture_replay() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 683 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 684 unit: def forward() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 10 LOC McCabe index: 4 number of parameters: 2 id: 685 unit: def step() file: optimum/habana/diffusers/schedulers/scheduling_ddim.py start line: 0 end line: 0 size: 10 LOC McCabe index: 1 number of parameters: 0 id: 686 unit: def assertRef() file: conftest.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 4 id: 687 unit: def load_state_dict() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 7 id: 688 unit: def _tp_wrapped() file: optimum/habana/distributed/tp_wrapping.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 3 id: 689 unit: def distribute_module() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 3 id: 690 unit: def apply_rowwise_tp() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 4 id: 691 unit: def _prepare_decoder_input_ids_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 692 unit: def _prepare_generated_length() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 693 unit: def allocate() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 694 unit: def gaudi_FalconMambaForCausalLM_prepare_inputs_for_generation() file: optimum/habana/transformers/models/falcon_mamba/modeling_falcon_mamba.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 695 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 696 unit: def __init__() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 697 unit: def allocate() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 698 unit: def post_mlp() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 3 id: 699 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 
9 LOC McCabe index: 1 number of parameters: 0 id: 700 unit: def gaudi_MambaForCausalLM_prepare_inputs_for_generation() file: optimum/habana/transformers/models/mamba/modeling_mamba.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 701 unit: def allocate() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 702 unit: def forward_impl() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 6 id: 703 unit: def get_prompt() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 704 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 705 unit: def stream_generate() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 706 unit: def convert_id_to_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 2 id: 707 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 4 id: 708 unit: def allocate() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 709 unit: def prefill_forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 710 unit: def allocate() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 711 unit: def gaudi_gpt_neo_attention_forward() file: optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 712 unit: def gaudi_gpt_neo_selfattention_forward() file: optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 713 unit: def gaudi_gpt_neo_block_forward() file: optimum/habana/transformers/models/gpt_neo/modeling_gpt_neo.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 714 unit: def apply_FusedSDPA() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 715 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 716 unit: def __init__() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 717 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 718 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 
1 number of parameters: 0 id: 719 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 720 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 721 unit: def allocate() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 722 unit: def no_init_weights() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 1 id: 723 unit: def forward() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 724 unit: def _dynamic_frequency_update() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 3 id: 725 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 726 unit: def allocate() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 727 unit: def __call__() file: optimum/habana/transformers/models/video_llava/processing_video_llava.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 728 unit: def __init__() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 729 unit: def gaudi_XLMRoberta_Sdpa_SelfAttention_forward() file: optimum/habana/transformers/models/xlm_roberta/modeling_xlm_roberta.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 730 unit: def allocate() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 731 unit: def __init__() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 732 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 733 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 734 unit: def gaudi_BartAttention_forward() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 735 unit: def gaudi_DetrConvModel_forward() file: optimum/habana/transformers/models/detr/modeling_detr.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 3 id: 736 unit: def gaudi_xglm_attention_forward() file: optimum/habana/transformers/models/xglm/modeling_xglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 737 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 6 id: 738 unit: def impl() file: 
optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 5 id: 739 unit: def allocate() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 740 unit: def get_prompt() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 4 id: 741 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 4 id: 742 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 743 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 744 unit: def __init__() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 745 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 4 id: 746 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 4 id: 747 unit: def allocate() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 748 unit: def allocate() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 5 id: 749 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 750 unit: def apply_rotary_pos_emb() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 6 id: 751 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 2 id: 752 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 753 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 3 id: 754 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 755 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/vit/modeling_vit.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 756 unit: def __init__() file: optimum/habana/transformers/modeling_rope_utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 757 unit: def __str__() file: optimum/habana/transformers/training_args.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 1 id: 758 unit: def scheduler_step() file: 
optimum/habana/trl/models/modeling_sd_base.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 759 unit: def wrap_fw_for_hpu_graph_mode() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 9 LOC McCabe index: 2 number of parameters: 2 id: 760 unit: def compile_regions_deepspeed() file: optimum/habana/accelerate/utils/other.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 2 id: 761 unit: def get_fp8_recipe() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 9 LOC McCabe index: 4 number of parameters: 1 id: 762 unit: def get_hpu_memory_stats() file: optimum/habana/utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 1 id: 763 unit: def get_habana_frameworks_version() file: optimum/habana/utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 764 unit: def __init__() file: optimum/habana/diffusers/pipelines/ddpm/pipeline_ddpm.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 765 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 766 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 767 unit: def register_modules() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 9 LOC McCabe index: 5 number of parameters: 2 id: 768 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 769 unit: def unet_capture_replay() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_stable_video_diffusion_controlnet.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 770 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 771 unit: def __call__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 772 unit: def forward() file: optimum/habana/diffusers/models/unet_spatio_temporal_condition_controlnet.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 773 unit: def cogvideoXTransformerForwardGaudi() file: optimum/habana/diffusers/models/cogvideox_transformer_3d.py start line: 0 end line: 0 size: 9 LOC McCabe index: 1 number of parameters: 0 id: 774 unit: def index_for_timestep() file: optimum/habana/diffusers/schedulers/scheduling_flow_mactch_euler_discrete.py start line: 0 end line: 0 size: 9 LOC McCabe index: 3 number of parameters: 3 id: 775 unit: def __init__() file: conftest.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 2 id: 776 unit: def apply_tp() file: optimum/habana/distributed/tp_wrapping.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 3 id: 777 unit: def fixed_cross_entropy() file: optimum/habana/distributed/contextparallel.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 5 id: 
778 unit: def initialize_model_parallel() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 779 unit: def get_sequence_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 0 id: 780 unit: def get_sequence_parallel_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 0 id: 781 unit: def __init__() file: optimum/habana/transformers/generation/candidate_generator.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 782 unit: def _prepare_inputs_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 783 unit: def _get_hpu_graphs_kwargs() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 2 id: 784 unit: def _prepare_cache_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 785 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 786 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 787 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 788 unit: def encode() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 4 id: 789 unit: def build_prompt() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 3 id: 790 unit: def _pad() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 791 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 792 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 793 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 794 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 795 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 796 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 797 unit: def post_mlp() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 
2 number of parameters: 3 id: 798 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 799 unit: def post_mlp() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 800 unit: def put() file: optimum/habana/transformers/models/baichuan/generation_utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 2 id: 801 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 802 unit: def post_mlp() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 803 unit: def __init__() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 804 unit: def compute_num_masked_span() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 1 id: 805 unit: def gaudi_wav2vec2_forward() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 806 unit: def gaudi_wav2vec2forctc_forward() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 807 unit: def forward() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 808 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 809 unit: def post_mlp() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 810 unit: def _reorder_cache() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 3 id: 811 unit: def apply_customized_rope() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 6 id: 812 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 813 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 814 unit: def pad() file: optimum/habana/transformers/models/idefics2/image_processing_idefics2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 815 unit: def gaudi_SeamlessM4TAttention_forward() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 816 unit: def gaudi_SeamlessM4TTextToUnitForConditionalGeneration_prepare_inputs_for_generation() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 
number of parameters: 0 id: 817 unit: def gaudi_SeamlessM4TForTextToSpeech_prepare_inputs_for_generation() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 818 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 819 unit: def _history_to_prompt() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 2 id: 820 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 821 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 2 id: 822 unit: def __init__() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 823 unit: def _pad() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 824 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 825 unit: def post_mlp() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 3 id: 826 unit: def dropout_add() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 4 id: 827 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 828 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 829 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 2 id: 830 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 4 id: 831 unit: def checkpoint() file: optimum/habana/transformers/gradient_checkpointing.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 832 unit: def generate() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 833 unit: def batched_forward_pass() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 834 unit: def unwrap_generation_for_hpu_graph_mode() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 4 number of parameters: 2 id: 835 unit: def concatenated_inputs() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 836 unit: def _get_bucketed_len() file: optimum/habana/trl/trainer/sft_trainer.py start 
line: 0 end line: 0 size: 8 LOC McCabe index: 3 number of parameters: 2 id: 837 unit: def _get_buckets() file: optimum/habana/trl/trainer/sft_trainer.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 3 id: 838 unit: def has_compiled_regions() file: optimum/habana/accelerate/utils/other.py start line: 0 end line: 0 size: 8 LOC McCabe index: 5 number of parameters: 1 id: 839 unit: def extra_repr() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 1 id: 840 unit: def speed_metrics() file: optimum/habana/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 841 unit: def step() file: optimum/habana/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 842 unit: def __init__() file: optimum/habana/utils.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 843 unit: def gaudi_create_quantized_param() file: optimum/habana/quantizers/bitsandbytes.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 844 unit: def __call__() file: optimum/habana/diffusers/pipelines/ddpm/pipeline_ddpm.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 845 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 846 unit: def capture_replay() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 847 unit: def capture_replay() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 848 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 849 unit: def capture_replay() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 850 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 851 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 852 unit: def capture_replay() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 853 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 854 unit: def controlnet_hpu() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 855 unit: def controlnet_capture_replay() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 
id: 856 unit: def __call__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 8 LOC McCabe index: 1 number of parameters: 0 id: 857 unit: def roll_params() file: optimum/habana/diffusers/schedulers/scheduling_ddim.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 858 unit: def roll_params() file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py start line: 0 end line: 0 size: 8 LOC McCabe index: 2 number of parameters: 1 id: 859 unit: def _copy_embedding() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 860 unit: def __init__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 861 unit: def st_gaudi_transformer_save() file: optimum/habana/sentence_transformers/st_gaudi_transformer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 862 unit: def evaluation_loop() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 863 unit: def get_batch_sampler() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 864 unit: def _get_prompt_length() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 865 unit: def add_prompts_or_dataset_name_transform() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 866 unit: def add_prompts_or_dataset_name_column() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 867 unit: def get_final_stopping_criteria() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 1 id: 868 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 869 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 870 unit: def _attn() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 871 unit: def _update_causal_mask() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 872 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 873 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 874 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 875 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of 
parameters: 3 id: 876 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 2 id: 877 unit: def __init__() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 878 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 879 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 3 id: 880 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 881 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 882 unit: def apply_customized_rope() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 5 id: 883 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 884 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 885 unit: def gaudi_esm_for_protein_folding_forward() file: optimum/habana/transformers/models/esm/modeling_esmfold.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 886 unit: def __init__() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 887 unit: def __init__() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 888 unit: def __init__() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 889 unit: def __init__() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 890 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 891 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 892 unit: def chat() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 893 unit: def __init__() file: optimum/habana/transformers/models/baichuan/generation_utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 894 unit: def build_inputs_with_special_tokens() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 3 id: 895 unit: def __init__() file: 
optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 896 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 897 unit: def _reorder_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2 id: 898 unit: def __init__() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 899 unit: def gaudi_wav2vec2_encoder_forward() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 900 unit: def __init__() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 901 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 902 unit: def gaudi_BartEncoderLayer_forward() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 903 unit: def gaudi_BlipForConditionalGeneration_generate() file: optimum/habana/transformers/models/blip/modeling_blip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 904 unit: def gaudi_BlipForQuestionAnswering_generate() file: optimum/habana/transformers/models/blip/modeling_blip.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 905 unit: def gaudi_DetrLoss_loss_cardinality() file: optimum/habana/transformers/models/detr/modeling_detr.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 5 id: 906 unit: def gaudi_SeamlessM4TForTextToSpeech_generate() file: optimum/habana/transformers/models/seamless_m4t/modeling_seamless_m4t.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 907 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 908 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 909 unit: def is_empty() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 5 number of parameters: 1 id: 910 unit: def standard_attention() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 4 id: 911 unit: def attention_fn_default() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 4 id: 912 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 913 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 914 unit: def _pad_batch() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 7 
LOC McCabe index: 1 number of parameters: 0 id: 915 unit: def __init__() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 3 id: 916 unit: def __init__() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 917 unit: def gaudi_table_transformer_conv_encoder_forward() file: optimum/habana/transformers/models/table_transformer/modeling_table_transformer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 3 id: 918 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 919 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 3 id: 920 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 921 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 922 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 923 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 924 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 925 unit: def __init__() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 6 id: 926 unit: def post_mlp() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 927 unit: def _set_cos_sin_cache() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 928 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 3 id: 929 unit: def _reorder_cache() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 7 LOC McCabe index: 3 number of parameters: 2 id: 930 unit: def _tune_save_checkpoint() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 2 id: 931 unit: def _inner_training_loop() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 932 unit: def evaluation_loop() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 933 unit: def prediction_loop() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 934 unit: def _set_cos_sin_cache() file: 
optimum/habana/transformers/modeling_rope_utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 4 id: 935 unit: def __init__() file: optimum/habana/transformers/gaudi_configuration.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 2 id: 936 unit: def prediction_step() file: optimum/habana/transformers/trainer_seq2seq.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 937 unit: def compute_loss() file: optimum/habana/trl/trainer/reward_trainer.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 5 id: 938 unit: def __post_init__() file: optimum/habana/accelerate/utils/dataclasses.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 1 id: 939 unit: def import_te() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 0 id: 940 unit: def has_transformer_engine_layers() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 1 id: 941 unit: def set_seed() file: optimum/habana/utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 1 id: 942 unit: def get_driver_version() file: optimum/habana/utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 4 number of parameters: 0 id: 943 unit: def transformer_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 944 unit: def capture_replay() file: optimum/habana/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 945 unit: def __init__() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 946 unit: def save_pretrained() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 947 unit: def unet_capture_replay() file: optimum/habana/diffusers/pipelines/controlnet/pipeline_controlnet.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 948 unit: def _split_image_latents_into_batches() file: optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 949 unit: def __call__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 950 unit: def __call__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 951 unit: def from_unet() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 8 id: 952 unit: def roll_params() file: optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py start line: 0 end line: 0 size: 7 LOC McCabe index: 2 number of parameters: 1 id: 953 unit: def step() file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py start line: 0 end line: 0 size: 7 LOC McCabe index: 1 number of parameters: 0 id: 954 unit: def walk_path() file: conftest.py start line: 0 end line: 0 size: 6 LOC 
McCabe index: 6 number of parameters: 1 id: 955 unit: def get_adapted() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 4 id: 956 unit: def __getitem__() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 957 unit: def _load_partial_state_dict() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 958 unit: def distribute_layer() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 959 unit: def apply_colwise_tp() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 4 id: 960 unit: def create_single_node_setup_mpirun() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 1 id: 961 unit: def get_optimized_model_name() file: optimum/habana/checkpoint_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 1 id: 962 unit: def compute_loss() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 963 unit: def get_multi_dataset_batch_sampler() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 964 unit: def _prepare_decoder_attention_mask() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 965 unit: def _update_model_kwargs_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 966 unit: def _get_stopping_criteria() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 967 unit: def _ranking_fast() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 968 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 969 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 970 unit: def __init__() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 971 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 972 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 973 unit: def gaudi_gemma2_repeat_kv() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 974 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 975 unit: def 
gaudi_mixtral_repeat_kv() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 976 unit: def call_sparse_moe_op() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 977 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 978 unit: def _expand_mask() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 979 unit: def build_single_message() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 980 unit: def yarn_linear_ramp_mask() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 981 unit: def gaudi_deepseekv2_repeat_kv() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 982 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 983 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 984 unit: def gaudi_conv1d_forward() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 985 unit: def gaudi_phi_repeat_kv() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 986 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 987 unit: def __init__() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 988 unit: def gaudi_mistral_repeat_kv() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 989 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 990 unit: def __init__() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 991 unit: def gaudi_qwen2_repeat_kv() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 992 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 993 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 994 unit: def __next__() file: 
optimum/habana/transformers/models/baichuan/generation_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 1 id: 995 unit: def gaudi_llama_repeat_kv() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 996 unit: def get_k_proj_weight_dtype() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 1 id: 997 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 998 unit: def colwise_param_names() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 1 id: 999 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 3 id: 1000 unit: def _get_vision_features() file: optimum/habana/transformers/models/video_llava/modeling_video_llava.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1001 unit: def _gaudi_wav2vec2_compute_mask_indices() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1002 unit: def gaudi_gemma_repeat_kv() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1003 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1004 unit: def __init__() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1005 unit: def __init__() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1006 unit: def empty_image() file: optimum/habana/transformers/models/idefics2/image_processing_idefics2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 2 id: 1007 unit: def _expand_mask() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1008 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1009 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1010 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1011 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1012 unit: def _tokenize() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1013 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 6 LOC McCabe 
index: 1 number of parameters: 2 id: 1014 unit: def gaudi_starcoder2_repeat_kv() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1015 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1016 unit: def __init__() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1017 unit: def repeat_kv() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1018 unit: def update_sincos_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1019 unit: def forward() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1020 unit: def yarn_linear_ramp_mask() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1021 unit: def gaudi_deepseekv3_repeat_kv() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1022 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1023 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1024 unit: def gaudi_qwen2moe_repeat_kv() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1025 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 4 id: 1026 unit: def repeat_kv() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1027 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 2 id: 1028 unit: def train() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1029 unit: def prediction_step() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1030 unit: def propagate_args_to_deepspeed() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1031 unit: def to_dict() file: optimum/habana/transformers/training_args_seq2seq.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 1 id: 1032 unit: def _make_causal_mask() file: optimum/habana/transformers/modeling_attn_mask_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1033 unit: def to_4d() file: 
optimum/habana/transformers/modeling_attn_mask_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1034 unit: def _gaudi_prepare_4d_causal_attention_mask() file: optimum/habana/transformers/modeling_attn_mask_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1035 unit: def __init__() file: optimum/habana/transformers/integrations/deepspeed.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 2 id: 1036 unit: def gaudi_replace_with_awq_linear() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1037 unit: def evaluate() file: optimum/habana/transformers/trainer_seq2seq.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1038 unit: def predict() file: optimum/habana/transformers/trainer_seq2seq.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1039 unit: def step() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1040 unit: def log() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 4 id: 1041 unit: def compute_loss() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1042 unit: def has_repeated_blocks() file: optimum/habana/accelerate/utils/other.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 1 id: 1043 unit: def convert_model() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 1 id: 1044 unit: def unpack_weight_and_zeros() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 3 id: 1045 unit: def _preprocessing() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 1 id: 1046 unit: def disable() file: optimum/habana/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 0 id: 1047 unit: def enable() file: optimum/habana/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 3 number of parameters: 0 id: 1048 unit: def get_device_count() file: optimum/habana/utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 0 id: 1049 unit: def prepare_image_latents() file: optimum/habana/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1050 unit: def to() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1051 unit: def retrieve_timesteps() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1052 unit: def retrieve_timesteps() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1053 unit: def retrieve_timesteps() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 
number of parameters: 0 id: 1054 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1055 unit: def is_saveable_module() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 4 number of parameters: 2 id: 1056 unit: def to() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 3 id: 1057 unit: def _pad_batches() file: optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py start line: 0 end line: 0 size: 6 LOC McCabe index: 2 number of parameters: 3 id: 1058 unit: def _split_input_into_batches() file: optimum/habana/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1059 unit: def gaudi_unet_2d_model_forward() file: optimum/habana/diffusers/models/unet_2d.py start line: 0 end line: 0 size: 6 LOC McCabe index: 1 number of parameters: 0 id: 1060 unit: def get_reference() file: conftest.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1061 unit: def _get_adapter() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 5 LOC McCabe index: 5 number of parameters: 2 id: 1062 unit: def _get_safetensors_item() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1063 unit: def forward() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 5 LOC McCabe index: 5 number of parameters: 3 id: 1064 unit: def forward() file: optimum/habana/distributed/contextparallel.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1065 unit: def backward() file: optimum/habana/distributed/contextparallel.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1066 unit: def is_training_mode() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1067 unit: def get_sequence_data_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1068 unit: def get_sequence_data_parallel_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 0 id: 1069 unit: def _all_reduce() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1070 unit: def create_single_card_setup() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1071 unit: def create_single_node_setup() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1072 unit: def GaudiAdaptedAttention_getattr() file: optimum/habana/peft/layer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1073 unit: def write_checkpoints_json() file: optimum/habana/checkpoint_utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1074 unit: def add_dataset_name_column() file: 
optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 2 id: 1075 unit: def evaluate() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1076 unit: def _include_prompt_length() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 1077 unit: def maybe_add_prompts_or_dataset_name_column() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1078 unit: def _expand_inputs_for_generation() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1079 unit: def _prepare_generation_config() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1080 unit: def typeerror() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1081 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1082 unit: def _prepare_cross_attention_mask() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1083 unit: def apply_customized_rope() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1084 unit: def call_dynamic_moe_op() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1085 unit: def __call__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 3 id: 1086 unit: def gaudi_chatglm_repeat_kv() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1087 unit: def _config_to_kwargs() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1088 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1089 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1090 unit: def get_command() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1091 unit: def load_balancing_loss_func() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1092 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1093 unit: def backward() file: 
optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 2 id: 1094 unit: def _merge_input_ids_with_image_features() file: optimum/habana/transformers/models/llava/modeling_llava.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 4 id: 1095 unit: def apply_customized_rope() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1096 unit: def apply_customized_rope() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1097 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1098 unit: def import_module() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1099 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1100 unit: def apply_customized_rope() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1101 unit: def _gaudi_wav2vec2_mask_hidden_states() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1102 unit: def forward() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1103 unit: def gaudi_owlvitclasspredictionhead_forward() file: optimum/habana/transformers/models/owlvit/modeling_owlvit.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1104 unit: def inputs_merger() file: optimum/habana/transformers/models/idefics2/modeling_idefics2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1105 unit: def __call__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 3 id: 1106 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1107 unit: def gaudi_chatglm_repeat_kv() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1108 unit: def _config_to_kwargs() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1109 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1110 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 2 id: 1111 unit: def apply_customized_rope() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1112 unit: def apply_customized_rope() file: 
optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1113 unit: def apply_customized_rope() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 6 id: 1114 unit: def compress_kv() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1115 unit: def _expand_mask() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1116 unit: def rms_layernorm() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1117 unit: def evaluate() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1118 unit: def gaudi_awq_quantizer_process_model_after_weight_loading() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1119 unit: def gaudi_get_current_device() file: optimum/habana/trl/models/modeling_base.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 0 id: 1120 unit: def unet_hpu() file: optimum/habana/trl/models/modeling_sd_base.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1121 unit: def _gradient_checkpointing_wrap() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1122 unit: def hpu_post_init() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 5 LOC McCabe index: 3 number of parameters: 1 id: 1123 unit: def check_habana_frameworks_min_version() file: optimum/habana/utils.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 1 id: 1124 unit: def unet_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 0 id: 1125 unit: def enable_model_cpu_offload() file: optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1126 unit: def enable_model_cpu_offload() file: optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py start line: 0 end line: 0 size: 5 LOC McCabe index: 2 number of parameters: 3 id: 1127 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1128 unit: def __init__() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 8 id: 1129 unit: def reset_timestep_dependent_params() file: optimum/habana/diffusers/schedulers/scheduling_ddim.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1130 unit: def _get_variance() file: optimum/habana/diffusers/schedulers/scheduling_ddim.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 3 id: 1131 unit: def add_noise() file: optimum/habana/diffusers/schedulers/scheduling_ddim.py start line: 0 end line: 0 size: 5 LOC McCabe 
index: 1 number of parameters: 0 id: 1132 unit: def reset_timestep_dependent_params() file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py start line: 0 end line: 0 size: 5 LOC McCabe index: 1 number of parameters: 1 id: 1133 unit: def register_adapter() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1134 unit: def list_sources() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1135 unit: def __init__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 3 id: 1136 unit: def __getstate__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1137 unit: def model_parallel_is_initialized() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 0 id: 1138 unit: def sequence_parallel_is_initialized() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 0 id: 1139 unit: def sequence_data_parallel_is_initialized() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 0 id: 1140 unit: def get_sequence_parallel_src_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1141 unit: def amax_reduction_is_initialized() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 0 id: 1142 unit: def disable_compiler() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 1 id: 1143 unit: def apply_embedding_tp() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 1144 unit: def create_single_node_setup_deepspeed() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1145 unit: def add_model_card_callback() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1146 unit: def prepare_loss() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1147 unit: def _load_from_checkpoint() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1148 unit: def __init__() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1149 unit: def get_shape() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1150 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1151 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of 
parameters: 3 id: 1152 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1153 unit: def __init__() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1154 unit: def get_shape() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1155 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1156 unit: def post_attn_forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1157 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1158 unit: def post_mlp_forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1159 unit: def __init__() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1160 unit: def split_tensor_along_last_dim() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1161 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1162 unit: def get_shape() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1163 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 6 id: 1164 unit: def convert_token_to_id() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1165 unit: def get_vocab() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1166 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1167 unit: def yarn_find_correction_range() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 5 id: 1168 unit: def yarn_get_mscale() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1169 unit: def rotate_half() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1170 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1171 unit: def get_shape() file: 
optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1172 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1173 unit: def split_kv_b_proj() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1174 unit: def compress_kv() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1175 unit: def __init__() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1176 unit: def get_shape() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1177 unit: def all_reduce() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1178 unit: def forward() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 10 id: 1179 unit: def __init__() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 4 id: 1180 unit: def update_sincos_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1181 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1182 unit: def post_mlp_forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1183 unit: def get_k_proj_weight() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1184 unit: def get_k_proj_weight_dtype() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1185 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1186 unit: def post_attn_forward() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1187 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1188 unit: def get_shape() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1189 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1190 unit: def rotate_half() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 
end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1191 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1192 unit: def post_mlp_forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1193 unit: def update_sincos_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1194 unit: def post_attn_forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1195 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 1196 unit: def __getstate__() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1197 unit: def __setstate__() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1198 unit: def get_vocab() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1199 unit: def set_tp_for_inference() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1200 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1201 unit: def post_mlp_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1202 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1203 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1204 unit: def get_shape() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1205 unit: def get_k_proj_weight() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1206 unit: def update_sincos_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1207 unit: def post_attn_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1208 unit: def __init__() file: optimum/habana/transformers/models/decilm/configuration_decilm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1209 unit: def __init__() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1210 unit: def get_shape() 
file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1211 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1212 unit: def post_attn_forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1213 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1214 unit: def post_mlp_forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1215 unit: def __init__() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1216 unit: def __init__() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1217 unit: def _merge_heads() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 4 id: 1218 unit: def split_tensor_along_last_dim() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1219 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 6 id: 1220 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1221 unit: def get_shape() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1222 unit: def get_position_ids() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1223 unit: def get_multimodal_position_ids() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1224 unit: def forward() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1225 unit: def get_vocab() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1226 unit: def post_mlp_forward() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1227 unit: def update_sincos_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1228 unit: def post_attn_forward() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1229 unit: def post_attn_forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 
size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1230 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1231 unit: def post_mlp_forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1232 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1233 unit: def yarn_find_correction_range() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 5 id: 1234 unit: def yarn_get_mscale() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1235 unit: def rotate_half() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1236 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1237 unit: def get_shape() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1238 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1239 unit: def split_kv_b_proj() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1240 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1241 unit: def post_mlp_forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1242 unit: def __init__() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1243 unit: def get_shape() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1244 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 4 number of parameters: 2 id: 1245 unit: def post_attn_forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1246 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 3 id: 1247 unit: def rotate_half() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1248 unit: def _move_model_to_device() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 4 LOC 
McCabe index: 3 number of parameters: 3 id: 1249 unit: def write_bf16_fp32_ops_to_text_files() file: optimum/habana/transformers/gaudi_configuration.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1250 unit: def unwrap_deepspeed_model() file: optimum/habana/transformers/integrations/deepspeed.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1251 unit: def post_init_awq_gemm_hpu_modules() file: optimum/habana/transformers/integrations/awq.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1252 unit: def __post_init__() file: optimum/habana/trl/trainer/dpo_config.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1253 unit: def unwrap_fw_for_hpu_graph_mode() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1254 unit: def is_compiled_module() file: optimum/habana/accelerate/utils/other.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1255 unit: def is_fp8_available() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 0 id: 1256 unit: def error_raiser_hpu() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 2 id: 1257 unit: def __init__() file: optimum/habana/utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1258 unit: def check_habana_frameworks_version() file: optimum/habana/utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1259 unit: def gaudi_is_bitsandbytes_available() file: optimum/habana/quantizers/bitsandbytes.py start line: 0 end line: 0 size: 4 LOC McCabe index: 3 number of parameters: 0 id: 1260 unit: def save_lora_weights() file: optimum/habana/diffusers/pipelines/pipeline_utils.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1261 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1262 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1263 unit: def apply_rotary_emb_hpu() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 0 id: 1264 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 2 id: 1265 unit: def create_custom_forward() file: optimum/habana/diffusers/models/cogvideox_transformer_3d.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1266 unit: def zero_module() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 4 LOC McCabe index: 2 number of parameters: 1 id: 1267 unit: def reset_timestep_dependent_params() file: optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py start line: 0 end line: 0 size: 4 LOC McCabe index: 1 number of parameters: 1 id: 1268 unit: def __init__() file: conftest.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1269 unit: def setup_tp() file: optimum/habana/distributed/tp.py start line: 0 end line: 0 size: 
3 LOC McCabe index: 1 number of parameters: 3 id: 1270 unit: def _load_safetensors_state_dict() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1271 unit: def __init__() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1272 unit: def distribute_module() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1273 unit: def distribute_layer() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1274 unit: def __setstate__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1275 unit: def set_training_mode() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1276 unit: def set_eval_mode() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1277 unit: def get_model_parallel_group() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1278 unit: def get_sequence_parallel_group() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1279 unit: def get_sequence_data_parallel_group() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1280 unit: def set_sequence_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1281 unit: def set_sequence_data_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1282 unit: def set_sequence_parallel_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1283 unit: def set_sequence_data_parallel_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1284 unit: def setup_config_env_mpirun() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1285 unit: def create_multi_node_setup() file: optimum/habana/distributed/distributed_runner.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1286 unit: def __init__() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1287 unit: def __init__() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1288 unit: def __init__() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1289 unit: def __init__() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1290 unit: def 
__init__() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1291 unit: def reorder() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1292 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1293 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1294 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1295 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1296 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1297 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1298 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1299 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1300 unit: def __init__() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1301 unit: def reorder() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1302 unit: def attention_all_reduce() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1303 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1304 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1305 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1306 unit: def __init__() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1307 unit: def __init__() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1308 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1309 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 
id: 1310 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1311 unit: def reorder() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1312 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1313 unit: def _set_gradient_checkpointing() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1314 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1315 unit: def decode_tokens() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1316 unit: def get_prefix_tokens() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1317 unit: def _convert_id_to_token() file: optimum/habana/transformers/models/deepseek_v2/tokenization_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1318 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1319 unit: def reset_parameters() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1320 unit: def reorder() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1321 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1322 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1323 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1324 unit: def forward() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1325 unit: def post_all_reduce() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1326 unit: def __init__() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1327 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1328 unit: def __init__() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1329 unit: 
def __init__() file: optimum/habana/transformers/models/persimmon/modeling_persimmon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1330 unit: def __init__() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1331 unit: def reorder() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1332 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1333 unit: def update_sincos_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1334 unit: def __init__() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1335 unit: def __init__() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1336 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1337 unit: def reorder() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1338 unit: def attention_all_reduce() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1339 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1340 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1341 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1342 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1343 unit: def reorder() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1344 unit: def attention_all_reduce() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1345 unit: def _set_gradient_checkpointing() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1346 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1347 unit: def update_sincos_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1348 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start 
line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1349 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1350 unit: def _convert_id_to_token() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1351 unit: def __init__() file: optimum/habana/transformers/models/cohere/modeling_cohere.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1352 unit: def __init__() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1353 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1354 unit: def pre_mlp_forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1355 unit: def reorder() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1356 unit: def attention_all_reduce() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1357 unit: def import_module() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1358 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1359 unit: def update_sincos_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1360 unit: def __init__() file: optimum/habana/transformers/models/stablelm/modeling_stablelm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1361 unit: def __init__() file: optimum/habana/transformers/models/video_llava/modeling_video_llava.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1362 unit: def __init__() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1363 unit: def reorder() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1364 unit: def attention_all_reduce() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1365 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1366 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1367 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1368 unit: def __init__() file: 
optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1369 unit: def __init__() file: optimum/habana/transformers/models/bart/modeling_bart.py start line: 0 end line: 0 size: 3 LOC McCabe index: 3 number of parameters: 3 id: 1370 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1371 unit: def reorder() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1372 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1373 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1374 unit: def __init__() file: optimum/habana/transformers/models/glm4v/visual.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1375 unit: def get_prefix_tokens() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1376 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1377 unit: def reorder() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1378 unit: def attention_all_reduce() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1379 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1380 unit: def update_sincos_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1381 unit: def gaudi_falcon_linear_forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1382 unit: def __init__() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1383 unit: def attention_all_reduce() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1384 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1385 unit: def __init__() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 3 id: 1386 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1387 unit: def update_sincos_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end 
line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1388 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1389 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1390 unit: def reset_parameters() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1391 unit: def reorder() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1392 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1393 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1394 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1395 unit: def mlp_all_reduce() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1396 unit: def reorder() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 5 id: 1397 unit: def attention_all_reduce() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1398 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 4 id: 1399 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 2 id: 1400 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1401 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 6 id: 1402 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 6 id: 1403 unit: def __init__() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 3 LOC McCabe index: 5 number of parameters: 3 id: 1404 unit: def _get_device_module() file: optimum/habana/transformers/gradient_checkpointing.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1405 unit: def adapt_PreTrainedModelWrapper_to_gaudi() file: optimum/habana/trl/models/modeling_base.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1406 unit: def __init__() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 3 LOC McCabe 
index: 1 number of parameters: 3 id: 1407 unit: def __enter__() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1408 unit: def __exit__() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 4 id: 1409 unit: def __enter__() file: optimum/habana/utils.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1410 unit: def gaudi_validate_bnb_backend_availability() file: optimum/habana/quantizers/bitsandbytes.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 1 id: 1411 unit: def set_default_attn_processor_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_mlperf.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1412 unit: def image_encoder_hpu() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1413 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 2 id: 1414 unit: def set_default_attn_processor_hpu() file: optimum/habana/diffusers/models/unet_2d_condition.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 1 id: 1415 unit: def apply_rotary_emb() file: optimum/habana/diffusers/models/embeddings.py start line: 0 end line: 0 size: 3 LOC McCabe index: 1 number of parameters: 0 id: 1416 unit: def _set_gradient_checkpointing() file: optimum/habana/diffusers/models/controlnet_sdv.py start line: 0 end line: 0 size: 3 LOC McCabe index: 2 number of parameters: 3 id: 1417 unit: def assertEqual() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1418 unit: def __init__() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1419 unit: def __repr__() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1420 unit: def __str___() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1421 unit: def token() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1422 unit: def pytest_sessionfinish() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1423 unit: def baseline() file: conftest.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1424 unit: def colwise_param_names() file: optimum/habana/distributed/tp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1425 unit: def rowwise_param_names() file: optimum/habana/distributed/tp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1426 unit: def embedding_param_names() file: optimum/habana/distributed/tp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1427 unit: def import_module() file: optimum/habana/distributed/tp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1428 unit: def local_rank() file: optimum/habana/distributed/__init__.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1429 unit: def 
set_lazy_tensor() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1430 unit: def _copy_if_present() file: optimum/habana/distributed/serialization.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1431 unit: def __init__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1432 unit: def distribute_module() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1433 unit: def distribute_layer() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1434 unit: def __init__() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1435 unit: def distribute_module() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1436 unit: def distribute_layer() file: optimum/habana/distributed/strategy.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1437 unit: def ForCausalLMContextParallelLoss() file: optimum/habana/distributed/contextparallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1438 unit: def is_unitialized() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1439 unit: def get_model_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1440 unit: def get_model_parallel_rank() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1441 unit: def get_data_parallel_world_size() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1442 unit: def get_data_parallel_group() file: optimum/habana/distributed/parallel_state.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1443 unit: def symbolic() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1444 unit: def forward() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1445 unit: def backward() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1446 unit: def reduce_from_tensor_model_parallel_region() file: optimum/habana/distributed/tensorparallel.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1447 unit: def all_reduce_gradients() file: optimum/habana/distributed/fast_ddp.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1448 unit: def GaudiPolyLayerLinearForward() file: optimum/habana/peft/layer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1449 unit: def model_on_meta() file: optimum/habana/checkpoint_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1450 unit: def model_is_optimized() file: optimum/habana/checkpoint_utils.py start 
line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1451 unit: def st_gaudi_transformer_tokenize() file: optimum/habana/sentence_transformers/st_gaudi_transformer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1452 unit: def collect_features() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1453 unit: def get_eval_dataloader() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1454 unit: def get_optimizer_cls_and_kwargs() file: optimum/habana/sentence_transformers/st_gaudi_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1455 unit: def create_return_const_tensor() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1456 unit: def gaudi_MaxLengthCriteria_call() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1457 unit: def gaudi_MaxTimeCriteria_call() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1458 unit: def gaudi_EosTokenCriteria_call() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1459 unit: def needs_tensor_output() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1460 unit: def gaudi_StoppingCriteriaList_call() file: optimum/habana/transformers/generation/stopping_criteria.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1461 unit: def update_model_kwargs_for_bucketing() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1462 unit: def move() file: optimum/habana/transformers/generation/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1463 unit: def convert_into_dtypes() file: optimum/habana/transformers/trainer_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1464 unit: def forward() file: optimum/habana/transformers/models/qwen2_vl/modeling_qwen2_vl.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9 id: 1465 unit: def __init__() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1466 unit: def forward() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1467 unit: def forward() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1468 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1469 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1470 unit: def 
update_sincos_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1471 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1472 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1473 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1474 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gptj/modeling_gptj.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1475 unit: def extra_repr() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1476 unit: def forward() file: optimum/habana/transformers/models/mllama/modeling_mllama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 8 id: 1477 unit: def __init__() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1478 unit: def forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1479 unit: def forward() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1480 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1481 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1482 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1483 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1484 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1485 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1486 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma2/modeling_gemma2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1487 unit: def shape() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1488 unit: def unshape() file: optimum/habana/transformers/models/t5/modeling_t5.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1489 unit: def calculate_routing_tensors() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number 
of parameters: 0 id: 1490 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mixtral/modeling_mixtral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1491 unit: def gaudi_MambaForCausalLM_update_model_kwargs_for_generation() file: optimum/habana/transformers/models/mamba/modeling_mamba.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1492 unit: def default_init() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1493 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9 id: 1494 unit: def __init__() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1495 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1496 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1497 unit: def _make_causal_mask() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1498 unit: def forward() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1499 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1500 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1501 unit: def _get_layer() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1502 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1503 unit: def _init_weights() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1504 unit: def get_input_embeddings() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1505 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1506 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1507 unit: def _prepare_decoder_attention_mask() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1508 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1509 unit: def 
_reorder_cache() file: optimum/habana/transformers/models/chatglm/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1510 unit: def unk_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1511 unit: def pad_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1512 unit: def eos_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1513 unit: def unk_token_id() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1514 unit: def pad_token_id() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1515 unit: def eos_token_id() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1516 unit: def unk_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1517 unit: def pad_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1518 unit: def eos_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1519 unit: def vocab_size() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1520 unit: def _tokenize() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1521 unit: def _convert_token_to_id() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1522 unit: def _convert_id_to_token() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1523 unit: def convert_tokens_to_string() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1524 unit: def build_inputs_with_special_tokens() file: optimum/habana/transformers/models/chatglm/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1525 unit: def convert_ids_to_tokens() file: optimum/habana/transformers/models/deepseek_v2/tokenization_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1526 unit: def yarn_find_correction_dim() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1527 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 
id: 1528 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1529 unit: def forward() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1530 unit: def _shape() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1531 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1532 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1533 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1534 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1535 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1536 unit: def set_input_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1537 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1538 unit: def set_input_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1539 unit: def get_output_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1540 unit: def set_output_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1541 unit: def set_decoder() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1542 unit: def get_decoder() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1543 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1544 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1545 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1546 unit: def set_input_embeddings() file: 
optimum/habana/transformers/models/deepseek_v2/modeling_deepseek_v2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1547 unit: def __init__() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1548 unit: def forward() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1549 unit: def forward() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1550 unit: def gaudi_get_extended_attention_mask() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1551 unit: def __init__() file: optimum/habana/transformers/models/modeling_all_models.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1552 unit: def _pad_inputs() file: optimum/habana/transformers/models/llava/modeling_llava.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1553 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1554 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1555 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/phi/modeling_phi.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1556 unit: def forward() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 8 id: 1557 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1558 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1559 unit: def update_sincos_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1560 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1561 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1562 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1563 unit: def update_sincos_cache() file: optimum/habana/transformers/models/mistral/modeling_mistral.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1564 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9 id: 1565 unit: def __init__() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 2 LOC 
McCabe index: 1 number of parameters: 1 id: 1566 unit: def forward() file: optimum/habana/transformers/models/clip/modeling_clip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1567 unit: def __init__() file: optimum/habana/transformers/models/mpt/modeling_mpt.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1568 unit: def __init__() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1569 unit: def get_gaudi_distributed_attention() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1570 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1571 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1572 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1573 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1574 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1575 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1576 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1577 unit: def _reorder_cache() file: optimum/habana/transformers/models/qwen2/modeling_qwen2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1578 unit: def gaudi_baichuan_build_alibi_tensor() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1579 unit: def __init__() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1580 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3 id: 1581 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1582 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1583 unit: def forward() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9 id: 1584 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1585 unit: def reorder_kv_cache() file: 
optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1586 unit: def update_sincos_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1587 unit: def get_input_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1588 unit: def set_input_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1589 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1590 unit: def get_input_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1591 unit: def set_input_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1592 unit: def get_output_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1593 unit: def set_output_embeddings() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1594 unit: def set_decoder() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1595 unit: def get_decoder() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1596 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1597 unit: def update_sincos_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1598 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1599 unit: def _reorder_cache() file: optimum/habana/transformers/models/baichuan/modeling_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1600 unit: def end() file: optimum/habana/transformers/models/baichuan/generation_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1601 unit: def __iter__() file: optimum/habana/transformers/models/baichuan/generation_utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1602 unit: def vocab_size() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1603 unit: def _tokenize() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1604 unit: def 
_convert_token_to_id() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1605 unit: def get_special_tokens_mask() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1606 unit: def create_token_type_ids_from_sequences() file: optimum/habana/transformers/models/baichuan/tokenization_baichuan.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1607 unit: def gaudi_bloom_build_alibi_tensor() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1608 unit: def gaudi_bloom_convert_to_standard_cache() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1609 unit: def gaudi_bloom_convert_to_bloom_cache() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1610 unit: def _reorder_cache() file: optimum/habana/transformers/models/bloom/modeling_bloom.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1611 unit: def colwise_param_names() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1612 unit: def rowwise_param_names() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1613 unit: def forward() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1614 unit: def __init__() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1615 unit: def get_gaudi_distributed_attention() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0 id: 1616 unit: def rowwise_param_names() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1 id: 1617 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1618 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1619 unit: def update_sincos_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1620 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2 id: 1621 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4 id: 1622 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2 id: 1623 
unit: def update_sincos_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1624 unit: def _reorder_cache() file: optimum/habana/transformers/models/llama/modeling_llama.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1625 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/decilm/modeling_decilm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1626 unit: def _merge_input_ids_with_visual_features() file: optimum/habana/transformers/models/video_llava/modeling_video_llava.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1627 unit: def _gaudi_wav2vec2_sample_negative_indices() file: optimum/habana/transformers/models/wav2vec2/modeling_wav2vec2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1628 unit: def __init__() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1629 unit: def forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1630 unit: def forward() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1631 unit: def gaudi_flash_attn_v1() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1632 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1633 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1634 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1635 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2
id: 1636 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1637 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1638 unit: def update_sincos_cache() file: optimum/habana/transformers/models/gemma/modeling_gemma.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1639 unit: def gaudi_eager_attention_forward() file: optimum/habana/transformers/models/gpt_neox/modeling_gpt_neox.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1640 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9
id: 1641 unit: def __init__() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1642 unit: def forward() file: optimum/habana/transformers/models/siglip/modeling_siglip.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1643 unit: def gaudi_BlipTextLMHead_prepare_inputs_for_generation() file: optimum/habana/transformers/models/blip/modeling_blip_text.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1644 unit: def gaudi_VisionEncoderDecoderModel_prepare_inputs_for_generation() file: optimum/habana/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1645 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/idefics2/modeling_idefics2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1646 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gpt2/modeling_gpt2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1647 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/gpt2/modeling_gpt2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1648 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/xglm/modeling_xglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1649 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9
id: 1650 unit: def __init__() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1651 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1652 unit: def default_init() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1653 unit: def forward() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1654 unit: def _make_causal_mask() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1655 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1656 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1657 unit: def _get_layer() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1658 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2
id: 1659 unit: def _init_weights() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1660 unit: def get_input_embeddings() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1661 unit: def set_input_embeddings() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1662 unit: def _prepare_decoder_attention_mask() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1663 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1664 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1665 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1666 unit: def _reorder_cache() file: optimum/habana/transformers/models/glm4v/modeling_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1667 unit: def vocab_size() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1668 unit: def _convert_token_to_id() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1669 unit: def _convert_id_to_token() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1670 unit: def build_inputs_with_special_tokens() file: optimum/habana/transformers/models/glm4v/tokenization_chatglm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1671 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1672 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1673 unit: def update_sincos_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1674 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2
id: 1675 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1676 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1677 unit: def update_sincos_cache() file: optimum/habana/transformers/models/starcoder2/modeling_starcoder2.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1678 unit: def forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9
id: 1679 unit: def __init__() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1680 unit: def forward() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1681 unit: def _split_heads() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1682 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1683 unit: def update_sincos_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1684 unit: def update_sincos_cache() file: optimum/habana/transformers/models/falcon/modeling_falcon.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1685 unit: def _shape() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1686 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/opt/modeling_opt.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1687 unit: def yarn_find_correction_dim() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1688 unit: def __init__() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1689 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1690 unit: def forward() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1691 unit: def _shape() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1692 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1693 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1694 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1695 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2
id: 1696 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1697 unit: def set_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1698 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1699 unit: def set_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1700 unit: def get_output_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1701 unit: def set_output_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1702 unit: def set_decoder() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1703 unit: def get_decoder() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1704 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1705 unit: def update_sincos_cache() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1706 unit: def get_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1707 unit: def set_input_embeddings() file: optimum/habana/transformers/models/deepseek_v3/modeling_deepseek_v3.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1708 unit: def __init__() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1709 unit: def forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1710 unit: def forward() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1711 unit: def allocate_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1712 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1713 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1714 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 2
id: 1715 unit: def reorder_kv_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1716 unit: def update_sincos_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1717 unit: def _reorder_cache() file: optimum/habana/transformers/models/qwen2_moe/modeling_qwen2_moe.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1718 unit: def _make_causal_mask() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1719 unit: def forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1720 unit: def _shape() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1721 unit: def _flash_attention_forward() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1722 unit: def get_input_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1723 unit: def set_input_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1724 unit: def get_input_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1725 unit: def set_input_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1726 unit: def get_output_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1727 unit: def set_output_embeddings() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1728 unit: def set_decoder() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 2
id: 1729 unit: def get_decoder() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1730 unit: def prepare_inputs_for_generation() file: optimum/habana/transformers/models/minicpm/modeling_minicpm.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1731 unit: def gaudi_vit_self_attention_forward() file: optimum/habana/transformers/models/vit/modeling_vit.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1732 unit: def _maybe_log_save_evaluate() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1733 unit: def training_step() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1734 unit: def predict() file: optimum/habana/transformers/trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1735 unit: def __call__() file: optimum/habana/trl/models/modeling_sd_base.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1736 unit: def make_inputs_require_grad() file: optimum/habana/trl/trainer/ppo_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1737 unit: def make_inputs_require_grad() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1738 unit: def make_inputs_require_grad() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1739 unit: def concatenated_forward() file: optimum/habana/trl/trainer/dpo_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1740 unit: def make_inputs_require_grad() file: optimum/habana/trl/trainer/sft_trainer.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1741 unit: def is_repeated_blocks() file: optimum/habana/accelerate/utils/other.py start line: 0 end line: 0 size: 2 LOC McCabe index: 3 number of parameters: 1
id: 1742 unit: def create_fp8_context() file: optimum/habana/accelerate/utils/transformer_engine.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1743 unit: def prepare_data_loader() file: optimum/habana/accelerate/accelerator.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1744 unit: def post_init() file: optimum/habana/AutoAWQ/gemm_hpu.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1745 unit: def warmup_inference_steps_time_adjustment() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1746 unit: def to_gb_rounded() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1747 unit: def start() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1748 unit: def is_running() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1749 unit: def total_time() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1750 unit: def __exit__() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1751 unit: def noop() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1752 unit: def stop() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1753 unit: def start() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1754 unit: def step() file: optimum/habana/utils.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1755 unit: def denoising_value_valid() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1
id: 1756 unit: def denoising_value_valid() file: optimum/habana/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1
id: 1757 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1758 unit: def _split_inputs_into_batches() file: optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1759 unit: def prepare_latents() file: optimum/habana/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1760 unit: def prepare_latents() file: optimum/habana/diffusers/pipelines/cogvideox/pipeline_cogvideox.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1761 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1762 unit: def forward() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 4
id: 1763 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 1
id: 1764 unit: def forward() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 3
id: 1765 unit: def forward() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 9
id: 1766 unit: def __init__() file: optimum/habana/diffusers/models/attention_processor.py start line: 0 end line: 0 size: 2 LOC McCabe index: 2 number of parameters: 1
id: 1767 unit: def CogVideoXCausalConv3dforwardGaudi() file: optimum/habana/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1768 unit: def scale_model_input() file: optimum/habana/diffusers/schedulers/scheduling_euler_discrete.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
id: 1769 unit: def scale_model_input() file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0
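Every entry above follows the same record layout: id, unit, file, start line, end line, size in LOC, McCabe index, and number of parameters. For readers who want to post-process this listing, the following is a minimal Python sketch that parses records with that field order into structured objects. It is only an illustration of the record schema: the UnitRecord dataclass, the RECORD_RE pattern, and the parse_records helper are hypothetical names invented here, not part of optimum-habana or of the tool that produced this report.

import re
from dataclasses import dataclass


@dataclass
class UnitRecord:
    """One entry of the unit listing: a function with its size and complexity metrics."""
    id: int
    unit: str
    file: str
    start_line: int
    end_line: int
    loc: int
    mccabe: int
    parameters: int


# Regex mirroring the field order used by each record in the listing above.
RECORD_RE = re.compile(
    r"id:\s*(\d+)\s+unit:\s*(.+?)\s+file:\s*(\S+)\s+"
    r"start line:\s*(\d+)\s+end line:\s*(\d+)\s+"
    r"size:\s*(\d+)\s+LOC\s+McCabe index:\s*(\d+)\s+"
    r"number of parameters:\s*(\d+)"
)


def parse_records(text: str) -> list:
    """Parse every 'id: ... unit: ...' record found in the raw listing text."""
    return [
        UnitRecord(int(m[1]), m[2], m[3], int(m[4]), int(m[5]), int(m[6]), int(m[7]), int(m[8]))
        for m in RECORD_RE.finditer(text)
    ]


if __name__ == "__main__":
    # Usage example on one record copied verbatim from the listing.
    sample = (
        "id: 1769 unit: def scale_model_input() "
        "file: optimum/habana/diffusers/schedulers/scheduling_euler_ancestral_discrete.py "
        "start line: 0 end line: 0 size: 2 LOC McCabe index: 1 number of parameters: 0"
    )
    print(parse_records(sample))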