server/text_generation_server/models/custom_modeling/gemma3/configuration_gemma3.py

# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/gemma3/modular_gemma3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_gemma3.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional

from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation
from transformers.utils import logging
from transformers import SiglipVisionConfig


logger = logging.get_logger(__name__)


class Gemma3TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3Model`]. It is used to instantiate a
    Gemma3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Gemma3-4B.
    e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262144):
            Vocabulary size of the Gemma3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Gemma3Model`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details check out
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        sliding_window (`int`, *optional*, defaults to 4096):
            In Gemma3, every other layer uses sliding window attention. This is the size of the sliding window.
        query_pre_attn_scalar (`float`, *optional*):
            The scaling factor used on the attention scores.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings used for global attention.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
            type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this
            value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If
                    unspecified, it defaults to the value recommended by the implementation, using the `factor` field
                    to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp
                    function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp
                    function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        rope_local_base_freq (float, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings for local attention.
        sliding_window_pattern (`int`, *optional*, defaults to 6):
            Pattern for the sliding window attention.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        final_logit_softcapping (`bool`, *optional*, defaults to `True`):
            Whether to apply logit softcapping or not
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            Scaling factor when applying tanh soft-capping on the attention scores.
        cache_implementation (`str`, *optional*, defaults to `"hybrid"`):
            The cache type to be used with `generate`.

    ```python
    >>> from transformers import Gemma3Model, Gemma3TextConfig

    >>> # Initializing a Gemma3 gemma3-4b style configuration
    >>> configuration = Gemma3TextConfig()

    >>> # Initializing a model from the gemma3-4b style configuration
    >>> model = Gemma3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3_text"

    def __init__(
        self,
        vocab_size: int = 262_144,
        hidden_size: int = 2304,
        intermediate_size: int = 9216,
        num_hidden_layers: int = 26,
        num_attention_heads: int = 8,
        num_key_value_heads: int = 4,
        head_dim: int = 256,
        sliding_window: int = 4096,
        query_pre_attn_scalar: Optional[float] = 256,
        rope_theta: float = 1_000_000.0,
        rope_scaling=None,
        rope_local_base_freq: float = 10_000.0,
        sliding_window_pattern: int = 6,
        rms_norm_eps: float = 1e-6,
        hidden_activation: str = "gelu_pytorch_tanh",
        pad_token_id: int = 0,
        eos_token_id: int = 1,
        bos_token_id: int = 2,
        tie_word_embeddings: bool = True,
        max_position_embeddings: int = 131_072,
        initializer_range: float = 0.02,
        attention_bias: bool = False,
        attention_dropout: float = 0.0,
        use_cache: bool = True,
        final_logit_softcapping=None,
        attn_logit_softcapping=None,
        cache_implementation: str = "hybrid",
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.rope_local_base_freq = rope_local_base_freq
        # For configuring HybridCache to work with 5:1 attention pattern
        self.sliding_window_pattern = sliding_window_pattern
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.cache_implementation = cache_implementation
        rope_config_validation(self)


class Gemma3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3ForConditionalGeneration`]. It is used to
    instantiate a Gemma3ForConditionalGeneration according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Gemma3-4B.
    e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`Union[Gemma3TextConfig, dict]`, *optional*):
            The config object of the text backbone.
        vision_config (`Union[AutoConfig, dict]`, *optional*):
            Custom vision config or dict.
        mm_tokens_per_image (`int`, *optional*, defaults to 256):
            The number of tokens per image embedding.
        boi_token_index (`int`, *optional*, defaults to 255999):
            The begin-of-image token index to wrap the image prompt.
        eoi_token_index (`int`, *optional*, defaults to 256000):
            The end-of-image token index to wrap the image prompt.
        image_token_index (`int`, *optional*, defaults to 262144):
            The image token index to encode the image prompt.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import Gemma3ForConditionalGeneration, Gemma3Config, SiglipVisionConfig, Gemma3TextConfig

    >>> # Initializing a Siglip-like vision config
    >>> vision_config = SiglipVisionConfig()

    >>> # Initializing a Gemma3 Text config
    >>> text_config = Gemma3TextConfig()

    >>> # Initializing a Gemma3 gemma-3-4b style configuration
    >>> configuration = Gemma3Config(text_config=text_config, vision_config=vision_config)

    >>> # Initializing a model from the gemma-3-4b style configuration
    >>> model = Gemma3ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3"
    sub_configs = {
        "text_config": Gemma3TextConfig,
        "vision_config": SiglipVisionConfig,
    }

    def __init__(
        self,
        text_config: Optional[Gemma3TextConfig] = None,
        vision_config: Optional[SiglipVisionConfig] = None,
        mm_tokens_per_image: int = 256,
        boi_token_index: int = 255_999,
        eoi_token_index: int = 256_000,
        image_token_index: int = 262_144,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        if text_config is None:
            text_config = Gemma3TextConfig()
            logger.info("text_config is None, using default Gemma3TextConfig text config.")
        elif isinstance(text_config, dict):
            text_config = Gemma3TextConfig(**text_config)

        if isinstance(vision_config, dict):
            vision_config = SiglipVisionConfig(**vision_config)
        else:
            vision_config = SiglipVisionConfig()
            logger.info(
                "vision_config is None or incompatible with SiglipVisionConfig initialization. Gemma3 will be limited "
                "to text tasks."
            )

        self.text_config = text_config
        self.vision_config = vision_config
        self.mm_tokens_per_image = mm_tokens_per_image
        self.boi_token_index = boi_token_index
        self.eoi_token_index = eoi_token_index
        self.image_token_index = image_token_index
        self.initializer_range = initializer_range

        super().__init__(**kwargs)


__all__ = ["Gemma3Config", "Gemma3TextConfig"]
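

# ---------------------------------------------------------------------------
# Minimal usage sketch: it shows how the composite Gemma3Config promotes plain
# dict sub-configs to Gemma3TextConfig / SiglipVisionConfig, the same way they
# arrive when a checkpoint's config.json is loaded. The override values below
# are illustrative assumptions, not taken from any released checkpoint.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A dict text_config is promoted to Gemma3TextConfig; unspecified fields
    # keep the documented defaults (e.g. vocab_size=262144, sliding_window=4096).
    config = Gemma3Config(
        text_config={"num_hidden_layers": 26, "sliding_window_pattern": 6},
    )

    # No vision_config was given, so a default SiglipVisionConfig is constructed
    # and an informational message is logged.
    print(type(config.text_config).__name__)  # Gemma3TextConfig
    print(type(config.vision_config).__name__)  # SiglipVisionConfig
    print(config.text_config.rope_theta)  # 1000000.0 (global-attention RoPE base)
    print(config.text_config.rope_local_base_freq)  # 10000.0 (local-attention RoPE base)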