models/config.py:
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class VLMConfig:
    vit_hidden_dim: int = 768
    vit_inter_dim: int = 4 * vit_hidden_dim
    vit_patch_size: int = 16
    vit_img_size: int = 256
    vit_n_heads: int = 12
    vit_dropout: float = 0.0
    vit_n_blocks: int = 12
    vit_ln_eps: float = 1e-6
    vit_cls_flag: bool = False
    vit_model_type: str = 'google/siglip2-base-patch16-256'

    lm_hidden_dim: int = 576
    lm_inter_dim: int = 1536
    lm_rms_eps: float = 1e-5
    lm_re_base: int = 100000
    lm_max_position_embeddings: int = 8192
    lm_base_vocab_size: int = 49152
    extra_token_amount: int = 1  # Number of extra tokens for the VLM (image start, image end, image token)
    lm_vocab_size: int = lm_base_vocab_size + extra_token_amount  # Workaround: this cannot be derived from vlm_extra_tokens below, since a mutable dict default must be wrapped in field(default_factory=...), and the resulting Field object has no len()
    lm_n_heads: int = 9
    lm_n_kv_heads: int = 3
    lm_dropout: float = 0.0
    lm_n_blocks: int = 30
    lm_attn_scaling: float = 1.0
    lm_max_length: int = 1024
    lm_use_tokens: bool = False  # Decide whether the LM expects tokens or embeddings as input (set to False when using it as the backbone of the VLM)
    lm_tie_weights: bool = True  # Decide whether to tie the LM head weights to the token embedding weights
    lm_model_type: str = 'HuggingFaceTB/SmolLM2-360M-Instruct'
    lm_tokenizer: str = 'HuggingFaceTB/SmolLM2-360M-Instruct'
    lm_chat_template: str = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
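    # For illustration (assumption, not in the original file): for a single user
    # message "Hello" with add_generation_prompt=True, the template above renders:
    #   <|im_start|>user
    #   Hello<|im_end|>
    #   <|im_start|>assistant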
    lm_eos_token_id: int = 0

    mp_pixel_shuffle_factor: int = 2
    mp_image_token_length: int = 64
    vlm_extra_tokens: dict[str, str] = field(default_factory=lambda: {"image_token": "<|image|>"})  # Optionally also: "boi_token": "<|image_start|>", "eoi_token": "<|image_end|>"
    vlm_load_backbone_weights: bool = True
    vlm_checkpoint_path: str = 'checkpoints'
    hf_repo_name: str = 'nanoVLM'
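
# Note (added for illustration, not in the original file): derived class-level
# defaults such as vit_inter_dim, lm_vocab_size, eval_interval and
# stats_log_interval are evaluated once at class definition time, so overriding
# their inputs at construction time does not update them, e.g.:
#
#     cfg = VLMConfig(lm_base_vocab_size=32000)
#     cfg.lm_vocab_size  # still 49153, not 32001; a __post_init__ would be needed to recompute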


@dataclass
class TrainConfig:
    lr_mp: float = 0.00512
    lr_backbones: float = 5e-5
    data_cutoff_idx: Optional[int] = None
    val_ratio: float = 0.025
    batch_size: int = 16
    gradient_accumulation_steps: int = 4
    mmstar_batch_size: int = 32
    max_grad_norm: float = 1.0
    eval_in_epochs: bool = True
    eval_interval: int = gradient_accumulation_steps * 100
    stats_log_interval: int = gradient_accumulation_steps * 25
    max_training_steps: int = 5000
    max_images_per_example: int = 4
    max_images_per_knapsack: int = 18
    max_sample_length: int = 1024
    compile: bool = False
    resume_from_vlm_checkpoint: bool = False  # Resume training from a checkpoint of the whole VLM instead of starting from scratch
    train_dataset_path: str = 'HuggingFaceM4/the_cauldron'
    train_dataset_name: tuple[str, ...] = ("ai2d", "aokvqa", "chart2text", "chartqa", "clevr", "cocoqa", "datikz", "diagram_image_to_text", "docvqa", "dvqa", "figureqa", "finqa", "geomverse", "hateful_memes", "hitab", "iam", "iconqa", "infographic_vqa", "intergps", "localized_narratives", "mapqa", "multihiertt", "ocrvqa", "plotqa", "raven", "rendered_text", "robut_sqa", "robut_wikisql", "robut_wtq", "scienceqa", "screen2words", "st_vqa", "tabmwp", "tallyqa", "tat_qa", "textcaps", "textvqa", "tqa", "vistext", "visual7w", "visualmrc", "vqarad", "vqav2", "vsr", "websight")
    test_dataset_path: str = "Lin-Chen/MMStar"
    wandb_entity: str = "HuggingFace"  # The wandb entity to log to
    log_wandb: bool = True
    use_lmms_eval: bool = True  # Use lmms-eval for evaluation
    lmms_eval_tasks: str = 'mmstar,mmmu,ocrbench,textvqa'  # Pass additional tasks as one string, separated by commas without spaces (e.g. 'mmstar,mmmu,ocrbench')
    lmms_eval_limit: Optional[int] = None
    lmms_eval_batch_size: int = 128
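

# Minimal usage sketch (assumption, not part of the original file): both configs
# are plain dataclasses, so fields can be overridden per run and the result can
# be serialized with dataclasses.asdict for logging or checkpointing.
if __name__ == "__main__":
    from dataclasses import asdict

    vlm_cfg = VLMConfig(vit_img_size=512)
    train_cfg = TrainConfig(batch_size=8, log_wandb=False)

    print(vlm_cfg.lm_vocab_size)    # 49153 (49152 base tokens + 1 extra image token)
    print(train_cfg.eval_interval)  # 400 (gradient_accumulation_steps * 100)
    print(len(asdict(train_cfg)))   # number of TrainConfig fields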