In ultravox/model/ultravox_model.py:
import logging
import re

import peft
import torch


def apply_lora(model: torch.nn.Module, lora_config: dict) -> torch.nn.Module:
    """
    Applies LoRA fine-tuning to the model. If the `r` parameter is set to 0,
    the model is frozen entirely instead, except for any layers whose names
    match a pattern in `unfreeze_layers`.
    """
    # `unfreeze_layers` is not a peft.LoraConfig field, so pull it out before
    # constructing the config. Note that this mutates the caller's dict.
    unfreeze_layers = lora_config.pop("unfreeze_layers", None)
    lora_config = peft.LoraConfig(**(lora_config or {}))

    if lora_config.r == 0:
        # Freeze the model entirely, except for the specified layers.
        for name, param in model.named_parameters():
            # re.match anchors at the start of the name, so each pattern acts
            # as a prefix match unless explicitly anchored with `$`.
            if not unfreeze_layers or not any(
                re.match(layer, name) for layer in unfreeze_layers
            ):
                param.requires_grad = False
            else:
                logging.info(f"Unfreezing layer: {name} with #{param.numel()} params")
    else:
        model = peft.get_peft_model(model, lora_config)

    return model
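
For context, here is a minimal usage sketch showing both branches. The GPT-2 checkpoint, the `c_attn` target module, and the regex pattern below are illustrative assumptions for demonstration, not Ultravox's actual training configuration.

# A minimal usage sketch. The model, target module, and regex below are
# illustrative assumptions, not Ultravox's actual configuration.
import transformers

base = transformers.AutoModelForCausalLM.from_pretrained("gpt2")

# r > 0: the model is wrapped in PEFT LoRA adapters.
lora_model = apply_lora(
    base,
    {"r": 8, "lora_alpha": 16, "target_modules": ["c_attn"]},
)

# r == 0: everything is frozen except parameters whose names match the regexes.
frozen = apply_lora(
    transformers.AutoModelForCausalLM.from_pretrained("gpt2"),
    {"r": 0, "unfreeze_layers": [r"transformer\.h\.11\."]},
)

Because apply_lora pops `unfreeze_layers` out of the dict it receives, pass a copy if the same config needs to be reused across calls.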