in src/peft/utils/other.py [0:0]
def set_additional_trainable_modules(model, peft_config, model_config, adapter_name):
"""Handle the resolution of additional trainable modules (also called AuxiliaryTrainingWrapper)
by checking the config if such modules are requested and adding them to the model.
Currently trainable tokens and modules to save are considered additional trainable modules.
"""
if getattr(peft_config, "modules_to_save", None) is not None:
# this may add a new ModulesToSaveWrapper
_set_trainable(model, adapter_name, module_names=getattr(peft_config, "modules_to_save", None))
if getattr(peft_config, "trainable_token_indices", None) is not None:
if isinstance(peft_config.trainable_token_indices, dict):
target_layers = peft_config.trainable_token_indices
else:
layer_name = _get_input_embeddings_name(model, "embed_tokens")
target_layers = {layer_name: peft_config.trainable_token_indices}
        modules_to_save = getattr(peft_config, "modules_to_save", None)
        if modules_to_save is not None:
            # a layer must not be marked for full training (modules_to_save) and for partial,
            # token-level training (trainable_token_indices) at the same time
            for target_layer in target_layers:
                if target_layer in modules_to_save:
                    raise ValueError(
                        "The embedding layer is already marked to be trained fully, either specify "
                        f'`modules_to_save=[..., "{target_layer}", ...]` or '
                        f"`trainable_tokens={{'{target_layer}': x}}` but not both."
                    )

        for target_layer, token_indices in target_layers.items():
            _set_trainable(
                model,
                adapter_name,
                module_names=[target_layer],
                strict_module_check=True,
                wrapper_cls=TrainableTokensWrapper,
                token_indices=token_indices,
            )

        # The output weights may be tied to the input weights. In that case we tie any module that
        # wants tied weights to the token adapter to make sure that any modification is reflected
        # in the tied layers as well.
        if (
            model_config.get("tie_word_embeddings", False)
            # some models may be misconfigured to have weight tying enabled but don't define tied weights keys
            and model._tied_weights_keys is not None
            and isinstance(model.get_input_embeddings(), TrainableTokensWrapper)
        ):
            # The embedding layer is modified and we want weight tying: derive the module names of the
            # tied layers by stripping the parameter name (e.g. ".weight") from each tied weights key.
            module_keys = [".".join(n.split(".")[:-1]) for n in model._tied_weights_keys]

            token_adapter = model.get_input_embeddings().token_adapter
            _set_trainable(
                model,
                adapter_name,
                module_names=module_keys,
                strict_module_check=True,
                wrapper_cls=TrainableTokensWrapper,
                token_indices=token_adapter.token_indices[adapter_name],
                tied_adapter=token_adapter,
            )
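
The snippet below is not part of other.py; it is a minimal sketch of how the pieces above fit together. The first lines only illustrate the tied-weights key handling; the rest shows a config that would exercise both branches. The model id, module names, and token indices are placeholders, and it assumes a recent PEFT version that supports `trainable_token_indices` plus a base model whose input embedding module is named "embed_tokens" and whose output head is not tied to it (otherwise the tied-adapter branch above also wraps the tied layer).

# Illustration only: how a tied weights key maps to a module key in the branch above.
tied_weights_keys = ["lm_head.weight"]  # hypothetical value of model._tied_weights_keys
module_keys = [".".join(n.split(".")[:-1]) for n in tied_weights_keys]
assert module_keys == ["lm_head"]

# Hedged usage sketch: a LoRA config that triggers both ModulesToSaveWrapper and
# TrainableTokensWrapper when the adapter is injected. All names are placeholders.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("some/causal-lm")  # placeholder model id
config = LoraConfig(
    target_modules=["q_proj", "v_proj"],
    modules_to_save=["lm_head"],                       # trained fully via ModulesToSaveWrapper
    trainable_token_indices={"embed_tokens": [0, 1]},  # only these embedding rows via TrainableTokensWrapper
)
peft_model = get_peft_model(base, config)  # set_additional_trainable_modules runs while the adapter is injected

Because `trainable_token_indices` is passed as a dict here, the list-to-dict fallback via _get_input_embeddings_name is skipped; passing a plain list such as `[0, 1]` would target the input embedding layer automatically.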