in src/peft/peft_model.py [0:0]
def get_layer_status(model: torch.nn.Module) -> list[TunerLayerStatus]:
"""Get the status of each adapter layer in the model.
This function returns a list of `TunerLayerStatus` dataclass instances, each of which contains the following
attributes:
- `name` (`str`):
The name of the adapter layer, e.g. `model.encoder.block.0.layer.0.SelfAttention.q`.
- `module_type` (`str`):
The type of the adapter layer, e.g. `lora.Linear`.
- `enabled` (`bool`):
Whether the adapter layer is enabled.
- `active_adapters` (`list[str]`):
The names of the active adapters, if any, e.g. `["default"]`.
- `merged_adapters` (`list[str]`):
The names of the merged adapters, if any, e.g. `["default"]`.
- `requires_grad` (`dict[str, bool | Literal["irregular"]]`):
The requires_grad status of the parameters for each adapter module. Ideally, it should be either `True` or
`False`. If the requires_grad status is not consistent across all parameters, the value will be set to
`"irregular"`.
- `available_adapters` (`list[str]`):
The names of the available adapters, e.g. `["default"]`.
- `devices` (`dict[str, list[str]]`):
The devices on which the parameters of each adapter are stored, e.g. `{"default": ["cuda"]}`.
Args:
model ([Union[`~PeftModel`, `~transformers.PreTrainedModel`, `nn.Module`]]):
The model to get the adapter layer status from.
Returns:
list[`peft.peft_model.TunerLayerStatus`]:
A list of dataclasses, each containing the status of the corresponding adapter layer.
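Example:
A minimal usage sketch (illustrative only; the exact layer names, adapter keys, and devices depend on the base
model and the adapter config, and importing `get_layer_status` from the top-level `peft` namespace is assumed):
```python
>>> from transformers import AutoModel
>>> from peft import LoraConfig, get_layer_status, get_peft_model
>>> model = AutoModel.from_pretrained("google/flan-t5-small")
>>> model = get_peft_model(model, LoraConfig())
>>> get_layer_status(model)[0]
TunerLayerStatus(name='model.encoder.block.0.layer.0.SelfAttention.q',
                 module_type='lora.Linear',
                 enabled=True,
                 active_adapters=['default'],
                 merged_adapters=[],
                 requires_grad={'default': True},
                 available_adapters=['default'],
                 devices={'default': ['cpu']})
```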
"""
if isinstance(model, PeftModel):
base_model = model.base_model
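# prompt learning methods such as prefix tuning or adaption prompt do not wrap the base model in a BaseTuner,
# so there are no adapter layers whose status could be reported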
if not isinstance(base_model, BaseTuner):
raise TypeError(
"get_layer_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not "
"supported."
)
else:
base_model = model
layer_status: list[TunerLayerStatus] = []
for name, module in base_model.named_modules():
if not isinstance(module, BaseTunerLayer):
continue
# determine whether all submodules/parameters of this module require grad or not
mapping_requires_grad_list: dict[str, list[bool]] = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for key, submodule in adapter_module.items():
for param in submodule.parameters():
mapping_requires_grad_list[key].append(param.requires_grad)
elif isinstance(adapter_module, torch.nn.ParameterDict):
for key, param in adapter_module.items():
mapping_requires_grad_list[key].append(param.requires_grad)
else:
# unexpected container type for adapter parameters; skip it when collecting requires_grad info
pass
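# collapse the collected flags per adapter: all True -> True, all False -> False, mixed -> "irregular"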
def check_irregular(vals: list[bool]) -> bool | Literal["irregular"]:
if all(vals):
return True
if not any(vals):
return False
return "irregular"
requires_grad = {key: check_irregular(vals) for key, vals in mapping_requires_grad_list.items()}
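# collect the device types of all adapter parameters (and other per-adapter params/buffers, e.g. VeRA's) per adapter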
devices_dd = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names + module.other_param_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for key, submodule in adapter_module.items():
devices_dd[key].extend([param.device.type for param in submodule.parameters()])
elif isinstance(adapter_module, torch.nn.ParameterDict) or (
adapter_module.__class__.__name__ == "BufferDict"
): # VeRA
for key, param in adapter_module.items():
devices_dd[key].append(param.device.type)
devices = {key: sorted(set(val)) for key, val in devices_dd.items()}
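# module_type is the prefix of the module's repr, e.g. "lora.Linear(...)" -> "lora.Linear"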
status = TunerLayerStatus(
name=name,
module_type=repr(module).partition("(")[0],
enabled=not module.disable_adapters,
active_adapters=module.active_adapters,
merged_adapters=module.merged_adapters,
requires_grad=requires_grad,
available_adapters=sorted(module._get_available_adapters()),
devices=devices,
)
layer_status.append(status)
if not layer_status:
raise ValueError(
"No adapter layers found in the model, please ensure that it's a PEFT model or that you have PEFT adapters "
"injected in the model."
)
return layer_status