def adapt_peft_config()

in optimum/neuron/models/training/transformations_utils.py [0:0]


    def adapt_peft_config(self, peft_config: PeftConfig, inplace: bool = False) -> PeftConfig:
        """Adapt a PEFT config so it targets the fused GQA QKV projection.

        When all three query/key/value projection layer names appear in the LoRA
        config's ``target_modules``, they are replaced by the single fused
        projection name (``self.gqa_qkv_projection_name``) so LoRA adapters are
        attached to the fused ``GQAQKVColumnParallelLinear`` layer instead.

        Args:
            peft_config: The PEFT config to adapt. Only LoRA configs are supported.
            inplace: If ``False`` (default), the config is deep-copied before any
                mutation so the caller's config is left untouched.

        Returns:
            The adapted PEFT config (a copy unless ``inplace=True``).

        Raises:
            ValueError: If only a strict subset of the Q/K/V projections is in the
                target modules.
            NotImplementedError: If the PEFT type is not LoRA.
        """
        if not inplace:
            peft_config = copy.deepcopy(peft_config)
        if peft_config.peft_type == "LORA":
            linear_names = [self.query_projection_name, self.key_projection_name, self.value_projection_name]
            target_modules = peft_config.target_modules
            # NOTE(review): PEFT allows ``target_modules`` to be a str as well as a
            # set. For the "all-linear" sentinel the ``in`` checks below degrade to
            # substring tests, which is harmless here, but other regex-style string
            # values could misfire — TODO confirm callers only pass sets or
            # "all-linear".
            is_all_linear = target_modules == "all-linear"
            at_least_one_linear_in_target_modules = any(name in target_modules for name in linear_names)
            all_linears_in_target_modules = (
                all(name in target_modules for name in linear_names) or is_all_linear
            )
            if at_least_one_linear_in_target_modules and not all_linears_in_target_modules:
                missing_modules = [name for name in linear_names if name not in target_modules]
                raise ValueError(
                    "If you use GQAQKVColumnParallelLinearSpec, either all linear layers must be in the target modules "
                    "of the PEFT config or none at all. The following linear layers are missing: "
                    f"{', '.join(missing_modules)}."
                )
            # Bug fix: when ``target_modules`` is the "all-linear" sentinel it is a
            # plain str, which has no ``remove``/``add`` — the original code raised
            # AttributeError for such configs. The sentinel already covers every
            # linear layer, so there is nothing to rewrite in that case.
            if all_linears_in_target_modules and not is_all_linear:
                for name in linear_names:
                    target_modules.remove(name)
                target_modules.add(self.gqa_qkv_projection_name)
        else:
            raise NotImplementedError(
                f"PEFT type {peft_config.peft_type} is not supported for the transformation spec {self.__class__.__name__}."
            )
        return peft_config