def adapt_peft_config()

in optimum/neuron/models/training/transformations_utils.py


    # Method of FusedLinearsSpec (a transformation spec); requires `import copy` and
    # `from peft import PeftConfig` at module level.
    def adapt_peft_config(self, peft_config: PeftConfig, inplace: bool = False) -> PeftConfig:
        if not inplace:
            peft_config = copy.deepcopy(peft_config)
        if peft_config.peft_type == "LORA":
            target_modules = peft_config.target_modules
            # `target_modules` is either a set of module names or the literal string
            # "all-linear", PEFT's shorthand for targeting every linear layer.
            at_least_one_linear_in_target_modules = any(name in target_modules for name in self.linear_names)
            all_linears_in_target_modules = (
                all(name in target_modules for name in self.linear_names) or target_modules == "all-linear"
            )
            # Fusing is all-or-nothing: once the linears are merged into a single layer,
            # adapting only a subset of them can no longer be expressed.
            if at_least_one_linear_in_target_modules and not all_linears_in_target_modules:
                missing_modules = [name for name in self.linear_names if name not in target_modules]
                raise ValueError(
                    "If you use FusedLinearsSpec, either all linear layers must be in the target modules of the PEFT "
                    f"config or none at all. The following linear layers are missing: {', '.join(missing_modules)}."
                )
            if all_linears_in_target_modules and target_modules != "all-linear":
                # Swap the individual linear names for the fused layer's name so LoRA is
                # applied to the fused linear. With "all-linear", `target_modules` is a
                # plain string that already covers the fused layer (and has no
                # `remove`/`add` methods), so it is left untouched.
                for name in self.linear_names:
                    target_modules.remove(name)
                target_modules.add(self.fused_linear_name)
        else:
            raise NotImplementedError(
                f"PEFT type {peft_config.peft_type} is not supported for the transformation spec {self.__class__.__name__}."
            )
        return peft_config
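
For illustration, a minimal sketch of the rewrite the happy path performs. The layer
names below are hypothetical stand-ins for a spec's `linear_names` and
`fused_linear_name` (in real code you would call `spec.adapt_peft_config(config)` on a
spec built by the training setup):

    from peft import LoraConfig

    # Hypothetical spec attributes: the spec fuses the q/k/v projections into one layer.
    linear_names = {"q_proj", "k_proj", "v_proj"}
    fused_linear_name = "qkv_proj"

    config = LoraConfig(target_modules=["q_proj", "k_proj", "v_proj", "o_proj"])
    # PEFT normalizes a list-valued `target_modules` to a set, which is what lets the
    # method above call `remove`/`add` on it.
    assert config.target_modules == {"q_proj", "k_proj", "v_proj", "o_proj"}

    # The rewrite performed when every fused linear is targeted:
    for name in linear_names:
        config.target_modules.remove(name)
    config.target_modules.add(fused_linear_name)
    assert config.target_modules == {"o_proj", "qkv_proj"}

A config that targets only some of the fused linears (e.g. `["q_proj", "o_proj"]`)
raises the ValueError above, while `target_modules="all-linear"` is returned unchanged
since it already covers the fused layer.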