src/peft/tuners/lora/bnb.py [228:260]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                for active_adapter in self.active_adapters:
                    if active_adapter not in self.lora_A.keys():
                        continue
                    lora_A = self.lora_A[active_adapter]
                    lora_B = self.lora_B[active_adapter]
                    dropout = self.lora_dropout[active_adapter]
                    scaling = self.scaling[active_adapter]

                    # Outside of autocast, the LoRA weights may not share the dtype of the
                    # base layer output, so the input is cast to the LoRA dtype here and the
                    # adapter output is cast back to result.dtype before being accumulated.
                    requires_conversion = not torch.is_autocast_enabled()
                    if requires_conversion:
                        expected_dtype = result.dtype
                        x = self._cast_input_dtype(x, lora_A.weight.dtype)

                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        output = lora_B(lora_A(dropout(x))) * scaling
                        if requires_conversion:
                            output = output.to(expected_dtype)
                        result = result + output
                    else:
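                        # A LoRA variant is registered for this adapter; delegate to it so it
                        # can add its own contribution to result.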
                        result = self.lora_variant[active_adapter].forward(
                            self,
                            active_adapter=active_adapter,
                            x=x,
                            result=result,
                        )
                        if requires_conversion:
                            result = result.to(expected_dtype)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "lora." + rep
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



src/peft/tuners/lora/bnb.py [488:520]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                for active_adapter in self.active_adapters:
                    if active_adapter not in self.lora_A.keys():
                        continue
                    lora_A = self.lora_A[active_adapter]
                    lora_B = self.lora_B[active_adapter]
                    dropout = self.lora_dropout[active_adapter]
                    scaling = self.scaling[active_adapter]

                    # When autocast is disabled, x is cast to the LoRA weight dtype and the
                    # adapter output is cast back to the dtype of the base layer result.
                    requires_conversion = not torch.is_autocast_enabled()
                    if requires_conversion:
                        expected_dtype = result.dtype
                        x = self._cast_input_dtype(x, lora_A.weight.dtype)

                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        output = lora_B(lora_A(dropout(x))) * scaling
                        if requires_conversion:
                            output = output.to(expected_dtype)
                        result = result + output
                    else:
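                        # Delegate to the registered LoRA variant, which updates result itself.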
                        result = self.lora_variant[active_adapter].forward(
                            self,
                            active_adapter=active_adapter,
                            x=x,
                            result=result,
                        )
                        if requires_conversion:
                            result = result.to(expected_dtype)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "lora." + rep
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
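
For context, a minimal sketch of how these duplicated forward paths are reached: load a transformers model with bitsandbytes quantization and wrap it with a LoRA adapter via peft, so the targeted linear layers become the lora.Linear8bitLt / lora.Linear4bit wrappers defined in this file. The model id, target modules, and hyperparameters below are illustrative only, and the snippet assumes a CUDA machine with bitsandbytes installed.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
    from peft import LoraConfig, get_peft_model

    model_id = "facebook/opt-125m"  # illustrative; any causal LM with q_proj/v_proj works
    base = AutoModelForCausalLM.from_pretrained(
        model_id,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        device_map="auto",
    )

    # Wrapping with LoRA replaces the targeted bnb Linear8bitLt modules with lora.Linear8bitLt.
    config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
    model = get_peft_model(base, config)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
    with torch.no_grad():
        out = model(**inputs)  # each wrapped layer runs the forward logic excerpted above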



