src/peft/tuners/oft/bnb.py [152:183]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        def get_delta_weight(self, adapter):
            return self.oft_R[adapter].get_weight()

        def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
            if self.disable_adapters:
                if self.merged:
                    self.unmerge()
                result = self.base_layer(x, *args, **kwargs)
            elif self.merged:
                result = self.base_layer(x, *args, **kwargs)
            else:
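                # Rotate the input with each active OFT adapter, then run the quantized base layer on the rotated input.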
                for active_adapter in self.active_adapters:
                    if active_adapter not in self.oft_R.keys():
                        continue
                    oft_R = self.oft_R[active_adapter]

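                    # Outside autocast, run the rotation in the adapter weight's dtype and cast the result back to the input dtype.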
                    requires_conversion = not torch.is_autocast_enabled()
                    if requires_conversion:
                        expected_dtype = x.dtype
                        x = self._cast_input_dtype(x, oft_R.weight.dtype)

                    x = oft_R(x)
                    if requires_conversion:
                        x = x.to(expected_dtype)

                result = self.base_layer(x, *args, **kwargs)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "oft." + rep
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
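
For orientation, these OFT-on-bitsandbytes overrides only come into play when the adapter is attached to a model whose linear layers were loaded through bitsandbytes quantization. Below is a minimal sketch of that wiring, assuming the standard transformers/PEFT entry points; the checkpoint name and target_modules are illustrative placeholders, and the OFT rank/block-size options are left at their defaults.

    # Sketch: load a base model in 8-bit via bitsandbytes, then attach an OFT adapter
    # so the quantized Linear modules matched by target_modules are wrapped with the
    # oft bnb layer whose forward() is shown above.
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    from peft import OFTConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",  # placeholder checkpoint
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        device_map="auto",
    )
    config = OFTConfig(target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
    model = get_peft_model(base, config)  # replaces matched layers with OFT-wrapped ones
    model.print_trainable_parameters()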



src/peft/tuners/oft/bnb.py [328:366]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        def get_delta_weight(self, adapter):
            return self.oft_R[adapter].get_weight()

        def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
            if self.disable_adapters:
                if self.merged:
                    self.unmerge()
                result = self.base_layer(x, *args, **kwargs)
            elif self.merged:
                result = self.base_layer(x, *args, **kwargs)
            else:
                # Note: the 4-bit LoRA layer defensively clones the base layer output
                # here, as per Tim Dettmers, because backprop can fail on a manipulated
                # view (an issue newer PyTorch versions may have fixed). OFT rotates the
                # input *before* calling the base layer, so there is no `result` to
                # clone at this point and the clone is omitted.

                for active_adapter in self.active_adapters:
                    if active_adapter not in self.oft_R.keys():
                        continue
                    oft_R = self.oft_R[active_adapter]

                    requires_conversion = not torch.is_autocast_enabled()
                    if requires_conversion:
                        expected_dtype = x.dtype
                        x = self._cast_input_dtype(x, oft_R.weight.dtype)

                    x = oft_R(x)
                    if requires_conversion:
                        x = x.to(expected_dtype)

                result = self.base_layer(x, *args, **kwargs)

            return result

        def __repr__(self) -> str:
            rep = super().__repr__()
            return "oft." + rep
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
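
The three branches of forward() above map onto the usual PeftModel controls. A rough sketch, assuming `model` is the PeftModel from the previous sketch, `input_ids` is any tokenized batch on the right device, and the OFT tuner exposes the standard merge_adapter()/unmerge_adapter() helpers:

    out = model(input_ids)            # default path: x is rotated by oft_R, then base_layer runs

    with model.disable_adapter():     # disable_adapters branch; unmerges first if weights were merged
        out_base = model(input_ids)

    model.merge_adapter()             # folds the oft_R rotation into the quantized base weight
    out_merged = model(input_ids)     # merged branch: forward reduces to the plain base_layer call
    model.unmerge_adapter()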



