in optimum/neuron/peft/tuners/lora/layer.py
def reset_lora_parameters(self, adapter_name, init_lora_weights):
    if init_lora_weights is False:
        return
    if adapter_name in self.lora_A.keys():
        if init_lora_weights is True:
            # initialize A the same way as the default for nn.Linear and B to zero
            # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
            nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
        elif init_lora_weights.lower() == "gaussian":
            nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
        else:
            raise ValueError(f"Unknown initialization {init_lora_weights=}")
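        # lora_B is always zeroed so that the low-rank update B @ A is zero and the
        # adapter is a no-op right after initialization. The base layer holds either a
        # single fused qkv weight or separate q/k/v weights, so zero the matching tensors.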
        if self.base_layer.fuse_qkv:
            nn.init.zeros_(self.lora_B[adapter_name].weight_qkv)
            if self.lora_bias[adapter_name]:
                nn.init.zeros_(self.lora_B[adapter_name].bias_qkv)
        else:
            nn.init.zeros_(self.lora_B[adapter_name].weight_q)
            nn.init.zeros_(self.lora_B[adapter_name].weight_k)
            nn.init.zeros_(self.lora_B[adapter_name].weight_v)
            if self.lora_bias[adapter_name]:
                nn.init.zeros_(self.lora_B[adapter_name].bias_q)
                nn.init.zeros_(self.lora_B[adapter_name].bias_k)
                nn.init.zeros_(self.lora_B[adapter_name].bias_v)
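
For context, below is a minimal self-contained sketch of the same initialization convention applied to a plain nn.Linear, without the fused-QKV handling above. The class and argument names (MinimalLoraLinear, rank) are illustrative only and not part of the optimum-neuron or PEFT API: lora_A receives a non-trivial init while lora_B is zeroed, so B @ A starts at zero and the adapted layer initially computes exactly what the frozen base layer does.

import math

import torch
from torch import nn


class MinimalLoraLinear(nn.Module):
    # Illustrative LoRA wrapper around a frozen nn.Linear; not the optimum-neuron class.
    def __init__(self, base_layer: nn.Linear, rank: int = 8, init_lora_weights=True):
        super().__init__()
        self.base_layer = base_layer
        self.base_layer.requires_grad_(False)
        self.rank = rank
        self.lora_A = nn.Linear(base_layer.in_features, rank, bias=False)
        self.lora_B = nn.Linear(rank, base_layer.out_features, bias=False)
        self.reset_lora_parameters(init_lora_weights)

    def reset_lora_parameters(self, init_lora_weights):
        if init_lora_weights is False:
            return
        if init_lora_weights is True:
            # Same Kaiming-uniform init that nn.Linear uses for its own weight by default.
            nn.init.kaiming_uniform_(self.lora_A.weight, a=math.sqrt(5))
        elif init_lora_weights.lower() == "gaussian":
            nn.init.normal_(self.lora_A.weight, std=1 / self.rank)
        else:
            raise ValueError(f"Unknown initialization {init_lora_weights=}")
        # B is zeroed so the low-rank update B @ A contributes nothing at initialization.
        nn.init.zeros_(self.lora_B.weight)

    def forward(self, x):
        return self.base_layer(x) + self.lora_B(self.lora_A(x))


# At init the adapter is a no-op: the wrapped layer matches the frozen base layer.
layer = MinimalLoraLinear(nn.Linear(16, 32), rank=4)
x = torch.randn(2, 16)
assert torch.allclose(layer(x), layer.base_layer(x))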