# src/lighteval/models/transformers/adapter_model.py
import logging

import torch
import transformers
from peft import PeftModel
from transformers import AutoModelForCausalLM

from lighteval.models.utils import _get_dtype  # assumed import path; may vary across lighteval versions

logger = logging.getLogger(__name__)


def _create_auto_model(self) -> transformers.PreTrainedModel:
    """Loads a PEFT adapter, merges it into its base model, and returns the merged transformers model."""
    torch_dtype = _get_dtype(self.config.dtype)
    model_parallel, max_memory, device_map = self.init_model_parallel(self.config.model_parallel)
    self.config.model_parallel = model_parallel
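
    # `pretrained` points at the PEFT adapter weights; the merged full model is
    # cached on disk next to them under a "-adapter-applied" suffix.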
    adapter_weights = self.config.pretrained
    merged_path = f"{adapter_weights}-adapter-applied"

    if self.config.dtype == "4bit":
        from transformers import BitsAndBytesConfig

        quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    elif self.config.dtype == "8bit":
        from transformers import BitsAndBytesConfig

        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
    else:
        quantization_config = None
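
    # Only the local main process merges and saves the adapter; the other ranks
    # skip this block and load the merged checkpoint from disk below.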
    if self.accelerator is None or self.accelerator.is_local_main_process:
logger.info(f"Loading model from {adapter_weights} and applying adapter to {self.config.base_model}")
base = AutoModelForCausalLM.from_pretrained(
self.config.base_model, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
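        # fp16 + low_cpu_mem_usage keep peak memory down during the merge; the
        # requested dtype/quantization is applied on the reload below.
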
        # resize model for adapters with added tokens
        token_diff = len(self._tokenizer) - base.config.vocab_size
        if token_diff != 0:
            if token_diff > 0:
                logger.info(
                    f"You're using the adapter model's tokenizer, which has more tokens than the base model. Adding {token_diff} token(s)."
                )
            else:
                logger.info(
                    f"You're using the adapter model's tokenizer, which has fewer tokens than the base model. Removing {abs(token_diff)} token(s)."
                )
            base.resize_token_embeddings(len(self._tokenizer))
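
        # PeftModel wraps the resized base; merge_and_unload then folds the
        # adapter deltas into the base weights and returns a plain transformers model.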
        # TODO: pass self.config.revision through to PeftModel.from_pretrained
        model = PeftModel.from_pretrained(base, adapter_weights)
        model = model.merge_and_unload()
logger.info("Saving model with adapter applied")
base.save_pretrained(merged_path)
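
    # NOTE (assumption): non-main ranks fall through to the load below immediately;
    # a barrier such as accelerator.wait_for_everyone() may be needed so they do
    # not read merged_path before the save above finishes.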
logger.info(f"Loading model from {merged_path}")
    model = AutoModelForCausalLM.from_pretrained(
        merged_path,
        max_memory=max_memory,
        device_map=device_map,
        torch_dtype=torch_dtype,
        trust_remote_code=self.config.trust_remote_code,
        quantization_config=quantization_config,
    )

    return model