in docker_images/peft/app/pipelines/text_generation.py [0:0]
def __init__(self, model_id: str):
    use_auth_token = os.getenv("HF_API_TOKEN")
    # Fetch the model metadata from the Hub; the "peft" config entry points at the base model.
    model_data = model_info(model_id, token=use_auth_token)
    config_dict = model_data.config.get("peft")
    if config_dict:
        base_model_id = config_dict["base_model_name_or_path"]
        if base_model_id:
            # Load the tokenizer and base model referenced by the adapter config.
            self.tokenizer = AutoTokenizer.from_pretrained(base_model_id)
            model = AutoModelForCausalLM.from_pretrained(
                base_model_id, device_map="auto"
            )
            # Wrap the base model with the PEFT adapter weights.
            self.model = PeftModel.from_pretrained(model, model_id)
        else:
            raise ValueError("There's no base model ID in the configuration file.")
    else:
        raise ValueError(
            "The config file for this model does not exist or is invalid."
        )