in ml/eval/generate.py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import setup_chat_format  # assumed source of setup_chat_format

# Assumed module-level device selection; the snippet references `device`
# without defining it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def load_model_and_tokenizer(model_path, trust_remote_code=False, use_auth_token=False):
    """Load a causal LM and its tokenizer from a local path or the Hugging Face Hub."""
    model = AutoModelForCausalLM.from_pretrained(
        model_path, trust_remote_code=trust_remote_code, use_auth_token=use_auth_token,
    ).to(device)
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, trust_remote_code=trust_remote_code, use_auth_token=use_auth_token
    )
    # Ensure a pad token exists so batched tokenization/generation does not fail.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    # Install a default chat template (ChatML via trl) if the tokenizer lacks one.
    if tokenizer.chat_template is None:
        model, tokenizer = setup_chat_format(model, tokenizer)
    return model, tokenizer
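
For context, a minimal usage sketch under the assumptions above (the checkpoint name is only an illustration, not one used by this repo):

# Hypothetical example checkpoint; substitute any causal LM path.
model, tokenizer = load_model_and_tokenizer("HuggingFaceTB/SmolLM2-135M")
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Say hello."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))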