in optimum/quanto/models/transformers_models.py
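# Module-level imports assumed from the surrounding file context (a sketch;
# the exact import paths below are a best guess, not copied verbatim):
import json
import os
from typing import Union

from huggingface_hub import snapshot_download
from transformers import AutoConfig
from transformers.modeling_utils import get_checkpoint_shard_files, load_state_dict
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available

from ..quantize import requantize
from .shared_dict import ShardedStateDict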
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
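    """Reload a quantized transformers model from a local directory or the Hub.

    Resolves `pretrained_model_name_or_path` to a working directory
    (downloading a Hub snapshot if needed), reads the serialized quantization
    map, rebuilds an empty model from its config, then requantizes it and
    loads the safetensors weights. (Docstring added for clarity; wording is
    not from the original source.)
    """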
    if cls.auto_class is None:
        raise ValueError(
            f"Quantized models cannot be reloaded using {cls}: use a specialized quantized class such as QuantizedModelForCausalLM instead."
        )
    if not is_accelerate_available():
        raise ValueError("Reloading a quantized transformers model requires the accelerate library.")
    from accelerate import init_empty_weights  # deferred import: accelerate is an optional dependency
    if os.path.isdir(pretrained_model_name_or_path):
        working_dir = pretrained_model_name_or_path
    else:
        working_dir = snapshot_download(pretrained_model_name_or_path, **kwargs)
    # Look for a quantization map
    qmap_path = os.path.join(working_dir, cls._qmap_name())
    if not os.path.exists(qmap_path):
        raise ValueError(
            f"No quantization map found in {pretrained_model_name_or_path}: is this a quantized model?"
        )
    with open(qmap_path, "r", encoding="utf-8") as f:
        qmap = json.load(f)
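    # Illustrative only (the exact schema is an assumption, not taken from the
    # library): the map typically associates each quantized module name with
    # its weight/activation qtypes, e.g.
    #   {"model.layers.0.self_attn.q_proj": {"weights": "qint4", "activations": "none"}}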
    # Create an empty model
    config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
    with init_empty_weights():
        model = cls.auto_class.from_config(config)
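    # Note (added): under init_empty_weights the parameters live on the meta
    # device, so no memory is allocated until real tensors are loaded below.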
    # Look for the index of a sharded checkpoint
    checkpoint_file = os.path.join(working_dir, SAFE_WEIGHTS_INDEX_NAME)
    if os.path.exists(checkpoint_file):
        # Convert the checkpoint path to a list of shards
        checkpoint_file, sharded_metadata = get_checkpoint_shard_files(working_dir, checkpoint_file)
        # Create a mapping for the sharded safetensors files
        state_dict = ShardedStateDict(working_dir, sharded_metadata["weight_map"])
    else:
        # Look for a single checkpoint file
        checkpoint_file = os.path.join(working_dir, SAFE_WEIGHTS_NAME)
        if not os.path.exists(checkpoint_file):
            raise ValueError(f"No safetensors weights found in {pretrained_model_name_or_path}.")
        # Get state_dict from model checkpoint
        state_dict = load_state_dict(checkpoint_file)
    # Requantize and load quantized weights from state_dict
    requantize(model, state_dict=state_dict, quantization_map=qmap)
    if getattr(model.config, "tie_word_embeddings", True):
        # Tie output weight embeddings to input weight embeddings
        # Note that if they were quantized they would NOT be tied
        model.tie_weights()
    # Set model in evaluation mode as it is done in transformers
    model.eval()
    return cls(model)
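
# Usage sketch (illustrative; assumes a checkpoint previously saved by the
# matching save_pretrained() of a quantized model):
#
#   from optimum.quanto import QuantizedModelForCausalLM
#   qmodel = QuantizedModelForCausalLM.from_pretrained("./my-quantized-llm")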