in optimum/exporters/ipex/model_patcher.py [0:0]
def _patch_model(model):
    """Apply the IPEX-specific patch for *model* based on its ``model_type``.

    Verifies that the installed ``ipex`` and ``transformers`` versions are
    within the supported range, then dispatches to the per-architecture
    patcher. Models whose ``model_type`` has no registered patcher are
    returned unmodified.

    Args:
        model: A transformers model instance exposing ``model.config.model_type``.

    Returns:
        The (possibly patched) model.

    Raises:
        ImportError: If the installed ipex or transformers version is outside
            the verified range.
    """
    if is_ipex_version("<", _IPEX_MINIMUM_VERSION_FOR_PATCHING):
        # NOTE: message previously said "llama model patching" even though this
        # function patches several architectures; made generic.
        raise ImportError(f"Only ipex version >= {_IPEX_MINIMUM_VERSION_FOR_PATCHING} supports model patching")
    if is_transformers_version("<", _TRANSFORMERS_MIN_VERSION) or is_transformers_version(
        ">", _TRANSFORMERS_MAX_VERSION
    ):
        raise ImportError(
            f"Only transformers versions {_TRANSFORMERS_MIN_VERSION} ~ {_TRANSFORMERS_MAX_VERSION} are verified."
        )
    # Dispatch table: one patcher per supported architecture. Unknown types
    # fall through and the model is returned as-is (same as the original
    # if/elif chain, which had no final else).
    patchers = {
        "llama": _patch_llama_model,
        "falcon": _patch_falcon_model,
        "gpt2": _patch_gpt2_model,
        "qwen2": _patch_qwen2_model,
        "mistral": _patch_mistral_model,
        "bert": _patch_bert_model,
        "vit": _patch_vit_model,
    }
    patcher = patchers.get(model.config.model_type)
    if patcher is not None:
        model = patcher(model)
    return model