in optimum/exporters/executorch/tasks/masked_lm.py [0:0]
def load_masked_lm_model(model_name_or_path: str, **kwargs) -> MaskedLMExportableModule:
"""
Loads a seq2seq language model for conditional text generation and registers it under the task
'fill-mask' using Hugging Face's `AutoModelForMaskedLM`.
Args:
model_name_or_path (str):
Model ID on huggingface.co or path on disk to the model repository to export. For example:
`model_name_or_path="google-bert/bert-base-uncased"` or `mode_name_or_path="/path/to/model_folder`
**kwargs:
Additional configuration options for the model.
Returns:
MaskedLMExportableModule:
An instance of `MaskedLMExportableModule` for exporting and lowering to ExecuTorch.
"""
    eager_model = AutoModelForMaskedLM.from_pretrained(model_name_or_path, **kwargs).to("cpu").eval()
    return MaskedLMExportableModule(eager_model)
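

# Minimal usage sketch (illustrative addition, not from the original module). It relies
# only on what the snippet above defines: the example model ID from the docstring and the
# `MaskedLMExportableModule` return type. Any further export/lowering calls on the
# returned wrapper are assumptions and are not shown here.
if __name__ == "__main__":
    # Load the eager checkpoint on CPU and wrap it for ExecuTorch export.
    module = load_masked_lm_model("google-bert/bert-base-uncased")
    print(type(module).__name__)  # expected: "MaskedLMExportableModule"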