in optimum/amd/ryzenai/quantization.py [0:0]
def __init__(self, onnx_model_path: Path, config: Optional["PretrainedConfig"] = None):
    """
    Args:
        onnx_model_path (`Path`):
            Path to the onnx model files you want to quantize.
        config (`Optional[PretrainedConfig]`, defaults to `None`):
            The configuration of the model.
    """
    super().__init__()
    self.onnx_model_path = onnx_model_path
    self.config = config
    if self.config is not None:
        return
    # No config was supplied: best-effort attempt to recover one from the
    # directory containing the ONNX model. Failure is non-fatal — we only
    # warn, since the quantizer can still run without a config.
    try:
        self.config = PretrainedConfig.from_pretrained(self.onnx_model_path.parent)
    except OSError:
        LOGGER.warning(
            f"Could not load the config for {self.onnx_model_path} automatically, this might make "
            "the quantized model harder to use because it will not be able to be loaded by an RyzenAIModel without "
            "having to specify the configuration explicitly."
        )