in optimum/furiosa/quantization.py
def __init__(self, model_path: Path, config: Optional["PretrainedConfig"] = None):
    """
    Args:
        model_path (`Path`):
            Path to the ONNX model file to quantize.
        config (`Optional[PretrainedConfig]`, *optional*):
            The configuration of the model.
    """
    super().__init__()
    self.model_path = model_path
    self.config = config
    if self.config is None:
        # Fall back to loading the config from the directory containing the ONNX model.
        try:
            self.config = AutoConfig.from_pretrained(self.model_path.parent)
        except OSError:
            LOGGER.warning(
                f"Could not load the config for {self.model_path} automatically. This might make "
                "the quantized model harder to use, because it will not be loadable by a FuriosaAIModel "
                "without explicitly specifying the configuration."
            )
    self._calibrator = None
    self._calibration_config = None
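
For context, a minimal usage sketch of this constructor. It assumes the enclosing class is `FuriosaAIQuantizer` (inferred from the `FuriosaAIModel` reference in the warning) and a hypothetical directory layout with a `config.json` saved next to the exported ONNX file, which is what lets the `AutoConfig.from_pretrained(self.model_path.parent)` fallback succeed:

```python
from pathlib import Path

from optimum.furiosa.quantization import FuriosaAIQuantizer  # assumed class name

# Hypothetical layout: exported_model/ contains model.onnx and config.json.
# The config is resolved automatically from the parent directory of the model.
quantizer = FuriosaAIQuantizer(Path("exported_model/model.onnx"))

# If no config.json is present, pass the configuration explicitly to avoid
# the OSError fallback and the resulting warning:
# quantizer = FuriosaAIQuantizer(Path("exported_model/model.onnx"), config=my_config)
```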