def run()

in optimum/commands/export/openvino.py [0:0]


    def run(self):
        from ...exporters.openvino.__main__ import infer_task, main_export, maybe_convert_tokenizers
        from ...exporters.openvino.utils import save_preprocessors
        from ...intel.openvino.configuration import _DEFAULT_4BIT_WQ_CONFIG, OVConfig, get_default_quantization_config

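        # Resolve the library name: use the CLI value when given, otherwise infer it from the model
        # files; an inferred `sentence_transformers` is downgraded to `transformers` with a warning.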
        if self.args.library is None:
            # TODO: add revision, subfolder and token to args
            library_name = _infer_library_from_model_name_or_path(
                model_name_or_path=self.args.model, cache_dir=self.args.cache_dir
            )
            if library_name == "sentence_transformers":
                logger.warning(
                    "Library name is not specified. There are multiple possible variants: `sentence_transformers`, `transformers`."
                    "`transformers` will be selected. If you want to load your model with the `sentence-transformers` library instead, please set --library sentence_transformers"
                )
                library_name = "transformers"
        else:
            library_name = self.args.library

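        # Build the OVConfig: none when neither --weight-format nor --quant-mode is provided,
        # a dtype-only config for fp16/fp32, otherwise an NNCF-based quantization config.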
        if self.args.weight_format is None and self.args.quant_mode is None:
            ov_config = None
            if not no_compression_parameter_provided(self.args):
                raise ValueError(
                    "Some compression parameters are provided, but the weight format is not specified. "
                    "Please provide it with --weight-format argument."
                )
            if not no_quantization_parameter_provided(self.args):
                raise ValueError(
                    "Some quantization parameters are provided, but the quantization mode is not specified. "
                    "Please provide it with --quant-mode argument."
                )
        elif self.args.weight_format in {"fp16", "fp32"}:
            ov_config = OVConfig(dtype=self.args.weight_format)
        else:
            if not is_nncf_available():
                raise ImportError("Applying quantization requires nncf, please install it with `pip install nncf`")

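            # Check whether a predefined quantization config is registered for this model id and the
            # requested weight format / quantization mode.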
            default_quantization_config = get_default_quantization_config(
                self.args.model, self.args.weight_format, self.args.quant_mode
            )
            if self.args.weight_format is not None:
                # For int4 quantization, if no compression parameter is provided, use the default config if one exists
                if no_compression_parameter_provided(self.args) and self.args.weight_format == "int4":
                    if default_quantization_config is not None:
                        quantization_config = default_quantization_config
                        log_message = (
                            f"Applying the default quantization config for {self.args.model}: {quantization_config}."
                        )
                    else:
                        quantization_config = _DEFAULT_4BIT_WQ_CONFIG
                        log_message = f"Applying a default quantization config: {quantization_config}."
                    logger.info(log_message)
                else:
                    quantization_config = prepare_wc_config(self.args, _DEFAULT_4BIT_WQ_CONFIG)
            else:
                if no_quantization_parameter_provided(self.args) and default_quantization_config is not None:
                    quantization_config = default_quantization_config
                    logger.info(
                        f"Applying the default quantization config for {self.args.model}: {quantization_config}."
                    )
                else:
                    if self.args.dataset is None:
                        raise ValueError(
                            "Dataset is required for full quantization. Please provide it with --dataset argument."
                        )
                    if self.args.quant_mode in ["nf4_f8e4m3", "nf4_f8e5m2", "int4_f8e4m3", "int4_f8e5m2"]:
                        if library_name == "diffusers":
                            raise NotImplementedError("Mixed precision quantization isn't supported for diffusers.")

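                        # Mixed-precision mode, e.g. "int4_f8e4m3": the first dtype is applied as
                        # weight compression, the second as full quantization.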
                        wc_config = prepare_wc_config(self.args, _DEFAULT_4BIT_WQ_CONFIG)
                        wc_dtype, q_dtype = self.args.quant_mode.split("_")
                        wc_config["dtype"] = wc_dtype

                        q_config = prepare_q_config(self.args)
                        q_config["dtype"] = q_dtype

                        quantization_config = {
                            "weight_quantization_config": wc_config,
                            "full_quantization_config": q_config,
                            "num_samples": self.args.num_samples,
                            "dataset": self.args.dataset,
                        }
                    else:
                        quantization_config = prepare_q_config(self.args)
            quantization_config["trust_remote_code"] = self.args.trust_remote_code
            ov_config = OVConfig(quantization_config=quantization_config)

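        # Choose the export path: data-aware quantization (a dataset is provided) is run through the
        # matching OVModel / OV pipeline class below; the remaining cases go through main_export().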
        quantization_config = ov_config.quantization_config if ov_config else None
        quantize_with_dataset = quantization_config and getattr(quantization_config, "dataset", None) is not None
        task = infer_task(self.args.task, self.args.model, library_name=library_name)
        # in some cases automatic task detection for multimodal models gives incorrect results
        if self.args.task == "auto" and library_name == "transformers":
            from transformers import AutoConfig

            from ...exporters.openvino.utils import MULTI_MODAL_TEXT_GENERATION_MODELS

            config = AutoConfig.from_pretrained(
                self.args.model,
                cache_dir=self.args.cache_dir,
                trust_remote_code=self.args.trust_remote_code,
            )
            if getattr(config, "model_type", "").replace("_", "-") in MULTI_MODAL_TEXT_GENERATION_MODELS:
                task = "image-text-to-text"

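        # Diffusers pipelines quantized with a dataset are loaded through the corresponding OV pipeline
        # class, selected from the `_class_name` entry of the pipeline config.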
        if library_name == "diffusers" and quantize_with_dataset:
            if not is_diffusers_available():
                raise ValueError(DIFFUSERS_IMPORT_ERROR.format("Export of diffusers models"))

            from diffusers import DiffusionPipeline

            diffusers_config = DiffusionPipeline.load_config(self.args.model)
            class_name = diffusers_config.get("_class_name", None)

            if class_name == "LatentConsistencyModelPipeline":
                from optimum.intel import OVLatentConsistencyModelPipeline

                model_cls = OVLatentConsistencyModelPipeline

            elif class_name == "StableDiffusionXLPipeline":
                from optimum.intel import OVStableDiffusionXLPipeline

                model_cls = OVStableDiffusionXLPipeline
            elif class_name == "StableDiffusionPipeline":
                from optimum.intel import OVStableDiffusionPipeline

                model_cls = OVStableDiffusionPipeline
            elif class_name == "StableDiffusion3Pipeline":
                from optimum.intel import OVStableDiffusion3Pipeline

                model_cls = OVStableDiffusion3Pipeline
            elif class_name == "FluxPipeline":
                from optimum.intel import OVFluxPipeline

                model_cls = OVFluxPipeline
            elif class_name == "SanaPipeline":
                from optimum.intel import OVSanaPipeline

                model_cls = OVSanaPipeline
            elif class_name == "SaneSprintPipeline":
                from optimum.intel import OVSanaSprintPipeline

                model_cls = OVSanaSprintPipeline

            else:
                raise NotImplementedError(f"Quantization isn't supported for class {class_name}.")

            model = model_cls.from_pretrained(self.args.model, export=True, quantization_config=quantization_config)
            model.save_pretrained(self.args.output)
            if not self.args.disable_convert_tokenizer:
                maybe_convert_tokenizers(library_name, self.args.output, model, task=task)
        elif (
            quantize_with_dataset
            and (
                task in ["fill-mask", "zero-shot-image-classification"]
                or task.startswith("text-generation")
                or task.startswith("automatic-speech-recognition")
                or task.startswith("feature-extraction")
            )
            or (task == "image-text-to-text" and quantization_config is not None)
        ):
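            # Select the OVModel class matching the task (and, for feature extraction, the library).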
            if task.startswith("text-generation"):
                from optimum.intel import OVModelForCausalLM

                model_cls = OVModelForCausalLM
            elif task == "image-text-to-text":
                from optimum.intel import OVModelForVisualCausalLM

                model_cls = OVModelForVisualCausalLM
            elif "automatic-speech-recognition" in task:
                from optimum.intel import OVModelForSpeechSeq2Seq

                model_cls = OVModelForSpeechSeq2Seq
            elif task.startswith("feature-extraction") and library_name == "transformers":
                from ...intel import OVModelForFeatureExtraction

                model_cls = OVModelForFeatureExtraction
            elif task.startswith("feature-extraction") and library_name == "sentence_transformers":
                from ...intel import OVSentenceTransformer

                model_cls = OVSentenceTransformer
            elif task == "fill-mask":
                from ...intel import OVModelForMaskedLM

                model_cls = OVModelForMaskedLM
            elif task == "zero-shot-image-classification":
                from ...intel import OVModelForZeroShotImageClassification

                model_cls = OVModelForZeroShotImageClassification
            else:
                raise NotImplementedError(
                    f"Unable to find a matching model class for the task={task} and library_name={library_name}."
                )

            # In this case, an instance of the model class is required to apply quantization
            model = model_cls.from_pretrained(
                self.args.model,
                export=True,
                quantization_config=quantization_config,
                stateful=not self.args.disable_stateful,
                trust_remote_code=self.args.trust_remote_code,
                variant=self.args.variant,
                cache_dir=self.args.cache_dir,
            )
            model.save_pretrained(self.args.output)

            preprocessors = maybe_load_preprocessors(self.args.model, trust_remote_code=self.args.trust_remote_code)
            save_preprocessors(preprocessors, model.config, self.args.output, self.args.trust_remote_code)
            if not self.args.disable_convert_tokenizer:
                maybe_convert_tokenizers(library_name, self.args.output, preprocessors=preprocessors, task=task)
        else:
            # TODO: add input shapes
            main_export(
                model_name_or_path=self.args.model,
                output=self.args.output,
                task=self.args.task,
                framework=self.args.framework,
                cache_dir=self.args.cache_dir,
                trust_remote_code=self.args.trust_remote_code,
                pad_token_id=self.args.pad_token_id,
                ov_config=ov_config,
                stateful=not self.args.disable_stateful,
                convert_tokenizer=not self.args.disable_convert_tokenizer,
                library_name=library_name,
                variant=self.args.variant,
                model_kwargs=self.args.model_kwargs,
                # **input_shapes,
            )
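
For context, when a calibration dataset is supplied for a text-generation model, the quantize-with-dataset branch above is roughly equivalent to the following programmatic use of optimum-intel. This is a minimal sketch: the model id, dataset name, bit width and output directory are illustrative placeholders, not values taken from this file.

    from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

    # Illustrative data-aware 4-bit weight compression, mirroring the OVModelForCausalLM path in run().
    quantization_config = OVWeightQuantizationConfig(bits=4, dataset="wikitext2")  # placeholder settings
    model = OVModelForCausalLM.from_pretrained(
        "gpt2",  # placeholder model id
        export=True,
        quantization_config=quantization_config,
    )
    model.save_pretrained("gpt2_int4_ov")  # corresponds to self.args.output in run()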