# get_mlflow_convertor()
#
# in assets/training/model_management/src/azureml/model/mgmt/processors/factory.py [0:0]


def get_mlflow_convertor(model_framework, model_dir, output_dir, temp_dir, translate_params):
    """Instantiate and return MLflow convertor.

    Selects the factory matching (model_framework, task[, model_id prefix]) and
    delegates to its ``create_mlflow_convertor``. Raises ``Exception`` when the
    combination is not supported for MLflow conversion.
    """
    task = translate_params["task"]
    model_id = translate_params.get("model_id")
    logger.info(f"Model task:{task}, Model ID:{model_id}")
    # Normalize after logging so the log shows the raw (possibly missing) value,
    # while the prefix checks below can safely call str.startswith.
    if model_id is None:
        model_id = ""

    factory = None

    if model_framework == ModelFramework.HUGGINGFACE.value:
        # Hugging Face models: transformers mlflow flavor first, then the
        # PyFunc-flavored special cases. Branch order matters — e.g. the
        # HIBOU_B prefix check must run before the generic
        # IMAGE_FEATURE_EXTRACTION (Virchow) fallback.
        if SupportedNLPTasks.has_value(task):
            factory = NLPMLflowConvertorFactory
        elif SupportedVisionTasks.has_value(task):
            factory = VisionMLflowConvertorFactory
        elif task in (
            PyFuncSupportedTasks.TEXT_TO_IMAGE.value,
            PyFuncSupportedTasks.TEXT_TO_IMAGE_INPAINTING.value,
            PyFuncSupportedTasks.IMAGE_TO_IMAGE.value,
        ):
            factory = TextToImageMLflowConvertorFactory
        elif task == SupportedTasks.AUTOMATIC_SPEECH_RECOGNITION.value:
            factory = ASRMLflowConvertorFactory
        elif task == PyFuncSupportedTasks.ZERO_SHOT_IMAGE_CLASSIFICATION.value or (
            task == PyFuncSupportedTasks.EMBEDDINGS.value
            and model_id.startswith(ModelFamilyPrefixes.CLIP.value)
        ):
            factory = CLIPMLflowConvertorFactory
        elif task == PyFuncSupportedTasks.EMBEDDINGS.value and model_id.startswith(
            ModelFamilyPrefixes.DINOV2.value
        ):
            factory = DinoV2MLflowConvertorFactory
        elif task in (
            PyFuncSupportedTasks.IMAGE_TO_TEXT.value,
            PyFuncSupportedTasks.VISUAL_QUESTION_ANSWERING.value,
        ):
            factory = BLIPMLflowConvertorFactory
        elif task == PyFuncSupportedTasks.MASK_GENERATION.value:
            factory = SegmentAnythingMLflowConvertorFactory
        elif task in (
            PyFuncSupportedTasks.FEATURE_EXTRACTION.value,
            PyFuncSupportedTasks.IMAGE_FEATURE_EXTRACTION.value,
        ) and model_id.startswith(ModelFamilyPrefixes.HIBOU_B.value):
            factory = HibouBMLFlowConvertorFactory
        elif task == PyFuncSupportedTasks.IMAGE_FEATURE_EXTRACTION.value:
            factory = VirchowMLflowConvertorFactory
        else:
            raise Exception(
                f"Models from {model_framework} for task {task} and model {model_id} "
                "not supported for MLflow conversion"
            )
    elif model_framework == ModelFramework.MMLAB.value:
        # MMLAB models exported in PyFunc mlflow flavor.
        if MMLabDetectionTasks.has_value(task):
            factory = MMLabDetectionMLflowConvertorFactory
        elif MMLabTrackingTasks.has_value(task):
            factory = MMLabTrackingMLflowConvertorFactory
        else:
            raise Exception(f"Models from {model_framework} for {task} not supported for MLflow conversion")
    elif model_framework == ModelFramework.LLAVA.value:
        # LLAVA models exported in PyFunc mlflow flavor.
        if task == PyFuncSupportedTasks.IMAGE_TEXT_TO_TEXT.value:
            factory = LLaVAMLflowConvertorFactory
        else:
            raise Exception(f"Models from {model_framework} for {task} not supported for MLflow conversion")
    elif model_framework == ModelFramework.AutoML.value:
        # AutoML vision models exported in PyFunc mlflow flavor.
        if task in (
            PyFuncSupportedTasks.IMAGE_CLASSIFICATION.value,
            PyFuncSupportedTasks.IMAGE_CLASSIFICATION_MULTILABEL.value,
            PyFuncSupportedTasks.IMAGE_OBJECT_DETECTION.value,
            PyFuncSupportedTasks.IMAGE_INSTANCE_SEGMENTATION.value,
        ):
            factory = AutoMLMLflowConvertorFactory
        else:
            raise Exception(
                f"Models from {model_framework} for {task} not supported for MLflow conversion"
            )
    else:
        raise Exception(f"Models from {model_framework} not supported for MLflow conversion")

    return factory.create_mlflow_convertor(model_dir, output_dir, temp_dir, translate_params)