def run_from_endpoint()

in sdk/python/foundation-models/healthcare-ai/medimageinsight/classification_demo/MedImageInsight.py [0:0]


    def run_from_endpoint(self, image=None, text=None, params=None):
        """
        Generate embeddings by sending image and/or text data to the deployed endpoint.

        Parameters:
        - image (str): The path to the image data.
        - text (str): The path to the text data.
        - params (dict): Additional parameters for prediction.
            - image_standardization_jpeg_compression_ratio (int): The JPEG compression ratio for the model input, default: 75.
            - image_standardization_image_size (int): The image size for MedImageInsight model input, default: 512.

        Returns:
        - embeddings_dict (dict): A dictionary where each key is the name,
        and the value is another dictionary containing 'image_feature' and/or 'text_feature'.
        - scaling_factor (np.ndarray or None): The scaling factor reported by the
        endpoint (taken from the first response that includes one), or None if the
        endpoint never returned one.

        Raises:
        - ValueError: If neither 'image' nor 'text' is provided, or if the image
        and text item names do not match when both are provided.
        """

        embeddings_dict = {}
        if params is None:
            params = {}

        data_dict = {}

        # Collect image data into a dictionary keyed by item name
        if image is not None:
            images_data = get_files_path(image)
            for name, data in images_data.items():
                data_dict.setdefault(name, {})["image"] = data["file"]

        # Collect text data into a dictionary keyed by item name
        if text is not None:
            texts_data = get_text(text)
            for name, data in texts_data.items():
                data_dict.setdefault(name, {})["text"] = data["text"]

        # Ensure that image and text names match if both are provided.
        # Use an explicit check rather than `assert`, which is stripped under -O.
        if image is not None and text is not None:
            if set(images_data.keys()) != set(texts_data.keys()):
                raise ValueError("Image and text names do not match")
            print("--------Start Generating Image and Text Features--------")
        elif image is not None:
            print("--------Start Generating Image Features--------")
        elif text is not None:
            print("--------Start Generating Text Features--------")
        else:
            raise ValueError("At least one of 'image' or 'text' must be provided.")

        # Process each item in data_dict, one request per item
        scaling_factor = None
        for name, data in tqdm(data_dict.items(), total=len(data_dict)):
            # The endpoint expects a two-column row; missing modality is sent as "".
            data_list = [data.get("image", ""), data.get("text", "")]
            request_data = {
                "input_data": {
                    "columns": ["image", "text"],
                    "index": [0],
                    "data": [data_list],
                },
                "params": params,
            }

            body = json.dumps(request_data).encode("utf-8")
            req = urllib.request.Request(self.endpoint_url, body, self.headers)

            try:
                # Use a context manager so the HTTP response is always closed.
                with urllib.request.urlopen(req) as response:
                    result = response.read()

                feature_json = json.loads(result)
                embeddings_dict[name] = {}
                for subj in feature_json:
                    if "image_features" in subj:
                        embeddings_dict[name]["image_feature"] = np.array(
                            subj["image_features"]
                        )
                    if "text_features" in subj:
                        embeddings_dict[name]["text_feature"] = np.array(
                            subj["text_features"]
                        )

                    # Keep only the first scaling factor seen; the endpoint is
                    # expected to report the same value for every item.
                    if "scaling_factor" in subj and scaling_factor is None:
                        scaling_factor = np.array(subj["scaling_factor"])

            except urllib.error.HTTPError as error:
                # Best-effort: log the failure for this item and continue with the rest.
                print(
                    "The embedding generation request failed with status code: "
                    + str(error.code)
                )
                print(error.info())
                print(error.read().decode("utf8", "ignore"))

        if image is not None:
            print("--------Finished All Image Features Generation!!--------")
        if text is not None:
            print("--------Finished All Text Features Generation!!--------")

        return embeddings_dict, scaling_factor