def predict()

in src/fmeval/model_runners/sm_jumpstart_model_runner.py [0:0]


    def predict(self, prompt: str) -> Union[Tuple[Optional[str], Optional[float]], List[float]]:
        """
        Run inference on the SageMaker endpoint for a single prompt and parse the response.

        :param prompt: Input data for which you want the model to provide inference.
        :return: For embedding models, the extracted embedding vector; otherwise a
            ``(output, log_probability)`` tuple where ``log_probability`` is ``None``
            when the model response does not include one.
        """
        payload = self._composer.compose(prompt)
        response = self._predictor.predict(
            data=payload,
            custom_attributes=self._custom_attributes,
            component_name=self._component_name,
        )
        if self._is_embedding_model:
            # Text embedding models always return an embedding; hand it back directly.
            return self._extractor.extract_embedding(data=response, num_records=1)
        # Every JumpStart text-model response is expected to carry an output field.
        text_output = self._extractor.extract_output(data=response, num_records=1)
        try:
            log_prob = self._extractor.extract_log_probability(data=response, num_records=1)
        except EvalAlgorithmClientError as e:
            # Not every model reports log probabilities; treat absence as best-effort.
            log_prob = None
            logger.warning(f"Unable to fetch log_probability from model response: {e}")
        return text_output, log_prob