def get_eval_score()

in common/sagemaker_rl/orchestrator/workflow/manager/experiment_manager.py


    def get_eval_score(self, evaluate_model_id=None, eval_data_path=None):
        """
        Return evaluation score given model id and evaluation data path

        Args:
            evaluate_model_id (str): Model id used for evaluation
            eval_data_path (str): S3 data path of evaluation data

        Returns:
            float: evaluation score of given model and evaluation data
        """
        # use last trained model by default
        if evaluate_model_id is None:
            evaluate_model_id = self.experiment_record._last_trained_model_id

        if evaluate_model_id != self.experiment_record._last_trained_model_id:
            if not self._check_if_model_ready(evaluate_model_id):
                # requested model is not ready for evaluation; return None
                return

        # use last joined job's eval data by default
        if eval_data_path is None:
            eval_data_path = self.last_joined_job_eval_data

        logger.info(
            f"Getting eval scores for model '{evaluate_model_id}'"
            f" on eval data set '{eval_data_path}'"
        )

        eval_score = "n.a."
        if evaluate_model_id and eval_data_path:
            model_record = self.model_db_client.get_model_record(
                self.experiment_id, evaluate_model_id
            )
            if model_record:
                eval_scores_map = model_record.get("eval_scores", {})
                eval_score = eval_scores_map.get(eval_data_path, eval_score)
            else:
                logger.warning(f"Model Record not found with ModelId: {evaluate_model_id}")

        if eval_score == "n.a.":
            raise EvalScoreNotAvailableException(
                f"Evaluation score is not available for model '{evaluate_model_id}'"
                f"with data '{eval_data_path}'.'"
            )
        else:
            eval_score = float(eval_score)
            logger.info(
                f"Evaluation score for model '{evaluate_model_id}'"
                f"with data '{eval_data_path}' is {eval_score}."
            )

        return eval_score
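
A minimal usage sketch, assuming an already-configured ExperimentManager instance (here called experiment_manager) with at least one trained model and one joined job; the model id, S3 path, and variable names are illustrative assumptions, not taken from the source, and imports are omitted:

# Usage sketch; experiment_manager, the model id, and the S3 path are hypothetical.
try:
    # Default call: score the last trained model on the last joined job's eval data.
    score = experiment_manager.get_eval_score()

    # Explicit call: score a specific model on a specific evaluation dataset.
    score = experiment_manager.get_eval_score(
        evaluate_model_id="experiment-1-model-20200101-000000",
        eval_data_path="s3://my-bucket/experiment-1/eval-data/",
    )
    print(f"Evaluation score: {score}")
except EvalScoreNotAvailableException:
    # Raised when no score has been recorded yet for the model/data pair.
    print("Evaluation score not available yet for this model and dataset.")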