def _update_metrics_from_latest_eval_job()

in common/sagemaker_rl/orchestrator/workflow/manager/experiment_manager.py


    def _update_metrics_from_latest_eval_job(self, latest_evaluation_job_id):
        """
        Updates SyncThread's local information on every Evaluation Job complete run.

        Also Emit CW metric for New Model Evaluation Scores plot, while updating
        local latest_trained_model_* information, for continuous CW puts (for Number plots)
        """
        try:
            last_trained_model_id = self.experiment_manager.last_trained_model_id
            currently_hosted_model_id = self.experiment_manager.last_hosted_model_id

            if last_trained_model_id in latest_evaluation_job_id:
                # latest_evaluation_job_id has the format {last_trained_model_id}-{eval}-{timestamp},
                # so a substring ("in") check tells us whether the EvaluationJob was for the latest trained model
                eval_score = self.get_latest_eval_score_for_model_id(last_trained_model_id)
                if eval_score == "n.a.":
                    logger.debug("EvalScore from last run is n.a.")
                    return
                else:
                    logger.debug("Updated latest trained model eval score")
                    self.latest_trained_model_id = last_trained_model_id
                    self.latest_trained_model_eval_score = eval_score

                    # Also publish this score once, for Eval Score over time Graph
                    self.experiment_manager.cw_logger.publish_newly_trained_model_eval_information(
                        self.experiment_id, last_trained_model_id, eval_score
                    )
            elif currently_hosted_model_id in latest_evaluation_job_id:
                # latest_evaluation_job_id has the format {currently_hosted_model_id}-{eval}-{timestamp},
                # so a substring ("in") check tells us whether the EvaluationJob was for the currently hosted model
                eval_score = self.get_latest_eval_score_for_model_id(currently_hosted_model_id)
                if eval_score == "n.a.":
                    logger.debug("EvalScore for HostedModel is n.a.")
                    return
                else:
                    logger.debug("Updated Hosted Model Latest Eval score")
                    self.latest_hosted_model_eval_score = eval_score
            else:
                # Evaluation job was neither for the latest trained model nor the currently hosted model
                logger.debug(
                    "Latest evaluated model matches neither the latest trained model"
                    " nor the currently hosted model. Skipping EvalScore reporting."
                )
                return

        except Exception as e:
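            # Metric emission is best-effort: log the failure and return so the
            # sync loop keeps running instead of propagating the exception.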
            logger.warning("Failed to emit latest eval job metrics: %s", e)
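
For orientation, the two "in" checks above rely on the evaluation job id embedding the model id, as noted in the inline comments. Below is a minimal, self-contained sketch of that substring match; the concrete ids and the timestamp format are made up for illustration and are not taken from the orchestrator.

# Hypothetical ids illustrating the "{model_id}-eval-{timestamp}" naming
# convention described in the comments above; the exact timestamp format is assumed.
last_trained_model_id = "exp-1-model-42"
latest_evaluation_job_id = "exp-1-model-42-eval-20240101T000000"

# The method uses the same substring check to decide whether the finished
# evaluation job belongs to the latest trained (or currently hosted) model.
if last_trained_model_id in latest_evaluation_job_id:
    print("Evaluation job belongs to the latest trained model")

Note that this is a containment test rather than an exact parse of the job id, so it relies on the job-id naming convention to avoid false matches between unrelated model ids.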