def evaluate_sample()

in src/fmeval/eval_algorithms/summarization_accuracy.py


    def evaluate_sample(self, target_output: str, model_output: str) -> List[EvalScore]:  # type: ignore[override]
        """Compute summarization accuracy metrics for a single sample.

        :param target_output: The expected/desired model output.
        :param model_output: The actual model output.
        :returns: A list of EvalScore objects, one for each of the summarization accuracy metrics.
        """
        sample = {
            DatasetColumns.TARGET_OUTPUT.value.name: target_output,
            DatasetColumns.MODEL_OUTPUT.value.name: model_output,
        }
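        # Run the record through the metric transform pipeline, which adds one entry per metric.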
        output_record = self.pipeline.execute_record(sample)
        assert_condition(
            all(metric_name in output_record for metric_name in METRIC_NAMES),
            "Summarization Accuracy evaluate_sample has computed an output that is missing at least one metric. "
            f"The output record is {output_record}.",
        )
        return [EvalScore(name=metric_name, value=output_record[metric_name]) for metric_name in METRIC_NAMES]
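For reference, a minimal usage sketch is shown below. It assumes SummarizationAccuracy can be instantiated with its default configuration (which may download the underlying BERTScore helper model on first use); the summary strings are purely illustrative.

    from fmeval.eval_algorithms.summarization_accuracy import SummarizationAccuracy

    # Default configuration; the metric names in the returned scores correspond to
    # METRIC_NAMES (e.g. meteor, rouge, bertscore in current releases).
    eval_algo = SummarizationAccuracy()

    scores = eval_algo.evaluate_sample(
        target_output="The cat sat on the mat.",
        model_output="A cat was sitting on the mat.",
    )

    for score in scores:
        print(f"{score.name}: {score.value}")

The returned list contains one EvalScore per entry in METRIC_NAMES, in the same order as that constant.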