src/fmeval/eval_algorithms/factual_knowledge.py [244:263]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def evaluate_sample(self, target_output: str, model_output: str) -> List[EvalScore]:  # type: ignore[override]
        """Computes the factual knowledge metrics for a single sample.

        :param target_output: The expected response(s) from the model. Multiple acceptable
            answers may be supplied in a single string, separated by the configured
            target_output_delimiter.
        :param model_output: The output of the model being evaluated.
        :return: A list of EvalScore objects, one for each of the Factual Knowledge metrics
        ("exact_inclusion" and "quasi_exact_inclusion").
        """
        sample = {
            DatasetColumns.TARGET_OUTPUT.value.name: target_output,
            DatasetColumns.MODEL_OUTPUT.value.name: model_output,
        }
        result = self.pipeline.execute_record(sample)
        return [EvalScore(name=score_name, value=result[score_name]) for score_name in SCORE_NAMES]

    def evaluate(
        self,
        model: Optional[ModelRunner] = None,
        dataset_config: Optional[Union[DataConfig, List[DataConfig]]] = None,
        prompt_template: Optional[str] = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
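
A minimal usage sketch for evaluate_sample above (assumes the FactualKnowledge and
FactualKnowledgeConfig classes exported by this module; the example inputs are hypothetical):

    from fmeval.eval_algorithms.factual_knowledge import (
        FactualKnowledge,
        FactualKnowledgeConfig,
    )

    # Multiple acceptable answers can be packed into target_output, separated
    # by the configured delimiter (default "<OR>").
    eval_algo = FactualKnowledge(FactualKnowledgeConfig(target_output_delimiter="<OR>"))
    scores = eval_algo.evaluate_sample(
        target_output="London<OR>Londres",
        model_output="The capital of England is London.",
    )
    for score in scores:
        # One EvalScore per entry in SCORE_NAMES
        # ("exact_inclusion" and "quasi_exact_inclusion").
        print(score.name, score.value)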



src/fmeval/eval_algorithms/qa_accuracy.py [331:349]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def evaluate_sample(self, target_output: str, model_output: str) -> List[EvalScore]:
        """Compute QA accuracy metrics for a single sample.

        :param target_output: The expected/desired model output.
        :param model_output: The actual model output.
        :return: A list of EvalScore objects, one for each of the QA accuracy metrics.
        """
        sample = {
            DatasetColumns.TARGET_OUTPUT.value.name: target_output,
            DatasetColumns.MODEL_OUTPUT.value.name: model_output,
        }
        result = self.pipeline.execute_record(sample)
        return [EvalScore(name=score_name, value=result[score_name]) for score_name in SCORE_NAMES]

    def evaluate(
        self,
        model: Optional[ModelRunner] = None,
        dataset_config: Optional[Union[DataConfig, List[DataConfig]]] = None,
        prompt_template: Optional[str] = None,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
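
A similar hedged sketch for QAAccuracy.evaluate_sample (assumes the QAAccuracy and
QAAccuracyConfig classes exported by this module; the question/answer pair is hypothetical):

    from fmeval.eval_algorithms.qa_accuracy import QAAccuracy, QAAccuracyConfig

    eval_algo = QAAccuracy(QAAccuracyConfig(target_output_delimiter="<OR>"))
    scores = eval_algo.evaluate_sample(
        target_output="2021<OR>the year 2021",
        model_output="It happened in 2021.",
    )
    # One EvalScore per entry in SCORE_NAMES (e.g. exact match,
    # quasi-exact match, F1, precision and recall over words).
    for score in scores:
        print(score.name, score.value)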



