in src/fmeval/eval_algorithms/summarization_accuracy.py
def __init__(self, eval_algorithm_config: SummarizationAccuracyConfig = SummarizationAccuracyConfig()):
    """SummarizationAccuracy initializer.

    :param eval_algorithm_config: Summarization Accuracy evaluation algorithm config.
    """
    super().__init__(eval_algorithm_config)
    # Load the BERTScore helper model once so it can be shared by the transforms below.
    self.bertscore_model = BertscoreHelperModel(eval_algorithm_config.model_type_for_bertscore)
    # Build one transform per metric; each reads the target-output and model-output
    # columns and writes its score under the corresponding output key.
    meteor_score, rouge_score, bert_score = SummarizationAccuracy._create_transforms(
        target_output_keys=[DatasetColumns.TARGET_OUTPUT.value.name],
        model_output_keys=[DatasetColumns.MODEL_OUTPUT.value.name],
        meteor_keys=[METEOR_SCORE],
        rouge_keys=[ROUGE_SCORE],
        bertscore_keys=[BERT_SCORE],
        rouge_type=eval_algorithm_config.rouge_type,
        use_stemmer_for_rouge=eval_algorithm_config.use_stemmer_for_rouge,
        bertscore_model=self.bertscore_model,
    )
    self.meteor_score = meteor_score
    self.rouge_score = rouge_score
    self.bert_score = bert_score
    # The pipeline applies all three scoring transforms to every record.
    self.pipeline = TransformPipeline([meteor_score, rouge_score, bert_score])
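
For context, a minimal usage sketch of this initializer is shown below. It assumes the public fmeval API: SummarizationAccuracyConfig is the same config class consumed above, and evaluate_sample is inherited from the evaluation-algorithm base class. The concrete field values ("rouge2", the sample strings) are illustrative assumptions, not values taken from this file.

# Minimal usage sketch (assumes the fmeval package is installed; values are illustrative).
from fmeval.eval_algorithms.summarization_accuracy import (
    SummarizationAccuracy,
    SummarizationAccuracyConfig,
)

# rouge_type and use_stemmer_for_rouge are the config fields read by __init__ above;
# "rouge2" is an assumed, commonly used value, not necessarily the default.
config = SummarizationAccuracyConfig(rouge_type="rouge2", use_stemmer_for_rouge=True)
eval_algo = SummarizationAccuracy(config)

# Scoring a single (target, model output) pair exercises the METEOR, ROUGE, and
# BERTScore transforms wired together in the constructor.
scores = eval_algo.evaluate_sample(
    target_output="The cat sat on the mat.",
    model_output="A cat was sitting on the mat.",
)
for score in scores:
    print(score.name, score.value)

Because the BERTScore helper model is loaded once in the constructor and shared by the transforms, reusing a single SummarizationAccuracy instance across many samples avoids repeated model loads.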