in yourbench/pipeline/citation_score_filtering.py
from collections.abc import Sequence

def compute(self, citations: Sequence[str], chunks: Sequence[str], answer: str) -> tuple[float, float, float]:
    """Score each citation against the source chunks and the answer.

    Returns (avg_answer_score, avg_chunk_score, weighted_final_score).
    """
    # No citations, or nothing to compare them against: nothing to score.
    if not citations or (not chunks and not answer):
        return 0.0, 0.0, 0.0
    citation_count = len(citations)
    # Best similarity (via self._ratio) of each citation against any chunk.
    chunk_scores = [max((self._ratio(c, ch) for ch in chunks), default=0.0) for c in citations]
    # Similarity of each citation against the answer text.
    ans_scores = [self._ratio(c, answer) for c in citations]
    avg_chunk = sum(chunk_scores) / citation_count
    avg_ans = sum(ans_scores) / citation_count
    # Weighted blend: alpha weights chunk grounding, beta weights answer overlap.
    final = self.alpha * avg_chunk + self.beta * avg_ans
    return avg_ans, avg_chunk, final
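
# --- Usage sketch (assumptions, not the pipeline's confirmed API) ---
# A minimal, runnable illustration of how compute() behaves. The
# CitationScorer wrapper, its alpha/beta defaults, and the difflib-based
# _ratio helper are hypothetical stand-ins chosen for this sketch only.
from difflib import SequenceMatcher

class CitationScorer:
    def __init__(self, alpha: float = 0.7, beta: float = 0.3) -> None:
        self.alpha = alpha  # weight on grounding in source chunks (assumed default)
        self.beta = beta    # weight on overlap with the answer (assumed default)

    def _ratio(self, a: str, b: str) -> float:
        # Stand-in similarity in [0, 1]; the real helper may differ.
        return SequenceMatcher(None, a, b).ratio()

    def compute(self, citations, chunks, answer):
        # Same logic as the method above, compacted for a self-contained demo.
        if not citations or (not chunks and not answer):
            return 0.0, 0.0, 0.0
        n = len(citations)
        chunk_scores = [max((self._ratio(c, ch) for ch in chunks), default=0.0) for c in citations]
        ans_scores = [self._ratio(c, answer) for c in citations]
        avg_chunk, avg_ans = sum(chunk_scores) / n, sum(ans_scores) / n
        return avg_ans, avg_chunk, self.alpha * avg_chunk + self.beta * avg_ans

scorer = CitationScorer()
avg_ans, avg_chunk, final = scorer.compute(
    citations=["Rayleigh scattering makes the sky look blue."],
    chunks=["The sky appears blue because sunlight undergoes Rayleigh scattering."],
    answer="The sky is blue due to Rayleigh scattering of sunlight.",
)
print(f"avg_ans={avg_ans:.3f} avg_chunk={avg_chunk:.3f} final={final:.3f}")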