# evaluation_pipeline/evaluation.py
def calc_recall_at_k(relevant_docs, retrieved_docs, k):
    """
    Compute Recall@K.

    Parameters:
        relevant_docs (set): Set of relevant document IDs.
        retrieved_docs (list): List of retrieved document IDs in ranked order.
        k (int): Number of top results to consider.

    Returns:
        float: Recall@K score, i.e. the fraction of relevant documents
        that appear among the top-k retrieved results.
    """
    # Guard against division by zero when there are no relevant documents.
    if not relevant_docs:
        return 0.0
    retrieved_at_k = retrieved_docs[:k]
    intersection = set(retrieved_at_k) & set(relevant_docs)
    return len(intersection) / len(relevant_docs)
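

# A minimal usage sketch; the document IDs and the k value below are
# hypothetical examples, not values from the original source.
if __name__ == "__main__":
    relevant = {"doc1", "doc3", "doc5"}
    ranked = ["doc3", "doc2", "doc1", "doc4", "doc5"]
    # Top-3 results contain doc3 and doc1 -> 2 of the 3 relevant docs found.
    print(calc_recall_at_k(relevant, ranked, k=3))  # 0.666...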