in evaluation_pipeline/evaluation.py [0:0]
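# Note: calc_average_precision below relies on a calc_precision_at_k helper
# defined elsewhere in this module. The following is only a minimal sketch of
# the standard precision@k definition (fraction of the top-k retrieved
# documents that are relevant) that helper is assumed to follow, shown here so
# the excerpt is self-contained; it is not the module's actual implementation.
def calc_precision_at_k(relevant_docs, retrieved_docs, k):
    """Sketch: precision among the top-k retrieved documents."""
    if k <= 0 or not retrieved_docs:
        return 0.0
    top_k = retrieved_docs[:k]
    hits = sum(1 for doc_id in top_k if doc_id in set(relevant_docs))
    return hits / k
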
def calc_average_precision(relevant_docs, retrieved_docs, k):
    """
    Compute Average Precision at k (AP@k).

    Parameters:
        relevant_docs (list): List of relevant document IDs.
        retrieved_docs (list): List of retrieved document IDs in ranked order.
        k (int): Rank cutoff; precision is accumulated over ranks 1 to k.

    Returns:
        float: AP@k score.

    Averaging this score over a set of queries Q gives mean average
    precision (MAP@k).
    """
    # AP is undefined without relevant or retrieved documents, or a valid cutoff;
    # return 0.0 by convention.
    if not relevant_docs or not retrieved_docs or k <= 0:
        return 0.0

    # Clamp k to the number of retrieved documents.
    if k > len(retrieved_docs):
        print(f"k={k} is higher than the number of retrieved documents ({len(retrieved_docs)}); clamping to {len(retrieved_docs)}")
        k = len(retrieved_docs)

    relevant_set = set(relevant_docs)
    total_precision = 0.0
    # Accumulate precision@i only at ranks i where a relevant document was retrieved.
    for i in range(1, k + 1):
        if retrieved_docs[i - 1] in relevant_set:
            total_precision += calc_precision_at_k(relevant_docs, retrieved_docs, k=i)

    # Normalise by the number of relevant documents that could appear in the top k.
    average_precision = total_precision / min(len(relevant_docs), k)
    return average_precision
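
# Usage sketch (not part of the original module): the docstring notes that
# AP@k can be averaged over a set of queries Q to obtain mean average
# precision (MAP@k). The `queries` mapping below is a hypothetical structure,
# {query_id: (relevant_docs, retrieved_docs)}, assumed only for illustration.
def calc_mean_average_precision(queries, k):
    """Sketch: MAP@k over {query_id: (relevant_docs, retrieved_docs)}."""
    if not queries:
        return 0.0
    ap_scores = [
        calc_average_precision(relevant, retrieved, k)
        for relevant, retrieved in queries.values()
    ]
    return sum(ap_scores) / len(ap_scores)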