def _compute()

in metrics/trec_eval/trec_eval.py


    def _compute(self, references, predictions):
        """Returns the TREC evaluation scores."""

        if len(predictions) > 1 or len(references) > 1:
            raise ValueError(
                f"You can only pass one prediction and reference per evaluation. You passed {len(predictions)} prediction(s) and {len(references)} reference(s)."
            )

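        # predictions[0] / references[0] are column dicts; pandas turns them into
        # DataFrames in the run / qrels layout that trectools expects.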
        df_run = pd.DataFrame(predictions[0])
        df_qrel = pd.DataFrame(references[0])

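        # Wrap the DataFrames in trectools objects directly; the filename is only a
        # placeholder, no file is read from disk.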
        trec_run = TrecRun()
        trec_run.filename = "placeholder.file"
        trec_run.run_data = df_run

        trec_qrel = TrecQrel()
        trec_qrel.filename = "placeholder.file"
        trec_qrel.qrels_data = df_qrel

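        # Pair the run with the qrels for evaluation.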
        trec_eval = TrecEval(trec_run, trec_qrel)

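        # Run metadata and corpus-level counts.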
        result = {}
        result["runid"] = trec_eval.run.get_runid()
        result["num_ret"] = trec_eval.get_retrieved_documents(per_query=False)
        result["num_rel"] = trec_eval.get_relevant_documents(per_query=False)
        result["num_rel_ret"] = trec_eval.get_relevant_retrieved_documents(per_query=False)
        result["num_q"] = len(trec_eval.run.topics())
        result["map"] = trec_eval.get_map(depth=10000, per_query=False, trec_eval=True)
        result["gm_map"] = trec_eval.get_geometric_map(depth=10000, trec_eval=True)
        result["bpref"] = trec_eval.get_bpref(depth=1000, per_query=False, trec_eval=True)
        result["Rprec"] = trec_eval.get_rprec(depth=1000, per_query=False, trec_eval=True)
        result["recip_rank"] = trec_eval.get_reciprocal_rank(depth=1000, per_query=False, trec_eval=True)

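        # Precision and nDCG at the standard trec_eval cutoffs.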
        for v in [5, 10, 15, 20, 30, 100, 200, 500, 1000]:
            result[f"P@{v}"] = trec_eval.get_precision(depth=v, per_query=False, trec_eval=True)
        for v in [5, 10, 15, 20, 30, 100, 200, 500, 1000]:
            result[f"NDCG@{v}"] = trec_eval.get_ndcg(depth=v, per_query=False, trec_eval=True)

        return result
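
For context, a minimal usage sketch of how this method is typically reached through the Hugging Face evaluate API, assuming this file is the trec_eval metric of that library. The query ids, doc ids and scores below are illustrative only, chosen to match the run/qrels column layout trectools expects; they are not taken from this file.

    import evaluate

    trec_eval = evaluate.load("trec_eval")

    # Relevance judgements for a single query.
    qrel = {"query": [0], "q0": ["q0"], "docid": ["doc_1"], "rel": [2]}

    # Ranked system output for the same query.
    run = {
        "query": [0, 0],
        "q0": ["q0", "q0"],
        "docid": ["doc_2", "doc_1"],
        "rank": [0, 1],
        "score": [1.5, 1.2],
        "system": ["test", "test"],
    }

    # _compute receives these as references=[qrel] and predictions=[run].
    results = trec_eval.compute(references=[qrel], predictions=[run])
    print(results["map"], results["P@5"])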