def build_metrics()

in src/dfcx_scrapi/tools/metrics.py [0:0]


def build_metrics(
        metrics: list[str],
        genai_client: genai.client.Client,
        model_id: str,
        embedding_model: "TextEmbeddingModel | None" = None
        ) -> list["Metric"]:
    """Instantiate Metric objects from a list of metric name strings.

    Args:
        metrics: Metric names to build. Unsupported names are logged
            and skipped rather than raising.
        genai_client: GenAI client handed to the LLM-based metrics
            (answer_correctness, faithfulness, context_recall).
        model_id: Model identifier handed to the LLM-based metrics.
        embedding_model: Embedding model used by the similarity
            metrics; only needed when one of those names is requested.

    Returns:
        A list of instantiated Metric objects. Note that
        "tool_call_quality" expands into two metrics
        (ToolActionMatch and ToolNameMatch).
    """
    metric_list: list[Metric] = []
    for metric in metrics:
        if metric == "url_match":
            metric_list.append(UrlMatch())
        elif metric == "rougeL":
            metric_list.append(RougeL())
        elif metric == "answer_correctness":
            metric_list.append(
                AnswerCorrectness(
                    genai_client=genai_client,
                    model_id=model_id
                )
            )
        elif metric == "faithfulness":
            metric_list.append(
                Faithfulness(
                    genai_client=genai_client,
                    model_id=model_id
                )
            )
        elif metric == "context_recall":
            metric_list.append(
                ContextRecall(
                    genai_client=genai_client,
                    model_id=model_id
                )
            )
        elif metric in (
            "response_similarity",
            "semantic_similarity",
            "similarity",
        ):
            metric_list.append(SemanticSimilarity(model=embedding_model))
        elif metric == "tool_call_quality":
            # One requested name maps to two concrete metrics.
            metric_list.extend([ToolActionMatch(), ToolNameMatch()])
        else:
            # Bug fix: the second literal was missing the `f` prefix, so
            # `{SUPPORTED_METRICS}` was logged verbatim instead of being
            # interpolated. Switched to lazy %-style args, which is the
            # recommended form for logging calls.
            logging.info(
                "Metric `%s` is not supported. Supported Metrics"
                " are: %s. Skipping...", metric, SUPPORTED_METRICS)

    return metric_list