def build_report()

in evals/eval/evaluate.py
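
Assembles one markdown report per evaluation engine: for each engine it fills the engine's report template with an averaged-scores section and one section per language pair, writes the assembled report to the results directory, and dumps the raw scores to JSON alongside it.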


import json
import os

# read_results, get_avg_scores, build_section and EVAL_DIR are defined
# elsewhere in evaluate.py.


def build_report(res_dir, evaluation_engines):
    # Charts referenced by the markdown reports go under <res_dir>/img.
    os.makedirs(os.path.join(res_dir, "img"), exist_ok=True)

    # evaluation_engines is a comma-separated list of engine names.
    for evaluation_engine in evaluation_engines.split(","):
        results = read_results(res_dir, evaluation_engine)

        # Load the per-engine markdown report template.
        with open(os.path.join(EVAL_DIR, evaluation_engine + "-results.md")) as f:
            lines = [line.strip() for line in f]

        # Section with scores averaged across all language pairs.
        avg_results = get_avg_scores(results)
        build_section(avg_results, "avg", lines, res_dir, evaluation_engine)

        # Dump the raw scores as JSON alongside the report.
        results_json_path = os.path.join(res_dir, evaluation_engine + "-results.json")
        with open(results_json_path, "w") as f:
            json.dump(results, f, indent=2)
        print(f"Results are written to {results_json_path}")

        # One section per language pair.
        for lang_pair, datasets in results.items():
            build_section(datasets, lang_pair, lines, res_dir, evaluation_engine)

        # Write the assembled markdown report.
        results_path = os.path.join(res_dir, evaluation_engine + "-results.md")
        with open(results_path, "w") as f:
            f.write("\n".join(lines))
        print(f"Results are written to {results_path}")