def read_results(res_dir, evaluation_engine)

in evals/eval/evaluate.py [0:0]


def read_results(res_dir, evaluation_engine):
    """Collect per-translator scores from files laid out as
    ``res_dir/<pair>/<dataset>.<translator>.<evaluation_engine>``.

    Each matched file must contain a single number (the score).

    Args:
        res_dir: Root directory containing one subdirectory per language pair.
        evaluation_engine: File extension identifying the metric (e.g. "bleu").

    Returns:
        A ``defaultdict`` mapping pair -> dataset -> translator -> float score.
        Translators missing a score for some (pair, dataset) are filled in
        with 0 so every dataset row has the same set of translators.
    """
    results = defaultdict(dict)
    all_translators = set()
    pattern = os.path.join(res_dir, "*", "*." + evaluation_engine)
    for score_file in glob(pattern):
        # Filename is "<dataset>.<translator>.<engine>"; take the first two parts.
        dataset_name, translator = os.path.basename(score_file).split(".")[:2]
        # The language pair is the parent directory name; os.path keeps this
        # portable across path separators (unlike split("/")).
        pair = os.path.basename(os.path.dirname(score_file))
        with open(score_file) as f:
            score = float(f.read().strip())

        results[pair].setdefault(dataset_name, {})[translator] = score
        all_translators.add(translator)

    # Fill in missing translators with 0 so all rows are comparable.
    for datasets in results.values():
        for translators in datasets.values():
            for translator in all_translators:
                if translator not in translators:
                    translators[translator] = 0

    return results