def eval_results_per_class()

in dataset/eval_zoo.py


import numpy as np


def eval_results_per_class(classes, results, CLASS_LIST, N_PER_CLASS=None):
    """Average each metric per class, then macro-average across classes."""
    metrics = list(results.keys())

    avg_cls_results = {}
    for cls_ in CLASS_LIST:
        # Indices of all samples belonging to the current class.
        ok_cls = [ei for ei, c in enumerate(classes) if c == cls_]
        cls_results = {k: v[ok_cls] for k, v in results.items()}
        if N_PER_CLASS is not None:
            # Optional sanity check: expected sample count for this class.
            assert len(ok_cls) == N_PER_CLASS[cls_]
        # Each metric must provide exactly one value per selected sample.
        for k, v in cls_results.items():
            assert v.size == len(ok_cls)
        # Per-class mean of each metric.
        avg_cls_results[cls_] = {
            k: np.array(v).mean() for k, v in cls_results.items()}

    # Macro average: mean over the per-class means, one value per metric.
    all_avg_results = {}
    for metric in metrics:
        avg_metric = [v[metric] for v in avg_cls_results.values()]
        all_avg_results[metric] = float(np.array(avg_metric).mean())

    return all_avg_results, avg_cls_results
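
A minimal usage sketch follows; the class labels, metric names ("iou", "chamfer"), and values are made up purely for illustration and are not part of the actual dataset or evaluation code.

# Hypothetical example: 5 samples from 2 classes, 2 per-sample metrics.
classes = ["cat", "dog", "cat", "dog", "dog"]
results = {
    "iou":     np.array([0.9, 0.4, 0.7, 0.6, 0.5]),
    "chamfer": np.array([0.1, 0.3, 0.2, 0.4, 0.5]),
}
all_avg, per_cls = eval_results_per_class(
    classes, results,
    CLASS_LIST=["cat", "dog"],
    N_PER_CLASS={"cat": 2, "dog": 3})
print(all_avg)  # roughly {'iou': 0.65, 'chamfer': 0.275}
print(per_cls)  # per-class means, e.g. {'cat': {'iou': 0.8, ...}, 'dog': {...}}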