project/paperbench/experiments/judge_eval/judge_eval_perf_cost.py [85:98]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        }

    return model_results


def read_random_baseline_results():
    # Load the saved random-baseline judge-eval run and return its aggregate metrics.
    with open(
        "experiments/judge_eval/judge_eval_results/random_20250323_044950/results.json", "r"
    ) as f:
        data = json.load(f)

    return data["aggregate_metrics"]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



project/paperbench/experiments/judge_eval/judge_eval_perf_tables.py [34:47]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        }

    return model_results


def read_random_baseline_results():
    # Load the saved random-baseline judge-eval run and return its aggregate metrics.
    with open(
        "experiments/judge_eval/judge_eval_results/random_20250323_044950/results.json", "r"
    ) as f:
        data = json.load(f)

    return data["aggregate_metrics"]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
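
Both excerpts define identical copies of read_random_baseline_results, so the helper is a natural candidate for a shared module. A minimal sketch of that extraction, assuming a hypothetical experiments/judge_eval/common.py; the module name and constant are illustrative, not part of the repo:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# experiments/judge_eval/common.py -- hypothetical shared module (name assumed)
import json

# Path copied from the duplicated functions; both scripts hardcode it inline.
RANDOM_BASELINE_RESULTS = (
    "experiments/judge_eval/judge_eval_results/random_20250323_044950/results.json"
)


def read_random_baseline_results():
    # Load the saved random-baseline judge-eval run and return its
    # aggregate metrics, exactly as both duplicated copies do.
    with open(RANDOM_BASELINE_RESULTS, "r") as f:
        data = json.load(f)
    return data["aggregate_metrics"]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Each script could then replace its local copy with an import of this helper; the exact import path depends on how the paperbench package is laid out.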