def is_benchmark_conducted(self, push_repo_id, subfolder)

in llm_perf/common/benchmark_runner.py

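Returns whether a benchmark has already been conducted for the given Hub repository and subfolder: it attempts to fetch the pushed BenchmarkReport, and treats a report that records a traceback (i.e. a failed run) as not conducted.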

    def is_benchmark_conducted(self, push_repo_id, subfolder):
        # A benchmark counts as conducted only if a report was previously
        # pushed to the Hub repo and that report does not record a failure.
        try:
            report = BenchmarkReport.from_pretrained(
                repo_id=push_repo_id, subfolder=subfolder
            )
            # Failed runs are pushed with a "traceback" entry in the report;
            # treat them as not conducted so they can be retried.
            return "traceback" not in report.to_dict()
        except Exception:
            # No report could be fetched (e.g. it does not exist yet), so
            # the benchmark has not been conducted.
            return False
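
As a minimal sketch of how this check might be used to skip already-completed runs, assuming a `runner` instance with a `push_repo_id` attribute, a `run_benchmark` method, and a per-model subfolder layout (all illustrative names, not the repository's exact API):

    # Hypothetical usage in a benchmark loop; `runner`, `model`, and the
    # subfolder naming are assumptions for illustration.
    subfolder = model.replace("/", "--")
    if runner.is_benchmark_conducted(runner.push_repo_id, subfolder):
        print(f"Skipping {model}: benchmark already conducted")
    else:
        runner.run_benchmark(model=model, subfolder=subfolder)

Note that the broad `except Exception` is deliberate here: any failure to fetch the report (missing repo, missing subfolder, network error) is treated the same way, as "not conducted", so the run is (re)attempted rather than crashing the loop.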