def run_benchmarks()

in llm_perf/common/benchmark_runner.py


    def run_benchmarks(self):
        # Configure main-process logging via environment variables:
        # no file logging, INFO level, and a "MAIN-PROCESS" prefix.
        os.environ["LOG_TO_FILE"] = "0"
        os.environ["LOG_LEVEL"] = "INFO"
        setup_logging(level="INFO", prefix="MAIN-PROCESS")

        benchmarks_to_run = self.get_list_of_benchmarks_to_run()

        self.logger.info(
            f"Running a total of {len(benchmarks_to_run)} benchmarks, "
            f"with {len(CANONICAL_PRETRAINED_OPEN_LLM_LIST)} models"
        )

        for benchmark_name in benchmarks_to_run:
            # Each entry is a dict of keyword arguments for run_benchmark
            # and must at least specify the model to benchmark.
            assert "model" in benchmark_name, "each benchmark should have a model"

            self.run_benchmark(**benchmark_name)
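
Each entry returned by get_list_of_benchmarks_to_run is unpacked as keyword arguments into run_benchmark, so it behaves as a small config dict keyed at minimum by "model" (the only key the assert checks). Below is a minimal, self-contained sketch of how such a list could be built and dispatched; the keys "machine" and "subset" and the helper names are hypothetical illustrations, not part of the actual runner.

    from typing import Any

    def build_benchmarks_to_run(models: list[str]) -> list[dict[str, Any]]:
        # Hypothetical sketch: one config dict per model. Only the "model"
        # key is required by the assert in run_benchmarks; "machine" and
        # "subset" are illustrative extras.
        return [
            {"model": model, "machine": "1xA100", "subset": "unquantized"}
            for model in models
        ]

    def dispatch(configs: list[dict[str, Any]]) -> None:
        for config in configs:
            assert "model" in config, "each benchmark should have a model"
            # The real runner unpacks the dict: self.run_benchmark(**config).
            print(f"would benchmark {config['model']} with {config}")

    dispatch(build_benchmarks_to_run(["gpt2", "facebook/opt-125m"]))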