def run_benchmark(self, **kwargs)

in llm_perf/common/benchmark_runner.py [0:0]

Runs a single benchmark for the given model: it skips configurations that are unsupported or already conducted, pushes the benchmark config to the Hub repository, then executes the benchmark and logs the results.

    def run_benchmark(self, **kwargs):
        model = kwargs.pop("model")

        # Build the benchmark name and the Hub subfolder for this model
        # ("/" in the model id is replaced so the subfolder is a valid path).
        benchmark_name = self.get_benchmark_name(model, **kwargs)
        subfolder = f"{benchmark_name}/{model.replace('/', '--')}"

        # Skip configurations that this runner does not support.
        if not self.is_benchmark_supported(**kwargs):
            self.logger.info(
                f"Skipping benchmark {benchmark_name} with model {model} since it is not supported"
            )
            return

        # Skip benchmarks whose results already exist under this repo/subfolder.
        if self.is_benchmark_conducted(self.push_repo_id, subfolder):
            self.logger.info(
                f"Skipping benchmark {benchmark_name} with model {model} since it was already conducted"
            )
            return

        # Build the benchmark config, push it to the Hub, then run and log the benchmark.
        benchmark_config = self.get_benchmark_config(model, **kwargs)
        benchmark_config.push_to_hub(
            repo_id=self.push_repo_id, subfolder=subfolder, private=True
        )
        self.execute_and_log_benchmark(benchmark_config, subfolder)
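
A minimal, self-contained sketch of how a runner built around this method might be driven. The ToyBenchmarkRunner class below, its helper implementations, the weights kwarg, and the model list are illustrative assumptions, not part of llm_perf; the Hub interactions (push_to_hub, execute_and_log_benchmark) are stubbed out so the skip logic can be exercised locally.

    import logging


    class ToyBenchmarkRunner:
        """Toy stand-in mirroring the run_benchmark control flow (assumed names)."""

        def __init__(self, push_repo_id: str):
            self.push_repo_id = push_repo_id
            self.logger = logging.getLogger("toy-runner")
            self._conducted = set()  # stands in for results already pushed to the Hub

        def get_benchmark_name(self, model: str, **kwargs) -> str:
            return "-".join(f"{k}-{v}" for k, v in sorted(kwargs.items()))

        def is_benchmark_supported(self, **kwargs) -> bool:
            return kwargs.get("weights") != "unsupported"

        def is_benchmark_conducted(self, repo_id: str, subfolder: str) -> bool:
            return (repo_id, subfolder) in self._conducted

        def run_benchmark(self, **kwargs):
            model = kwargs.pop("model")
            benchmark_name = self.get_benchmark_name(model, **kwargs)
            subfolder = f"{benchmark_name}/{model.replace('/', '--')}"
            if not self.is_benchmark_supported(**kwargs):
                self.logger.info("skipping %s (unsupported)", benchmark_name)
                return
            if self.is_benchmark_conducted(self.push_repo_id, subfolder):
                self.logger.info("skipping %s (already conducted)", benchmark_name)
                return
            # The real runner pushes the config to the Hub and executes the benchmark here.
            self._conducted.add((self.push_repo_id, subfolder))
            self.logger.info("ran %s for %s", benchmark_name, model)


    logging.basicConfig(level=logging.INFO)
    runner = ToyBenchmarkRunner(push_repo_id="my-org/llm-perf-results")
    for model in ["meta-llama/Llama-2-7b-hf", "mistralai/Mistral-7B-v0.1"]:
        runner.run_benchmark(model=model, weights="float16")
        runner.run_benchmark(model=model, weights="float16")      # skipped: already conducted
        runner.run_benchmark(model=model, weights="unsupported")  # skipped: unsupported

Because run_benchmark returns early for unsupported or already-conducted configurations, a driver loop like this can safely re-run the full model/configuration grid without duplicating work on the Hub.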