llm_perf/benchmark_runners/cpu/update_llm_perf_cpu_onnxruntime.py [21:54]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.attention_configs = self._get_attention_configs()
        assert (
            self.subset is not None
        ), "SUBSET environment variable must be set for benchmarking"
        self.weights_configs = self._get_weights_configs(self.subset)

    def get_list_of_benchmarks_to_run(self) -> List[Dict[str, Any]]:
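        """Return every benchmark combination to run: the Cartesian product of
        CANONICAL_PRETRAINED_OPEN_LLM_LIST, the attention implementations, and
        the weights configs selected for this subset."""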
        return [
            {
                "model": model,
                "attn_implementation": attn_impl,
                "weights_config": weights_cfg,
            }
            for model, attn_impl, weights_cfg in product(
                CANONICAL_PRETRAINED_OPEN_LLM_LIST,
                self.attention_configs,
                self.weights_configs.keys(),
            )
        ]

    def get_benchmark_name(self, model: str, **kwargs) -> str:
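        """Compose a unique benchmark name from the model, weights config,
        attention implementation, and backend."""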
        weights_config = kwargs["weights_config"]
        attn_implementation = kwargs["attn_implementation"]
        return f"{model}-{weights_config}-{attn_implementation}-{self.backend}"

    def get_benchmark_config(self, model: str, **kwargs) -> BenchmarkConfig:
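        """Build the BenchmarkConfig for a single (model, attention implementation,
        weights config) combination."""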
        weights_config = kwargs["weights_config"]
        attn_implementation = kwargs["attn_implementation"]

        assert (
            weights_config in self.weights_configs
        ), f"Unknown weights config '{weights_config}'; adjust _get_weights_configs to include it"

        torch_dtype = self.weights_configs[weights_config]["torch_dtype"]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



llm_perf/benchmark_runners/cpu/update_llm_perf_cpu_pytorch.py [21:54]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.attention_configs = self._get_attention_configs()
        assert (
            self.subset is not None
        ), "SUBSET environment variable must be set for benchmarking"
        self.weights_configs = self._get_weights_configs(self.subset)

    def get_list_of_benchmarks_to_run(self) -> List[Dict[str, Any]]:
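        """Return every benchmark combination to run: the Cartesian product of
        CANONICAL_PRETRAINED_OPEN_LLM_LIST, the attention implementations, and
        the weights configs selected for this subset."""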
        return [
            {
                "model": model,
                "attn_implementation": attn_impl,
                "weights_config": weights_cfg,
            }
            for model, attn_impl, weights_cfg in product(
                CANONICAL_PRETRAINED_OPEN_LLM_LIST,
                self.attention_configs,
                self.weights_configs.keys(),
            )
        ]

    def get_benchmark_name(self, model: str, **kwargs) -> str:
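        """Compose a unique benchmark name from the model, weights config,
        attention implementation, and backend."""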
        weights_config = kwargs["weights_config"]
        attn_implementation = kwargs["attn_implementation"]
        return f"{model}-{weights_config}-{attn_implementation}-{self.backend}"

    def get_benchmark_config(self, model: str, **kwargs) -> BenchmarkConfig:
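        """Build the BenchmarkConfig for a single (model, attention implementation,
        weights config) combination."""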
        weights_config = kwargs["weights_config"]
        attn_implementation = kwargs["attn_implementation"]

        assert (
            weights_config in self.weights_configs
        ), f"Unknown weights config '{weights_config}'; adjust _get_weights_configs to include it"

        torch_dtype = self.weights_configs[weights_config]["torch_dtype"]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
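Neither excerpt shows _get_weights_configs itself; the only requirement visible above is that it returns a mapping keyed by weights-config name (the same name that appears in the benchmark name), where each value carries at least a "torch_dtype" entry. Below is a minimal illustrative sketch under that assumption; the function name, subset key, config names, and field values are hypothetical and not the repository's actual configuration.

from typing import Any, Dict


def example_weights_configs(subset: str) -> Dict[str, Dict[str, Any]]:
    # Illustrative sketch only: the real _get_weights_configs may define
    # additional fields (e.g. quantization settings) beyond "torch_dtype",
    # which is the only key the excerpts above actually read.
    if subset == "unquantized":  # hypothetical SUBSET value
        return {
            "float32": {"torch_dtype": "float32"},
            "float16": {"torch_dtype": "float16"},
            "bfloat16": {"torch_dtype": "bfloat16"},
        }
    return {}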



