def _get_attention_configs()

in llm_perf/benchmark_runners/cuda/update_llm_perf_cuda_pytorch.py


    def _get_attention_configs(self) -> List[str]:
        # Attention implementations to benchmark; these match the values
        # accepted by Transformers' `attn_implementation` argument.
        return ["eager", "sdpa", "flash_attention_2"]
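Each returned string names an attention backend supported by Hugging Face Transformers: "eager" is the plain PyTorch implementation, "sdpa" uses torch.nn.functional.scaled_dot_product_attention, and "flash_attention_2" requires the flash-attn package and a compatible CUDA GPU. The sketch below is a minimal, hypothetical illustration (not this repo's actual runner) of how such a config list is typically consumed; the helper name and model id are assumptions for the example.

    # Minimal sketch: load a model once per attention config and benchmark it.
    from transformers import AutoModelForCausalLM

    def load_model_with_attention(model_id: str, attn_config: str):
        # Each attn_config string is passed straight through as the
        # `attn_implementation` argument of `from_pretrained`.
        return AutoModelForCausalLM.from_pretrained(
            model_id,
            attn_implementation=attn_config,
        )

    for attn_config in ["eager", "sdpa", "flash_attention_2"]:
        model = load_model_with_attention("gpt2", attn_config)
        # ... run and record the benchmark for this config ...

Iterating over the list this way keeps the benchmark matrix declarative: adding a new attention backend only requires appending its name to _get_attention_configs.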