def run_benchmark()

in Benchmarks/NVIDIA/LLMBenchmark.py [0:0]


    def run_benchmark(self):
        """Run a trtllm-bench throughput pass for every enabled model and
        each (input length, output length) pair in the config, redirecting
        each run's output to its own results file."""
        # Relies on module-level imports: subprocess and the repo's tools helper.
        for model_name, model_cfg in self.config['models'].items():
            if not model_cfg['use_model']:
                continue
            print(f"Benchmarking {model_name}")

            tp = model_cfg['tp_size']
            # Model names are HF-style "org/model"; keep only the model part.
            # [-1] also tolerates names without a slash, where [1] would raise.
            name = model_name.split('/')[-1]

            # Input and output sequence lengths are paired by index in the config.
            for isl, osl in zip(model_cfg['input_sizes'], model_cfg['output_sizes']):
                dataset_path = f"{self.dir_path}/datasets/{name}_synthetic_{isl}_{osl}.txt"
                results_path = f"{self.dir_path}/Outputs/results_{name}_{isl}_{osl}.txt"
                engine_dir = f"{self.dir_path}/engines/{model_name}/tp_{tp}_pp_1"

                run_benchmark_command = (
                    f"trtllm-bench --model {model_name} throughput "
                    f"--dataset {dataset_path} "
                    f"--engine_dir {engine_dir} > {results_path}"
                )

                bench_proc = subprocess.run(run_benchmark_command, shell=True,
                                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                tools.write_log(tools.check_error(bench_proc))
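
The method assumes a nested config with one entry per model. Below is a minimal sketch reconstructed from the keys the code reads; the model name and all values are illustrative, not taken from the repo.

    # Sketch of the expected self.config layout; values are illustrative.
    config = {
        "models": {
            "meta-llama/Llama-2-7b-hf": {    # illustrative HF-style "org/model" id
                "use_model": True,           # False skips the model entirely
                "input_sizes": [128, 2048],  # paired with output_sizes by index
                "output_sizes": [128, 2048],
                "tp_size": 1,                # tensor-parallel degree of the prebuilt engine
            },
        },
    }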
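
The tools.check_error and tools.write_log calls are project helpers defined elsewhere in the repo. The sketch below shows hypothetical stand-ins consistent only with how they are called here (a CompletedProcess in, a log write out); names and behavior are assumptions, not the repo's actual implementations.

    import subprocess

    # Hypothetical stand-ins for the repo's tools module (assumed behavior).
    def check_error(proc: subprocess.CompletedProcess) -> str:
        # Assumes stderr was captured as bytes via stderr=subprocess.PIPE.
        if proc.returncode != 0:
            return f"ERROR ({proc.returncode}): {proc.stderr.decode(errors='replace')}"
        return "OK"

    def write_log(message: str, log_path: str = "benchmark.log") -> None:
        with open(log_path, "a") as log_file:  # append, one entry per run
            log_file.write(message + "\n")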
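
Because the command is interpolated into a single shell string, paths containing spaces or shell metacharacters would break it. A minimal shell-free sketch of the same invocation, assuming the exact trtllm-bench flags used above, writes stdout straight to the results file instead of relying on a shell redirect:

    import subprocess

    def run_trtllm_bench(model_name, dataset_path, engine_dir, results_path):
        """One throughput run without shell=True (sketch; flags as in the command above)."""
        cmd = [
            "trtllm-bench", "--model", model_name, "throughput",
            "--dataset", dataset_path,
            "--engine_dir", engine_dir,
        ]
        # Open the results file ourselves rather than using a `>` redirect.
        with open(results_path, "w") as results_file:
            return subprocess.run(cmd, stdout=results_file, stderr=subprocess.PIPE)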