def run_single_benchmark()

in benchmarks/benchmark.py [0:0]

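Runs a single JMeter test plan (jmx) against a TorchServe endpoint and returns the aggregated metrics for that plan. The endpoint is either an already-running instance supplied via --ts, or a TorchServe Docker container that the function starts itself. Results are post-processed with the JMeter plugins CMDRunner into a CSV, a set of graphs, and an HTML report under out_dir.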

def run_single_benchmark(jmx, jmeter_args=None, threads=100, out_dir=None):
    jmeter_args = jmeter_args if jmeter_args is not None else {}
    if out_dir is None:
        out_dir = os.path.join(OUT_DIR, benchmark_name, basename(benchmark_model))
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    protocol = 'http'
    hostname = '127.0.0.1'
    port = 8080
    # --threads overrides the JMeter thread count; --workers sets the min_workers value
    # passed to the test plan, defaulting to the --gpus value if given, else the host CPU count.
    threads = pargs.threads[0] if pargs.threads else threads
    workers = pargs.workers[0] if pargs.workers else (
        pargs.gpus[0] if pargs.gpus else multiprocessing.cpu_count()
    )

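    # --ts points at an already-running TorchServe instance ([protocol://]hostname[:port]);
    # otherwise a TorchServe Docker container is started below.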
    if pargs.ts:
        url = pargs.ts[0]
        if '://' in url:
            protocol, url = url.split('://')
        if ':' in url:
            hostname, port = url.split(':')
            port = int(port)
        else:
            hostname = url
            port = 80
    else:
        # Start TorchServe
        docker = 'nvidia-docker' if pargs.gpus else 'docker'
        container = 'ts_benchmark_gpu' if pargs.gpus else 'ts_benchmark_cpu'
        docker_path = 'pytorch/torchserve:latest-gpu' \
            if pargs.gpus else 'pytorch/torchserve:latest'
        if pargs.docker:
            s_pargs_docker = ''.join(str(elem) for elem in pargs.docker)
            if '/' in s_pargs_docker:
                # Name the container after the image, dropping the repository path and tag
                container = 'ts_benchmark_{}'.format(s_pargs_docker.split('/')[-1].split(':')[0])
            else:
                container = 'ts_benchmark_{}'.format(s_pargs_docker.split(':')[0])
            docker_path = s_pargs_docker
        run_process("{} rm -f {}".format(docker, container))
        docker_run_call = "{} run --name {} -p 8080:8080 -p 8081:8081 -itd {}".format(docker, container, docker_path)
        retval = run_process(docker_run_call).returncode
        if retval != 0:
            raise Exception("docker run command failed! Please provide a valid docker image.")

    management_port = int(pargs.management[0]) if pargs.management else port + 1
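    # Give TorchServe (especially a freshly started container) time to come up before sending requests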
    time.sleep(30)

    try:
        # temp files
        tmpfile = os.path.join(out_dir, 'output.jtl')
        logfile = os.path.join(out_dir, 'jmeter.log')
        outfile = os.path.join(out_dir, 'out.csv')
        perfmon_file = os.path.join(out_dir, 'perfmon.csv')
        graphsDir = os.path.join(out_dir, 'graphs')
        reportDir = os.path.join(out_dir, 'report')

        # run jmeter
        run_jmeter_args = {
            'hostname': hostname,
            'port': port,
            'management_port': management_port,
            'protocol': protocol,
            'min_workers': workers,
            'rampup': 5,
            'threads': threads,
            'loops': int(pargs.loops[0]),
            'perfmon_file': perfmon_file
        }
        run_jmeter_args.update(JMETER_RESULT_SETTINGS)
        run_jmeter_args.update(jmeter_args)
        run_jmeter_args.update(dict(zip(pargs.options[::2], pargs.options[1::2])))
        abs_jmx = jmx if os.path.isabs(jmx) else os.path.join(JMX_BASE, jmx)
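        # Each entry is passed to JMeter as a user property on the command line: -J<name>=<value>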
        jmeter_args_str = ' '.join(sorted(['-J{}={}'.format(key, val) for key, val in run_jmeter_args.items()]))
        jmeter_call = '{} -n -t {} {} -l {} -j {} -e -o {}'.format(JMETER, abs_jmx, jmeter_args_str, tmpfile, logfile, reportDir)
        run_process(jmeter_call)
        print('Processing jmeter output')
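        # Pause before post-processing (presumably to let JMeter finish writing the .jtl results)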
        time.sleep(30)
        # run AggregateReport
        ag_call = 'java -jar {} --tool Reporter --generate-csv {} --input-jtl {} --plugin-type AggregateReport'.format(CMDRUNNER, outfile, tmpfile)
        if PLATFORM == 'Windows':
            run_process(ag_call, shell=True)
        else:
            run_process(ag_call)

        # Generate output graphs
        gLogfile = os.path.join(out_dir, 'graph_jmeter.log')
        graphing_args = {
            'raw_output': graphsDir,
            'jtl_input': tmpfile
        }
        graphing_args.update(JMETER_RESULT_SETTINGS)
        gjmx = os.path.join(JMX_BASE, JMX_GRAPHS_GENERATOR_PLAN)
        graphing_args_str = ' '.join(['-J{}={}'.format(key, val) for key, val in graphing_args.items()])
        graphing_call = '{} -n -t {} {} -j {}'.format(JMETER, gjmx, graphing_args_str, gLogfile)
        run_process(graphing_call)

        print("Output available at {}".format(out_dir))
        print("Report generated at {}".format(os.path.join(reportDir, 'index.html')))

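        # Pull the rows named in EXPERIMENT_RESULTS_MAP for this plan out of the AggregateReport CSV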
        data_frame = pd.read_csv(outfile, index_col=0)
        report = list()
        for val in EXPERIMENT_RESULTS_MAP[jmx]:
            for full_val in [fv for fv in data_frame.index if val in fv]:
                report.append(decorate_metrics(data_frame, full_val))

        return report

    except Exception:  # pylint: disable=broad-except
        # Log the failure; the caller receives None for this benchmark run
        traceback.print_exc()
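
A minimal usage sketch is shown below. The plan name 'basic_benchmark.jmx' and the extra JMeter property are illustrative placeholders rather than names taken from the repository, and the call assumes the module-level pargs, benchmark_name, benchmark_model, and path constants have already been initialised by benchmark.py:

# Hypothetical example: run one plan and print the metrics it reports.
# 'basic_benchmark.jmx' and 'input_filepath' are placeholders, not repository names.
report = run_single_benchmark(
    'basic_benchmark.jmx',
    jmeter_args={'input_filepath': '/tmp/input.json'},
    threads=50,
)
if report:
    for metrics in report:
        print(metrics)

Any keys supplied in jmeter_args override the defaults the function builds internally before they are handed to JMeter as -J properties.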