in scripts/performance/benchmark_utils.py [0:0]
import os
import shutil
import subprocess


def benchmark_command(command, benchmark_script, summarize_script,
                      output_dir, num_iterations, dry_run, upkeep=None,
                      cleanup=None):
"""Benchmark several runs of a long-running command.
:type command: str
:param command: The full aws cli command to benchmark
:type benchmark_script: str
:param benchmark_script: A benchmark script that takes a command to run
and outputs performance data to a file. This should be from s3transfer.
:type summarize_script: str
    :param summarize_script: A summarization script that summarizes the
        output of the benchmark script. This should be from s3transfer.
:type output_dir: str
:param output_dir: The directory to output performance results to.
:type num_iterations: int
:param num_iterations: The number of times to run the benchmark on the
command.
:type dry_run: bool
:param dry_run: Whether or not to actually run the benchmarks.
:type upkeep: function that takes no arguments
:param upkeep: A function that is run after every iteration of the
benchmark process. This should be used for upkeep, such as restoring
        files that were deleted as part of executing the command.
:type cleanup: function that takes no arguments
:param cleanup: A function that is run at the end of the benchmark
process or if there are any problems during the benchmark process.
        It should be used for the final cleanup, such as deleting files that
were created at some destination.
"""
performance_dir = os.path.join(output_dir, 'performance')
if os.path.exists(performance_dir):
shutil.rmtree(performance_dir)
os.makedirs(performance_dir)
try:
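        # Run the benchmark script once per iteration; each run writes its
        # raw performance samples to its own CSV file.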
for i in range(num_iterations):
out_file = 'performance%s.csv' % i
out_file = os.path.join(performance_dir, out_file)
benchmark_args = [
benchmark_script, command, '--output-file', out_file
]
if not dry_run:
subprocess.check_call(benchmark_args)
if upkeep is not None:
upkeep()
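        # After all iterations, aggregate the per-iteration CSVs into a
        # single summary under output_dir.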
if not dry_run:
summarize(summarize_script, performance_dir, output_dir)
finally:
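        # Always invoke the caller's cleanup hook, even if a benchmark run
        # raised an exception above.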
if not dry_run and cleanup is not None:
cleanup()
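

# A minimal, hypothetical usage sketch (not part of the original module): it
# shows one way benchmark_command might be wired up with the s3transfer
# benchmark/summarize scripts and the upkeep/cleanup hooks. Every path,
# bucket name, and helper function below is an illustrative assumption.
if __name__ == '__main__':
    def _restore_fixture():
        # Recreate the local file that the benchmarked command uploads so
        # every iteration starts from the same state (hypothetical helper).
        os.makedirs('/tmp/benchmark-fixture', exist_ok=True)
        with open('/tmp/benchmark-fixture/file.dat', 'wb') as f:
            f.write(b'\0' * 1024 * 1024)

    def _final_cleanup():
        # Remove everything the benchmark created locally (hypothetical).
        shutil.rmtree('/tmp/benchmark-fixture', ignore_errors=True)

    _restore_fixture()
    benchmark_command(
        command='aws s3 cp /tmp/benchmark-fixture/file.dat s3://my-bucket/',
        benchmark_script='/path/to/s3transfer/scripts/performance/benchmark',
        summarize_script='/path/to/s3transfer/scripts/performance/summarize',
        output_dir='/tmp/benchmark-results',
        num_iterations=3,
        dry_run=True,  # flip to False to actually execute the benchmark
        upkeep=_restore_fixture,
        cleanup=_final_cleanup,
    )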