def get_default_argparser()

in scripts/performance/benchmark_utils.py


import argparse


def get_default_argparser():
    """Get an ArgumentParser with all the base benchmark arguments added in."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--no-cleanup', action='store_true', default=False,
        help='Do not remove the destination after the tests complete.'
    )
    parser.add_argument(
        '--recursive', action='store_true', default=False,
        help='Indicates that this is a recursive transfer.'
    )
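    # get_benchmark_script() and get_summarize_script() are helpers defined
    # elsewhere in benchmark_utils.py. When a script cannot be located
    # automatically, the corresponding flag below becomes required.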
    benchmark_script = get_benchmark_script()
    parser.add_argument(
        '--benchmark-script', default=benchmark_script,
        required=benchmark_script is None,
        help=('The benchmark script to run the commands with. This should be '
              'from s3transfer.')
    )
    summarize_script = get_summarize_script()
    parser.add_argument(
        '--summarize-script', default=summarize_script,
        required=summarize_script is None,
        help=('The summarize script to run the commands with. This should be '
              'from s3transfer.')
    )
    parser.add_argument(
        '-o', '--result-dir', default='results',
        help='The directory to output performance results to. Existing '
             'results will be deleted.'
    )
    parser.add_argument(
        '--dry-run', default=False, action='store_true',
        help='If set, commands will only be printed out, not executed.'
    )
    parser.add_argument(
        '--quiet', default=False, action='store_true',
        help='If set, output is suppressed.'
    )
    parser.add_argument(
        '-n', '--num-iterations', default=1, type=int,
        help='The number of times to run the test.'
    )
    return parser
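
As a usage sketch (hypothetical; not part of benchmark_utils.py), a
performance script would typically take the parser returned by
get_default_argparser(), layer its own options on top, and then act on the
parsed flags. The --file-size option and run_one_iteration() helper below are
illustrative assumptions, not part of the actual module:

from benchmark_utils import get_default_argparser


def run_one_iteration(args):
    # Stand-in for a script's real workload (e.g. shelling out to the CLI).
    print('Running benchmark iteration: %s' % args)


def main():
    parser = get_default_argparser()
    # Scripts typically extend the base parser with their own options.
    parser.add_argument(
        '-s', '--file-size', default='10MB',
        help='The size of the file to transfer (illustrative option).'
    )
    args = parser.parse_args()
    for _ in range(args.num_iterations):
        if args.dry_run:
            print('Would run one benchmark iteration')
            continue
        run_one_iteration(args)


if __name__ == '__main__':
    main()

Because the base parser already defines --dry-run, --quiet, and
--num-iterations, each script only needs to honor those flags rather than
re-declare them.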