in osbenchmark/benchmark.py [0:0]
def create_arg_parser():
def positive_number(v):
value = int(v)
if value <= 0:
raise argparse.ArgumentTypeError(f"must be positive but was {value}")
return value
def non_empty_list(arg):
lst = opts.csv_to_list(arg)
if len(lst) < 1:
raise argparse.ArgumentError(argument=None, message="At least one argument required!")
return lst
def runtime_jdk(v):
if v == "bundled":
return v
else:
try:
return positive_number(v)
except argparse.ArgumentTypeError:
raise argparse.ArgumentTypeError(f"must be a positive number or 'bundled' but was {v}")
def supported_os_version(v):
if v:
min_os_version = versions.Version.from_string(version.minimum_os_version())
specified_version = versions.Version.from_string(v)
if specified_version < min_os_version:
raise argparse.ArgumentTypeError(f"must be at least {min_os_version} but was {v}")
return v
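# Illustration (not part of the original source): argparse invokes these converters
# once per raw string value, so they double as validation hooks, e.g.:
#   positive_number("3")   -> 3
#   positive_number("0")   -> argparse.ArgumentTypeError("must be positive but was 0")
#   runtime_jdk("bundled") -> "bundled"
#   runtime_jdk("17")      -> 17
# supported_os_version compares against version.minimum_os_version(), whose value
# depends on the Benchmark release.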
def add_workload_source(subparser):
workload_source_group = subparser.add_mutually_exclusive_group()
workload_source_group.add_argument(
"--workload-repository",
help="Define the repository from where Benchmark will load workloads (default: default).",
# argparse is smart enough to use this default only if the user did not use --workload-path and also did not specify anything
default="default"
)
workload_source_group.add_argument(
"--workload-path",
help="Define the path to a workload.")
subparser.add_argument(
"--workload-revision",
help="Define a specific revision in the workload repository that Benchmark should use.",
default=None)
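# Illustration (not part of the original source): because the two sources share a
# mutually exclusive group, a command line naming both, e.g.
#   --workload-repository default --workload-path /tmp/my-workload
# makes argparse exit with a "not allowed with argument" error, while
# --workload-revision combines with either source.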
# try to preload configurable defaults, but this does not work together with `--configuration-name` (which is undocumented anyway)
cfg = config.Config()
if cfg.config_present():
cfg.load_config()
preserve_install = cfg.opts("defaults", "preserve_benchmark_candidate", default_value=False, mandatory=False)
else:
preserve_install = False
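# Illustration (not part of the original source): this value only seeds the default of
# the --preserve-install flags declared further down. Since those flags use
# action="store_true", a config default of True cannot be switched off from the
# command line; the flag can only ever turn preservation on.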
parser = argparse.ArgumentParser(prog=PROGRAM_NAME,
description=BANNER + "\n\n A benchmarking tool for OpenSearch",
epilog="Find out more about Benchmark at {}".format(console.format.link(doc_link())),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version="%(prog)s " + version.version())
subparsers = parser.add_subparsers(
title="subcommands",
dest="subcommand",
help="")
test_execution_parser = subparsers.add_parser("execute_test", help="Run a benchmark")
# change in favor of "list telemetry", "list workloads", "list pipelines"
list_parser = subparsers.add_parser("list", help="List configuration options")
list_parser.add_argument(
"configuration",
metavar="configuration",
help="The configuration for which Benchmark should show the available options. "
"Possible values are: telemetry, workloads, pipelines, test_executions, provision_config_instances, opensearch-plugins",
choices=["telemetry", "workloads", "pipelines", "test_executions", "provision_config_instances", "opensearch-plugins"])
list_parser.add_argument(
"--limit",
help="Limit the number of search results for recent test_executions (default: 10).",
default=10,
)
add_workload_source(list_parser)
info_parser = subparsers.add_parser("info", help="Show info about a workload")
add_workload_source(info_parser)
info_parser.add_argument(
"--workload",
help=f"Define the workload to use. List possible workloads with `{PROGRAM_NAME} list workloads`."
# we set the default value later on because we need to determine whether the user has provided this value.
# default="geonames"
)
info_parser.add_argument(
"--workload-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to the workload as variables.",
default=""
)
info_parser.add_argument(
"--test-procedure",
help=f"Define the test_procedure to use. List possible test_procedures for workloads with `{PROGRAM_NAME} list workloads`."
)
info_task_filter_group = info_parser.add_mutually_exclusive_group()
info_task_filter_group.add_argument(
"--include-tasks",
help="Defines a comma-separated list of tasks to run. By default all tasks of a test_procedure are run.")
info_task_filter_group.add_argument(
"--exclude-tasks",
help="Defines a comma-separated list of tasks not to run. By default all tasks of a test_procedure are run.")
create_workload_parser = subparsers.add_parser("create-workload", help="Create a Benchmark workload from existing data")
create_workload_parser.add_argument(
"--workload",
required=True,
help="Name of the generated workload")
create_workload_parser.add_argument(
"--indices",
type=non_empty_list,
required=True,
help="Comma-separated list of indices to include in the workload")
create_workload_parser.add_argument(
"--target-hosts",
default="",
required=True,
help="Comma-separated list of host:port pairs which should be targeted")
create_workload_parser.add_argument(
"--client-options",
default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS,
help=f"Comma-separated list of client options to use. (default: {opts.ClientOptions.DEFAULT_CLIENT_OPTIONS})")
create_workload_parser.add_argument(
"--output-path",
default=os.path.join(os.getcwd(), "workloads"),
help="Workload output directory (default: workloads/)")
generate_parser = subparsers.add_parser("generate", help="Generate artifacts")
generate_parser.add_argument(
"artifact",
metavar="artifact",
help="The artifact to create. Possible values are: charts",
choices=["charts"])
# We allow either a chart-spec-path *or* a chart-spec defined on the fly with
# workload, test_procedure and provision_config_instance. Convincing argparse to
# validate that everything is correct *might* be doable but it is simpler to just
# do this manually.
generate_parser.add_argument(
"--chart-spec-path",
required=True,
help="Path to a JSON file(s) containing all combinations of charts to generate. Wildcard patterns can be used to specify "
"multiple files.")
generate_parser.add_argument(
"--chart-type",
help="Chart type to generate (default: time-series).",
choices=["time-series", "bar"],
default="time-series")
generate_parser.add_argument(
"--output-path",
help="Output file name (default: stdout).",
default=None)
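# Illustration (not part of the original source): "charts" is the only accepted
# artifact; a sketch with a placeholder spec path:
#   <PROGRAM_NAME> generate charts --chart-spec-path "specs/*.json" --chart-type bar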
compare_parser = subparsers.add_parser("compare", help="Compare two test_executions")
compare_parser.add_argument(
"--baseline",
required=True,
help=f"TestExecution ID of the baseline (see {PROGRAM_NAME} list test_executions).")
compare_parser.add_argument(
"--contender",
required=True,
help=f"TestExecution ID of the contender (see {PROGRAM_NAME} list test_executions).")
compare_parser.add_argument(
"--results-format",
help="Define the output format for the command line results (default: markdown).",
choices=["markdown", "csv"],
default="markdown")
compare_parser.add_argument(
"--results-numbers-align",
help="Define the output column number alignment for the command line results (default: right).",
choices=["right", "center", "left", "decimal"],
default="right")
compare_parser.add_argument(
"--results-file",
help="Write the command line results also to the provided file.",
default="")
compare_parser.add_argument(
"--show-in-results",
help="Whether to include the comparison in the results file.",
default=True)
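# Illustration (not part of the original source): a parse sketch; both IDs are
# placeholders for values obtained via `<PROGRAM_NAME> list test_executions`:
#   args = create_arg_parser().parse_args(
#       ["compare", "--baseline", "id-1", "--contender", "id-2", "--results-format", "csv"])
#   args.results_numbers_align  # -> "right" (the default)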
download_parser = subparsers.add_parser("download", help="Downloads an artifact")
download_parser.add_argument(
"--provision-config-repository",
help="Define the repository from where Benchmark will load provision_configs and provision_config_instances (default: default).",
default="default")
download_parser.add_argument(
"--provision-config-revision",
help="Define a specific revision in the provision_config repository that Benchmark should use.",
default=None)
download_parser.add_argument(
"--provision-config-path",
help="Define the path to the provision_config_instance and plugin configurations to use.")
download_parser.add_argument(
"--distribution-version",
type=supported_os_version,
help="Define the version of the OpenSearch distribution to download. "
"Check https://opensearch.org/docs/version-history/ for released versions.",
default="")
download_parser.add_argument(
"--distribution-repository",
help="Define the repository from where the OpenSearch distribution should be downloaded (default: release).",
default="release")
download_parser.add_argument(
"--provision-config-instance",
help=f"Define the provision_config_instance to use. List possible "
f"provision_config_instances with `{PROGRAM_NAME} list "
f"provision_config_instances` (default: defaults).",
default="defaults") # optimized for local usage
download_parser.add_argument(
"--provision-config-instance-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the provision_config_instance.",
default=""
)
download_parser.add_argument(
"--target-os",
help="The name of the target operating system for which an artifact should be downloaded (default: current OS)",
)
download_parser.add_argument(
"--target-arch",
help="The name of the CPU architecture for which an artifact should be downloaded (default: current architecture)",
)
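# Illustration (not part of the original source): a download sketch with placeholder
# values; the version is checked against the supported minimum, while the OS and
# architecture strings are passed through unvalidated:
#   <PROGRAM_NAME> download --distribution-version 2.11.0 --target-os linux --target-arch x64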
install_parser = subparsers.add_parser("install", help="Installs an OpenSearch node locally")
install_parser.add_argument(
"--revision",
help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
" 'latest' fetches the latest version on main. It is also possible to specify a commit id or a timestamp."
" The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
"e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
default="current") # optimized for local usage, don't fetch sources
# Intentionally undocumented as we do not consider Docker a fully supported option.
install_parser.add_argument(
"--build-type",
help=argparse.SUPPRESS,
choices=["tar", "docker"],
default="tar")
install_parser.add_argument(
"--provision-config-repository",
help="Define the repository from where Benchmark will load provision_configs and provision_config_instances (default: default).",
default="default")
install_parser.add_argument(
"--provision-config-revision",
help="Define a specific revision in the provision_config repository that Benchmark should use.",
default=None)
install_parser.add_argument(
"--provision-config-path",
help="Define the path to the provision_config_instance and plugin configurations to use.")
install_parser.add_argument(
"--runtime-jdk",
type=runtime_jdk,
help="The major version of the runtime JDK to use during installation.",
default=None)
install_parser.add_argument(
"--distribution-repository",
help="Define the repository from where the OpenSearch distribution should be downloaded (default: release).",
default="release")
install_parser.add_argument(
"--distribution-version",
type=supported_os_version,
help="Define the version of the OpenSearch distribution to download. "
"Check https://opensearch.org/docs/version-history/ for released versions.",
default="")
install_parser.add_argument(
"--provision-config-instance",
help=f"Define the provision_config_instance to use. List possible "
f"provision_config_instances with `{PROGRAM_NAME} list "
f"provision_config_instances` (default: defaults).",
default="defaults") # optimized for local usage
install_parser.add_argument(
"--provision-config-instance-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the provision_config_instance.",
default=""
)
install_parser.add_argument(
"--opensearch-plugins",
help="Define the OpenSearch plugins to install. (default: install no plugins).",
default="")
install_parser.add_argument(
"--plugin-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
default=""
)
install_parser.add_argument(
"--network-host",
help="The IP address to bind to and publish",
default="127.0.0.1"
)
install_parser.add_argument(
"--http-port",
help="The port to expose for HTTP traffic",
default="39200"
)
install_parser.add_argument(
"--node-name",
help="The name of this OpenSearch node",
default="benchmark-node-0"
)
install_parser.add_argument(
"--master-nodes",
help="A comma-separated list of the initial master node names",
default=""
)
install_parser.add_argument(
"--seed-hosts",
help="A comma-separated list of the initial seed host IPs",
default=""
)
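# Illustration (not part of the original source): a minimal local install sketch that
# leans on the declared defaults (tar build, node bound to 127.0.0.1:39200); the
# version is a placeholder:
#   <PROGRAM_NAME> install --distribution-version 2.11.0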
start_parser = subparsers.add_parser("start", help="Starts an OpenSearch node locally")
start_parser.add_argument(
"--installation-id",
required=True,
help="The id of the installation to start",
# the default will be dynamically derived by
# test_execution_orchestrator based on the
# presence / absence of other command line options
default="")
start_parser.add_argument(
"--test-execution-id",
required=True,
help="Define a unique id for this test_execution.",
default="")
start_parser.add_argument(
"--runtime-jdk",
type=runtime_jdk,
help="The major version of the runtime JDK to use.",
default=None)
start_parser.add_argument(
"--telemetry",
help=f"Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry "
f"devices with `{PROGRAM_NAME} list telemetry`.",
default="")
start_parser.add_argument(
"--telemetry-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
default=""
)
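# Illustration (not part of the original source): start requires an installation id
# produced by the install subcommand; both ids below are placeholders:
#   <PROGRAM_NAME> start --installation-id abc123 --test-execution-id my-run-1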
stop_parser = subparsers.add_parser("stop", help="Stops an OpenSearch node locally")
stop_parser.add_argument(
"--installation-id",
required=True,
help="The id of the installation to stop",
# the default will be dynamically derived by
# test_execution_orchestrator based on the
# presence / absence of other command line options
default="")
stop_parser.add_argument(
"--preserve-install",
help=f"Keep the benchmark candidate and its index. (default: {str(preserve_install).lower()}).",
default=preserve_install,
action="store_true")
for p in [list_parser, test_execution_parser]:
p.add_argument(
"--distribution-version",
type=supported_os_version,
help="Define the version of the OpenSearch distribution to download. "
"Check https://opensearch.org/docs/version-history/ for released versions.",
default="")
p.add_argument(
"--provision-config-path",
help="Define the path to the provision_config_instance and plugin configurations to use.")
p.add_argument(
"--provision-config-repository",
help="Define repository from where Benchmark will load provision_configs and provision_config_instances (default: default).",
default="default")
p.add_argument(
"--provision-config-revision",
help="Define a specific revision in the provision_config repository that Benchmark should use.",
default=None)
test_execution_parser.add_argument(
"--test-execution-id",
help="Define a unique id for this test_execution.",
default=str(uuid.uuid4()))
test_execution_parser.add_argument(
"--pipeline",
help="Select the pipeline to run.",
# the default will be dynamically derived by
# test_execution_orchestrator based on the
# presence / absence of other command line options
default="")
test_execution_parser.add_argument(
"--revision",
help="Define the source code revision for building the benchmark candidate. 'current' uses the source tree as is,"
" 'latest' fetches the latest version on main. It is also possible to specify a commit id or a timestamp."
" The timestamp must be specified as: \"@ts\" where \"ts\" must be a valid ISO 8601 timestamp, "
"e.g. \"@2013-07-27T10:37:00Z\" (default: current).",
default="current") # optimized for local usage, don't fetch sources
add_workload_source(test_execution_parser)
test_execution_parser.add_argument(
"--workload",
help=f"Define the workload to use. List possible workloads with `{PROGRAM_NAME} list workloads`."
)
test_execution_parser.add_argument(
"--workload-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to the workload as variables.",
default=""
)
test_execution_parser.add_argument(
"--test-procedure",
help=f"Define the test_procedure to use. List possible test_procedures for workloads with `{PROGRAM_NAME} list workloads`.")
test_execution_parser.add_argument(
"--provision-config-instance",
help=f"Define the provision_config_instance to use. List possible "
f"provision_config_instances with `{PROGRAM_NAME} list "
f"provision_config_instances` (default: defaults).",
default="defaults") # optimized for local usage
test_execution_parser.add_argument(
"--provision-config-instance-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim as variables for the provision_config_instance.",
default=""
)
test_execution_parser.add_argument(
"--runtime-jdk",
type=runtime_jdk,
help="The major version of the runtime JDK to use.",
default=None)
test_execution_parser.add_argument(
"--opensearch-plugins",
help="Define the OpenSearch plugins to install. (default: install no plugins).",
default="")
test_execution_parser.add_argument(
"--plugin-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to all plugins as variables.",
default=""
)
test_execution_parser.add_argument(
"--target-hosts",
help="Define a comma-separated list of host:port pairs which should be targeted if using the pipeline 'benchmark-only' "
"(default: localhost:9200).",
default="") # actually the default is pipeline specific and it is set later
test_execution_parser.add_argument(
"--load-worker-coordinator-hosts",
help="Define a comma-separated list of hosts which should generate load (default: localhost).",
default="localhost")
test_execution_parser.add_argument(
"--client-options",
help=f"Define a comma-separated list of client options to use. The options will be passed to the OpenSearch "
f"Python client (default: {opts.ClientOptions.DEFAULT_CLIENT_OPTIONS}).",
default=opts.ClientOptions.DEFAULT_CLIENT_OPTIONS)
test_execution_parser.add_argument("--on-error",
choices=["continue", "abort"],
help="Controls how Benchmark behaves on response errors (default: continue).",
default="continue")
test_execution_parser.add_argument(
"--telemetry",
help=f"Enable the provided telemetry devices, provided as a comma-separated list. List possible telemetry "
f"devices with `{PROGRAM_NAME} list telemetry`.",
default="")
test_execution_parser.add_argument(
"--telemetry-params",
help="Define a comma-separated list of key:value pairs that are injected verbatim to the telemetry devices as parameters.",
default=""
)
test_execution_parser.add_argument(
"--distribution-repository",
help="Define the repository from where the OpenSearch distribution should be downloaded (default: release).",
default="release")
task_filter_group = test_execution_parser.add_mutually_exclusive_group()
task_filter_group.add_argument(
"--include-tasks",
help="Defines a comma-separated list of tasks to run. By default all tasks of a test_procedure are run.")
task_filter_group.add_argument(
"--exclude-tasks",
help="Defines a comma-separated list of tasks not to run. By default all tasks of a test_procedure are run.")
test_execution_parser.add_argument(
"--user-tag",
help="Define a user-specific key-value pair (separated by ':'). It is added to each metric record as meta info. "
"Example: intention:baseline-ticket-12345",
default="")
test_execution_parser.add_argument(
"--results-format",
help="Define the output format for the command line results (default: markdown).",
choices=["markdown", "csv"],
default="markdown")
test_execution_parser.add_argument(
"--results-numbers-align",
help="Define the output column number alignment for the command line results (default: right).",
choices=["right", "center", "left", "decimal"],
default="right")
test_execution_parser.add_argument(
"--show-in-results",
help="Define which values are shown in the summary publish (default: available).",
choices=["available", "all-percentiles", "all"],
default="available")
test_execution_parser.add_argument(
"--results-file",
help="Write the command line results also to the provided file.",
default="")
test_execution_parser.add_argument(
"--preserve-install",
help=f"Keep the benchmark candidate and its index. (default: {str(preserve_install).lower()}).",
default=preserve_install,
action="store_true")
test_execution_parser.add_argument(
"--test-mode",
help="Runs the given workload in 'test mode'. Meant to check a workload for errors but not for real benchmarks (default: false).",
default=False,
action="store_true")
test_execution_parser.add_argument(
"--enable-worker-coordinator-profiling",
help="Enables a profiler for analyzing the performance of calls in Benchmark's worker coordinator (default: false).",
default=False,
action="store_true")
test_execution_parser.add_argument(
"--enable-assertions",
help="Enables assertion checks for tasks (default: false).",
default=False,
action="store_true")
test_execution_parser.add_argument(
"--kill-running-processes",
action="store_true",
default=False,
help="If any processes is running, it is going to kill them and allow Benchmark to continue to run."
)
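# Illustration (not part of the original source): a typical benchmark-only sketch
# against an existing cluster; the workload name, hosts and client options are
# placeholders:
#   <PROGRAM_NAME> execute_test --workload geonames --pipeline benchmark-only \
#       --target-hosts localhost:9200 --client-options timeout:60 --test-mode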
###############################################################################
#
# The options below are undocumented and can be removed or changed at any time.
#
###############################################################################
# This option is intended to tell Benchmark to assume a different start date than 'now'. This is effectively just useful for things like
# backtesting or a benchmark run across environments (think: comparison of EC2 and bare metal) but never for the typical user.
test_execution_parser.add_argument(
"--effective-start-date",
help=argparse.SUPPRESS,
type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
default=None)
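# Illustration (not part of the original source): the converter above accepts exactly
# the "%Y-%m-%d %H:%M:%S" layout, e.g. --effective-start-date "2016-01-01 00:00:00".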
# Skips checking that the REST API is available before proceeding with the benchmark
test_execution_parser.add_argument(
"--skip-rest-api-check",
help=argparse.SUPPRESS,
action="store_true",
default=False)
for p in [list_parser, test_execution_parser, compare_parser, download_parser, install_parser,
start_parser, stop_parser, info_parser, generate_parser, create_workload_parser]:
# This option is needed to support a separate configuration for the integration tests on the same machine
p.add_argument(
"--configuration-name",
help=argparse.SUPPRESS,
default=None)
p.add_argument(
"--quiet",
help="Suppress as much as output as possible (default: false).",
default=False,
action="store_true")
p.add_argument(
"--offline",
help="Assume that Benchmark has no connection to the Internet (default: false).",
default=False,
action="store_true")
return parser
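# Illustration (not part of the original source): a hypothetical caller sketch; since
# the subparsers are not marked required, args.subcommand can be None:
#   parser = create_arg_parser()
#   args = parser.parse_args()
#   if args.subcommand is None:
#       parser.print_help()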