in osbenchmark/benchmark.py
def dispatch_sub_command(arg_parser, args, cfg):
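    """
    Dispatch the parsed subcommand: each branch copies the relevant CLI arguments
    into the config object and then delegates to the matching module
    (results_publisher, builder, tracker, workload, ...).

    Returns True if the subcommand completed successfully, False otherwise.
    """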
    sub_command = args.subcommand
    cfg.add(config.Scope.application, "system", "quiet.mode", args.quiet)
    cfg.add(config.Scope.application, "system", "offline.mode", args.offline)
    try:
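        # compare: publish a comparison between a baseline and a contender test execution.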
        if sub_command == "compare":
            configure_results_publishing_params(args, cfg)
            results_publisher.compare(cfg, args.baseline, args.contender)
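        # list: resolve workload/builder parameters (no specific workload or provision
        # config instance required) and print the requested configuration listing,
        # capping test execution results at args.limit.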
        elif sub_command == "list":
            cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
            cfg.add(config.Scope.applicationOverride, "system", "list.test_executions.max_results", args.limit)
            configure_builder_params(args, cfg, command_requires_provision_config_instance=False)
            configure_workload_params(arg_parser, args, cfg, command_requires_workload=False)
            dispatch_list(cfg)
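        # download: resolve builder parameters for the given target OS and architecture,
        # then download the corresponding distribution.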
        elif sub_command == "download":
            cfg.add(config.Scope.applicationOverride, "builder", "target.os", args.target_os)
            cfg.add(config.Scope.applicationOverride, "builder", "target.arch", args.target_arch)
            configure_builder_params(args, cfg)
            builder.download(cfg)
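        # install: generate a fresh install id and pass node, network, plugin and
        # source/build settings to builder.install().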
        elif sub_command == "install":
            cfg.add(config.Scope.applicationOverride, "system", "install.id", str(uuid.uuid4()))
            cfg.add(config.Scope.applicationOverride, "builder", "network.host", args.network_host)
            cfg.add(config.Scope.applicationOverride, "builder", "network.http.port", args.http_port)
            cfg.add(config.Scope.applicationOverride, "builder", "source.revision", args.revision)
            cfg.add(config.Scope.applicationOverride, "builder", "build.type", args.build_type)
            cfg.add(config.Scope.applicationOverride, "builder", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "builder", "node.name", args.node_name)
            cfg.add(config.Scope.applicationOverride, "builder", "master.nodes", opts.csv_to_list(args.master_nodes))
            cfg.add(config.Scope.applicationOverride, "builder", "seed.hosts", opts.csv_to_list(args.seed_hosts))
            cfg.add(config.Scope.applicationOverride, "builder",
                    "provision_config_instance.plugins", opts.csv_to_list(args.opensearch_plugins))
            cfg.add(config.Scope.applicationOverride, "builder", "plugin.params", opts.to_dict(args.plugin_params))
            configure_builder_params(args, cfg)
            builder.install(cfg)
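        # start: start a previously installed node, identified by its installation id,
        # with the configured telemetry devices and runtime JDK.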
        elif sub_command == "start":
            cfg.add(config.Scope.applicationOverride, "system", "test_execution.id", args.test_execution_id)
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            cfg.add(config.Scope.applicationOverride, "builder", "runtime.jdk", args.runtime_jdk)
            configure_telemetry_params(args, cfg)
            builder.start(cfg)
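        # stop: stop the node identified by the installation id, optionally preserving
        # the installation.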
        elif sub_command == "stop":
            cfg.add(config.Scope.applicationOverride, "builder", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.installation_id)
            builder.stop(cfg)
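        # execute_test: the all-in-one benchmarking entry point. It configures workload,
        # connection, telemetry and builder parameters before kicking off the benchmark.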
        elif sub_command == "execute_test":
            # As the execute_test command is doing more work than necessary at the moment, we duplicate several
            # parameters in this section that actually belong to dedicated subcommands (like install, start or stop).
            # Over time these duplicated parameters will vanish as we move towards dedicated subcommands and use
            # "execute_test" only to run the actual benchmark (i.e. generating load).
            if args.effective_start_date:
                cfg.add(config.Scope.applicationOverride, "system", "time.start", args.effective_start_date)
            cfg.add(config.Scope.applicationOverride, "system", "test_execution.id", args.test_execution_id)
            # use the test_execution id implicitly also as the install id.
            cfg.add(config.Scope.applicationOverride, "system", "install.id", args.test_execution_id)
            cfg.add(config.Scope.applicationOverride, "test_execution", "pipeline", args.pipeline)
            cfg.add(config.Scope.applicationOverride, "test_execution", "user.tag", args.user_tag)
            cfg.add(config.Scope.applicationOverride, "worker_coordinator", "profiling", args.enable_worker_coordinator_profiling)
            cfg.add(config.Scope.applicationOverride, "worker_coordinator", "assertions", args.enable_assertions)
            cfg.add(config.Scope.applicationOverride, "worker_coordinator", "on.error", args.on_error)
            cfg.add(
                config.Scope.applicationOverride,
                "worker_coordinator",
                "load_worker_coordinator_hosts",
                opts.csv_to_list(args.load_worker_coordinator_hosts))
            cfg.add(config.Scope.applicationOverride, "workload", "test.mode.enabled", args.test_mode)
            configure_workload_params(arg_parser, args, cfg)
            configure_connection_params(arg_parser, args, cfg)
            configure_telemetry_params(args, cfg)
            configure_builder_params(args, cfg)
            cfg.add(config.Scope.applicationOverride, "builder", "runtime.jdk", args.runtime_jdk)
            cfg.add(config.Scope.applicationOverride, "builder", "source.revision", args.revision)
            cfg.add(config.Scope.applicationOverride, "builder",
                    "provision_config_instance.plugins", opts.csv_to_list(args.opensearch_plugins))
            cfg.add(config.Scope.applicationOverride, "builder", "plugin.params", opts.to_dict(args.plugin_params))
            cfg.add(config.Scope.applicationOverride, "builder", "preserve.install", convert.to_bool(args.preserve_install))
            cfg.add(config.Scope.applicationOverride, "builder", "skip.rest.api.check", convert.to_bool(args.skip_rest_api_check))
            configure_results_publishing_params(args, cfg)
            execute_test(cfg, args.kill_running_processes)
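        # generate: render charts according to the given chart spec, chart type and output path.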
        elif sub_command == "generate":
            cfg.add(config.Scope.applicationOverride, "generator", "chart.spec.path", args.chart_spec_path)
            cfg.add(config.Scope.applicationOverride, "generator", "chart.type", args.chart_type)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            generate(cfg)
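        # create-workload: extract the given indices from a target cluster into a new
        # workload via the tracker module.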
        elif sub_command == "create-workload":
            cfg.add(config.Scope.applicationOverride, "generator", "indices", args.indices)
            cfg.add(config.Scope.applicationOverride, "generator", "output.path", args.output_path)
            cfg.add(config.Scope.applicationOverride, "workload", "workload.name", args.workload)
            configure_connection_params(arg_parser, args, cfg)
            tracker.create_workload(cfg)
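        # info: show details about the selected workload.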
        elif sub_command == "info":
            configure_workload_params(arg_parser, args, cfg)
            workload.workload_info(cfg)
        else:
            raise exceptions.SystemSetupError(f"Unknown subcommand [{sub_command}]")
        return True
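    # Benchmark-specific errors: log the full stack trace, then build a user-facing
    # message that walks the exception's cause chain, indenting each nested cause
    # with one additional tab.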
    except exceptions.BenchmarkError as e:
        logging.getLogger(__name__).exception("Cannot run subcommand [%s].", sub_command)
        msg = str(e.message)
        nesting = 0
        while hasattr(e, "cause") and e.cause:
            nesting += 1
            e = e.cause
            if hasattr(e, "message"):
                msg += "\n%s%s" % ("\t" * nesting, e.message)
            else:
                msg += "\n%s%s" % ("\t" * nesting, str(e))
        console.error("Cannot %s. %s" % (sub_command, msg))
        console.println("")
        print_help_on_errors()
        return False
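    # Catch-all for unexpected errors so the user still gets a log entry and a hint
    # on where to find help instead of a bare traceback.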
    except BaseException as e:
        logging.getLogger(__name__).exception("A fatal error occurred while running subcommand [%s].", sub_command)
        console.error("Cannot %s. %s." % (sub_command, e))
        console.println("")
        print_help_on_errors()
        return False