# src/hdinsightonaks/azext_hdinsightonaks/aaz/latest/hdinsight_on_aks/cluster/_update.py
from azure.cli.core.aaz import *  # noqa: F401,F403 -- import assumed from the generated module header

@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.cluster_name = AAZStrArg(
options=["-n", "--name", "--cluster-name"],
help="The name of the HDInsight cluster.",
required=True,
id_part="child_name_1",
)
_args_schema.cluster_pool_name = AAZStrArg(
options=["--cluster-pool-name"],
help="The name of the cluster pool.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
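# A minimal usage sketch (the az command path is assumed from this module's
# path; resource names are placeholders):
#   az hdinsightonaks cluster update -g myResourceGroup \
#     --cluster-pool-name myPool -n myCluster --tags env=dev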
# define Arg Group "ApplicationLogs"
_args_schema = cls._args_schema
_args_schema.application_log_std_error_enabled = AAZBoolArg(
options=["--enable-log-std-error", "--application-log-std-error-enabled"],
arg_group="ApplicationLogs",
help="True if application standard error is enabled, otherwise false.",
nullable=True,
)
_args_schema.application_log_std_out_enabled = AAZBoolArg(
options=["--enable-log-std-out", "--application-log-std-out-enabled"],
arg_group="ApplicationLogs",
help="True if application standard out is enabled, otherwise false.",
nullable=True,
)
# define Arg Group "AutoscaleLoadBased"
_args_schema = cls._args_schema
_args_schema.loadbased_config_cooldown_period = AAZIntArg(
options=["--cooldown-period", "--loadbased-config-cooldown-period"],
arg_group="AutoscaleLoadBased",
help="This is a cool down period, this is a time period in seconds, which determines the amount of time that must elapse between a scaling activity started by a rule and the start of the next scaling activity, regardless of the rule that triggers it. The default value is 300 seconds.",
nullable=True,
)
_args_schema.loadbased_config_max_nodes = AAZIntArg(
options=["--loadbased-max-nodes", "--loadbased-config-max-nodes"],
arg_group="AutoscaleLoadBased",
help="User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.",
)
_args_schema.loadbased_config_min_nodes = AAZIntArg(
options=["--loadbased-min-nodes", "--loadbased-config-min-nodes"],
arg_group="AutoscaleLoadBased",
help="User needs to set the minimum number of nodes for load based scaling, the load based scaling will use this to scale up and scale down between minimum and maximum number of nodes.",
)
_args_schema.loadbased_config_poll_interval = AAZIntArg(
options=["--loadbased-interval", "--loadbased-config-poll-interval"],
arg_group="AutoscaleLoadBased",
help="User can specify the poll interval, this is the time period (in seconds) after which scaling metrics are polled for triggering a scaling operation.",
nullable=True,
)
_args_schema.loadbased_config_scaling_rules = AAZListArg(
options=["--loadbased-rules", "--loadbased-config-scaling-rules"],
arg_group="AutoscaleLoadBased",
help="The scaling rules.",
)
loadbased_config_scaling_rules = cls._args_schema.loadbased_config_scaling_rules
loadbased_config_scaling_rules.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.loadbased_config_scaling_rules.Element
_element.action_type = AAZStrArg(
options=["action-type"],
help="The action type.",
enum={"scaledown": "scaledown", "scaleup": "scaleup"},
)
_element.comparison_rule = AAZObjectArg(
options=["comparison-rule"],
help="The comparison rule.",
)
_element.evaluation_count = AAZIntArg(
options=["evaluation-count"],
help="This is an evaluation count for a scaling condition, the number of times a trigger condition should be successful, before scaling activity is triggered.",
)
_element.scaling_metric = AAZStrArg(
options=["scaling-metric"],
help="Metrics name for individual workloads. For example: cpu",
)
comparison_rule = cls._args_schema.loadbased_config_scaling_rules.Element.comparison_rule
comparison_rule.operator = AAZStrArg(
options=["operator"],
help="The comparison operator.",
enum={"greaterThan": "greaterThan", "greaterThanOrEqual": "greaterThanOrEqual", "lessThan": "lessThan", "lessThanOrEqual": "lessThanOrEqual"},
)
comparison_rule.threshold = AAZFloatArg(
options=["threshold"],
help="Threshold setting.",
)
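# A minimal sketch of a load-based autoscale update, using AAZ shorthand
# syntax for the compound rule list (the command path, resource names, and
# metric values are illustrative placeholders):
#   az hdinsightonaks cluster update -g myResourceGroup --cluster-pool-name myPool -n myCluster \
#     --loadbased-min-nodes 3 --loadbased-max-nodes 10 --cooldown-period 300 --loadbased-interval 60 \
#     --loadbased-rules "[{action-type:scaleup,evaluation-count:3,scaling-metric:cpu,comparison-rule:{operator:greaterThan,threshold:80}}]"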
# define Arg Group "AutoscaleScheduleBased"
_args_schema = cls._args_schema
_args_schema.schedule_based_config_default_count = AAZIntArg(
options=["--schedule-default-count", "--schedule-based-config-default-count"],
arg_group="AutoscaleScheduleBased",
help="Setting default node count of current schedule configuration. Default node count specifies the number of nodes which are default when an specified scaling operation is executed (scale up/scale down)",
)
_args_schema.schedule_based_config_schedule = AAZListArg(
options=["--schedule-schedules", "--schedule-based-config-schedule"],
arg_group="AutoscaleScheduleBased",
help="This specifies the schedules where scheduled based Autoscale to be enabled, the user has a choice to set multiple rules within the schedule across days and times (start/end).",
)
_args_schema.schedule_based_config_time_zone = AAZStrArg(
options=["--schedule-time-zone", "--schedule-based-config-time-zone"],
arg_group="AutoscaleScheduleBased",
help="User has to specify the timezone on which the schedule has to be set for schedule based autoscale configuration.",
)
schedule_based_config_schedule = cls._args_schema.schedule_based_config_schedule
schedule_based_config_schedule.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.schedule_based_config_schedule.Element
_element.count = AAZIntArg(
options=["count"],
help="User has to set the node count anticipated at end of the scaling operation of the set current schedule configuration, format is integer.",
)
_element.days = AAZListArg(
options=["days"],
help="User has to set the days where schedule has to be set for autoscale operation.",
)
_element.end_time = AAZStrArg(
options=["end-time"],
help="User has to set the end time of current schedule configuration, format like 10:30 (HH:MM).",
fmt=AAZStrArgFormat(
pattern="^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
),
)
_element.start_time = AAZStrArg(
options=["start-time"],
help="User has to set the start time of current schedule configuration, format like 10:30 (HH:MM).",
fmt=AAZStrArgFormat(
pattern="^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$",
),
)
days = cls._args_schema.schedule_based_config_schedule.Element.days
days.Element = AAZStrArg(
nullable=True,
enum={"Friday": "Friday", "Monday": "Monday", "Saturday": "Saturday", "Sunday": "Sunday", "Thursday": "Thursday", "Tuesday": "Tuesday", "Wednesday": "Wednesday"},
)
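# A minimal sketch of a schedule-based autoscale update, using AAZ shorthand
# syntax for the schedule list (values are illustrative placeholders):
#   az hdinsightonaks cluster update -g myResourceGroup --cluster-pool-name myPool -n myCluster \
#     --schedule-default-count 3 --schedule-time-zone UTC \
#     --schedule-schedules "[{days:[Monday,Tuesday],start-time:'09:00',end-time:'18:00',count:6}]"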
# define Arg Group "ClusterProfile"
_args_schema = cls._args_schema
_args_schema.authorization_group_id = AAZListArg(
options=["--authorization-group-id"],
arg_group="ClusterProfile",
help="AAD group Ids authorized for data plane access.",
nullable=True,
)
_args_schema.authorization_user_id = AAZListArg(
options=["--authorization-user-id"],
arg_group="ClusterProfile",
help="AAD user Ids authorized for data plane access.",
nullable=True,
)
_args_schema.autoscale_profile_type = AAZStrArg(
options=["--autoscale-profile-type"],
arg_group="ClusterProfile",
help="User to specify which type of Autoscale to be implemented - Scheduled Based or Load Based.",
nullable=True,
enum={"LoadBased": "LoadBased", "ScheduleBased": "ScheduleBased"},
)
_args_schema.enable_autoscale = AAZBoolArg(
options=["--enable-autoscale"],
arg_group="ClusterProfile",
help="This indicates whether auto scale is enabled on HDInsight on AKS cluster.",
)
_args_schema.autoscale_profile_graceful_decommission_timeout = AAZIntArg(
options=["--decommission-time", "--autoscale-profile-graceful-decommission-timeout"],
arg_group="ClusterProfile",
help="This property is for graceful decommission timeout; It has a default setting of 3600 seconds before forced shutdown takes place. This is the maximal time to wait for running containers and applications to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. The default value is 3600 seconds. Negative value (like -1) is handled as infinite timeout.",
nullable=True,
)
_args_schema.cluster_version = AAZStrArg(
options=["--cluster-version"],
arg_group="ClusterProfile",
help="Version with 3/4 part.",
fmt=AAZStrArgFormat(
pattern="^(0|[1-9][0-9]{0,18})\.(0|[1-9][0-9]{0,18})\.(0|[1-9][0-9]{0,18})(?:\.(0|[1-9][0-9]{0,18}))?$",
),
)
_args_schema.assigned_identity_client_id = AAZStrArg(
options=["--msi-client-id", "--assigned-identity-client-id"],
arg_group="ClusterProfile",
help="ClientId of the MSI.",
fmt=AAZStrArgFormat(
pattern="^[{(]?[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-f]{12}[)}]?$",
),
)
_args_schema.assigned_identity_object_id = AAZStrArg(
options=["--msi-object-id", "--assigned-identity-object-id"],
arg_group="ClusterProfile",
help="ObjectId of the MSI.",
fmt=AAZStrArgFormat(
pattern="^[{(]?[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-f]{12}[)}]?$",
),
)
_args_schema.assigned_identity_id = AAZResourceIdArg(
options=["--msi-id", "--assigned-identity-id"],
arg_group="ClusterProfile",
help="ResourceId of the MSI.",
)
_args_schema.kafka_profile = AAZObjectArg(
options=["--kafka-profile"],
arg_group="ClusterProfile",
help="Kafka cluster profile.",
nullable=True,
)
_args_schema.llap_profile = AAZFreeFormDictArg(
options=["--llap-profile"],
arg_group="ClusterProfile",
help="LLAP cluster profile.",
nullable=True,
)
_args_schema.enable_log_analytics = AAZBoolArg(
options=["--enable-log-analytics"],
arg_group="ClusterProfile",
help="True if log analytics is enabled for the cluster, otherwise false.",
)
_args_schema.oss_version = AAZStrArg(
options=["--oss-version"],
arg_group="ClusterProfile",
help="Version with three part.",
fmt=AAZStrArgFormat(
pattern="^(0|[1-9][0-9]{0,18})\.(0|[1-9][0-9]{0,18})\.(0|[1-9][0-9]{0,18})$",
),
)
_args_schema.ranger_plugin_profile = AAZObjectArg(
options=["--ranger-plugin-profile"],
arg_group="ClusterProfile",
help="Cluster Ranger plugin profile.",
nullable=True,
)
_args_schema.ranger_profile = AAZObjectArg(
options=["--ranger-profile"],
arg_group="ClusterProfile",
help="The ranger cluster profile.",
nullable=True,
)
_args_schema.script_action_profiles = AAZListArg(
options=["--script-action-profiles"],
arg_group="ClusterProfile",
help="The script action profile list.",
nullable=True,
)
_args_schema.service_configs_profiles = AAZListArg(
options=["--service-configs", "--service-configs-profiles"],
arg_group="ClusterProfile",
help="The service configs profiles.",
nullable=True,
)
_args_schema.spark_storage_url = AAZStrArg(
options=["--spark-storage-url"],
arg_group="ClusterProfile",
help="The default storage URL.",
nullable=True,
)
_args_schema.user_plugins_spec = AAZObjectArg(
options=["--user-plugins-spec"],
arg_group="ClusterProfile",
help="Spark user plugins spec",
nullable=True,
)
_args_schema.ssh_profile_count = AAZIntArg(
options=["--ssh-profile-count"],
arg_group="ClusterProfile",
help="Number of ssh pods per cluster.",
fmt=AAZIntArgFormat(
maximum=5,
minimum=0,
),
)
_args_schema.stub_profile = AAZFreeFormDictArg(
options=["--stub-profile"],
arg_group="ClusterProfile",
help="Stub cluster profile.",
nullable=True,
)
authorization_group_id = cls._args_schema.authorization_group_id
authorization_group_id.Element = AAZStrArg(
nullable=True,
)
authorization_user_id = cls._args_schema.authorization_user_id
authorization_user_id.Element = AAZStrArg(
nullable=True,
)
kafka_profile = cls._args_schema.kafka_profile
kafka_profile.disk_storage = AAZObjectArg(
options=["disk-storage"],
help="Kafka disk storage profile.",
)
kafka_profile.enable_k_raft = AAZBoolArg(
options=["enable-k-raft"],
help="Expose Kafka cluster in KRaft mode.",
nullable=True,
)
kafka_profile.enable_public_endpoints = AAZBoolArg(
options=["enable-public-endpoints"],
help="Expose worker nodes as public endpoints.",
nullable=True,
)
kafka_profile.remote_storage_uri = AAZStrArg(
options=["remote-storage-uri"],
help="Fully qualified path of Azure Storage container used for Tiered Storage.",
nullable=True,
fmt=AAZStrArgFormat(
pattern="^(https?|abfss?):\/\/[^/]+(?:\/|$)",
),
)
disk_storage = cls._args_schema.kafka_profile.disk_storage
disk_storage.data_disk_size = AAZIntArg(
options=["data-disk-size"],
help="Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for Premium SSD v2, which supports up to 64TB.",
)
disk_storage.data_disk_type = AAZStrArg(
options=["data-disk-type"],
help="Managed Disk Type.",
enum={"Premium_SSD_LRS": "Premium_SSD_LRS", "Premium_SSD_ZRS": "Premium_SSD_ZRS", "Premium_SSD_v2_LRS": "Premium_SSD_v2_LRS", "Standard_HDD_LRS": "Standard_HDD_LRS", "Standard_SSD_LRS": "Standard_SSD_LRS", "Standard_SSD_ZRS": "Standard_SSD_ZRS"},
)
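# An illustrative --kafka-profile value in AAZ shorthand syntax (disk sizes
# and types are placeholders):
#   --kafka-profile "{disk-storage:{data-disk-size:8,data-disk-type:Standard_SSD_LRS},enable-k-raft:true,enable-public-endpoints:false}"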
ranger_plugin_profile = cls._args_schema.ranger_plugin_profile
ranger_plugin_profile.enabled = AAZBoolArg(
options=["enabled"],
help="Enable Ranger for cluster or not.",
)
ranger_profile = cls._args_schema.ranger_profile
ranger_profile.ranger_admin = AAZObjectArg(
options=["ranger-admin"],
help="Specification for the Ranger Admin service.",
)
ranger_profile.ranger_audit = AAZObjectArg(
options=["ranger-audit"],
help="Properties required to describe audit log storage.",
nullable=True,
)
ranger_profile.ranger_usersync = AAZObjectArg(
options=["ranger-usersync"],
help="Specification for the Ranger Usersync service",
)
ranger_admin = cls._args_schema.ranger_profile.ranger_admin
ranger_admin.admins = AAZListArg(
options=["admins"],
help="List of usernames that should be marked as ranger admins. These usernames should match the user principal name (UPN) of the respective AAD users.",
)
ranger_admin.database = AAZObjectArg(
options=["database"],
)
admins = cls._args_schema.ranger_profile.ranger_admin.admins
admins.Element = AAZStrArg(
nullable=True,
)
database = cls._args_schema.ranger_profile.ranger_admin.database
database.host = AAZStrArg(
options=["host"],
help="The database URL.",
)
database.name = AAZStrArg(
options=["name"],
help="The database name.",
)
database.password_secret_ref = AAZStrArg(
options=["password-secret-ref"],
help="Reference for the database password.",
nullable=True,
)
database.username = AAZStrArg(
options=["username"],
help="The name of the database user.",
nullable=True,
)
ranger_audit = cls._args_schema.ranger_profile.ranger_audit
ranger_audit.storage_account = AAZStrArg(
options=["storage-account"],
help="Azure storage location of the blobs. MSI should have read/write access to this Storage account.",
nullable=True,
fmt=AAZStrArgFormat(
pattern="^(https)|(abfss)://.*$",
min_length=1,
),
)
ranger_usersync = cls._args_schema.ranger_profile.ranger_usersync
ranger_usersync.enabled = AAZBoolArg(
options=["enabled"],
help="Denotes whether usersync service should be enabled",
nullable=True,
)
ranger_usersync.groups = AAZListArg(
options=["groups"],
help="List of groups that should be synced. These group names should match the object id of the respective AAD groups.",
nullable=True,
)
ranger_usersync.mode = AAZStrArg(
options=["mode"],
help="User & groups can be synced automatically or via a static list that's refreshed.",
nullable=True,
enum={"automatic": "automatic", "static": "static"},
)
ranger_usersync.user_mapping_location = AAZStrArg(
options=["user-mapping-location"],
help="Azure storage location of a mapping file that lists user & group associations.",
nullable=True,
fmt=AAZStrArgFormat(
pattern="^(https)|(abfss)://.*$",
min_length=1,
),
)
ranger_usersync.users = AAZListArg(
options=["users"],
help="List of user names that should be synced. These usernames should match the User principal name of the respective AAD users.",
nullable=True,
)
groups = cls._args_schema.ranger_profile.ranger_usersync.groups
groups.Element = AAZStrArg(
nullable=True,
)
users = cls._args_schema.ranger_profile.ranger_usersync.users
users.Element = AAZStrArg(
nullable=True,
)
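# An illustrative --ranger-profile value in AAZ shorthand syntax (hosts,
# database names, secret references, and UPNs are placeholders):
#   --ranger-profile "{ranger-admin:{admins:['admin@contoso.com'],database:{host:myserver.database.windows.net,name:rangerdb,username:rangeruser,password-secret-ref:rangerDbSecret}},ranger-usersync:{enabled:true,mode:automatic}}"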
script_action_profiles = cls._args_schema.script_action_profiles
script_action_profiles.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.script_action_profiles.Element
_element.name = AAZStrArg(
options=["name"],
help="Script name.",
)
_element.parameters = AAZStrArg(
options=["parameters"],
help="Additional parameters for the script action. It should be space-separated list of arguments required for script execution.",
nullable=True,
)
_element.services = AAZListArg(
options=["services"],
help="List of services to apply the script action.",
)
_element.should_persist = AAZBoolArg(
options=["should-persist"],
help="Specify if the script should persist on the cluster.",
nullable=True,
)
_element.timeout_in_minutes = AAZIntArg(
options=["timeout-in-minutes"],
help="Timeout duration for the script action in minutes.",
nullable=True,
)
_element.type = AAZStrArg(
options=["type"],
help="Type of the script action. Supported type is bash scripts.",
)
_element.url = AAZStrArg(
options=["url"],
help="Url of the script file.",
fmt=AAZStrArgFormat(
pattern="^(https)|(http)://.*$",
),
)
services = cls._args_schema.script_action_profiles.Element.services
services.Element = AAZStrArg(
nullable=True,
)
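# An illustrative --script-action-profiles value in AAZ shorthand syntax
# (the script name, URL, and service list are placeholders):
#   --script-action-profiles "[{name:init-script,type:bash,url:'https://myaccount.blob.core.windows.net/scripts/init.sh',services:[spark],should-persist:true,timeout-in-minutes:10}]"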
service_configs_profiles = cls._args_schema.service_configs_profiles
service_configs_profiles.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.service_configs_profiles.Element
_element.configs = AAZListArg(
options=["configs"],
help="List of service configs.",
)
_element.service_name = AAZStrArg(
options=["service-name"],
help="Name of the service the configurations should apply to.",
)
configs = cls._args_schema.service_configs_profiles.Element.configs
configs.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.service_configs_profiles.Element.configs.Element
_element.component = AAZStrArg(
options=["component"],
help="Name of the component the config files should apply to.",
)
_element.files = AAZListArg(
options=["files"],
help="List of Config Files.",
)
files = cls._args_schema.service_configs_profiles.Element.configs.Element.files
files.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.service_configs_profiles.Element.configs.Element.files.Element
_element.content = AAZStrArg(
options=["content"],
help="Free form content of the entire configuration file.",
nullable=True,
)
_element.encoding = AAZStrArg(
options=["encoding"],
help="This property indicates if the content is encoded and is case-insensitive. Please set the value to base64 if the content is base64 encoded. Set it to none or skip it if the content is plain text.",
nullable=True,
enum={"Base64": "Base64", "None": "None"},
)
_element.file_name = AAZStrArg(
options=["file-name"],
help="Configuration file name.",
)
_element.path = AAZStrArg(
options=["path"],
help="Path of the config file if content is specified.",
nullable=True,
)
_element.values = AAZDictArg(
options=["values"],
help="List of key value pairs where key represents a valid service configuration name and value represents the value of the config.",
nullable=True,
)
values = cls._args_schema.service_configs_profiles.Element.configs.Element.files.Element.values
values.Element = AAZStrArg(
nullable=True,
)
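# An illustrative --service-configs value in AAZ shorthand syntax (service,
# component, file, and config names are placeholders):
#   --service-configs "[{service-name:yarn-service,configs:[{component:hadoop-config,files:[{file-name:'core-site.xml',values:{'fs.defaultFS':'abfs://container@account.dfs.core.windows.net'}}]}]}]"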
user_plugins_spec = cls._args_schema.user_plugins_spec
user_plugins_spec.plugins = AAZListArg(
options=["plugins"],
help="Spark user plugins.",
nullable=True,
)
plugins = cls._args_schema.user_plugins_spec.plugins
plugins.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.user_plugins_spec.plugins.Element
_element.path = AAZStrArg(
options=["path"],
help="Fully qualified path to the folder containing the plugins.",
fmt=AAZStrArgFormat(
pattern="^(https)|(abfss)://.*$",
min_length=1,
),
)
# define Arg Group "ComputeProfile"
_args_schema = cls._args_schema
_args_schema.availability_zones = AAZListArg(
options=["--availability-zones"],
arg_group="ComputeProfile",
help="The list of Availability zones to use for AKS VMSS nodes.",
nullable=True,
)
_args_schema.nodes = AAZListArg(
options=["--nodes"],
arg_group="ComputeProfile",
help="The nodes definitions.",
)
availability_zones = cls._args_schema.availability_zones
availability_zones.Element = AAZStrArg(
nullable=True,
)
nodes = cls._args_schema.nodes
nodes.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.nodes.Element
_element.count = AAZIntArg(
options=["count"],
help="The number of virtual machines.",
fmt=AAZIntArgFormat(
minimum=0,
),
)
_element.type = AAZStrArg(
options=["type"],
help="The node type.",
fmt=AAZStrArgFormat(
pattern="^(head|Head|HEAD|worker|Worker|WORKER)$",
),
)
_element.vm_size = AAZStrArg(
options=["vm-size"],
help="The virtual machine SKU.",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9_\-]{0,256}$",
),
)
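# An illustrative --nodes value in AAZ shorthand syntax (the VM SKU is a
# placeholder):
#   --nodes "[{type:Worker,count:5,vm-size:Standard_D8s_v3}]"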
# define Arg Group "Coordinator"
_args_schema = cls._args_schema
_args_schema.coordinator_high_availability_enabled = AAZBoolArg(
options=["--enable-coord-ha", "--coordinator-high-availability-enabled"],
arg_group="Coordinator",
help="The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: false.",
nullable=True,
)
_args_schema.coordinator_debug_port = AAZIntArg(
options=["--coord-debug-port", "--coordinator-debug-port"],
arg_group="Coordinator",
help="The flag that if enable debug or not. Default: 8008.",
nullable=True,
)
_args_schema.coordinator_debug_suspend = AAZBoolArg(
options=["--coord-debug-suspend", "--coordinator-debug-suspend"],
arg_group="Coordinator",
help="The flag that if suspend debug or not. Default: false.",
nullable=True,
)
_args_schema.coordinator_debug_enabled = AAZBoolArg(
options=["--enable-coord-debug", "--coordinator-debug-enabled"],
arg_group="Coordinator",
help="The flag that if enable coordinator HA, uses multiple coordinator replicas with auto failover, one per each head node. Default: false.",
nullable=True,
)
# define Arg Group "FlinkProfile"
_args_schema = cls._args_schema
_args_schema.metastore_db_connection_authentication_mode = AAZStrArg(
options=["--flink-db-auth-mode", "--metastore-db-connection-authentication-mode"],
arg_group="FlinkProfile",
help="The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization",
nullable=True,
enum={"IdentityAuth": "IdentityAuth", "SqlAuth": "SqlAuth"},
)
_args_schema.flink_hive_catalog_db_connection_password_secret = AAZStrArg(
options=["--flink-hive-db-secret", "--flink-hive-catalog-db-connection-password-secret"],
arg_group="FlinkProfile",
help="Secret reference name from secretsProfile.secrets containing password for database connection.",
nullable=True,
)
_args_schema.flink_hive_catalog_db_connection_url = AAZStrArg(
options=["--flink-hive-db-url", "--flink-hive-catalog-db-connection-url"],
arg_group="FlinkProfile",
help="Connection string for hive metastore database.",
)
_args_schema.flink_hive_catalog_db_connection_user_name = AAZStrArg(
options=["--flink-hive-db-user", "--flink-hive-catalog-db-connection-user-name"],
arg_group="FlinkProfile",
help="User name for database connection.",
nullable=True,
)
_args_schema.deployment_mode = AAZStrArg(
options=["--deployment-mode"],
arg_group="FlinkProfile",
help="A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => Application, Session. Default value is Session",
nullable=True,
enum={"Application": "Application", "Session": "Session"},
)
_args_schema.history_server_cpu = AAZFloatArg(
options=["--history-server-cpu"],
arg_group="FlinkProfile",
help="History server CPU count.",
)
_args_schema.history_server_memory = AAZIntArg(
options=["--history-server-memory"],
arg_group="FlinkProfile",
help="History server memory size.",
)
_args_schema.job_manager_cpu = AAZFloatArg(
options=["--job-manager-cpu"],
arg_group="FlinkProfile",
help="Job manager CPU count.",
)
_args_schema.job_manager_memory = AAZIntArg(
options=["--job-manager-memory"],
arg_group="FlinkProfile",
help="Job manager memory size.",
)
_args_schema.job_spec = AAZObjectArg(
options=["--job-spec"],
arg_group="FlinkProfile",
help="Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties are changed by calling the RunJob API, please use the ListJob API to get the latest job information.",
nullable=True,
)
_args_schema.num_replicas = AAZIntArg(
options=["--num-replicas"],
arg_group="FlinkProfile",
help="The number of task managers.",
nullable=True,
)
_args_schema.flink_storage_uri = AAZStrArg(
options=["--flink-storage-uri"],
arg_group="FlinkProfile",
help="Storage account uri which is used for savepoint and checkpoint state.",
)
_args_schema.flink_storage_key = AAZStrArg(
options=["--flink-storage-key"],
arg_group="FlinkProfile",
help="Storage key is only required for wasb(s) storage.",
nullable=True,
)
_args_schema.task_manager_cpu = AAZFloatArg(
options=["--task-manager-cpu"],
arg_group="FlinkProfile",
help="Task manager CPU count.",
)
_args_schema.task_manager_memory = AAZIntArg(
options=["--task-manager-memory"],
arg_group="FlinkProfile",
help="The task manager memory size.",
)
job_spec = cls._args_schema.job_spec
job_spec.args = AAZStrArg(
options=["args"],
help="A string property representing additional JVM arguments for the Flink job. It should be space separated value.",
nullable=True,
)
job_spec.entry_class = AAZStrArg(
options=["entry-class"],
help="A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected from the flink job jar package.",
nullable=True,
)
job_spec.jar_name = AAZStrArg(
options=["jar-name"],
help="A string property that represents the name of the job JAR.",
)
job_spec.job_jar_directory = AAZStrArg(
options=["job-jar-directory"],
help="A string property that specifies the directory where the job JAR is located.",
)
job_spec.save_point_name = AAZStrArg(
options=["save-point-name"],
help="A string property that represents the name of the savepoint for the Flink job",
nullable=True,
)
job_spec.upgrade_mode = AAZStrArg(
options=["upgrade-mode"],
help="A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following enum values => STATELESS_UPDATE, UPDATE, LAST_STATE_UPDATE.",
enum={"LAST_STATE_UPDATE": "LAST_STATE_UPDATE", "STATELESS_UPDATE": "STATELESS_UPDATE", "UPDATE": "UPDATE"},
)
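# An illustrative --job-spec value for a Flink cluster in Application
# deployment mode, in AAZ shorthand syntax (the jar name, directory, and
# entry class are placeholders):
#   --job-spec "{jar-name:'myjob.jar',job-jar-directory:'abfs://container@account.dfs.core.windows.net/jars',entry-class:'com.example.Main',upgrade-mode:UPDATE}"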
# define Arg Group "HDInsightCluster"
_args_schema = cls._args_schema
_args_schema.tags = AAZDictArg(
options=["--tags"],
arg_group="HDInsightCluster",
help="Resource tags.",
nullable=True,
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg(
nullable=True,
)
# define Arg Group "LogAnalyticsProfile"
_args_schema = cls._args_schema
_args_schema.log_analytic_profile_metrics_enabled = AAZBoolArg(
options=["--enable-la-metrics", "--log-analytic-profile-metrics-enabled"],
arg_group="LogAnalyticsProfile",
help="True if metrics are enabled, otherwise false.",
nullable=True,
)
# define Arg Group "ManagedIdentityProfile"
_args_schema = cls._args_schema
_args_schema.identity_list = AAZListArg(
options=["--identity-list"],
arg_group="ManagedIdentityProfile",
help="The list of managed identity.",
)
identity_list = cls._args_schema.identity_list
identity_list.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.identity_list.Element
_element.client_id = AAZStrArg(
options=["client-id"],
help="ClientId of the managed identity.",
fmt=AAZStrArgFormat(
pattern="^[{(]?[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-f]{12}[)}]?$",
),
)
_element.object_id = AAZStrArg(
options=["object-id"],
help="ObjectId of the managed identity.",
fmt=AAZStrArgFormat(
pattern="^[{(]?[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-f]{12}[)}]?$",
),
)
_element.resource_id = AAZResourceIdArg(
options=["resource-id"],
help="ResourceId of the managed identity.",
)
_element.type = AAZStrArg(
options=["type"],
help="The type of managed identity.",
enum={"cluster": "cluster", "internal": "internal", "user": "user"},
)
# define Arg Group "PrometheusProfile"
_args_schema = cls._args_schema
_args_schema.enable_prometheu = AAZBoolArg(
options=["--enable-prometheu"],
arg_group="PrometheusProfile",
help="Enable Prometheus for cluster or not.",
)
# define Arg Group "SecretsProfile"
_args_schema = cls._args_schema
_args_schema.key_vault_id = AAZResourceIdArg(
options=["--key-vault-id"],
arg_group="SecretsProfile",
help="Name of the user Key Vault where all the cluster specific user secrets are stored.",
)
_args_schema.secret_reference = AAZListArg(
options=["--secret-reference"],
arg_group="SecretsProfile",
help="Properties of Key Vault secret.",
nullable=True,
)
secret_reference = cls._args_schema.secret_reference
secret_reference.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.secret_reference.Element
_element.secret_name = AAZStrArg(
options=["secret-name"],
help="Object identifier name of the secret in key vault.",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z][a-zA-Z0-9-]{1,126}$",
),
)
_element.reference_name = AAZStrArg(
options=["reference-name"],
help="Reference name of the secret to be used in service configs.",
)
_element.type = AAZStrArg(
options=["type"],
help="Type of key vault object: secret, key or certificate.",
enum={"Certificate": "Certificate", "Key": "Key", "Secret": "Secret"},
)
_element.version = AAZStrArg(
options=["version"],
help="Version of the secret in key vault.",
nullable=True,
)
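# An illustrative SecretsProfile update in AAZ shorthand syntax (the Key
# Vault resource ID and secret names are placeholders):
#   --key-vault-id "/subscriptions/.../resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myVault" \
#   --secret-reference "[{secret-name:db-password,reference-name:dbPassword,type:Secret}]"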
# define Arg Group "SparkProfile"
_args_schema = cls._args_schema
_args_schema.db_connection_authentication_mode = AAZStrArg(
options=["--spark-db-auth-mode", "--db-connection-authentication-mode"],
arg_group="SparkProfile",
help="The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization",
nullable=True,
enum={"IdentityAuth": "IdentityAuth", "SqlAuth": "SqlAuth"},
)
_args_schema.spark_hive_catalog_db_name = AAZStrArg(
options=["--spark-hive-db-name", "--spark-hive-catalog-db-name"],
arg_group="SparkProfile",
help="The database name.",
)
_args_schema.spark_hive_catalog_db_password_secret = AAZStrArg(
options=["--spark-hive-db-secret", "--spark-hive-catalog-db-password-secret"],
arg_group="SparkProfile",
help="The secret name which contains the database user password.",
nullable=True,
)
_args_schema.spark_hive_catalog_db_server_name = AAZStrArg(
options=["--spark-hive-db-server", "--spark-hive-catalog-db-server-name"],
arg_group="SparkProfile",
help="The database server host.",
)
_args_schema.spark_hive_catalog_db_user_name = AAZStrArg(
options=["--spark-hive-db-user", "--spark-hive-catalog-db-user-name"],
arg_group="SparkProfile",
help="The database user name.",
nullable=True,
)
_args_schema.spark_hive_catalog_key_vault_id = AAZStrArg(
options=["--spark-hive-kv-id", "--spark-hive-catalog-key-vault-id"],
arg_group="SparkProfile",
help="The key vault resource id.",
nullable=True,
)
_args_schema.spark_hive_catalog_thrift_url = AAZStrArg(
options=["--spark-hive-thrift-url", "--spark-hive-catalog-thrift-url"],
arg_group="SparkProfile",
help="The thrift url.",
nullable=True,
)
# define Arg Group "SshProfile"
_args_schema = cls._args_schema
_args_schema.vm_size = AAZStrArg(
options=["--vm-size"],
arg_group="SshProfile",
help="The virtual machine SKU.",
nullable=True,
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9_\-]{0,256}$",
),
)
# define Arg Group "TrinoClusterWorker"
_args_schema = cls._args_schema
_args_schema.enable_worker_debug = AAZBoolArg(
options=["--enable-worker-debug"],
arg_group="TrinoClusterWorker",
help="The flag that if trino cluster enable debug or not. Default: false.",
nullable=True,
)
_args_schema.worker_debug_port = AAZIntArg(
options=["--worker-debug-port"],
arg_group="TrinoClusterWorker",
help="The debug port. Default: 8008.",
nullable=True,
)
_args_schema.worker_debug_suspend = AAZBoolArg(
options=["--worker-debug-suspend"],
arg_group="TrinoClusterWorker",
help="The flag that if trino cluster suspend debug or not. Default: false.",
nullable=True,
)
# define Arg Group "TrinoHiveCatalog"
_args_schema = cls._args_schema
_args_schema.trino_hive_catalog = AAZListArg(
options=["--trino-hive-catalog"],
arg_group="TrinoHiveCatalog",
help="hive catalog options.",
nullable=True,
)
trino_hive_catalog = cls._args_schema.trino_hive_catalog
trino_hive_catalog.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.trino_hive_catalog.Element
_element.catalog_name = AAZStrArg(
options=["catalog-name"],
help="Name of trino catalog which should use specified hive metastore.",
fmt=AAZStrArgFormat(
min_length=1,
),
)
_element.metastore_db_connection_authentication_mode = AAZStrArg(
options=["metastore-db-connection-authentication-mode"],
help="The authentication mode to connect to your Hive metastore database. More details: https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization",
nullable=True,
enum={"IdentityAuth": "IdentityAuth", "SqlAuth": "SqlAuth"},
)
_element.metastore_db_connection_password_secret = AAZStrArg(
options=["metastore-db-connection-password-secret"],
help="Secret reference name from secretsProfile.secrets containing password for database connection.",
nullable=True,
)
_element.metastore_db_connection_url = AAZStrArg(
options=["metastore-db-connection-url"],
help="Connection string for hive metastore database.",
)
_element.metastore_db_connection_user_name = AAZStrArg(
options=["metastore-db-connection-user-name"],
help="User name for database connection.",
nullable=True,
)
_element.metastore_warehouse_dir = AAZStrArg(
options=["metastore-warehouse-dir"],
help="Metastore root directory URI, format: abfs[s]://<container>@<account_name>.dfs.core.windows.net/<path>. More details: https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri",
)
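# An illustrative --trino-hive-catalog value in AAZ shorthand syntax
# (connection details and directories are placeholders):
#   --trino-hive-catalog "[{catalog-name:myhive,metastore-db-connection-url:'jdbc:sqlserver://myserver.database.windows.net;database=hivedb',metastore-db-connection-authentication-mode:IdentityAuth,metastore-warehouse-dir:'abfs://container@account.dfs.core.windows.net/warehouse'}]"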
# define Arg Group "TrinoProfile"
_args_schema = cls._args_schema
_args_schema.trino_profile_user_plugins_plugin_spec = AAZObjectArg(
options=["--trino-plugins-spec", "--trino-profile-user-plugins-plugin-spec"],
arg_group="TrinoProfile",
help="Trino user plugins spec",
nullable=True,
)
_args_schema.trino_profile_user_plugins_telemetry_spec = AAZObjectArg(
options=["--trino-telemetry-spec", "--trino-profile-user-plugins-telemetry-spec"],
arg_group="TrinoProfile",
help="Trino user telemetry spec.",
nullable=True,
)
trino_profile_user_plugins_plugin_spec = cls._args_schema.trino_profile_user_plugins_plugin_spec
trino_profile_user_plugins_plugin_spec.plugins = AAZListArg(
options=["plugins"],
help="Trino user plugins.",
nullable=True,
)
plugins = cls._args_schema.trino_profile_user_plugins_plugin_spec.plugins
plugins.Element = AAZObjectArg(
nullable=True,
)
_element = cls._args_schema.trino_profile_user_plugins_plugin_spec.plugins.Element
_element.enabled = AAZBoolArg(
options=["enabled"],
help="Denotes whether the plugin is active or not.",
nullable=True,
)
_element.name = AAZStrArg(
options=["name"],
help="This field maps to the sub-directory in trino plugins location, that will contain all the plugins under path.",
nullable=True,
fmt=AAZStrArgFormat(
min_length=1,
),
)
_element.path = AAZStrArg(
options=["path"],
help="Fully qualified path to the folder containing the plugins.",
nullable=True,
fmt=AAZStrArgFormat(
pattern="^(https)|(abfss)://.*$",
min_length=1,
),
)
trino_profile_user_plugins_telemetry_spec = cls._args_schema.trino_profile_user_plugins_telemetry_spec
trino_profile_user_plugins_telemetry_spec.storage = AAZObjectArg(
options=["storage"],
help="Trino user telemetry definition.",
nullable=True,
)
storage = cls._args_schema.trino_profile_user_plugins_telemetry_spec.storage
storage.hivecatalog_name = AAZStrArg(
options=["hivecatalog-name"],
help="Hive Catalog name used to mount external tables on the logs written by trino, if not specified there tables are not created.",
nullable=True,
fmt=AAZStrArgFormat(
min_length=1,
),
)
storage.hivecatalog_schema = AAZStrArg(
options=["hivecatalog-schema"],
help="Schema of the above catalog to use, to mount query logs as external tables, if not specified tables will be mounted under schema trinologs.",
nullable=True,
)
storage.partition_retention_in_days = AAZIntArg(
options=["partition-retention-in-days"],
help="Retention period for query log table partitions, this doesn't have any affect on actual data.",
nullable=True,
)
storage.path = AAZStrArg(
options=["path"],
help="Azure storage location of the blobs.",
nullable=True,
fmt=AAZStrArgFormat(
min_length=1,
),
)
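# An illustrative --trino-telemetry-spec value in AAZ shorthand syntax (the
# storage path and catalog name are placeholders):
#   --trino-telemetry-spec "{storage:{path:'abfs://container@account.dfs.core.windows.net/trinologs',hivecatalog-name:myhive,partition-retention-in-days:30}}"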
return cls._args_schema