# v4/main.tf
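# State move: the SSH key previously lived in a child module ("ssh-key"); this moved block
# migrates existing state into the root module's tls_private_key.ssh[0] so upgrading the
# module does not destroy and recreate the key.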
moved {
from = module.ssh-key.tls_private_key.ssh
to = tls_private_key.ssh[0]
}
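# Fallback SSH key: generated only when admin_username is set. The linux_profile block
# below prefers var.public_ssh_key and falls back to this generated key.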
resource "tls_private_key" "ssh" {
count = var.admin_username == null ? 0 : 1
algorithm = "RSA"
rsa_bits = 2048
}
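# The main AKS cluster. When cluster_name_random_suffix is enabled, a 4-character suffix
# derived from md5(uuid()) is appended to the name; the resulting drift is absorbed by
# ignore_changes = [name] in the lifecycle block below.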
resource "azurerm_kubernetes_cluster" "main" {
location = var.location
name = "${local.cluster_name}${var.cluster_name_random_suffix ? substr(md5(uuid()), 0, 4) : ""}"
resource_group_name = var.resource_group_name
azure_policy_enabled = var.azure_policy_enabled
cost_analysis_enabled = var.cost_analysis_enabled
disk_encryption_set_id = var.disk_encryption_set_id
dns_prefix = var.prefix
dns_prefix_private_cluster = var.dns_prefix_private_cluster
image_cleaner_enabled = var.image_cleaner_enabled
image_cleaner_interval_hours = var.image_cleaner_interval_hours
kubernetes_version = var.kubernetes_version
local_account_disabled = var.local_account_disabled
node_resource_group = var.node_resource_group
oidc_issuer_enabled = var.oidc_issuer_enabled
open_service_mesh_enabled = var.open_service_mesh_enabled
private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
private_dns_zone_id = var.private_dns_zone_id
role_based_access_control_enabled = var.role_based_access_control_enabled
run_command_enabled = var.run_command_enabled
sku_tier = var.sku_tier
support_plan = var.support_plan
tags = var.tags
workload_identity_enabled = var.workload_identity_enabled
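# Exactly one of the two default_node_pool blocks below is rendered, keyed on
# var.enable_auto_scaling. This manually scaled variant pins node_count and nulls
# min_count/max_count.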
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
fips_enabled = var.default_node_pool_fips_enabled
max_count = null
max_pods = var.agents_max_pods
min_count = null
node_count = var.agents_count
node_labels = var.agents_labels
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = try(var.pod_subnet.id, null)
proximity_placement_group_id = var.agents_proximity_placement_group_id
scale_down_mode = var.scale_down_mode
snapshot_id = var.snapshot_id
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = try(var.vnet_subnet.id, null)
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
dynamic "node_network_profile" {
for_each = var.node_network_profile == null ? [] : [var.node_network_profile]
content {
application_security_group_ids = node_network_profile.value.application_security_group_ids
node_public_ip_tags = node_network_profile.value.node_public_ip_tags
dynamic "allowed_host_ports" {
for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports
content {
port_end = allowed_host_ports.value.port_end
port_start = allowed_host_ports.value.port_start
protocol = allowed_host_ports.value.protocol
}
}
}
}
dynamic "upgrade_settings" {
for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
content {
max_surge = var.agents_pool_max_surge
drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
}
}
}
}
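# Autoscaled variant: sets min_count/max_count and omits node_count so the cluster
# autoscaler owns the pool size.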
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
fips_enabled = var.default_node_pool_fips_enabled
max_count = var.agents_max_count
max_pods = var.agents_max_pods
min_count = var.agents_min_count
node_labels = var.agents_labels
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = try(var.pod_subnet.id, null)
proximity_placement_group_id = var.agents_proximity_placement_group_id
scale_down_mode = var.scale_down_mode
snapshot_id = var.snapshot_id
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = try(var.vnet_subnet.id, null)
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
dynamic "upgrade_settings" {
for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
content {
max_surge = var.agents_pool_max_surge
drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
}
}
}
}
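# Optional add-ons and profiles: each dynamic block below is rendered only when its
# feature flag or object variable is set.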
dynamic "aci_connector_linux" {
for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : []
content {
subnet_name = var.aci_connector_linux_subnet_name
}
}
dynamic "api_server_access_profile" {
for_each = var.api_server_authorized_ip_ranges != null ? [
"api_server_access_profile"
] : []
content {
authorized_ip_ranges = var.api_server_authorized_ip_ranges
}
}
dynamic "auto_scaler_profile" {
for_each = var.auto_scaler_profile_enabled ? ["default_auto_scaler_profile"] : []
content {
balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups
empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max
expander = var.auto_scaler_profile_expander
max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec
max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time
max_unready_nodes = var.auto_scaler_profile_max_unready_nodes
max_unready_percentage = var.auto_scaler_profile_max_unready_percentage
new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay
scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add
scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete
scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure
scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded
scale_down_unready = var.auto_scaler_profile_scale_down_unready
scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold
scan_interval = var.auto_scaler_profile_scan_interval
skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage
skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods
}
}
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : []
content {
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled
managed = true
tenant_id = var.rbac_aad_tenant_id
}
}
dynamic "confidential_computing" {
for_each = var.confidential_computing == null ? [] : [var.confidential_computing]
content {
sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled
}
}
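# If only one of http_proxy/https_proxy is supplied, coalesce mirrors it into the other
# so both fields are always populated.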
dynamic "http_proxy_config" {
for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"]
content {
http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)
https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy)
no_proxy = var.http_proxy_config.no_proxy
trusted_ca = var.http_proxy_config.trusted_ca
}
}
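# Managed identity and service_principal (further below) are mutually exclusive:
# identity is used only when client_id/client_secret are not both provided.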
dynamic "identity" {
for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : []
content {
type = var.identity_type
identity_ids = var.identity_ids
}
}
dynamic "ingress_application_gateway" {
for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : []
content {
gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null)
gateway_name = try(var.green_field_application_gateway_for_ingress.name, null)
subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null)
subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null)
}
}
dynamic "key_management_service" {
for_each = var.kms_enabled ? ["key_management_service"] : []
content {
key_vault_key_id = var.kms_key_vault_key_id
key_vault_network_access = var.kms_key_vault_network_access
}
}
dynamic "key_vault_secrets_provider" {
for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : []
content {
secret_rotation_enabled = var.secret_rotation_enabled
secret_rotation_interval = var.secret_rotation_interval
}
}
dynamic "kubelet_identity" {
for_each = var.kubelet_identity == null ? [] : [var.kubelet_identity]
content {
client_id = kubelet_identity.value.client_id
object_id = kubelet_identity.value.object_id
user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id
}
}
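# Prefers the user-supplied public key, falling back to the generated one; newlines are
# stripped to keep key_data a single line.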
dynamic "linux_profile" {
for_each = var.admin_username == null ? [] : ["linux_profile"]
content {
admin_username = var.admin_username
ssh_key {
key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "")
}
}
}
dynamic "maintenance_window" {
for_each = var.maintenance_window != null ? ["maintenance_window"] : []
content {
dynamic "allowed" {
for_each = var.maintenance_window.allowed
content {
day = allowed.value.day
hours = allowed.value.hours
}
}
dynamic "not_allowed" {
for_each = var.maintenance_window.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "maintenance_window_auto_upgrade" {
for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade]
content {
duration = maintenance_window_auto_upgrade.value.duration
frequency = maintenance_window_auto_upgrade.value.frequency
interval = maintenance_window_auto_upgrade.value.interval
day_of_month = maintenance_window_auto_upgrade.value.day_of_month
day_of_week = maintenance_window_auto_upgrade.value.day_of_week
start_date = maintenance_window_auto_upgrade.value.start_date
start_time = maintenance_window_auto_upgrade.value.start_time
utc_offset = maintenance_window_auto_upgrade.value.utc_offset
week_index = maintenance_window_auto_upgrade.value.week_index
dynamic "not_allowed" {
for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? [] : maintenance_window_auto_upgrade.value.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "maintenance_window_node_os" {
for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os]
content {
duration = maintenance_window_node_os.value.duration
frequency = maintenance_window_node_os.value.frequency
interval = maintenance_window_node_os.value.interval
day_of_month = maintenance_window_node_os.value.day_of_month
day_of_week = maintenance_window_node_os.value.day_of_week
start_date = maintenance_window_node_os.value.start_date
start_time = maintenance_window_node_os.value.start_time
utc_offset = maintenance_window_node_os.value.utc_offset
week_index = maintenance_window_node_os.value.week_index
dynamic "not_allowed" {
for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "microsoft_defender" {
for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
}
}
dynamic "monitor_metrics" {
for_each = var.monitor_metrics != null ? ["monitor_metrics"] : []
content {
annotations_allowed = var.monitor_metrics.annotations_allowed
labels_allowed = var.monitor_metrics.labels_allowed
}
}
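# Network profile: load_balancer_profile is only valid with the standard load balancer
# SKU (also enforced by a precondition below).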
network_profile {
network_plugin = var.network_plugin
dns_service_ip = var.net_profile_dns_service_ip
ebpf_data_plane = var.ebpf_data_plane
ip_versions = var.network_ip_versions
load_balancer_sku = var.load_balancer_sku
network_data_plane = var.network_data_plane
network_mode = var.network_mode
network_plugin_mode = var.network_plugin_mode
network_policy = var.network_policy
outbound_type = var.net_profile_outbound_type
pod_cidr = var.net_profile_pod_cidr
pod_cidrs = var.net_profile_pod_cidrs
service_cidr = var.net_profile_service_cidr
service_cidrs = var.net_profile_service_cidrs
dynamic "load_balancer_profile" {
for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [
"load_balancer_profile"
] : []
content {
idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes
managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count
managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count
outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids
outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids
outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated
}
}
dynamic "nat_gateway_profile" {
for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile]
content {
idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes
managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count
}
}
}
dynamic "oms_agent" {
for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? ["oms_agent"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled
}
}
dynamic "service_mesh_profile" {
for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"]
content {
mode = var.service_mesh_profile.mode
external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled
internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled
}
}
dynamic "service_principal" {
for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : []
content {
client_id = var.client_id
client_secret = var.client_secret
}
}
dynamic "storage_profile" {
for_each = var.storage_profile_enabled ? ["storage_profile"] : []
content {
blob_driver_enabled = var.storage_profile_blob_driver_enabled
disk_driver_enabled = var.storage_profile_disk_driver_enabled
disk_driver_version = var.storage_profile_disk_driver_version
file_driver_enabled = var.storage_profile_file_driver_enabled
snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled
}
}
dynamic "web_app_routing" {
for_each = var.web_app_routing == null ? [] : ["web_app_routing"]
content {
dns_zone_ids = var.web_app_routing.dns_zone_ids
}
}
dynamic "workload_autoscaler_profile" {
for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile]
content {
keda_enabled = workload_autoscaler_profile.value.keda_enabled
vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled
}
}
depends_on = [
null_resource.pool_name_keeper,
]
lifecycle {
ignore_changes = [
http_application_routing_enabled,
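# no_proxy and kubernetes_version are managed out of band by the azapi_update_resource
# resources at the end of this file, so drift on them is ignored here.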
http_proxy_config[0].no_proxy,
kubernetes_version,
# The cluster name may carry a random suffix, so we must ignore name changes here; the user-supplied name is tracked by `null_resource.kubernetes_cluster_name_keeper`, which forces recreation of this resource whenever it changes.
name,
]
replace_triggered_by = [
null_resource.kubernetes_cluster_name_keeper.id
]
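# Plan-time validation of mutually exclusive and co-dependent variables.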
precondition {
condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "")
error_message = "Either `client_id` and `client_secret` or `identity_type` must be set."
}
precondition {
# Why not use `var.identity_ids != null && length(var.identity_ids) > 0`? Because boolean expressions in Terraform are not short-circuited, so even when `var.identity_ids` is null Terraform would still call `length(null)` and raise an error. https://github.com/hashicorp/terraform/issues/24128
condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0)
error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well."
}
precondition {
condition = var.identity_ids == null || var.client_id == ""
error_message = "Cannot set both `client_id` and `identity_ids`."
}
precondition {
condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium")
error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled."
}
precondition {
condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled)
error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true."
}
precondition {
condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard")
error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`"
}
precondition {
condition = local.automatic_channel_upgrade_check
error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`."
}
precondition {
condition = !(var.kms_enabled && var.identity_type != "UserAssigned")
error_message = "KMS etcd encryption doesn't work with system-assigned managed identity."
}
precondition {
condition = !var.workload_identity_enabled || var.oidc_issuer_enabled
error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity"
}
precondition {
condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure"
error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure."
}
precondition {
condition = var.network_policy != "azure" || var.network_plugin == "azure"
error_message = "network_policy must be `azure` when network_plugin is `azure`"
}
precondition {
condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure"
error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure."
}
precondition {
condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null
error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified."
}
precondition {
condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster))
error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`."
}
precondition {
condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage"
error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`."
}
precondition {
condition = (var.kubelet_identity == null) || (
(var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0)
error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set."
}
precondition {
condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets"
error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes."
}
precondition {
condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null
error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`."
}
precondition {
condition = var.prefix == null || var.dns_prefix_private_cluster == null
error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified."
}
precondition {
condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != ""
error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone"
}
precondition {
condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)]))
error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following format: `privatelink.<region>.azmk8s.io`, `<subzone>.privatelink.<region>.azmk8s.io`, `private.<region>.azmk8s.io`, `<subzone>.private.<region>.azmk8s.io`"
}
}
}
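# Keeper resources: each tracks a value in its triggers so that dependents declared with
# replace_triggered_by are recreated when that value changes.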
resource "null_resource" "kubernetes_cluster_name_keeper" {
triggers = {
name = local.cluster_name
}
}
resource "null_resource" "kubernetes_version_keeper" {
triggers = {
version = var.kubernetes_version
}
}
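# Optional pause between cluster creation and the post-create version update; replaced
# (and thus re-run) whenever the tracked kubernetes_version changes.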
resource "time_sleep" "interval_before_cluster_update" {
count = var.interval_before_cluster_update == null ? 0 : 1
create_duration = var.interval_before_cluster_update
depends_on = [
azurerm_kubernetes_cluster.main,
]
lifecycle {
replace_triggered_by = [
null_resource.kubernetes_version_keeper.id,
]
}
}
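# Re-applies kubernetesVersion through the ARM API after creation. Because
# kubernetes_version is in ignore_changes above, version bumps flow through this resource
# instead, triggered by the version keeper.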
resource "azapi_update_resource" "aks_cluster_post_create" {
type = "Microsoft.ContainerService/managedClusters@2024-02-01"
body = {
properties = {
kubernetesVersion = var.kubernetes_version
}
}
resource_id = azurerm_kubernetes_cluster.main.id
depends_on = [
time_sleep.interval_before_cluster_update,
]
lifecycle {
ignore_changes = all
replace_triggered_by = [null_resource.kubernetes_version_keeper.id]
}
}
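# Tracks the sorted no_proxy list so the update below re-runs only when its contents
# actually change.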
resource "null_resource" "http_proxy_config_no_proxy_keeper" {
count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
triggers = {
http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "")
}
}
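# Pushes no_proxy changes through the ARM API; the azurerm resource ignores
# http_proxy_config[0].no_proxy (see lifecycle above), so this is the only path that
# updates it.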
resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" {
count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
type = "Microsoft.ContainerService/managedClusters@2024-02-01"
body = {
properties = {
httpProxyConfig = {
noProxy = var.http_proxy_config.no_proxy
}
}
}
resource_id = azurerm_kubernetes_cluster.main.id
depends_on = [azapi_update_resource.aks_cluster_post_create]
lifecycle {
ignore_changes = all
replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id]
}
}
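# A minimal usage sketch (hypothetical caller; assumes this file is consumed as a module
# and that an azurerm_resource_group.example already exists, with all values below being
# illustrative only):
#
# module "aks" {
#   source              = "./v4"
#   prefix              = "demoaks"
#   resource_group_name = azurerm_resource_group.example.name
#   location            = azurerm_resource_group.example.location
#   agents_size         = "Standard_D2s_v3"
#   enable_auto_scaling = true
#   agents_min_count    = 1
#   agents_max_count    = 3
# }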