in pkg/manifests/nginx.go [378:497]
// newNginxIngressControllerDeployment builds the Deployment manifest for the
// nginx ingress controller described by ingressConfig, placed in conf.NS and
// pulling its image from conf.Registry.
func newNginxIngressControllerDeployment(conf *config.Config, ingressConfig *NginxIngressConfig) *appsv1.Deployment {
	// Deployment-level labels carry only the component marker; pod labels
	// additionally merge the per-ingress labels that the selector matches on.
	deployLabels := AddComponentLabel(GetTopLevelLabels(), IngressControllerComponentName)
	podLabels := AddComponentLabel(GetTopLevelLabels(), IngressControllerComponentName)
	for key, val := range ingressConfig.PodLabels() {
		podLabels[key] = val
	}

	annotations := make(map[string]string)
	if !conf.DisableOSM {
		// Keep the OSM sidecar out of the controller pod when OSM is in play.
		annotations["openservicemesh.io/sidecar-injection"] = "disabled"
	}
	for key, val := range promAnnotations {
		annotations[key] = val
	}

	sel := &metav1.LabelSelector{MatchLabels: ingressConfig.PodLabels()}

	args := []string{
		"/nginx-ingress-controller",
		"--ingress-class=" + ingressConfig.IcName,
		"--controller-class=" + ingressConfig.ControllerClass,
		"--election-id=" + ingressConfig.ResourceName,
		"--publish-service=$(POD_NAMESPACE)/" + ingressConfig.ResourceName,
		"--configmap=$(POD_NAMESPACE)/" + ingressConfig.ResourceName,
		"--enable-annotation-validation=true",
		// https://cloud-provider-azure.sigs.k8s.io/topics/loadbalancer/#custom-load-balancer-health-probe
		// load balancer health probe checks in 5 second intervals. It requires 2 failing probes to fail so we need at least 10s of grace period.
		// we set it to 15s to be safe. Without this Nginx process exits but the LoadBalancer continues routing to the Pod until two health checks fail.
		"--shutdown-grace-period=15",
	}
	if ingressConfig.DefaultSSLCertificate != "" {
		args = append(args, "--default-ssl-certificate="+ingressConfig.DefaultSSLCertificate)
	}
	if ingressConfig.DefaultBackendService != "" {
		args = append(args, "--default-backend-service="+ingressConfig.DefaultBackendService)
	}

	// Base controller container; readiness/liveness probes and pod-ref env
	// vars are layered on by the decorator helpers below.
	controller := &corev1.Container{
		Name:  "controller",
		Image: path.Join(conf.Registry, "/oss/kubernetes/ingress/nginx-ingress-controller:"+ingressConfig.Version.tag),
		Args:  args,
		SecurityContext: &corev1.SecurityContext{
			AllowPrivilegeEscalation: util.ToPtr(false),
			Capabilities: &corev1.Capabilities{
				Add:  []corev1.Capability{"NET_BIND_SERVICE"}, // needed to bind to 80/443 ports https://github.com/kubernetes/ingress-nginx/blob/ca6d3622e5c2819a29f4a407ed272f42d10a91a9/docs/troubleshooting.md?plain=1#L369
				Drop: []corev1.Capability{"ALL"},
			},
			RunAsNonRoot: util.ToPtr(true),
			RunAsUser:    util.Int64Ptr(101),
			SeccompProfile: &corev1.SeccompProfile{
				Type: corev1.SeccompProfileTypeRuntimeDefault,
			},
		},
		Ports: []corev1.ContainerPort{
			{
				Name:          "https",
				ContainerPort: 443,
			},
			promPodPort,
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("127Mi"),
			},
		},
	}
	controller = withTypicalReadinessProbe(10254, controller)
	controller = withLivenessProbeMatchingReadinessNewFailureThresh(controller, 6)
	controller = withPodRefEnvVars(controller)

	dep := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      ingressConfig.ResourceName,
			Namespace: conf.NS,
			Labels:    deployLabels,
		},
		Spec: appsv1.DeploymentSpec{
			RevisionHistoryLimit: util.Int32Ptr(2),
			Selector:             sel,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      podLabels,
					Annotations: annotations,
				},
				Spec: *WithPreferSystemNodes(&corev1.PodSpec{
					TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "kubernetes.io/hostname", // spread across nodes
							WhenUnsatisfiable: corev1.ScheduleAnyway,
							LabelSelector:     sel,
							MatchLabelKeys: []string{
								// https://kubernetes.io/blog/2024/08/16/matchlabelkeys-podaffinity/
								// evaluate only pods of the same version (mostly applicable to rollouts)
								"pod-template-hash",
							},
						},
					},
					ServiceAccountName: ingressConfig.ResourceName,
					Containers:         []corev1.Container{*controller},
				}),
			},
		},
	}

	if !ingressConfig.HTTPDisabled {
		// Prepend the plain-HTTP port so it sits ahead of https and the
		// prometheus port in the container's port list.
		ports := &dep.Spec.Template.Spec.Containers[0].Ports
		*ports = append([]corev1.ContainerPort{{Name: "http", ContainerPort: 80}}, *ports...)
	}
	return dep
}