# parts/k8s/addons/calico.yaml

{{- /* Source: calico/templates/calico-config.yaml This ConfigMap is used to configure a self-hosted Calico installation. */}}
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
data:
  {{- /* You must set a non-zero value for Typha replicas below. */}}
  typha_service_name: "calico-typha"
  {{- /* The CNI network configuration to install on each node. The special values in this config will be automatically populated. */}}
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": 1500,
          {{- if not IsAzureCNI}}
          "ipam": {
            "type": "host-local",
            "subnet": "usePodCidr"
          },
          {{- end}}
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---
{{- /* Source: calico/templates/kdd-crds.yaml */}}
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: felixconfigurations.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy
---
apiVersion: {{GetCRDAPIVersion}}
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkSet
    plural: networksets
    singular: networkset
---
{{- /* Source: calico/templates/rbac.yaml Include a clusterrole for the calico-node DaemonSet, and bind it to the calico-node serviceaccount. */}}
kind: ClusterRole
apiVersion: {{GetRBACAPIVersion}}
metadata:
  name: calico-node
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
rules:
  {{- /* The CNI plugin needs to get pods, nodes, and namespaces. */}}
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      {{- /* Used to discover service IPs for advertisement. */}}
      - watch
      - list
      {{- /* Used to discover Typhas. */}}
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      {{- /* Needed for clearing NodeNetworkUnavailable flag. */}}
      - patch
      {{- /* Calico stores some configuration information in node annotations. */}}
      - update
  {{- /* Watch for changes to Kubernetes NetworkPolicies. */}}
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  {{- /* Used by Calico for policy information. */}}
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  {{- /* The CNI plugin patches pods/status. */}}
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  {{- /* Calico monitors various CRDs for config. */}}
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
    verbs:
      - get
      - list
      - watch
  {{- /* Calico must create and update some CRDs on startup. */}}
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  {{- /* Calico stores some configuration information on the node. */}}
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  {{- /* These permissions are only required for upgrade from v2.6, and can be removed after upgrade or on fresh installations. */}}
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
---
apiVersion: {{GetRBACAPIVersion}}
kind: ClusterRoleBinding
metadata:
  name: calico-node
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system
---
{{- /* Source: calico/templates/calico-typha.yaml This manifest creates a Service, which will be backed by Calico's Typha daemon. Typha sits in between Felix and the API server, reducing Calico's load on the API server. */}}
apiVersion: v1
kind: Service
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  ports:
    - port: 5473
      protocol: TCP
      targetPort: calico-typha
      name: calico-typha
  selector:
    k8s-app: calico-typha
---
{{- /* This manifest creates a Deployment of Typha to back the above service. */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  {{- /* Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the typha_service_name variable in the calico-config ConfigMap above. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. */}}
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      k8s-app: calico-typha
  template:
    metadata:
      labels:
        k8s-app: calico-typha
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        {{- /* Mark the pod as a critical add-on for rescheduling. */}}
        - key: CriticalAddonsOnly
          operator: Exists
      {{- /* Since Calico can't network a pod until Typha is up, we need to run Typha itself as a host-networked pod. */}}
      serviceAccountName: calico-node
      containers:
        - image: {{ContainerImage "calico-typha"}}
          name: calico-typha
          ports:
            - containerPort: 5473
              name: calico-typha
              protocol: TCP
          env:
            {{- /* Enable "info" logging by default. Can be set to "debug" to increase verbosity. */}}
            - name: TYPHA_LOGSEVERITYSCREEN
              value: "info"
            {{- /* Disable logging to file and syslog since those don't make sense in Kubernetes. */}}
            - name: TYPHA_LOGFILEPATH
              value: "none"
            - name: TYPHA_LOGSEVERITYSYS
              value: "none"
            {{- /* Monitor the Kubernetes API to find the number of running instances and rebalance connections. */}}
            - name: TYPHA_CONNECTIONREBALANCINGMODE
              value: "kubernetes"
            - name: TYPHA_DATASTORETYPE
              value: "kubernetes"
            - name: TYPHA_HEALTHENABLED
              value: "true"
            {{- /* Configure route aggregation based on pod CIDR. */}}
            - name: USE_POD_CIDR
              value: "true"
            - name: FELIX_INTERFACEPREFIX
              value: "{{if IsAzureCNI}}azv{{else}}cali{{end}}"
            # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
            # this opens a port on the host, which may need to be secured.
            #- name: TYPHA_PROMETHEUSMETRICSENABLED
            #  value: "true"
            #- name: TYPHA_PROMETHEUSMETRICSPORT
            #  value: "9093"
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9098
              host: localhost
            periodSeconds: 30
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9098
              host: localhost
            periodSeconds: 10
---
{{- /* Source: calico/templates/calico-node.yaml This manifest installs the calico-node container, as well as the CNI plugins and network config on each master and worker node in a Kubernetes cluster. */}}
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 50%
  template:
    metadata:
      labels:
        k8s-app: calico-node
      {{- if IsKubernetesVersionGe "1.17.0"}}
      annotations:
        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
      {{- end}}
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        {{- /* Make sure calico-node gets scheduled on all nodes. */}}
        - effect: NoSchedule
          operator: Exists
        {{- /* Mark the pod as a critical add-on for rescheduling. */}}
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      {{- /* Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. */}}
      terminationGracePeriodSeconds: 0
      initContainers:
        {{- /* This container installs the CNI binaries and CNI network config file on each node. */}}
        - name: install-cni
          image: {{ContainerImage "calico-cni"}}
          command: ["/install-cni.sh"]
          env:
            {{- /* Name of the CNI config file to create. */}}
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            {{- /* The CNI network config to install on each node. */}}
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            {{- /* Set the hostname based on the k8s node name. */}}
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            {{- /* Prevents the container from sleeping forever. */}}
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      containers:
        {{- /* Runs calico-node container on each Kubernetes node. This container programs network policy and routes on each host. */}}
        - name: calico-node
          image: {{ContainerImage "calico-node"}}
          env:
            {{- /* Use Kubernetes API as the backing datastore. */}}
            - name: DATASTORE_TYPE
              value: "kubernetes"
            {{- /* Configure route aggregation based on pod CIDR. */}}
            - name: USE_POD_CIDR
              value: "true"
            {{- /* Typha support: controlled by the ConfigMap. */}}
            - name: FELIX_TYPHAK8SSERVICENAME
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: typha_service_name
            {{- /* Wait for the datastore. */}}
            - name: WAIT_FOR_DATASTORE
              value: "true"
            {{- /* Set based on the k8s node name. */}}
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            {{- /* Don't enable BGP. */}}
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            {{- /* Cluster type to identify the deployment type */}}
            - name: CLUSTER_TYPE
              value: "k8s"
            {{- /* The default IPv4 pool to create on startup if none exists. Pod IPs will be chosen from this range. Changing this value after installation will have no effect. This should fall within `--cluster-cidr`. */}}
            - name: CALICO_IPV4POOL_CIDR
              value: "{{GetClusterSubnet}}"
            {{- /* Disable file logging so `kubectl logs` works. */}}
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            {{- /* Set Felix endpoint to host default action to ACCEPT. */}}
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            {{- /* Disable IPv6 on Kubernetes. */}}
            - name: FELIX_IPV6SUPPORT
              value: "false"
            {{- /* Set Felix logging to "info" */}}
            - name: FELIX_LOGSEVERITYSCREEN
              value: {{ContainerConfig "logSeverityScreen"}}
            - name: FELIX_HEALTHENABLED
              value: "true"
            - name: CALICO_IPV4POOL_IPIP
              value: "off"
            - name: FELIX_INTERFACEPREFIX
              value: "{{if IsAzureCNI}}azv{{else}}cali{{end}}"
            - name: FELIX_USAGEREPORTINGENABLED
              value: "{{ContainerConfig "usageReportingEnabled"}}"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
      volumes:
        {{- /* Used by calico-node. */}}
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        {{- /* Used to install CNI. */}}
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: "EnsureExists"
---
{{- /* Typha Horizontal Autoscaler ConfigMap */}}
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
data:
  ladder: |-
    {
      "coresToReplicas": [],
      "nodesToReplicas": [
        [1, 1],
        [10, 2],
        [100, 3],
        [250, 4],
        [500, 5],
        [1000, 6],
        [1500, 7],
        [2000, 8]
      ]
    }
---
{{- /* Typha Horizontal Autoscaler Deployment */}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha-horizontal-autoscaler
  namespace: kube-system
  labels:
    k8s-app: calico-typha-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-typha-autoscaler
  template:
    metadata:
      labels:
        k8s-app: calico-typha-autoscaler
    spec:
      priorityClassName: system-cluster-critical
      securityContext:
        supplementalGroups: [65534]
        fsGroup: 65534
      containers:
        - image: {{ContainerImage "calico-cluster-proportional-autoscaler"}}
          name: autoscaler
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=calico-typha-horizontal-autoscaler
            - --target=deployment/calico-typha
            - --logtostderr=true
            - --v=2
          resources:
            requests:
              cpu: 10m
            limits:
              cpu: 10m
      serviceAccountName: typha-cpha
---
{{- /* Typha Horizontal Autoscaler Cluster Role */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
---
{{- /* Typha Horizontal Autoscaler Cluster Role Binding */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: typha-cpha
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: typha-cpha
subjects:
  - kind: ServiceAccount
    name: typha-cpha
    namespace: kube-system
---
{{- /* Typha Horizontal Autoscaler Role */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale"]
    verbs: ["get", "update"]
---
{{- /* Typha Horizontal Autoscaler Role Binding */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: "EnsureExists"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: typha-cpha
subjects:
  - kind: ServiceAccount
    name: typha-cpha
    namespace: kube-system
---
{{- /* Typha Horizontal Autoscaler Service Account */}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: typha-cpha
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"