# ARO-HCP kube-prometheus-stack overrides; review the upstream values.yaml for more options.
## Provide a name to substitute for the full names of resources
##
fullnameOverride: "prometheus"
## Install Prometheus Operator CRDs
##
crds:
enabled: true
## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
  ## It deploys a corresponding ClusterRole, ClusterRoleBinding and ServiceAccount to apply the CRDs.
## This feature is in preview, off by default and may change in the future.
upgradeJob:
enabled: false
forceConflicts: false
image:
busybox:
registry: docker.io
repository: busybox
tag: "latest"
sha: ""
pullPolicy: IfNotPresent
kubectl:
registry: registry.k8s.io
repository: kubectl
tag: "" # defaults to the Kubernetes version
sha: ""
pullPolicy: IfNotPresent
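  ## Illustrative, commented-out sketch of what enabling the upgrade job could
  ## look like; the tag below is a hypothetical pin, not the deployed value.
  # upgradeJob:
  #   enabled: true
  #   image:
  #     kubectl:
  #       tag: "v1.30.0" # hypothetical pin; an empty tag defaults to the Kubernetes version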
global:
rbac:
create: true
## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:
enabled: false # Disabled for now.
grafana:
enabled: false
# Azure Managed Prometheus already scrapes or exposes these metrics, so the
# chart's built-in exporters and scrape configs stay disabled to avoid duplicate collection.
coreDns:
enabled: false
kubeDns:
enabled: false
nodeExporter:
enabled: false
kubeEtcd:
enabled: false
kubeScheduler:
enabled: false
kubeControllerManager:
enabled: false
kubeProxy:
enabled: false
kubelet:
enabled: false
kubeApiServer:
enabled: false
kubeStateMetrics:
enabled: false
prometheusOperator:
enabled: true
## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default
fullnameOverride: ""
## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt
## Decrease log verbosity to errors only
# logLevel: info
## Prometheus-operator image
##
image:
registry: mcr.microsoft.com/oss/v2
repository: prometheus/prometheus-operator
## Prometheus-config-reloader
##
prometheusConfigReloader:
image:
registry: mcr.microsoft.com/oss/v2
repository: prometheus/prometheus-config-reloader
# resource config for prometheusConfigReloader
resources: {}
# requests:
# cpu: 200m
# memory: 50Mi
# limits:
# cpu: 200m
# memory: 50Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: infra
operator: In
values:
- "true"
tolerations:
- key: "infra"
operator: "Equal"
value: "true"
effect: "NoSchedule"
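  ## The affinity and tolerations above pin the operator to infra nodes. This
  ## assumes those nodes carry the label infra="true" and a matching taint,
  ## e.g. (illustrative commands, not part of this chart):
  # kubectl label node <node> infra=true
  # kubectl taint node <node> infra=true:NoSchedule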
## Deploy a Prometheus instance
##
prometheus:
enabled: true
## Service account for Prometheuses to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
create: true
name: "prometheus"
annotations:
azure.workload.identity/client-id: ""
automountServiceAccountToken: true
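    ## The empty client-id above is typically injected at deploy time rather
    ## than hardcoded; a hypothetical helm invocation (names illustrative):
    # helm upgrade <release> <chart> \
    #   --set-string 'prometheus.serviceAccount.annotations.azure\.workload\.identity/client-id=<CLIENT_ID>'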
## Configure pod disruption budgets for Prometheus
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
##
podDisruptionBudget:
enabled: false
minAvailable: 1
maxUnavailable: ""
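    ## Commented-out sketch: with multiple replicas, a budget like the following
    ## could keep at least one Prometheus pod up during voluntary disruptions.
    # podDisruptionBudget:
    #   enabled: true
    #   minAvailable: 1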
serviceMonitor:
## If true, create a serviceMonitor for prometheus
##
selfMonitor: true
## Settings affecting prometheusSpec
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
##
prometheusSpec:
## Image of Prometheus.
##
image:
      registry: mcr.microsoft.com/azurelinux
repository: prometheus/prometheus
tag: ""
sha: ""
    ## Namespaces to be selected for PrometheusRules discovery. If nil, select own namespace.
ruleNamespaceSelector: {}
## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the PrometheusRule resources created
##
ruleSelectorNilUsesHelmValues: false
## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the servicemonitors created
##
serviceMonitorSelectorNilUsesHelmValues: false
## ServiceMonitors to be selected for target discovery.
## If {}, select all ServiceMonitors
##
serviceMonitorSelector: {}
## Namespaces to be selected for ServiceMonitor discovery.
##
serviceMonitorNamespaceSelector: {}
## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
## prometheus resource to be created with selectors based on values in the helm deployment,
## which will also match the podmonitors created
##
podMonitorSelectorNilUsesHelmValues: false
## PodMonitors to be selected for target discovery.
## If {}, select all PodMonitors
##
podMonitorSelector: {}
    ## Namespaces to be selected for PodMonitor discovery. If nil, select own namespace.
podMonitorNamespaceSelector: {}
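    ## Commented-out sketch of narrowing discovery instead of selecting
    ## everything; the label names below are illustrative only.
    # serviceMonitorNamespaceSelector:
    #   matchLabels:
    #     monitoring: "enabled"
    # podMonitorSelector:
    #   matchLabels:
    #     team: "aro-hcp"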
## How long to retain metrics
##
retention: 1d
## Maximum size of metrics
##
retentionSize: "45GiB"
    ## Log level for Prometheus to be configured with
##
logLevel: info
    ## Log format for Prometheus to be configured with
##
logFormat: logfmt
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata labels and annotations get propagated to the Prometheus pods.
##
podMetadata:
labels:
azure.workload.identity/use: "true"
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
podAntiAffinity: ""
    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, topology.kubernetes.io/zone; note that
    ## failure-domain.beta.kubernetes.io/zone (used below) is deprecated on current Kubernetes versions.
##
podAntiAffinityTopologyKey: failure-domain.beta.kubernetes.io/zone
## Assign custom affinity rules to the prometheus instance
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: infra
operator: In
values:
- "true"
tolerations:
- key: "infra"
operator: "Equal"
value: "true"
effect: "NoSchedule"
topologySpreadConstraints:
- maxSkew: 1
topologyKey: ""
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: prometheus
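    ## Note: Kubernetes rejects a topologySpreadConstraint with an empty
    ## topologyKey, so the value above is presumably filled in per environment, e.g.:
    # topologyKey: topology.kubernetes.io/zone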
## The remote_write spec configuration for Prometheus.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotewritespec
remoteWrite:
- url: ""
azureAd:
cloud: AzurePublic
sdk: {}
queueConfig:
capacity: 2500
maxShards: 500
minShards: 1
maxSamplesPerSend: 2000
batchSendDeadline: 60s
minBackoff: 30ms
maxBackoff: 256s
metadataConfig:
send: false
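    ## The empty url above is populated per environment. For Azure Monitor
    ## managed Prometheus it typically points at a data collection rule
    ## ingestion endpoint (all placeholders below are illustrative):
    # url: "https://<dce>.<region>.metrics.ingest.monitor.azure.com/dataCollectionRules/<dcr-id>/streams/Microsoft.PrometheusMetrics/api/v1/write?api-version=<version>"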
## Resource limits & requests
##
resources: {}
# requests:
# memory: 400Mi
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
##
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: managed-premium
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
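    ## Note: retentionSize (45GiB above) sits below the 50Gi volume, which
    ## leaves headroom for the WAL and compaction so the disk does not fill.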
## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
##
cleanPrometheusOperatorObjectNames: true
## Extra manifests to deploy as an array
extraManifests: []
# - apiVersion: v1
# kind: ConfigMap
# metadata:
# labels:
# name: prometheus-extra
# data:
# extra-data: "value"