# config/recipes/elastic-agent/fleet-kubernetes-integration-nonroot.yaml
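#
# This recipe deploys Elasticsearch, Kibana, Fleet Server and a Fleet-managed
# Elastic Agent DaemonSet with the Kubernetes integration, prepared so the
# Agents can run as a non-root user (group 1000, see the DaemonSet below).
#
# Typical usage, assuming the ECK operator is already installed and the
# resources are created in the default namespace (which the hostnames, paths
# and RBAC bindings below expect):
#
#   kubectl apply -f fleet-kubernetes-integration-nonroot.yaml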
---
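# This DaemonSet prepares the /var/lib/elastic-agent hostPath on every node so a
# non-root Agent (group 1000) can write its state there: the init container creates
# the state directories and grants group read/write access, and a pause container
# then keeps the Pod alive.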
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: manage-agent-hostpath-permissions
spec:
  selector:
    matchLabels:
      name: manage-agent-hostpath-permissions
  template:
    metadata:
      labels:
        name: manage-agent-hostpath-permissions
    spec:
      # This is only required when running in an SELinux-enabled/OpenShift environment.
      # Ensure this user has been added to the privileged scc in the correct namespace.
      # oc adm policy add-scc-to-user privileged -z elastic-agent -n elastic-apps
      # serviceAccountName: elastic-agent
      volumes:
        - hostPath:
            path: /var/lib/elastic-agent
            type: DirectoryOrCreate
          name: "agent-data"
      initContainers:
        - name: manage-agent-hostpath-permissions
          # The UBI image is only required when the `chcon` binary is needed, i.e. when
          # running in an SELinux-enabled/OpenShift environment. In that case use it
          # instead of the smaller default image:
          # image: registry.access.redhat.com/ubi9/ubi-minimal:latest
          image: docker.io/bash:5.2.15
          resources:
            limits:
              cpu: 100m
              memory: 32Mi
          securityContext:
            # privileged is only required when running in an SELinux-enabled/OpenShift environment.
            # privileged: true
            runAsUser: 0
          volumeMounts:
            - mountPath: /var/lib/elastic-agent
              name: agent-data
          command:
            - 'bash'
            - '-e'
            - '-c'
            - |-
              # Adjust this to be /var/lib/elastic-agent/YOUR-NAMESPACE/YOUR-AGENT-NAME/state
              # Multiple directories are supported for the fleet-server + agent use case.
              dirs=(
                "/var/lib/elastic-agent/default/elastic-agent/state"
                "/var/lib/elastic-agent/default/fleet-server/state"
              )
              for dir in "${dirs[@]}"; do
                mkdir -p "${dir}"
                # chcon is only required when running in an SELinux-enabled/OpenShift environment.
                # chcon -Rt svirt_sandbox_file_t "${dir}"
                chmod g+rw "${dir}"
                chgrp 1000 "${dir}"
                if [ -n "$(ls -A "${dir}" 2>/dev/null)" ]
                then
                  chgrp 1000 "${dir}"/*
                  chmod g+rw "${dir}"/*
                fi
              done
      containers:
        - name: pause
          image: gcr.io/google-containers/pause-amd64:3.1
---
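# Kibana also preconfigures Fleet: the Fleet Server host, an Elasticsearch output
# that trusts the ECK-managed CA, the packages to install, and the two agent
# policies (eck-fleet-server and eck-agent) referenced below via policyID.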
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
spec:
  version: 8.17.0
  count: 1
  elasticsearchRef:
    name: elasticsearch
  config:
    xpack.fleet.agents.fleet_server.hosts: ["https://fleet-server-agent-http.default.svc:8220"]
    xpack.fleet.outputs:
      - id: eck-fleet-agent-output-elasticsearch
        is_default: true
        name: eck-elasticsearch
        type: elasticsearch
        hosts: ["https://elasticsearch-es-http.default.svc:9200"]
        ssl:
          certificate_authorities: ["/mnt/elastic-internal/elasticsearch-association/default/elasticsearch/certs/ca.crt"]
    xpack.fleet.packages:
      - name: system
        version: latest
      - name: elastic_agent
        version: latest
      - name: fleet_server
        version: latest
      - name: kubernetes
        version: latest
    xpack.fleet.agentPolicies:
      - name: Fleet Server on ECK policy
        id: eck-fleet-server
        namespace: default
        is_managed: true
        monitoring_enabled:
          - logs
          - metrics
        unenroll_timeout: 900
        package_policies:
          - name: fleet_server-1
            id: fleet_server-1
            package:
              name: fleet_server
      - name: Elastic Agent on ECK policy
        id: eck-agent
        namespace: default
        is_managed: true
        monitoring_enabled:
          - logs
          - metrics
        unenroll_timeout: 900
        package_policies:
          - package:
              name: system
            name: system-1
          - package:
              name: kubernetes
            name: kubernetes-1
---
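# A minimal three-node Elasticsearch cluster; disabling mmap avoids having to raise
# vm.max_map_count on the Kubernetes hosts.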
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch
spec:
  version: 8.17.0
  nodeSets:
    - name: default
      count: 3
      config:
        node.store.allow_mmap: false
---
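# Fleet Server, running as a single-replica Deployment in fleet mode and enrolled
# into the eck-fleet-server policy defined in the Kibana config above.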
apiVersion: agent.k8s.elastic.co/v1alpha1
kind: Agent
metadata:
  name: fleet-server
spec:
  version: 8.17.0
  kibanaRef:
    name: kibana
  elasticsearchRefs:
    - name: elasticsearch
  mode: fleet
  fleetServerEnabled: true
  policyID: eck-fleet-server
  deployment:
    replicas: 1
    podTemplate:
      spec:
        serviceAccountName: fleet-server
        automountServiceAccountToken: true
---
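# Fleet-managed Elastic Agent, deployed as a DaemonSet and enrolled through the
# Fleet Server above into the eck-agent policy. hostNetwork together with the
# ClusterFirstWithHostNet DNS policy lets the Agent use the node's network while
# still resolving cluster-internal services.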
apiVersion: agent.k8s.elastic.co/v1alpha1
kind: Agent
metadata:
  name: elastic-agent
spec:
  version: 8.17.0
  kibanaRef:
    name: kibana
  fleetServerRef:
    name: fleet-server
  mode: fleet
  policyID: eck-agent
  daemonSet:
    podTemplate:
      spec:
        hostNetwork: true
        dnsPolicy: ClusterFirstWithHostNet
        serviceAccountName: elastic-agent
        automountServiceAccountToken: true
---
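# RBAC for Fleet Server: read access to pods, namespaces, nodes, replicasets and
# jobs, plus coordination leases (typically used for leader election).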
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fleet-server
rules:
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - nodes
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["apps"]
    resources:
      - replicasets
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["batch"]
    resources:
      - jobs
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["coordination.k8s.io"]
    resources:
      - leases
    verbs:
      - get
      - create
      - update
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fleet-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fleet-server
subjects:
  - kind: ServiceAccount
    name: fleet-server
    namespace: default
roleRef:
  kind: ClusterRole
  name: fleet-server
  apiGroup: rbac.authorization.k8s.io
---
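# RBAC for the Elastic Agent DaemonSet, covering what the Kubernetes integration
# reads: core resources and events, workload controllers, jobs and cronjobs, node
# stats and the /metrics endpoints, storage classes, and coordination leases.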
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: elastic-agent
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - events
      - services
      - configmaps
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["coordination.k8s.io"]
    resources:
      - leases
    verbs:
      - get
      - create
      - update
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
  - apiGroups: ["extensions"]
    resources:
      - replicasets
    verbs:
      - "get"
      - "list"
      - "watch"
  - apiGroups:
      - "apps"
    resources:
      - statefulsets
      - deployments
      - replicasets
      - daemonsets
    verbs:
      - "get"
      - "list"
      - "watch"
  - apiGroups:
      - ""
    resources:
      - nodes/stats
    verbs:
      - get
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
  - apiGroups:
      - "batch"
    resources:
      - jobs
      - cronjobs
    verbs:
      - "get"
      - "list"
      - "watch"
  - apiGroups:
      - "storage.k8s.io"
    resources:
      - storageclasses
    verbs:
      - "get"
      - "list"
      - "watch"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elastic-agent
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elastic-agent
subjects:
  - kind: ServiceAccount
    name: elastic-agent
    namespace: default
roleRef:
  kind: ClusterRole
  name: elastic-agent
  apiGroup: rbac.authorization.k8s.io
...