in internal/manifests/collector/container.go [25:127]
func Container(cfg config.Config, logger logr.Logger, agent v1alpha1.AmazonCloudWatchAgent, addConfig bool) corev1.Container {
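	// Use the image from the custom resource when set; fall back to the operator's
	// default collector image otherwise.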
	image := agent.Spec.Image
	if len(image) == 0 {
		image = cfg.CollectorImage()
	}

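	// Derive the container ports from the agent config, the OTel config, and any ports
	// declared explicitly on the spec.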
	ports := getContainerPorts(logger, agent.Spec.Config, agent.Spec.OtelConfig, agent.Spec.Ports)

	var volumeMounts []corev1.VolumeMount
	argsMap := agent.Spec.Args
	if argsMap == nil {
		argsMap = map[string]string{}
	}
	// args holds the final, sorted list of container arguments.
	var args []string
	// When addConfig is set, the configuration supplied via
	// v1alpha1.AmazonCloudWatchAgentSpec.Config is delivered to the container as a volume
	// mount (with the mount path chosen from the node's OS via the node selector), and the
	// Prometheus config is mounted alongside it when present. At the time of writing,
	// although the cloudwatch agent itself can merge multiple configs, the operator has yet
	// to implement such functionality. Because v1alpha1.AmazonCloudWatchAgentSpec.Config is
	// a required field, it is treated as the "primary" config; additional configs can be
	// added in a deterministic order in the future.
	if addConfig {
		volumeMounts = append(volumeMounts, getVolumeMounts(agent.Spec.NodeSelector["kubernetes.io/os"]))
		if !agent.Spec.Prometheus.IsEmpty() {
			volumeMounts = append(volumeMounts, getPrometheusVolumeMounts(agent.Spec.NodeSelector["kubernetes.io/os"]))
		}
	}

	// Sort the v1alpha1.AmazonCloudWatchAgentSpec.Args before copying them into
	// container.Args: Go does not guarantee map iteration order, and a stable ordering
	// prevents reconciliation from seeing spurious diffs in otherwise identical args.
	var sortedArgs []string
	for k, v := range argsMap {
		sortedArgs = append(sortedArgs, fmt.Sprintf("--%s=%s", k, v))
	}
	sort.Strings(sortedArgs)
	args = append(args, sortedArgs...)

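	// Append any user-supplied volume mounts after the config mounts.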
	if len(agent.Spec.VolumeMounts) > 0 {
		volumeMounts = append(volumeMounts, agent.Spec.VolumeMounts...)
	}

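	// Start from the env vars declared on the spec, defaulting to an empty slice.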
	var envVars = agent.Spec.Env
	if agent.Spec.Env == nil {
		envVars = []corev1.EnvVar{}
	}

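	// Expose the pod's own name to the agent through the downward API.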
	envVars = append(envVars, corev1.EnvVar{
		Name: "POD_NAME",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{
				FieldPath: "metadata.name",
			},
		},
	})

	if agent.Spec.TargetAllocator.Enabled {
		// We need to add a SHARD here so the collector is able to keep targets after the
		// hashmod operation which is added by default by the Prometheus operator's config
		// generator. All collector instances use SHARD == 0, as they only receive targets
		// allocated to them and should not use the Prometheus hashmod-based allocation.
		envVars = append(envVars, corev1.EnvVar{
			Name:  "SHARD",
			Value: "0",
		})
	}

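	// Parse the JSON agent config up front so configuration errors surface in the logs;
	// a parse failure is logged but does not block container creation.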
	if _, err := adapters.ConfigFromJSONString(agent.Spec.Config); err != nil {
		logger.Error(err, "error parsing config")
	}

	var livenessProbe *corev1.Probe
	if configFromString, err := adapters.ConfigFromString(agent.Spec.OtelConfig); err == nil {
		if probe, err := getLivenessProbe(configFromString, agent.Spec.LivenessProbe); err == nil {
			livenessProbe = probe
		} else if errors.Is(err, adapters.ErrNoServiceExtensions) {
			logger.Info("extensions not configured, skipping liveness probe creation")
		} else if errors.Is(err, adapters.ErrNoServiceExtensionHealthCheck) {
			logger.Info("healthcheck extension not configured, skipping liveness probe creation")
		} else {
			logger.Error(err, "cannot create liveness probe")
		}
	}

	return corev1.Container{
		Name:            naming.Container(),
		Image:           image,
		ImagePullPolicy: agent.Spec.ImagePullPolicy,
		WorkingDir:      agent.Spec.WorkingDir,
		VolumeMounts:    volumeMounts,
		Args:            args,
		Env:             envVars,
		EnvFrom:         agent.Spec.EnvFrom,
		Resources:       agent.Spec.Resources,
		Ports:           portMapToContainerPortList(ports),
		SecurityContext: agent.Spec.SecurityContext,
		LivenessProbe:   livenessProbe,
		Lifecycle:       agent.Spec.Lifecycle,
	}
}
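
// Usage sketch (illustrative, not part of this file): a caller in the reconciler might
// build a pod spec roughly like the following, where cfg, logger, and agent are assumed
// to come from the surrounding reconciliation context.
//
//	container := Container(cfg, logger, agent, true)
//	podSpec := corev1.PodSpec{
//		Containers: []corev1.Container{container},
//	}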