in plugins/processors/k8sdecorator/stores/podstore.go [279:323]
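// decorateCpu adds CPU request, limit, and utilization fields to the metric:
// pod-level metrics get aggregates derived from the pod's container specs,
// while container-level metrics get the matching container's own settings.
// Utilization is computed against the node CPU capacity (and, for pods, also
// against the pod limit when one can be derived).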
func (p *PodStore) decorateCpu(metric telegraf.Metric, tags map[string]string, pod *corev1.Pod) {
	if tags[MetricType] == TypePod {
		// add cpu request and limit for the pod
		if metric.HasField(MetricName(TypePod, CpuTotal)) {
			cpuTotal := metric.Fields()[MetricName(TypePod, CpuTotal)].(float64)
			cpuCapacity := p.nodeInfo.getCPUCapacity()
			// podCpuReq sums the CPU requests of the containers that declare one; zero means no container set a request
			podCpuReq, _ := getResourceSettingForPod(pod, cpuCapacity, cpuKey, getRequestForContainer)
			if podCpuReq != 0 {
				metric.AddField(MetricName(TypePod, CpuRequest), podCpuReq)
			}
			if cpuCapacity != 0 {
				metric.AddField(MetricName(TypePod, CpuUtilization), cpuTotal/float64(cpuCapacity)*100)
				if podCpuReq != 0 {
					metric.AddField(MetricName(TypePod, CpuReservedCapacity), float64(podCpuReq)/float64(cpuCapacity)*100)
				}
			}
			// only set the pod limit when every container declares one
			podCpuLimit, ok := getResourceSettingForPod(pod, cpuCapacity, cpuKey, getLimitForContainer)
			if ok && podCpuLimit != 0 {
				metric.AddField(MetricName(TypePod, CpuLimit), podCpuLimit)
				metric.AddField(MetricName(TypePod, CpuUtilizationOverPodLimit), cpuTotal/float64(podCpuLimit)*100)
			}
		}
	} else if tags[MetricType] == TypeContainer {
		// add cpu limit and request for the container
		if metric.HasField(MetricName(TypeContainer, CpuTotal)) {
			if cpuCapacity := p.nodeInfo.getCPUCapacity(); cpuCapacity != 0 {
				cpuTotal := metric.Fields()[MetricName(TypeContainer, CpuTotal)].(float64)
				metric.AddField(MetricName(TypeContainer, CpuUtilization), cpuTotal/float64(cpuCapacity)*100)
			}
			if containerName, ok := tags[ContainerNamekey]; ok {
				// look up this container's spec to read its declared CPU limit and request
				for _, containerSpec := range pod.Spec.Containers {
					if containerSpec.Name == containerName {
						if cpuLimit, ok := getLimitForContainer(cpuKey, containerSpec); ok {
							metric.AddField(MetricName(TypeContainer, CpuLimit), cpuLimit)
						}
						if cpuReq, ok := getRequestForContainer(cpuKey, containerSpec); ok {
							metric.AddField(MetricName(TypeContainer, CpuRequest), cpuReq)
						}
						break // container names are unique within a pod
					}
				}
			}
		}
	}
}
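
As a rough illustration of the pod-level arithmetic above (hypothetical numbers; the millicore unit and the sample values are assumptions for this sketch, with the real inputs coming from the metric's CpuTotal field, nodeInfo, and the pod spec):

package main

import "fmt"

func main() {
	// assumed sample values, all in CPU millicores
	cpuTotal := 250.0     // pod's measured CPU usage (the CpuTotal field)
	cpuCapacity := 4000.0 // node capacity from nodeInfo.getCPUCapacity()
	podCpuReq := 500.0    // sum of the containers' CPU requests
	podCpuLimit := 1000.0 // sum of the containers' CPU limits (every container set one)

	fmt.Printf("CpuUtilization: %.2f%%\n", cpuTotal/cpuCapacity*100)             // 6.25
	fmt.Printf("CpuReservedCapacity: %.2f%%\n", podCpuReq/cpuCapacity*100)       // 12.50
	fmt.Printf("CpuUtilizationOverPodLimit: %.2f%%\n", cpuTotal/podCpuLimit*100) // 25.00
}

getResourceSettingForPod is defined elsewhere in the package; based on how it is used here (a sum of per-container settings, with ok true only when every container declares one), a minimal sketch of its contract might look like the following. The clamp to the capacity argument, the uint64 values, and the corev1.ResourceName key type are assumptions, not the actual implementation:

package stores

import corev1 "k8s.io/api/core/v1"

// getResourceSettingForPodSketch is a hypothetical stand-in: it sums the
// per-container values reported by fn and returns ok only when every
// container declares the setting.
func getResourceSettingForPodSketch(pod *corev1.Pod, bound uint64, resource corev1.ResourceName,
	fn func(corev1.ResourceName, corev1.Container) (uint64, bool)) (uint64, bool) {
	var sum uint64
	allSet := true
	for _, c := range pod.Spec.Containers {
		if v, ok := fn(resource, c); ok {
			sum += v
		} else {
			// a container without the setting makes the pod-level value partial
			allSet = false
		}
	}
	if bound != 0 && sum > bound {
		sum = bound // assumption: cap the aggregate at the node capacity
	}
	return sum, allSet
}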