in custom-metrics-stackdriver-adapter/pkg/adapter/coreprovider/provider.go [132:186]
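// getPodMetrics fetches CPU and RAM usage for the given pods from the core
// client and assembles per-container core metrics. The returned slices are
// parallel to the pods argument; a pod for which no complete metrics were
// found keeps a nil entry.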
func (p *CoreProvider) getPodMetrics(pods ...apitypes.NamespacedName) ([]api.TimeInfo, [][]metrics.ContainerMetrics, error) {
	timeInfo := make([]api.TimeInfo, len(pods))
	coreMetrics := make([][]metrics.ContainerMetrics, len(pods))
	resourceNames := make([]string, len(pods))

	// Pod names are quoted (%q) before being handed to the client's metric queries.
	for i, pod := range pods {
		resourceNames[i] = fmt.Sprintf("%q", pod.Name)
	}

	// Fetch container CPU and RAM usage for all requested pods in two batched calls.
	cpuMetrics, cpuTimeInfo, err := p.client.getContainerCPU(resourceNames)
	if err != nil {
		return nil, nil, err
	}
	ramMetrics, _, err := p.client.getContainerRAM(resourceNames)
	if err != nil {
		return nil, nil, err
	}

	for i := range pods {
		podKey := pods[i].Namespace + ":" + pods[i].Name

		cpuContainers, ok := cpuMetrics[podKey]
		if !ok {
			klog.V(4).Infof("Metric cpu not found for pod '%s'", podKey)
			continue
		}
		ramContainers, ok := ramMetrics[podKey]
		if !ok {
			klog.V(4).Infof("Metric ram not found for pod '%s'", podKey)
			continue
		}

		coreMetrics[i] = make([]metrics.ContainerMetrics, 0)

		// Keep only containers that report both CPU and RAM usage.
		for container, cpu := range cpuContainers {
			ram, ok := ramContainers[container]
			if !ok { // cpu and ram should be present in the container
				continue
			}

			coreMetrics[i] = append(coreMetrics[i], metrics.ContainerMetrics{Name: container, Usage: corev1.ResourceList{
				corev1.ResourceCPU:    cpu,
				corev1.ResourceMemory: ram,
			}})

			timeInfo[i], ok = cpuTimeInfo[podKey] // TODO(holubowicz): query about the same time segment in cpu and ram (now it can be slightly different)
			if !ok {
				return nil, nil, apierr.NewInternalError(fmt.Errorf("TimeInfo should be set for every pod with metrics"))
			}
		}

		// Leave a nil entry for pods without any usable container metrics.
		if len(coreMetrics[i]) == 0 {
			coreMetrics[i] = nil
		}
	}

	return timeInfo, coreMetrics, nil
}
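
For reference, the lookups above imply that getContainerCPU and getContainerRAM return usage keyed first by the "namespace:name" pod key and then by container name. The standalone sketch below (not part of provider.go; the pod, containers, and values are made up, and the exact client signatures are an assumption inferred from this function) illustrates that shape and the "skip containers missing one of the two metrics" behavior.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Usage as the core client is assumed to return it:
	// "namespace:name" pod key -> container name -> quantity.
	cpuMetrics := map[string]map[string]resource.Quantity{
		"default:web-0": {
			"app":     resource.MustParse("250m"),
			"sidecar": resource.MustParse("50m"),
		},
	}
	ramMetrics := map[string]map[string]resource.Quantity{
		"default:web-0": {
			"app": resource.MustParse("128Mi"),
			// "sidecar" has no RAM sample, so getPodMetrics would skip it.
		},
	}

	podKey := "default:web-0"
	for container, cpu := range cpuMetrics[podKey] {
		ram, ok := ramMetrics[podKey][container]
		if !ok {
			fmt.Printf("%s: skipped (no RAM sample)\n", container)
			continue
		}
		fmt.Printf("%s: cpu=%s ram=%s\n", container, cpu.String(), ram.String())
	}
}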