in calculator/calculator.go [263:364]
// PopulateWorkloads lists pod metrics in all namespaces (excluding
// kube-system and the managed-Prometheus namespaces gke-gmp-system /
// gmp-system), aggregates each Pod's per-container resource usage,
// prices the resulting workload, and records it both in the returned
// slice and on its node in the supplied nodes map (mutated in place:
// the workload is appended and its cost added to the node's total).
//
// Billing uses max(usage, request) per container for CPU, memory and
// ephemeral storage, since billing is based on at least the request.
// GPU amount always comes from the container's "nvidia.com/gpu" request.
//
// Returns the priced workloads, or an error if listing pod metrics or
// describing a pod fails.
func (service *PricingService) PopulateWorkloads(nodes map[string]cluster.Node) ([]cluster.Workload, error) {
	var workloads []cluster.Workload
	podMetricsList, err := service.metricsClientset.MetricsV1beta1().PodMetricses("").List(context.TODO(), metav1.ListOptions{FieldSelector: "metadata.namespace!=kube-system,metadata.namespace!=gke-gmp-system,metadata.namespace!=gmp-system"})
	if err != nil {
		// Propagate instead of log.Fatalf: a service method must not exit
		// the process; callers already handle the error return.
		return nil, err
	}
	for _, v := range podMetricsList.Items {
		pod, err := cluster.DescribePod(service.clientset, v.Name, v.Namespace)
		if err != nil {
			return nil, err
		}
		var cpu, memory, storage, gpu int64
		podContainerCount := 0
		gpuModel := pod.Spec.NodeSelector["cloud.google.com/gke-accelerator"]
		// Sum used resources from the Pod.
		for _, container := range v.Containers {
			cpuUsage := container.Usage.Cpu().MilliValue()
			// MilliValue()/1e9 converts milli-bytes to megabytes.
			// NOTE(review): original comment claimed MiB, but /1e9 on
			// milli-bytes yields MB — confirm which unit pricing expects.
			memoryUsage := container.Usage.Memory().MilliValue() / 1000000000
			storageUsage := container.Usage.StorageEphemeral().MilliValue() / 1000000000
			gpuUsage := int64(0)
			// Find the matching spec container to read its resource requests.
			for _, specContainer := range pod.Spec.Containers {
				if container.Name == specContainer.Name {
					cpuRequest := specContainer.Resources.Requests[corev1.ResourceCPU]
					memoryRequest := specContainer.Resources.Requests[corev1.ResourceMemory]
					storageRequest := specContainer.Resources.Requests[corev1.ResourceStorage]
					gpuRequests := specContainer.Resources.Requests["nvidia.com/gpu"]
					// Usage is less than requests, so we set request as usage
					// since the billing works like that.
					if cpuUsage < cpuRequest.MilliValue() {
						cpuUsage = cpuRequest.MilliValue()
					}
					if memoryUsage < memoryRequest.MilliValue()/1000000000 {
						memoryUsage = memoryRequest.MilliValue() / 1000000000
					}
					if storageUsage < storageRequest.MilliValue()/1000000000 {
						// Bug fix: previously assigned memoryRequest here,
						// billing storage with the memory request value.
						storageUsage = storageRequest.MilliValue() / 1000000000
					}
					gpuUsage = gpuRequests.Value()
					// Container names are unique within a pod spec.
					break
				}
			}
			cpu += cpuUsage
			memory += memoryUsage
			storage += storageUsage
			gpu += gpuUsage
			podContainerCount++
		}
		// Check and modify the limits of summed workloads from the Pod.
		cpu, memory, storage = ValidateAndRoundResources(cpu, memory, storage)
		computeClass := service.DecideComputeClass(
			v.Name,
			nodes[pod.Spec.NodeName].InstanceType,
			cpu,
			memory,
			gpu,
			gpuModel,
			strings.Contains(nodes[pod.Spec.NodeName].InstanceType, service.Config.Section("").Key("gce_arm64_prefix").String()),
		)
		cost := service.CalculatePricing(cpu, memory, storage, gpu, gpuModel, computeClass, nodes[pod.Spec.NodeName].InstanceType, nodes[pod.Spec.NodeName].Spot)
		workloadObject := cluster.Workload{
			Name:              v.Name,
			Containers:        podContainerCount,
			Node_name:         pod.Spec.NodeName,
			Cpu:               cpu,
			Memory:            memory,
			Storage:           storage,
			AcceleratorType:   gpuModel,
			AcceleratorAmount: gpu,
			Cost:              cost,
			ComputeClass:      computeClass,
		}
		workloads = append(workloads, workloadObject)
		// Attach the workload to its node (map values are structs, so
		// read-modify-write the entry back into the map).
		if entry, ok := nodes[pod.Spec.NodeName]; ok {
			entry.Workloads = append(entry.Workloads, workloadObject)
			entry.Cost += cost
			nodes[pod.Spec.NodeName] = entry
		}
	}
	return workloads, nil
}