# get_pod_used_resources() — excerpt from
# gpudirect-tcpxo/topology-scheduler/schedule-daemon.py

def get_pod_used_resources(pod):
  """Return the (cpu, memory, gpu) resources requested by this pod.

  Sums the resource *requests* of all containers that have a reported
  status and are not terminated.

  Args:
    pod: a V1Pod object (kubernetes client model).

  Returns:
    A (used_cpu, used_memory, used_gpu) tuple. CPU and memory are the
    Decimal values produced by parse_quantity; gpu is an int count of
    'nvidia.com/gpu' requests.
  """
  used_cpu = 0
  used_memory = 0
  used_gpu = 0
  if pod.status is None or pod.status.container_statuses is None:
    # Pod has no reported statuses yet (e.g. just created): nothing counted.
    return used_cpu, used_memory, used_gpu
  # The API does not guarantee that status.container_statuses is in the
  # same order as spec.containers, so pairing them with zip() can attribute
  # a terminated state to the wrong container. Match by container name.
  status_by_name = {s.name: s for s in pod.status.container_statuses}
  for container in pod.spec.containers:
    container_status = status_by_name.get(container.name)
    if container_status is None:
      # No status reported for this container yet; skip it (the original
      # zip-based pairing likewise skipped unmatched containers).
      continue
    if container_status.state.terminated is not None:
      # Terminated containers don't use resources.
      continue
    requests = container.resources.requests or {}
    used_cpu += parse_quantity(requests.get('cpu', 0))
    used_memory += parse_quantity(requests.get('memory', 0))
    used_gpu += int(requests.get('nvidia.com/gpu', 0))
  return used_cpu, used_memory, used_gpu