CapacityScheduling.PreFilter()

in pkg/capacityscheduling/capacity_scheduling.go [207:281]


func (c *CapacityScheduling) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
	// TODO improve the efficiency of taking snapshot
	// e.g. use a two-pointer data structure to only copy the updated EQs when necessary.
	snapshotElasticQuota := c.snapshotElasticQuota()
	podReq := computePodResourceRequest(pod)

	// Store the quota snapshot in the cycle state so later extension points in this
	// scheduling cycle work against a consistent view of the ElasticQuotas.
	state.Write(ElasticQuotaSnapshotKey, snapshotElasticQuota)

	elasticQuotaInfos := snapshotElasticQuota.elasticQuotaInfos
	eq := snapshotElasticQuota.elasticQuotaInfos[pod.Namespace]
	if eq == nil {
		// The pod's namespace has no ElasticQuota: record only the pod's own request
		// and let it pass PreFilter without quota checks.
		preFilterState := &PreFilterState{
			podReq: *podReq,
		}
		state.Write(preFilterStateKey, preFilterState)
		return nil, framework.NewStatus(framework.Success)
	}

	// nominatedPodsReqInEQWithPodReq is the sum of podReq and the requests of the nominated pods
	// that are subject to the same quota (namespace) and are at least as important as the preemptor.
	nominatedPodsReqInEQWithPodReq := &framework.Resource{}
	// nominatedPodsReqWithPodReq is the sum of podReq and the requests of the nominated pods
	// across all quotas (namespaces). The nominated pods counted here are of two kinds:
	// 1. pods subject to the same quota (namespace) that are at least as important as the preemptor.
	// 2. pods subject to a different quota (namespace) whose quota usage does not exceed its min.
	nominatedPodsReqWithPodReq := &framework.Resource{}

	nodeList, err := c.fh.SnapshotSharedLister().NodeInfos().List()
	if err != nil {
		return nil, framework.NewStatus(framework.Error, fmt.Sprintf("Error getting the nodelist: %v", err))
	}

	// Account for pods the scheduler has already nominated onto nodes (typically after a
	// successful preemption) so their requests are counted before checking quota limits.
	for _, node := range nodeList {
		nominatedPods := c.fh.NominatedPodsForNode(node.Node().Name)
		for _, p := range nominatedPods {
			if p.Pod.UID == pod.UID {
				continue
			}
			ns := p.Pod.Namespace
			info := c.elasticQuotaInfos[ns]
			if info != nil {
				pResourceRequest := util.ResourceList(computePodResourceRequest(p.Pod))
				// If p is subject to the same quota (namespace) as pod and is at least as important as pod,
				// add p's request to both nominatedPodsReqInEQWithPodReq and nominatedPodsReqWithPodReq.
				// If p is subject to a different quota (namespace) whose usage does not exceed its min,
				// add p's request to nominatedPodsReqWithPodReq only.
				if ns == pod.Namespace && corev1helpers.PodPriority(p.Pod) >= corev1helpers.PodPriority(pod) {
					nominatedPodsReqInEQWithPodReq.Add(pResourceRequest)
					nominatedPodsReqWithPodReq.Add(pResourceRequest)
				} else if ns != pod.Namespace && !info.usedOverMin() {
					nominatedPodsReqWithPodReq.Add(pResourceRequest)
				}
			}
		}
	}

	nominatedPodsReqInEQWithPodReq.Add(util.ResourceList(podReq))
	nominatedPodsReqWithPodReq.Add(util.ResourceList(podReq))
	preFilterState := &PreFilterState{
		podReq:                         *podReq,
		nominatedPodsReqInEQWithPodReq: *nominatedPodsReqInEQWithPodReq,
		nominatedPodsReqWithPodReq:     *nominatedPodsReqWithPodReq,
	}
	state.Write(preFilterStateKey, preFilterState)

	// Reject the pod if admitting it, together with the competing nominated pods in the
	// same quota, would push this ElasticQuota's usage over its max.
	if eq.usedOverMaxWith(nominatedPodsReqInEQWithPodReq) {
		return nil, framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Pod %v/%v is rejected in PreFilter because ElasticQuota %v is more than Max", pod.Namespace, pod.Name, eq.Namespace))
	}

	// Reject the pod if admitting it, together with all counted nominated pods, would push
	// the aggregated usage across all ElasticQuotas over the aggregated min; in that case
	// the pod cannot be admitted without preemption.
	if elasticQuotaInfos.aggregatedUsedOverMinWith(*nominatedPodsReqWithPodReq) {
		return nil, framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Pod %v/%v is rejected in PreFilter because total ElasticQuota used is more than min", pod.Namespace, pod.Name))
	}

	return nil, framework.NewStatus(framework.Success, "")
}
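
The admission checks at the end of PreFilter call three ElasticQuotaInfo helpers that are not shown in this excerpt. The sketch below is a simplified, hypothetical rendering of their semantics only: the field names Used, Min and Max, the exceeds helper, and the CPU/memory-only comparison are assumptions made for illustration, not the plugin's actual implementation.

// Simplified sketch of the quota checks used above, assuming ElasticQuotaInfo keeps its
// usage and bounds as *framework.Resource fields named Used, Min and Max.

// exceeds reports whether a is greater than b in any compared dimension.
// (Hypothetical helper; a real comparison would cover every tracked resource.)
func exceeds(a, b *framework.Resource) bool {
	return a.MilliCPU > b.MilliCPU || a.Memory > b.Memory
}

// usedOverMaxWith: would Used + podRequest exceed Max for this quota?
func (e *ElasticQuotaInfo) usedOverMaxWith(podRequest *framework.Resource) bool {
	sum := e.Used.Clone()
	sum.Add(util.ResourceList(podRequest))
	return exceeds(sum, e.Max)
}

// usedOverMin: is this quota already using more than its Min (i.e. borrowing capacity)?
func (e *ElasticQuotaInfo) usedOverMin() bool {
	return exceeds(e.Used, e.Min)
}

// aggregatedUsedOverMinWith: would cluster-wide usage plus podRequest exceed the sum of
// all Mins? If so, the incoming pod cannot fit without preempting something.
func (infos ElasticQuotaInfos) aggregatedUsedOverMinWith(podRequest framework.Resource) bool {
	totalUsed := &framework.Resource{}
	totalMin := &framework.Resource{}
	for _, info := range infos {
		totalUsed.Add(util.ResourceList(info.Used))
		totalMin.Add(util.ResourceList(info.Min))
	}
	totalUsed.Add(util.ResourceList(&podRequest))
	return exceeds(totalUsed, totalMin)
}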
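
Later extension points retrieve what PreFilter stored through the same CycleState. The following is a minimal sketch of that read path: preFilterStateKey and PreFilterState are the identifiers used in PreFilter above, while the Clone behavior, the getPreFilterState helper name, and the error wording are assumptions following the conventional scheduler-plugin pattern.

// Clone lets PreFilterState satisfy framework.StateData, which state.Write requires.
// Returning the receiver is a common choice when the state is not mutated concurrently;
// the plugin's real implementation may copy instead.
func (s *PreFilterState) Clone() framework.StateData {
	return s
}

// getPreFilterState reads back what PreFilter stored, so AddPod/RemovePod or PostFilter
// can reuse the computed requests instead of recomputing them.
func getPreFilterState(cycleState *framework.CycleState) (*PreFilterState, error) {
	data, err := cycleState.Read(preFilterStateKey)
	if err != nil {
		return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err)
	}
	s, ok := data.(*PreFilterState)
	if !ok {
		return nil, fmt.Errorf("%+v is not a *PreFilterState", data)
	}
	return s, nil
}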