static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)

in kvm/vgic/vgic.c [618:717]


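/*
 * Walk this vcpu's ap_list (its list of active/pending interrupts),
 * dropping every interrupt that no longer needs to be handled here:
 * an interrupt with no target is removed from the list outright, and
 * an interrupt now targeting another vcpu is migrated to that vcpu's
 * ap_list.
 */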
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

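		/*
		 * The oracle tells us which vcpu should receive this
		 * interrupt, or returns NULL if it no longer needs to
		 * sit on any ap_list.
		 */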
		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				      SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

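		/*
		 * The interrupt now lives on another vcpu's ap_list, so
		 * raise KVM_REQ_IRQ_PENDING and kick that vcpu out of the
		 * guest to make it notice the newly queued interrupt.
		 */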
		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}
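
The migration path above takes two ap_list_locks at once and avoids an ABBA deadlock by always acquiring the lock of the vcpu with the smaller vcpu_id first, whichever vcpu is doing the pruning; raw_spin_lock_nested() with SINGLE_DEPTH_NESTING only tells lockdep that taking a second lock of the same class here is intentional. A minimal userspace sketch of the same ordering rule, using pthread mutexes instead of the kernel's raw spinlocks (the cpu struct and the lock_pair()/unlock_pair() helpers are illustrative, not part of the vgic code):

#include <pthread.h>

struct cpu {
	int id;
	pthread_mutex_t lock;
};

/*
 * Take both per-cpu locks in a globally consistent order (lowest id
 * first), so two threads migrating in opposite directions can never
 * each hold one lock while waiting for the other.
 */
static void lock_pair(struct cpu *a, struct cpu *b)
{
	struct cpu *first  = (a->id < b->id) ? a : b;
	struct cpu *second = (a->id < b->id) ? b : a;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}

static void unlock_pair(struct cpu *a, struct cpu *b)
{
	/* Unlock order does not affect correctness. */
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

vgic_prune_ap_list() applies the same rule directly to vcpuA/vcpuB and then re-runs the oracle under both locks, since the interrupt was briefly unlocked and its affinity may have changed in the meantime.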