static int vbg_acquire_session_capabilities()

in vboxguest/vboxguest_core.c [720:811]


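/**
 * vbg_acquire_session_capabilities - Acquire (get "ownership" of) guest
 *	capabilities for a session.
 * @gdev:		The Guest extension device.
 * @session:		The session.
 * @or_mask:		The capabilities to acquire.
 * @not_mask:		The capabilities to release.
 * @flags:		Flags (VBGL_IOC_AGC_FLAGS_XXX).
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 *
 * Return: 0 or a negative errno value.
 */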
static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
					    struct vbg_session *session,
					    u32 or_mask, u32 not_mask,
					    u32 flags, bool session_termination)
{
	unsigned long irqflags;
	bool wakeup = false;
	int ret = 0;

	mutex_lock(&gdev->session_mutex);

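	/*
	 * Capabilities set through the non-acquire interface (tracked in
	 * set_guest_caps_tracker) cannot also be acquired: a capability is
	 * managed in either set mode or acquire mode, never both.
	 */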
	if (gdev->set_guest_caps_tracker.mask & or_mask) {
		vbg_err("%s error: cannot acquire caps which are currently set\n",
			__func__);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Mark any caps in the or_mask as now being in acquire mode. Note
	 * that once caps are in acquire mode they always stay in this mode.
	 * This impacts event handling, so we take the event-lock.
	 */
	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
	gdev->acquire_mode_guest_caps |= or_mask;
	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

	/* If we only have to switch the caps to acquire mode, we're done. */
	if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
		goto out;

	not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
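	/*
	 * Only release caps this session actually holds, and treat
	 * re-acquiring caps it already holds as a no-op.
	 */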
	not_mask &= session->acquired_guest_caps;
	or_mask &= ~session->acquired_guest_caps;

	if (or_mask == 0 && not_mask == 0)
		goto out;

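	/*
	 * This session's own caps were masked out of or_mask above, so any
	 * overlap here means another session holds a requested capability.
	 */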
	if (gdev->acquired_guest_caps & or_mask) {
		ret = -EBUSY;
		goto out;
	}

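	/* Tentatively commit; rolled back below if the host update fails. */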
	gdev->acquired_guest_caps |= or_mask;
	gdev->acquired_guest_caps &= ~not_mask;
	/* session->acquired_guest_caps impacts event handling, so take the lock */
	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
	session->acquired_guest_caps |= or_mask;
	session->acquired_guest_caps &= ~not_mask;
	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

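	/* Report the new combined capability mask of all sessions to the host. */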
	ret = vbg_set_host_capabilities(gdev, session, session_termination);
	/* Roll back on failure, unless it's session termination time. */
	if (ret < 0 && !session_termination) {
		gdev->acquired_guest_caps &= ~or_mask;
		gdev->acquired_guest_caps |= not_mask;
		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
		session->acquired_guest_caps &= ~or_mask;
		session->acquired_guest_caps |= not_mask;
		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
	}

	/*
	 * If we added a capability, check if that means some other thread in
	 * our session should be unblocked because there are events pending
	 * (the result of vbg_get_allowed_event_mask_for_session() may change).
	 *
	 * HACK ALERT! When the seamless support capability is added, we
	 *	generate a seamless change event so that the ring-3 client
	 *	can sync with the seamless state.
	 */
	if (ret == 0 && or_mask != 0) {
		spin_lock_irqsave(&gdev->event_spinlock, irqflags);

		if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
			gdev->pending_events |=
				VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

		if (gdev->pending_events)
			wakeup = true;

		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

		if (wakeup)
			wake_up(&gdev->event_wq);
	}

out:
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
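
For context, here is a minimal sketch of the caller side, modeled on the driver's ioctl handler vbg_ioctl_acquire_guest_capabilities() in the same file. The layout of struct vbg_ioctl_acquire_guest_caps and the exact validation-mask names are assumptions if your tree differs; session_termination is false on this path, so failures are rolled back and reported to user space.

static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_acquire_guest_caps *caps)
{
	u32 flags, or_mask, not_mask;

	/* Reject requests with a malformed ioctl header/size. */
	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
		return -EINVAL;

	flags = caps->u.in.flags;
	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	/* Only known flag and capability bits may be set. */
	if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
		return -EINVAL;

	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
		return -EINVAL;

	/* Not session termination: errors roll back and reach user space. */
	return vbg_acquire_session_capabilities(gdev, session, or_mask,
						not_mask, flags, false);
}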