/* vbg_set_session_event_filter() — from vboxguest/vboxguest_core.c, lines 574-637 */

/**
 * vbg_set_session_event_filter - Apply or/not masks to a session's event
 *	filter and, if the combined filter over all sessions changed,
 *	push the new filter to the host.
 * @gdev:                The Guest extension device.
 * @session:             The session whose event filter to modify.
 * @or_mask:             Events to add to the session's filter.
 * @not_mask:            Events to remove from the session's filter
 *                       (applied after @or_mask).
 * @session_termination: Set when called as part of session cleanup; in
 *                       that case the request is made on behalf of the
 *                       kernel, allocation failure is tolerated and no
 *                       rollback is done on host errors.
 *
 * Takes gdev->session_mutex internally, so the caller must not hold it.
 *
 * Return: 0 or negative errno value.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the session mutex (so
	 * we do not allocate while holding it). When the session is being
	 * terminated the requestor is the kernel, as we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	/* Re-use or_mask to hold the new global filter to send to the host. */
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	/* Nothing to tell the host, or no request buffer (cleanup path). */
	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		/*
		 * U32_MAX marks the host-side filter as unknown, so the
		 * equality check above cannot skip the next update attempt.
		 */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		/*
		 * Undo the tracker update (passing the new filter as the
		 * "previous" value reverses the earlier call) and restore
		 * the session's old filter.
		 */
		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	/* vbg_req_free() handles a NULL req (cleanup path without buffer). */
	vbg_req_free(req, sizeof(*req));

	return ret;
}