/*
 * vmbus_chan_sched() — excerpt from drivers/hv/vmbus_drv.c (lines 1245-1332).
 */

/*
 * vmbus_chan_sched() - dispatch pending channel interrupts for this CPU.
 * @hv_cpu: per-CPU VMBus context; supplies the SynIC event page that is
 *          scanned on Win8+ hosts.
 *
 * Scans the receive-interrupt bitmap for set bits, maps each set bit
 * (a channel relid) to its struct vmbus_channel, and either invokes the
 * channel callback directly or schedules its tasklet, depending on the
 * channel's callback_mode.  Runs in interrupt context (called from the
 * VMBus ISR path); uses only rcu_read_lock() and a non-IRQ-disabling
 * spin_lock(), consistent with that context.
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		/*
		 * Pre-Win8 hosts use a single connection-wide receive
		 * interrupt page covering every possible channel.
		 */
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		/*
		 * Index into the per-CPU SynIC event page by SINT; the
		 * VMBus messages/events use VMBUS_MESSAGE_SINT.
		 */
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	/* Nothing to scan if the event/interrupt page isn't set up yet. */
	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		/*
		 * Clear the bit atomically (sync_ variant) so the claim is
		 * not lost against concurrent updates of the shared page;
		 * skip if someone else already consumed this relid.
		 */
		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		/* A rescinded channel must not have its callback invoked. */
		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section.  See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		/* Per-channel interrupt accounting (exposed via sysfs). */
		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			/* Low-latency mode: invoke the callback inline. */
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			/*
			 * Mask further host interrupts for this channel
			 * before deferring the work to the tasklet.
			 */
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}