static void init_vp_index()

in channel_mgmt.c [726:788]


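/*
 * init_vp_index() -- pick the CPU that will receive this channel's
 * interrupts and record it in channel->target_cpu.
 *
 * Pre-Win8 hosts, non-performance-critical channels, and the failure
 * path of alloc_cpumask_var() all bind the channel to VMBUS_CONNECT_CPU.
 * Performance-critical channels are spread round-robin across the NUMA
 * nodes and, within each node, across that node's CPUs.
 */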
static void init_vp_index(struct vmbus_channel *channel)
{
	bool perf_chn = hv_is_perf_channel(channel);
	u32 i, ncpu = num_online_cpus();
	cpumask_var_t available_mask;
	struct cpumask *alloced_mask;
	u32 target_cpu;
	int numa_node;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on VMBUS_CONNECT_CPU.
		 * Also if the channel is not a performance critical
		 * channel, bind it to VMBUS_CONNECT_CPU.
		 * In case alloc_cpumask_var() fails, bind it to
		 * VMBUS_CONNECT_CPU.
		 */
		channel->target_cpu = VMBUS_CONNECT_CPU;
		if (perf_chn)
			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
		return;
	}

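	/*
	 * Cycle through the NUMA nodes and their CPUs, trying up to
	 * ncpu + 1 candidates so that, where possible, the chosen CPU is
	 * not already used by another channel of the same device.
	 */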
	for (i = 1; i <= ncpu + 1; i++) {
		while (true) {
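			/*
			 * Advance to the next NUMA node that has CPUs,
			 * wrapping around once numa_node reaches nr_node_ids.
			 */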
			numa_node = next_numa_node_id++;
			if (numa_node == nr_node_ids) {
				next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(numa_node)))
				continue;
			break;
		}
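		/*
		 * hv_numa_map[numa_node] tracks which CPUs in this node
		 * have already been handed out to channels.
		 */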
		alloced_mask = &hv_context.hv_numa_map[numa_node];

		if (cpumask_weight(alloced_mask) ==
		    cpumask_weight(cpumask_of_node(numa_node))) {
			/*
			 * We have cycled through all the CPUs in the node;
			 * reset the alloced map.
			 */
			cpumask_clear(alloced_mask);
		}

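		/*
		 * available_mask = CPUs of this node not yet present in
		 * alloced_mask; take the first one and mark it allocated.
		 */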
		cpumask_xor(available_mask, alloced_mask,
			    cpumask_of_node(numa_node));

		target_cpu = cpumask_first(available_mask);
		cpumask_set_cpu(target_cpu, alloced_mask);

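		/*
		 * Accept this CPU if the device has more channels than
		 * there are CPUs, if the candidates are exhausted (i > ncpu),
		 * or if the CPU is not already claimed by a sibling channel
		 * of the same device.
		 */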
		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
			break;
	}

	channel->target_cpu = target_cpu;

	free_cpumask_var(available_mask);
}
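
For context, hv_cpuself_used() (defined elsewhere in channel_mgmt.c, not shown here) is what the loop above consults to avoid stacking several channels of one device on the same CPU. A minimal sketch of that check, assuming the usual primary/sub-channel linkage (primary_channel, sc_list, target_cpu) on struct vmbus_channel:

static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
{
	struct vmbus_channel *primary = chn->primary_channel;
	struct vmbus_channel *sec;

	/* A primary channel has no siblings to collide with. */
	if (!primary)
		return false;

	/* The candidate CPU already serves the device's primary channel... */
	if (primary->target_cpu == cpu)
		return true;

	/* ...or one of its other sub-channels. */
	list_for_each_entry(sec, &primary->sc_list, sc_list) {
		if (sec != chn && sec->target_cpu == cpu)
			return true;
	}

	return false;
}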