int hv_ringbuffer_init()

in ring_buffer.c [185:268]


int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	unsigned long *pfns_wraparound;
	u64 pfn;
	int i;

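	/* The ring header must occupy exactly one page; the data pages follow it. */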
	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
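	 * Mapping the data pages twice, back to back, means a packet that
	 * wraps past the end of the ring can still be copied with a single
	 * contiguous access.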
	 */
	if (hv_isolation_type_snp()) {
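		/*
		 * In an SNP-isolated VM the ring buffer is shared with the
		 * host, so address it via the alias above the shared GPA
		 * boundary (vTOM).
		 */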
		pfn = page_to_pfn(pages) +
			PFN_DOWN(ms_hyperv.shared_gpa_boundary);

		pfns_wraparound = kcalloc(page_cnt * 2 - 1,
			sizeof(unsigned long), GFP_KERNEL);
		if (!pfns_wraparound)
			return -ENOMEM;

		pfns_wraparound[0] = pfn;
		for (i = 0; i < 2 * (page_cnt - 1); i++)
			pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;

		ring_info->ring_buffer = (struct hv_ring_buffer *)
			vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
				 PAGE_KERNEL);
		kfree(pfns_wraparound);

		if (!ring_info->ring_buffer)
			return -ENOMEM;

		/* Zero ring buffer after setting memory host visibility. */
		memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
	} else {
		pages_wraparound = kcalloc(page_cnt * 2 - 1,
					   sizeof(struct page *),
					   GFP_KERNEL);
		if (!pages_wraparound)
			return -ENOMEM;

		pages_wraparound[0] = pages;
		for (i = 0; i < 2 * (page_cnt - 1); i++)
			pages_wraparound[i + 1] =
				&pages[i % (page_cnt - 1) + 1];

		ring_info->ring_buffer = (struct hv_ring_buffer *)
			vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
				PAGE_KERNEL);

		kfree(pages_wraparound);
		if (!ring_info->ring_buffer)
			return -ENOMEM;
	}


	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control (pending send size). */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
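	/*
	 * Pre-compute the reciprocal of ring_size / 10 so that later
	 * percentage calculations can avoid a hardware division.
	 */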
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
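
To make the wraparound index arithmetic above concrete, here is a minimal stand-alone sketch (ordinary user-space C, not part of the driver) that prints which page each slot of the doubled mapping refers to; the page_cnt value of 4 is an arbitrary example.

#include <stdio.h>

int main(void)
{
	unsigned int page_cnt = 4;	/* arbitrary example value */
	unsigned int i;

	/* Slot 0 of the virtual mapping is the header page. */
	printf("slot 0 -> page 0 (header)\n");

	/* The page_cnt - 1 data pages are laid out twice, back to back. */
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		printf("slot %u -> page %u (data)\n",
		       i + 1, i % (page_cnt - 1) + 1);

	return 0;
}

For page_cnt = 4 this prints slots 1 through 6 mapping to data pages 1, 2, 3, 1, 2, 3, which is the layout both the SNP branch (pfns_wraparound) and the regular branch (pages_wraparound) build before handing the array to vmap_pfn() or vmap().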