static int gve_rx_prefill_pages()

in build/gve_rx.c [212:310]


static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
				struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers; when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)
	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
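	/* kvzalloc() is unavailable on this baseline; plain kcalloc() keeps
	 * the zeroed allocation but cannot fall back to vmalloc.
	 */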
	rx->data.page_info = kcalloc(slots, sizeof(*rx->data.page_info),
				     GFP_KERNEL);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	if (!rx->data.page_info)
		return -ENOMEM;

	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
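			/* QPL mode: each slot maps 1:1 onto a pre-registered
			 * QPL page, so the data slot carries an offset into
			 * the QPL rather than a DMA address.
			 */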
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
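		/* Raw DMA addressing: allocate and map a fresh page for
		 * this slot.
		 */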
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
					  &rx->data.page_info[i],
					  &rx->data.data_ring[i], rx);
		if (err)
			goto alloc_err_rda;
	}

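	/* In QPL mode, also prefill the copy pool: spare pages used to copy
	 * packets out when a QPL buffer cannot be recycled.
	 */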
	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref from alloc_page().
			 * Batch-add the rest so per-packet use can decrement
			 * a local bias instead of the atomic page refcount.
			 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
			page_ref_add(page, INT_MAX - 1);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
			atomic_add(INT_MAX - 1, &page->_count);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
		atomic_sub(rx->qpl_copy_pool[j].pagecnt_bias - 1,
			   &rx->qpl_copy_pool[j].page->_count);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * function with gve_setup_rx_buffer.
	 */
	while (i--) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
		atomic_sub(rx->data.page_info[i].pagecnt_bias - 1,
			   &rx->data.page_info[i].page->_count);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) */
	}

	return err;

alloc_err_rda:
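	/* RDA path: unmap and free every buffer allocated so far. */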
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}
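
The copy-pool setup and the alloc_err_qpl unwind are two halves of the same refcount-batching scheme: take INT_MAX references on each page up front, hand them out from a local pagecnt_bias counter, and on teardown return whatever bias is left before dropping the final reference. A minimal standalone sketch of that lifecycle, using hypothetical demo_* helper names that are not part of the driver:

static struct page *demo_biased_page_alloc(int *pagecnt_bias)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	/* alloc_page() gave us one ref; batch-add the rest so the
	 * per-packet hot path only decrements the local bias counter
	 * instead of touching the atomic page refcount.
	 */
	page_ref_add(page, INT_MAX - 1);
	*pagecnt_bias = INT_MAX;
	return page;
}

static void demo_biased_page_free(struct page *page, int pagecnt_bias)
{
	/* Return the unused bias, then drop the final reference. */
	page_ref_sub(page, pagecnt_bias - 1);
	put_page(page);
}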