in google/gve/gve_rx.c [178:256]
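Context: this helper runs during Rx ring setup and gives every descriptor slot a backing page before the queue is brought up; it returns the number of slots filled on success or a negative errno on failure.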
static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
				struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers; when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;
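
	/* Two buffer schemes are handled below: in QPL (queue page list) mode
	 * the device may only access pages that were pre-registered with it,
	 * so each slot is pointed at a page from the queue page list; in
	 * raw-addressing (RDA) mode each slot instead gets a freshly
	 * allocated, DMA-mapped page.
	 */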
	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
					  &rx->data.page_info[i],
					  &rx->data.data_ring[i], rx);
		if (err)
			goto alloc_err_rda;
	}
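
	/* In QPL mode, also prefill the copy pool: spare pages used as copy
	 * destinations when a received fragment cannot be "page flipped"
	 * because the stack still holds references to the QPL page.
	 */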
	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref. Raise the refcount to
			 * INT_MAX and track the driver's share in
			 * pagecnt_bias, so references can later be handed out
			 * by decrementing the bias instead of atomically
			 * bumping the page refcount per packet.
			 */
			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free the QPL pages - the queue page list owns them and
	 * frees them elsewhere. Only remove the bias that gve_setup_rx_buffer
	 * added in this function.
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	return err;

alloc_err_rda:
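	/* Unlike the QPL unwind above, these pages were allocated by this
	 * function, so fully release each buffer (drop the bias and unmap and
	 * free the page) instead of only removing the refcount bias.
	 */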
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}
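
For context, a minimal sketch of how a ring-setup path might consume the return value. The caller name is hypothetical and the fill_cnt field is an assumption about struct gve_rx_ring, but the return convention (slot count on success, negative errno on failure) comes straight from the function above.

static int example_prefill_rx_ring(struct gve_rx_ring *rx,
				   struct gve_rx_alloc_rings_cfg *cfg)
{
	int filled_pages;

	/* Capture the result in a signed variable: a negative value is an
	 * errno (e.g. -ENOMEM), a non-negative value is the slot count.
	 */
	filled_pages = gve_rx_prefill_pages(rx, cfg);
	if (filled_pages < 0)
		return filled_pages;

	rx->fill_cnt = filled_pages;	/* assumed fill counter field */
	return 0;
}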