in google/gve/gve_rx_dqo.c [205:283]
int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx)
{
	struct device *hdev = &priv->pdev->dev;
	struct page_pool *pool;
	int qpl_page_cnt;
	size_t size;
	u32 qpl_id;

	const u32 buffer_queue_slots = cfg->ring_size;
	const u32 completion_queue_slots = cfg->ring_size;
	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");

	memset(rx, 0, sizeof(*rx));
	rx->gve = priv;
	rx->q_num = idx;
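
	/* One buffer state per buffer-queue slot is enough under raw DMA
	 * addressing; QPL mode tracks a state for each page in the queue
	 * page list.
	 */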
	rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
				      sizeof(rx->dqo.buf_states[0]),
				      GFP_KERNEL);
	if (!rx->dqo.buf_states)
		return -ENOMEM;

	/* Allocate header buffers for header-split */
	if (cfg->enable_header_split)
		if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
			goto err;

	/* Allocate RX completion queue */
	size = sizeof(rx->dqo.complq.desc_ring[0]) *
		completion_queue_slots;
	rx->dqo.complq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
	if (!rx->dqo.complq.desc_ring)
		goto err;

	/* Allocate RX buffer queue */
	size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
	rx->dqo.bufq.desc_ring =
		dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
	if (!rx->dqo.bufq.desc_ring)
		goto err;
	if (cfg->raw_addressing) {
		pool = gve_rx_create_page_pool(priv, rx);
		if (IS_ERR(pool))
			goto err;

		rx->dqo.page_pool = pool;
	} else {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
		qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);

		rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							qpl_page_cnt);
		if (!rx->dqo.qpl)
			goto err;
		rx->dqo.next_qpl_page_idx = 0;
	}
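
	/* Per-queue resources block, allocated DMA-coherent and shared
	 * with the device.
	 */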
	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
					     &rx->q_resources_bus, GFP_KERNEL);
	if (!rx->q_resources)
		goto err;

	gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
				   completion_queue_slots);

	return 0;

err:
	gve_rx_free_ring_dqo(priv, rx, cfg);
	return -ENOMEM;
}
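
/*
 * Illustrative sketch only, not code from the driver: one way a rings-config
 * path could drive the allocator above once per RX queue and unwind on
 * failure. The queue-count field (cfg->qcfg_rx->num_queues), the cfg->rx ring
 * array, and the helper name alloc_all_rx_rings_sketch() are assumptions
 * about the surrounding config structs, not the driver's actual API.
 */
static int alloc_all_rx_rings_sketch(struct gve_priv *priv,
				     struct gve_rx_alloc_rings_cfg *cfg)
{
	int err, i;

	for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
		/* Each ring gets its own descriptor rings, buffer states,
		 * and either a page pool or a QPL, as allocated above.
		 */
		err = gve_rx_alloc_ring_dqo(priv, cfg, &cfg->rx[i], i);
		if (err)
			goto free_prev;
	}
	return 0;

free_prev:
	/* Free only the rings that were successfully allocated. */
	while (i--)
		gve_rx_free_ring_dqo(priv, &cfg->rx[i], cfg);
	return err;
}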