in google/gve/gve_tx.c [258:328]
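/* Allocate one GQI-format Tx ring: per-descriptor metadata, the
 * DMA-coherent descriptor ring, an optional QPL-backed bounce FIFO
 * (QPL mode only), and the queue resources block shared with the
 * device. Returns 0 on success or -ENOMEM on failure, with anything
 * already allocated freed before returning.
 */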
static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{
	struct device *hdev = &priv->pdev->dev;
	int qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	spin_lock_init(&tx->xdp_lock);
	tx->q_num = idx;
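
	/* ring_size is expected to be a power of two so the mask can be
	 * used to wrap descriptor indices.
	 */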
	tx->mask = cfg->ring_size - 1;

	/* alloc metadata */
	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * cfg->ring_size;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = cfg->raw_addressing;
	tx->dev = hdev;
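	/* QPL mode: packet data is copied into a bounce FIFO backed by a
	 * pre-registered queue page list instead of being DMA-mapped in
	 * place.
	 */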
	if (!tx->raw_addressing) {
		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		qpl_page_cnt = priv->tx_pages_per_qpl;

		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							    qpl_page_cnt);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;

		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}
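
	/* Queue resources live in DMA-coherent memory so the device can
	 * publish per-queue state (e.g. doorbell and counter indices).
	 */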
	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	return 0;
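
/* Error unwind: release resources in the reverse order of allocation;
 * the !raw_addressing checks skip the FIFO/QPL teardown when those were
 * never set up.
 */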
abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing) {
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}