in build/gve_rx_dqo.c [127:199]
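/* Free a DQO RX ring: return any pages still held by the ring, free the
 * descriptor rings and bookkeeping state, and release the backing page
 * pool or queue page list.
 */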
void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg)
{
	struct device *hdev = &priv->pdev->dev;
	size_t completion_queue_slots;
	size_t buffer_queue_slots;
	int idx = rx->q_num;
	size_t size;
	u32 qpl_id;
	int i;

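	/* Ring sizes were fixed at allocation time; recover them from the masks. */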
	completion_queue_slots = rx->dqo.complq.mask + 1;
	buffer_queue_slots = rx->dqo.bufq.mask + 1;

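	/* Free the DMA-coherent queue resources block, if it was allocated. */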
	if (rx->q_resources) {
		dma_free_coherent(hdev, sizeof(*rx->q_resources),
				  rx->q_resources, rx->q_resources_bus);
		rx->q_resources = NULL;
	}

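	/* Release any pages still referenced by the buffer state entries. */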
	for (i = 0; i < rx->dqo.num_buf_states; i++) {
		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
		if (rx->dqo.page_pool)
			gve_free_to_page_pool(rx, bs, false);
		else
			gve_free_qpl_page_dqo(bs);
#else
		if (bs->page_info.page)
			gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
	}

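	/* Free the queue page list backing this ring, if one is in use. */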
	if (rx->dqo.qpl) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
		gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
		rx->dqo.qpl = NULL;
	}

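	/* Free the buffer queue descriptor ring. */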
	if (rx->dqo.bufq.desc_ring) {
		size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
				  rx->dqo.bufq.bus);
		rx->dqo.bufq.desc_ring = NULL;
	}

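	/* Free the completion queue descriptor ring. */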
	if (rx->dqo.complq.desc_ring) {
		size = sizeof(rx->dqo.complq.desc_ring[0]) *
			completion_queue_slots;
		dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
				  rx->dqo.complq.bus);
		rx->dqo.complq.desc_ring = NULL;
	}

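	/* Free the buffer state array with the free routine that matches how it was allocated. */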
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)
	kvfree(rx->dqo.buf_states);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	kfree(rx->dqo.buf_states);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	rx->dqo.buf_states = NULL;

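	/* Tear down the page pool used by this ring, if any. */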
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
	if (rx->dqo.page_pool) {
		page_pool_destroy(rx->dqo.page_pool);
		rx->dqo.page_pool = NULL;
	}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */

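	/* Free the header buffers used for header-split, if they were allocated. */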
	gve_rx_free_hdr_bufs(priv, rx);

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}