int gve_rx_alloc_ring_gqi()

in build/gve_rx.c [320:445]


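/* gve_rx_alloc_ring_gqi() - allocate and initialize one GQI RX ring.
 *
 * Allocates the DMA-coherent data and descriptor rings, the QPL copy
 * pool, the queue page list (in QPL mode), and the queue resources
 * block, then prefills the data ring with packet buffers.
 *
 * Return: 0 on success or a negative errno; all partial allocations
 * are unwound on failure.
 */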
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx)
{
	struct device *hdev = &priv->pdev->dev;
	u32 slots = cfg->ring_size;
	int filled_pages;
	int qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

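	/* The ring size must be a power of two so that (slots - 1) can
	 * serve as an index mask.
	 */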
	rx->mask = slots - 1;
	rx->data.raw_addressing = cfg->raw_addressing;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;

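	/* The copy pool holds pages the RX path copies packets into when
	 * a QPL page cannot be recycled right away. It is sized at twice
	 * the ring so its mask also stays a power of two.
	 */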
	rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
	rx->qpl_copy_pool_head = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)
	rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
				     sizeof(rx->qpl_copy_pool[0]),
				     GFP_KERNEL);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	rx->qpl_copy_pool = kcalloc(rx->qpl_copy_pool_mask + 1,
				    sizeof(rx->qpl_copy_pool[0]), GFP_KERNEL);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */

	if (!rx->qpl_copy_pool) {
		err = -ENOMEM;
		goto abort_with_slots;
	}

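	/* In QPL mode (no raw addressing), RX buffers must come from a
	 * queue page list registered with the device: one page per slot.
	 */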
	if (!rx->data.raw_addressing) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
		qpl_page_cnt = cfg->ring_size;

		rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							 qpl_page_cnt);
		if (!rx->data.qpl) {
			err = -ENOMEM;
			goto abort_with_copy_pool;
		}
	}

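	/* Post the initial packet buffers into the data ring; on success
	 * gve_rx_prefill_pages() returns the number of slots filled.
	 */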
	filled_pages = gve_rx_prefill_pages(rx, cfg);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_qpl;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure the data ring slots (packet buffers) are written out
	 * before the device is told about them.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
	dma_wmb();
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
	wmb();
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
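	/* Used by the poll path to decide when to repost buffers: refill
	 * once outstanding buffers fall to half the ring.
	 */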
	rx->db_threshold = slots / 2;
	gve_rx_init_ring_state_gqi(rx);

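	/* Use the default RX buffer size and start with a cleared
	 * per-ring packet context.
	 */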
	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
	gve_rx_ctx_clear(&rx->ctx);

	return 0;

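/* Error paths unwind in the reverse order of allocation. */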
abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx, cfg);
abort_with_qpl:
	if (!rx->data.raw_addressing) {
		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
		rx->data.qpl = NULL;
	}
abort_with_copy_pool:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)
	kvfree(rx->qpl_copy_pool);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	kfree(rx->qpl_copy_pool);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) */
	rx->qpl_copy_pool = NULL;
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}