int xdma_performance_submit()

in sdk/linux_kernel_drivers/xdma/libxdma.c [3263:3381]
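
Builds a cyclic descriptor ring over a single coherent DMA buffer and queues it on the engine; the hardware then loops the same transfer continuously while throughput is measured.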


int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine)
{
	u32 max_consistent_size = 128 * 32 * 1024; /* 1024 pages, 4MB */
	struct xdma_transfer *transfer;
	u64 ep_addr = 0;
	int num_desc_in_a_loop = 128;
	int size_in_desc = engine->xdma_perf->transfer_size;
	int size = size_in_desc * num_desc_in_a_loop;
	int free_desc = 0;
	int i;
	int rv = -ENOMEM;

	if (unlikely(size_in_desc <= 0 || size_in_desc > max_consistent_size)) {
		pr_err("%s, bad transfer size %d, max %u.\n",
			engine->name, size_in_desc, max_consistent_size);
		return -EINVAL;
	}

	/* cap the total buffer at the coherent-allocation ceiling and
	 * shrink the descriptor count to match */
	if (size > max_consistent_size) {
		size = max_consistent_size;
		num_desc_in_a_loop = size / size_in_desc;
	}

	/* one coherent buffer backs every descriptor in the ring */
	engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev, size,
					&engine->perf_buf_bus, GFP_KERNEL);
	if (unlikely(!engine->perf_buf_virt)) {
		pr_err("engine %s perf buf OOM.\n", engine->name);
		return -ENOMEM;
	}

	/* allocate transfer data structure */
	transfer = kzalloc(sizeof(struct xdma_transfer), GFP_KERNEL);
	if (unlikely(!transfer)) {
		pr_err("engine %s transfer OOM.\n", engine->name);
		goto free_buffer;
	}
	/* make the error-path list_del() safe even if the transfer is
	 * never queued */
	INIT_LIST_HEAD(&transfer->entry);

	/* direction: DMA_TO_DEVICE = H2C (write to card),
	 * DMA_FROM_DEVICE = C2H (read from card) */
	transfer->dir = engine->dir;
	/* set number of descriptors */
	transfer->desc_num = num_desc_in_a_loop;

	/* allocate descriptor list */
	if (!engine->desc) {
		engine->desc = dma_alloc_coherent(&xdev->pdev->dev,
			num_desc_in_a_loop * sizeof(struct xdma_desc),
			&engine->desc_bus, GFP_KERNEL);
		if (unlikely(!engine->desc)) {
			pr_err("%s desc OOM.\n", engine->name);
			goto free_xfer;
		}
		dbg_init("device %s, engine %s pre-alloc desc 0x%p,0x%llx.\n",
			dev_name(&xdev->pdev->dev), engine->name,
			engine->desc, engine->desc_bus);
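		/* remember that the ring was allocated here so the error
		 * path knows to free it */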
		free_desc = 1;
	}
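	/* the transfer runs directly out of the engine's descriptor ring */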
	transfer->desc_virt = engine->desc;
	transfer->desc_bus = engine->desc_bus;

	rv = transfer_desc_init(transfer, transfer->desc_num);
	if (rv < 0)
		goto free_desc;

	dbg_sg("transfer->desc_bus = 0x%llx.\n", (u64)transfer->desc_bus);

	/* host addresses stride through the coherent buffer; the endpoint
	 * address stays fixed at 0 for every descriptor */
	for (i = 0; i < transfer->desc_num; i++) {
		struct xdma_desc *desc = transfer->desc_virt + i;
		dma_addr_t rc_bus_addr = engine->perf_buf_bus +
						size_in_desc * i;

		/* fill in descriptor entry with transfer details */
		xdma_desc_set(desc, rc_bus_addr, ep_addr, size_in_desc,
			engine->dir);
	}

	/* clear the control bits on the first descriptor: no stop and no
	 * per-descriptor completion interrupt, so the ring free-runs */
	rv = xdma_desc_control_set(transfer->desc_virt, 0);
	if (rv < 0) {
		pr_err("%s: Failed to set desc control\n", engine->name);
		goto free_desc;
	}

	/* create a linked loop */
	xdma_desc_link(transfer->desc_virt + transfer->desc_num - 1,
		transfer->desc_virt, transfer->desc_bus);

	transfer->cyclic = 1;

	/* initialize wait queue */
	init_waitqueue_head(&transfer->wq);

	dbg_perf("Queueing XDMA I/O %s request for performance measurement.\n",
		(engine->dir == DMA_TO_DEVICE) ? "write (to dev)"
					       : "read (from dev)");
	rv = transfer_queue(engine, transfer);
	if (rv < 0)
		goto free_desc;

	return 0;

free_desc:
	if (free_desc && engine->desc) {
		dma_free_coherent(&xdev->pdev->dev,
				num_desc_in_a_loop * sizeof(struct xdma_desc),
				engine->desc, engine->desc_bus);
		/* only forget the ring if it was allocated here */
		engine->desc = NULL;
	}

free_xfer:
	if (transfer) {
		list_del(&transfer->entry);
		kfree(transfer);
	}

free_buffer:
	/* free with the same size that was used for the allocation above */
	if (engine->perf_buf_virt)
		dma_free_coherent(&xdev->pdev->dev, size,
			engine->perf_buf_virt, engine->perf_buf_bus);
	engine->perf_buf_virt = NULL;
	engine->perf_buf_virt = NULL;
	return rv;
}
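
For context, here is a minimal sketch of how this routine might be driven. start_perf_run() and PERF_XFER_SIZE are illustrative names, not part of libxdma.c; xdma_performance_submit() and engine->xdma_perf->transfer_size match the listing above (in the driver itself the call is reached from the performance-start ioctl path).

/* Hypothetical caller sketch -- start_perf_run() and PERF_XFER_SIZE
 * are illustrative names, not part of libxdma.c. */
#define PERF_XFER_SIZE	(32 * 1024)	/* bytes moved per descriptor */

static int start_perf_run(struct xdma_dev *xdev, struct xdma_engine *engine)
{
	int rv;

	/* the perf context supplies transfer_size to the submit routine */
	engine->xdma_perf = kzalloc(sizeof(*engine->xdma_perf), GFP_KERNEL);
	if (!engine->xdma_perf)
		return -ENOMEM;
	engine->xdma_perf->transfer_size = PERF_XFER_SIZE;

	rv = xdma_performance_submit(xdev, engine);
	if (rv < 0) {
		kfree(engine->xdma_perf);
		engine->xdma_perf = NULL;
	}
	return rv;	/* 0: the cyclic ring is queued and running */
}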