in sdk/linux_kernel_drivers/xdma/libxdma.c [4324:4429]
int xdma_cyclic_transfer_setup(struct xdma_engine *engine)
{
	struct xdma_dev *xdev;
	struct xdma_transfer *xfer;
	dma_addr_t bus;
	unsigned long flags;
	int i;
	int rc;

	if (unlikely(!engine || !engine->xdev)) {
		pr_err("engine 0x%p, xdev NULL.\n", engine);
		return -EINVAL;
	}
	xdev = engine->xdev;
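
	/* only one cyclic transfer may own an engine at a time */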
	if (engine->cyclic_req) {
		pr_info("%s: exclusive access already taken.\n",
			engine->name);
		return -EBUSY;
	}

	spin_lock_irqsave(&engine->lock, flags);
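
	/* reset receive ring bookkeeping */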
	engine->rx_tail = 0;
	engine->rx_head = 0;
	engine->rx_overrun = 0;
	engine->eop_found = 0;
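
	/* back the cyclic RX ring with freshly allocated pages mapped for DMA */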
	rc = sgt_alloc_with_pages(&engine->cyclic_sgt, CYCLIC_RX_PAGES_MAX,
				  engine->dir, xdev->pdev);
	if (rc < 0) {
		pr_info("%s cyclic pages %u OOM.\n",
			engine->name, CYCLIC_RX_PAGES_MAX);
		goto err_out;
	}
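
	/* wrap the sg table in a request so descriptors can be built from it */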
	engine->cyclic_req = xdma_init_request(&engine->cyclic_sgt, 0);
	if (!engine->cyclic_req) {
		pr_info("%s cyclic request OOM.\n", engine->name);
		rc = -ENOMEM;
		goto err_out;
	}

#ifdef __LIBXDMA_DEBUG__
	xdma_request_cb_dump(engine->cyclic_req);
#endif
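
	/* build the descriptor list for this request on the engine */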
	rc = transfer_init(engine, engine->cyclic_req);
	if (rc < 0)
		goto err_out;

	xfer = &engine->cyclic_req->xfer;

	/* replace source addresses with result write-back addresses */
	memset(engine->cyclic_result, 0,
	       CYCLIC_RX_PAGES_MAX * sizeof(struct xdma_result));
	bus = engine->cyclic_result_bus;
	for (i = 0; i < xfer->desc_num; i++) {
		xfer->desc_virt[i].src_addr_lo = cpu_to_le32(PCI_DMA_L(bus));
		xfer->desc_virt[i].src_addr_hi = cpu_to_le32(PCI_DMA_H(bus));
		bus += sizeof(struct xdma_result);
	}
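
	/*
	 * Each descriptor's source now points at its own struct xdma_result
	 * slot, so the engine writes per-descriptor status/length back to
	 * host memory as buffers complete.
	 */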
	/* set control of all descriptors */
	for (i = 0; i < xfer->desc_num; i++) {
		xdma_desc_control_clear(xfer->desc_virt + i, LS_BYTE_MASK);
		xdma_desc_control_set(xfer->desc_virt + i,
				      XDMA_DESC_EOP | XDMA_DESC_COMPLETED);
	}

	/* make this a cyclic transfer */
	xdma_transfer_cyclic(xfer);

#ifdef __LIBXDMA_DEBUG__
	transfer_dump(xfer);
#endif
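
	/* credit flow control enabled: grant the engine an initial 128 credits */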
	if (enable_credit_mp)
		write_register(128, &engine->sgdma_regs->credits, 0);

	spin_unlock_irqrestore(&engine->lock, flags);

	/* start cyclic transfer */
	rc = transfer_queue(engine, xfer);
	if (!rc)
		return 0;
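
	/* queueing failed: retake the lock so err_out unwinds with it held */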
	spin_lock_irqsave(&engine->lock, flags);

	/* unwind on errors */
err_out:
	if (engine->cyclic_req) {
		xdma_request_free(engine->cyclic_req);
		engine->cyclic_req = NULL;
	}
	if (engine->cyclic_sgt.orig_nents) {
		sgt_free_with_pages(&engine->cyclic_sgt, engine->dir,
				    xdev->pdev);
		engine->cyclic_sgt.orig_nents = 0;
		engine->cyclic_sgt.nents = 0;
		engine->cyclic_sgt.sgl = NULL;
	}
	spin_unlock_irqrestore(&engine->lock, flags);

	return rc;
}
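
/*
 * Illustrative caller sketch, not part of libxdma.c: how an open() path
 * for a streaming C2H character device might claim the cyclic ring.
 * example_cyclic_open() is a hypothetical name; only
 * xdma_cyclic_transfer_setup() and its -EBUSY/-ENOMEM/-EINVAL contract
 * come from the function above.
 */
static int example_cyclic_open(struct xdma_engine *engine)
{
	int rc = xdma_cyclic_transfer_setup(engine);

	/* a second opener fails the exclusivity check and sees -EBUSY */
	if (rc == -EBUSY)
		pr_info("%s: cyclic ring already in use.\n", engine->name);

	return rc;
}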