in spi-mxs.c [163:296]
static int mxs_spi_txrx_dma(struct mxs_spi *spi,
			    unsigned char *buf, int len,
			    unsigned int flags)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
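	/*
	 * A vmalloc()ed buffer is only virtually contiguous, so it has to be
	 * split into single-page segments; a physically contiguous buffer
	 * can be mapped in chunks of up to SG_MAXLEN bytes.
	 */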
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	u32 ctrl0;
	struct page *vm_page;
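	/*
	 * One entry per DMA segment: four PIO words that (re)program the SSP
	 * registers, followed by the scatterlist entry carrying the data for
	 * that segment.
	 */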
	struct {
		u32 pio[4];
		struct scatterlist sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	reinit_completion(&spi->c);

	/* Chip select was already programmed into CTRL0 */
	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
		   BM_SSP_CTRL0_READ);
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER;

	if (!(flags & TXRX_WRITE))
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		/* Prepare the transfer descriptor. */
		min = min(len, desc_len);

		/*
		 * De-assert CS on last segment if flag is set (i.e., no more
		 * transfers will follow)
		 */
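		/*
		 * Note: in SPI mode the SSP's IGNORE_CRC bit actually
		 * controls de-assertion of the chip select at the end of
		 * the transfer, despite its name.
		 */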
		if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}
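		/*
		 * pio[] shadows the first SSP PIO registers. On i.MX23 only
		 * CTRL0 is written and the transfer length lives in its
		 * XFER_COUNT field (set above); on i.MX28 four words are
		 * written and the length goes into the separate XFER_SIZE
		 * register, i.e. pio[3].
		 */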
		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}

			sg_init_table(&dma_xfer[sg_count].sg, 1);
			sg_set_page(&dma_xfer[sg_count].sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			sg_init_one(&dma_xfer[sg_count].sg, buf, min);
		}

		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
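		/*
		 * With DMA_TRANS_NONE the mxs-dma driver treats the
		 * "scatterlist" as an array of PIO words and writes them
		 * into the SSP registers before the data segment runs, so
		 * pio[] above is passed here instead of a real sg list.
		 */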
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	if (!wait_for_completion_timeout(&spi->c,
					 msecs_to_jiffies(SSP_TIMEOUT))) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;
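	/*
	 * The cleanup below is shared with the success path: the loop unmaps
	 * every segment that was mapped above. err_mapped additionally
	 * unmaps the just-mapped segment whose descriptor setup failed,
	 * while err_vmalloc skips the current, never-mapped segment.
	 */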
err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}