in fsl-qdma.c [1117:1260]
/*
 * Probe the qDMA platform device: parse DT properties, allocate the
 * engine/channel/queue bookkeeping, map the MMIO regions, initialize the
 * hardware, and finally register the dmaengine device.
 *
 * Ordering note: fsl_qdma_reg_init() must run *before*
 * dma_async_device_register().  Registering first would (a) expose
 * channels to dmaengine clients while the hardware is still
 * uninitialized and (b) leak the registered device if reg init fails,
 * since the error path below never calls dma_async_device_unregister().
 *
 * Returns 0 on success or a negative errno.
 */
static int fsl_qdma_probe(struct platform_device *pdev)
{
	int ret, i;
	int blk_num, blk_off;
	u32 len, chans, queues;
	struct resource *res;
	struct fsl_qdma_chan *fsl_chan;
	struct fsl_qdma_engine *fsl_qdma;
	struct device_node *np = pdev->dev.of_node;

	/* Mandatory DT properties describing the controller topology. */
	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-offset", &blk_off);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-offset.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-number", &blk_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-number.\n");
		return ret;
	}

	/* No point servicing more blocks than there are CPUs to run them. */
	blk_num = min_t(int, blk_num, num_online_cpus());

	len = sizeof(*fsl_qdma);
	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma)
		return -ENOMEM;

	len = sizeof(*fsl_chan) * chans;
	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->chans)
		return -ENOMEM;

	len = sizeof(struct fsl_qdma_queue *) * blk_num;
	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->status)
		return -ENOMEM;

	len = sizeof(int) * blk_num;
	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->queue_irq)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queues.\n");
		return ret;
	}

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = chans;
	fsl_qdma->n_queues = queues;
	fsl_qdma->block_number = blk_num;
	fsl_qdma->block_offset = blk_off;

	mutex_init(&fsl_qdma->fsl_qdma_mutex);

	/* One status (completion) queue per block. */
	for (i = 0; i < fsl_qdma->block_number; i++) {
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
		if (!fsl_qdma->status[i])
			return -ENOMEM;
	}

	/* Map control, status and block register regions. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->ctrl_base))
		return PTR_ERR(fsl_qdma->ctrl_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->status_base))
		return PTR_ERR(fsl_qdma->status_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->block_base))
		return PTR_ERR(fsl_qdma->block_base);

	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
	if (!fsl_qdma->queue)
		return -ENOMEM;

	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
	if (ret)
		return ret;

	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;

	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

	/* Channels are distributed round-robin across all block queues. */
	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							fsl_qdma->block_number);
		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
	}

	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

	fsl_qdma->dma_dev.dev = &pdev->dev;
	fsl_qdma->dma_dev.device_free_chan_resources =
		fsl_qdma_free_chan_resources;
	fsl_qdma->dma_dev.device_alloc_chan_resources =
		fsl_qdma_alloc_chan_resources;
	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

	/* qDMA supports 40-bit addressing on Layerscape SoCs. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_mask failure.\n");
		return ret;
	}

	platform_set_drvdata(pdev, fsl_qdma);

	/* Bring the hardware up before exposing the device to clients. */
	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
		return ret;
	}

	ret = dma_async_device_register(&fsl_qdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register NXP Layerscape qDMA engine.\n");
		return ret;
	}

	return 0;
}