static int xilinx_dma_probe(struct platform_device *pdev)

in xilinx/xilinx_dma.c [3129:3305]


static int xilinx_dma_probe(struct platform_device *pdev)
{
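	/*
	 * clk_init defaults to the VDMA clock setup; the OF match data
	 * looked up below may override it with the variant-specific
	 * callback.
	 */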
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
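
	/*
	 * Pick up the variant-specific configuration (DMA type, channel
	 * count, clock init callback) from the OF match table.
	 */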
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	xdev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->regs)) {
		err = PTR_ERR(xdev->regs);
		goto disable_clks;
	}

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
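
	/*
	 * MM2S channels occupy the first half of chan[]; S2MM channels
	 * start at max_channels / 2.
	 */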
	xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;

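	/*
	 * AXI DMA and MCDMA may implement a wider transfer length field;
	 * honour the optional device tree override after range checking it.
	 */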
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
	    xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

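	/* VDMA requires the frame-store count; flush-on-fsync is optional */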
	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			goto disable_clks;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0) {
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
		addr_width = 32;	/* assume 32-bit addressing by default */
	}

	xdev->ext_addr = addr_width > 32;

	/* Set the dma mask bits */
	err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
	if (err < 0) {
		dev_err(xdev->dev, "DMA mask error %d\n", err);
		goto disable_clks;
	}

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

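	/* dmaengine callbacks common to all DMA variants */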
	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_synchronize = xilinx_dma_synchronize;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	xdev->common.device_config = xilinx_dma_device_config;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported only by AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
		/* Residue calculation is supported only by AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0) {
			of_node_put(child);
			goto error;
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->dma_config->max_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
		dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

	/* Unwind in reverse order: remove channels first, then disable clocks */
error:
	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);
disable_clks:
	xdma_disable_allclks(xdev);

	return err;
}
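
For reference, the probe above only shows the consumer side of xdev->dma_config. Below is a minimal sketch of how the OF match data could be wired up, inferring the struct shape from the fields dereferenced here (dmatype, max_channels, clk_init); the enum type name, compatible string, and values are illustrative assumptions, not verbatim driver code.

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;		/* assumed enum name */
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	int max_channels;
};

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,	/* assumed variant-specific helper */
	.max_channels = 2,		/* illustrative: one MM2S/S2MM pair */
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ /* sentinel */ }
};

To make the max_buffer_len computation concrete: GENMASK(len_width - 1, 0) builds an all-ones mask of len_width bits, i.e. the largest byte count the descriptor length field can encode. A self-contained userspace illustration, with the kernel's GENMASK expanded by hand for 32-bit values:

#include <stdint.h>
#include <stdio.h>

/* 32-bit expansion of the kernel's GENMASK(h, l): bits l..h set */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
	unsigned int len_width = 26;	/* e.g. xlnx,sg-length-width = 26 */
	uint32_t max_buffer_len = GENMASK(len_width - 1, 0);

	/* prints max_buffer_len = 0x03ffffff, i.e. up to 2^26 - 1 bytes */
	printf("max_buffer_len = 0x%08x\n", max_buffer_len);
	return 0;
}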