static int mv_xor_probe()

in mv_xor.c [1290:1450]


static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we are dealing with
	 * before setting it up. In the non-DT case it can only be the
	 * legacy Orion variant.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}
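	/*
	 * The lookup above assumes a match table of roughly this shape
	 * (a sketch; the real mv_xor_dt_ids is defined elsewhere in
	 * mv_xor.c and may list further compatibles):
	 *
	 *	static const struct of_device_id mv_xor_dt_ids[] = {
	 *		{ .compatible = "marvell,orion-xor",
	 *		  .data = (void *)XOR_ORION },
	 *		{ .compatible = "marvell,armada-3700-xor",
	 *		  .data = (void *)XOR_ARMADA_37XX },
	 *		{ },	(sentinel)
	 *	};
	 */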

	/*
	 * (Re-)program the MBUS remapping windows if we are asked to:
	 * the Armada 37xx variant has its own dedicated setup, while
	 * the other variants program the windows from the MBUS DRAM
	 * info, when it is available.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/*
	 * Not all platforms can gate the clock, so it is not an error
	 * if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want more than one channel per CPU, so that
	 * async_tx performs well. We therefore limit the number of
	 * engines and channels to honor this constraint, and prefer
	 * channels from separate engines when possible. The dual-CPU
	 * Armada 3700 SoC has a single XOR engine, so there we allow
	 * using both of its channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
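	/*
	 * Worked example: on a quad-core SoC other than the Armada
	 * 37xx, and assuming MV_XOR_MAX_CHANNELS is 2, we get
	 * max_engines = 4 and max_channels = min(2, DIV_ROUND_UP(4, 2))
	 * = 2, i.e. at most two channels, ideally on separate engines.
	 */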

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}
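	/*
	 * For reference, the DT path above walks child nodes of a
	 * layout roughly like this (a sketch of the binding; the
	 * addresses and interrupt numbers are illustrative):
	 *
	 *	xor@60900 {
	 *		compatible = "marvell,orion-xor";
	 *		reg = <0x60900 0x100>, <0x60b00 0x100>;
	 *
	 *		xor00 {
	 *			interrupts = <51>;
	 *		};
	 *		xor01 {
	 *			interrupts = <52>;
	 *		};
	 *	};
	 */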

	return 0;

err_channel_add:
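	/*
	 * Tear down any channels that were registered before the
	 * failure; in the DT case also release their IRQ mappings.
	 */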
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}
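
For the non-DT branch above, board code must register the platform device
with two MEM resources, one IRQ per channel, and a struct
mv_xor_platform_data. A minimal sketch, assuming the platform-data layout
from include/linux/platform_data/dma-mv_xor.h; the base addresses and IRQ
numbers below are hypothetical:

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/platform_data/dma-mv_xor.h>

/* Hypothetical board values, for illustration only. */
#define BOARD_XOR_BASE		0xf1060900
#define BOARD_XOR_HIGH_BASE	0xf1060b00
#define BOARD_XOR_IRQ_CH0	22
#define BOARD_XOR_IRQ_CH1	23

static struct mv_xor_channel_data board_xor_channels[2];

static struct mv_xor_platform_data board_xor_pdata = {
	.channels = board_xor_channels,
};

static struct resource board_xor_resources[] = {
	DEFINE_RES_MEM(BOARD_XOR_BASE, SZ_256),		/* xor_base */
	DEFINE_RES_MEM(BOARD_XOR_HIGH_BASE, SZ_256),	/* xor_high_base */
	DEFINE_RES_IRQ(BOARD_XOR_IRQ_CH0),		/* channel 0 */
	DEFINE_RES_IRQ(BOARD_XOR_IRQ_CH1),		/* channel 1 */
};

static struct platform_device board_xor_device = {
	.name		= MV_XOR_NAME,
	.id		= 0,
	.num_resources	= ARRAY_SIZE(board_xor_resources),
	.resource	= board_xor_resources,
	.dev		= {
		.platform_data = &board_xor_pdata,
	},
};

static void __init board_xor_init(void)
{
	int i;

	/* dma_cap_mask_t must be populated at runtime. */
	for (i = 0; i < ARRAY_SIZE(board_xor_channels); i++) {
		dma_cap_set(DMA_MEMCPY, board_xor_channels[i].cap_mask);
		dma_cap_set(DMA_XOR, board_xor_channels[i].cap_mask);
	}
	platform_device_register(&board_xor_device);
}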