/* omap_dma_probe() — from drivers/dma/ti/omap-dma.c, lines 1652–1840 */

/*
 * omap_dma_probe - probe and register the OMAP sDMA controller.
 * @pdev: platform device describing the DMA controller
 *
 * Maps the controller registers, resolves the platform data (from the
 * device-tree match data, or via omap_get_plat_info() on the legacy
 * non-DT path), fills in the dmaengine capabilities and callbacks,
 * reads the channel/request counts and reserved-channel mask, allocates
 * the logical-channel map, requests the L1 interrupt (falling back to
 * legacy per-channel IRQ handling when it is absent), and registers the
 * device with the dmaengine core and — on DT systems — with the OF DMA
 * framework.  Finally it programs the GCR arbitration defaults and, when
 * the hardware variant requires it, hooks a cpu_pm notifier for context
 * loss / busy checks.
 *
 * Return: 0 on success, a negative errno on failure.  -EPROBE_DEFER is
 * returned when the legacy platform info is not yet available.
 */
static int omap_dma_probe(struct platform_device *pdev)
{
	const struct omap_dma_config *conf;
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 val;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	conf = of_device_get_match_data(&pdev->dev);
	if (conf) {
		/* DT match data present: platform data must be supplied too. */
		od->cfg = conf;
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing\n");
			return -ENODEV;
		}
	} else {
		od->cfg = &default_cfg;

		/* Legacy path: arch code may not have registered the info yet. */
		od->plat = omap_get_plat_info();
		if (!od->plat)
			return -EPROBE_DEFER;
	}

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	mutex_init(&od->lch_lock);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		od->lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!od->lch_count))
			od->lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		od->lch_count = OMAP_SDMA_CHANNELS;
	}

	/* Mask of allowed logical channels */
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
		/* Tag channels not in mask as reserved */
		val = ~val;
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	}
	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
		bitmap_set(od->lch_bitmap, 0, 2);

	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		/* No L1 IRQ: fall back to legacy per-channel interrupts. */
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			/* Non-fatal: degrade to non-linked-list operation. */
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
			/*
			 * Bail out now: without this early return the probe
			 * would go on to touch the hardware and register a
			 * cpu_pm notifier, then still fail with rc != 0,
			 * leaving the notifier dangling on a freed device.
			 */
			return rc;
		}
	}

	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);

	if (od->cfg->needs_busy_check) {
		od->nb.notifier_call = omap_dma_busy_notifier;
		cpu_pm_register_notifier(&od->nb);
	} else if (od->cfg->may_lose_context) {
		od->nb.notifier_call = omap_dma_context_notifier;
		cpu_pm_register_notifier(&od->nb);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return 0;
}