static int davinci_mmcsd_probe(struct platform_device *pdev)

in host/davinci_mmc.c [1190:1348]
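
/*
 * Probe sequence: map the controller registers, enable the functional
 * clock, parse DT or legacy platform data, optionally set up DMA, then
 * register the MMC host and its IRQ handlers.  devm-managed resources
 * unwind automatically on failure; everything acquired by hand is
 * undone through the labels at the end of the function.
 */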


static int davinci_mmcsd_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret, irq;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mem_size = resource_size(r);
	mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
				      pdev->name);
	if (!mem)
		return -EBUSY;
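
	/*
	 * Allocate the mmc_host with our mmc_davinci_host as its private
	 * area; from this point on, every error path must funnel through
	 * mmc_free_host().
	 */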

	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important: back-pointer used by the IRQ and request paths */

	host->mem_res = mem;
	host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!host->base) {
		ret = -ENOMEM;
		goto ioremap_fail;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto clk_get_fail;
	}
	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_prepare_enable_fail;

	host->mmc_input_clk = clk_get_rate(host->clk);
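
	/* DT boots parse the node via mmc_of_parse(); legacy board files go through mmc_davinci_parse_pdata() */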

	match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
	if (match) {
		pdev->id_entry = match->data;
		ret = mmc_of_parse(mmc);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "could not parse OF data\n");
			goto parse_fail;
		}
	} else {
		ret = mmc_davinci_parse_pdata(mmc);
		if (ret) {
			dev_err(&pdev->dev,
				"could not parse platform data: %d\n", ret);
			goto parse_fail;
		}
	}
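
	/* treat an unset or out-of-range segment count as "use the maximum" */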

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	init_mmcsd_host(host);

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);
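
	/*
	 * The second (SDIO) IRQ is optional.  DMA is optional too:
	 * -EPROBE_DEFER from davinci_acquire_dma_channels() means the
	 * DMA provider has not probed yet; any other failure just drops
	 * the driver back to PIO mode.
	 */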

	if (host->use_dma) {
		ret = davinci_acquire_dma_channels(host);
		if (ret == -EPROBE_DEFER)
			goto dma_probe_defer;
		else if (ret)
			host->use_dma = 0;
	}

	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
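
	/*
	 * The id entry encodes the controller version; it works for both
	 * paths because the DT branch seeded pdev->id_entry from the OF
	 * match above.
	 */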

	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs		= MAX_NR_SG;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size	= MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size	= 4095;  /* BLEN is 12 bits */
	mmc->max_blk_count	= 65535; /* NBLK is 16 bits */
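	/* so a single request tops out just under 256 MiB (4095 * 65535 bytes) */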
	mmc->max_req_size	= mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
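
	/* publish the host for the remove and PM callbacks */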

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto mmc_add_host_fail;

	ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
			       mmc_hostname(mmc), host);
	if (ret)
		goto request_irq_fail;

	if (host->sdio_irq >= 0) {
		ret = devm_request_irq(&pdev->dev, host->sdio_irq,
				       mmc_davinci_sdio_irq, 0,
				       mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		host->use_dma ? "DMA" : "PIO",
		(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;
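
	/*
	 * Unwind in reverse order of acquisition.  devm-managed resources
	 * (mem region, ioremap, clk reference, IRQs) are released by the
	 * driver core; only the manually enabled clock, the DMA channels,
	 * the cpufreq notifier, the added host, and the host allocation
	 * need explicit cleanup here.
	 */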

request_irq_fail:
	mmc_remove_host(mmc);
mmc_add_host_fail:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	davinci_release_dma_channels(host);
parse_fail:
dma_probe_defer:
	clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
clk_get_fail:
ioremap_fail:
	mmc_free_host(mmc);

	return ret;
}
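
The error labels above run the same teardown a remove callback performs,
in reverse acquisition order. Below is a minimal sketch of that
counterpart, assuming the modern void-returning remove prototype and
reusing only the helpers already visible in this probe; the devm-managed
region, mapping, clock reference, and IRQs need no explicit release:

static void davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	/* mirror the probe error path, top to bottom */
	mmc_remove_host(host->mmc);
	mmc_davinci_cpufreq_deregister(host);
	davinci_release_dma_channels(host);
	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
}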