in nand/raw/brcmnand/brcmnand.c [2980:3195]
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct brcmnand_controller *ctrl;
	struct resource *res;
	int ret;

	/* We only support device-tree instantiation */
	if (!dn)
		return -ENODEV;

	if (!of_match_node(brcmnand_of_match, dn))
		return -ENODEV;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

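	/* Stash the controller state for the remove() and PM callbacks */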
	dev_set_drvdata(dev, ctrl);
	ctrl->dev = dev;

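	/*
	 * Completion tokens signalled by the interrupt handlers: controller
	 * ready, FLASH_DMA transfer done and EDU transfer done, respectively.
	 */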
	init_completion(&ctrl->done);
	init_completion(&ctrl->dma_done);
	init_completion(&ctrl->edu_done);
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &brcmnand_controller_ops;
	INIT_LIST_HEAD(&ctrl->host_list);

	/* NAND register range */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->nand_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctrl->nand_base))
		return PTR_ERR(ctrl->nand_base);
	/* Enable clock before using NAND registers */
	ctrl->clk = devm_clk_get(dev, "nand");
	if (!IS_ERR(ctrl->clk)) {
		ret = clk_prepare_enable(ctrl->clk);
		if (ret)
			return ret;
	} else {
		/* The clock is optional; only defer if the provider is late */
		ret = PTR_ERR(ctrl->clk);
		if (ret == -EPROBE_DEFER)
			return ret;

		ctrl->clk = NULL;
	}

	/* Initialize NAND revision */
	ret = brcmnand_revision_init(ctrl);
	if (ret)
		goto err;
	/*
	 * Most chips have this cache at a fixed offset within 'nand' block.
	 * Some must specify this region separately.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
	if (res) {
		ctrl->nand_fc = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->nand_fc)) {
			ret = PTR_ERR(ctrl->nand_fc);
			goto err;
		}
	} else {
		ctrl->nand_fc = ctrl->nand_base +
				ctrl->reg_offsets[BRCMNAND_FC_BASE];
	}
	/* FLASH_DMA: optional dedicated DMA engine with its own register block */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
	if (res) {
		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->flash_dma_base)) {
			ret = PTR_ERR(ctrl->flash_dma_base);
			goto err;
		}

		/* Initialize the FLASH_DMA revision (register layout varies) */
		brcmnand_flash_dma_revision_init(ctrl);

		ret = -EIO;
		if (ctrl->nand_version >= 0x0700)
			ret = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(40));
		if (ret)
			ret = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(32));
		if (ret)
			goto err;

		/* linked-list and stop on error */
		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

		/* Allocate descriptor(s) */
		ctrl->dma_desc = dmam_alloc_coherent(dev,
						     sizeof(*ctrl->dma_desc),
						     &ctrl->dma_pa, GFP_KERNEL);
		if (!ctrl->dma_desc) {
			ret = -ENOMEM;
			goto err;
		}

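		/* FLASH_DMA completion is the second interrupt specifier */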
		ctrl->dma_irq = platform_get_irq(pdev, 1);
		if ((int)ctrl->dma_irq < 0) {
			dev_err(dev, "missing FLASH_DMA IRQ\n");
			ret = -ENODEV;
			goto err;
		}

		ret = devm_request_irq(dev, ctrl->dma_irq,
				       brcmnand_dma_irq, 0, DRV_NAME,
				       ctrl);
		if (ret < 0) {
			dev_err(dev, "can't allocate IRQ %d: error %d\n",
				ctrl->dma_irq, ret);
			goto err;
		}

		dev_info(dev, "enabling FLASH_DMA\n");
		/* set flash dma transfer function to call */
		ctrl->dma_trans = brcmnand_dma_trans;
	} else {
		ret = brcmnand_edu_setup(pdev);
		if (ret < 0)
			goto err;

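		/*
		 * EDU is an alternative transfer engine on controllers that
		 * lack FLASH_DMA; with neither present, dma_trans stays NULL
		 * and the driver falls back to PIO transfers.
		 */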
		if (has_edu(ctrl))
			/* set edu transfer function to call */
			ctrl->dma_trans = brcmnand_edu_trans;
	}
	/* Disable automatic device ID config, direct addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
	/* Disable XOR addressing */
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);

	if (ctrl->features & BRCMNAND_HAS_WP) {
		/* Permanently disable write protection */
		if (wp_on == 2)
			brcmnand_set_wp(ctrl, false);
	} else {
		wp_on = 0;
	}
	/* Controller-ready ("CTLRDY") IRQ */
	ctrl->irq = platform_get_irq(pdev, 0);
	if ((int)ctrl->irq < 0) {
		dev_err(dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err;
	}
	/*
	 * Some SoCs wrap this controller's interrupt in SoC-specific glue
	 * logic, handled through the brcmnand_soc callbacks.
	 */
	if (soc) {
		ctrl->soc = soc;

		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
				       DRV_NAME, ctrl);

		/* Enable interrupt */
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	} else {
		/* Use standard interrupt infrastructure */
		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
				       DRV_NAME, ctrl);
	}
	if (ret < 0) {
		dev_err(dev, "can't allocate IRQ %d: error %d\n",
			ctrl->irq, ret);
		goto err;
	}

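	/* Probe one host per "brcm,nandcs" child node (one per chip-select) */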
	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "brcm,nandcs")) {
			struct brcmnand_host *host;

			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err;
			}
			host->pdev = pdev;
			host->ctrl = ctrl;

			ret = brcmnand_init_cs(host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue; /* Try all chip-selects */
			}

			list_add_tail(&host->node, &ctrl->host_list);
		}
	}
	/* No chip-selects could initialize properly */
	if (list_empty(&ctrl->host_list)) {
		ret = -ENODEV;
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(ctrl->clk);
	return ret;
}

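/*
 * For context: a minimal sketch of how a SoC glue driver could feed its
 * brcmnand_soc interrupt callbacks into the probe routine above. Only the
 * ctlrdy_ack/ctlrdy_set_enabled hooks and brcmnand_probe() itself appear in
 * the code above; the bcm9xxx_* names, the register offsets, and the bit
 * mask are illustrative assumptions, not taken from a real glue driver.
 */
struct bcm9xxx_nand_soc {
	struct brcmnand_soc soc;	/* embedded callback table */
	void __iomem *base;		/* SoC interrupt glue registers */
};

#define BCM9XXX_CTLRDY		BIT(0)	/* assumed CTLRDY status/enable bit */

static bool bcm9xxx_nand_intc_ack(struct brcmnand_soc *soc)
{
	struct bcm9xxx_nand_soc *priv =
		container_of(soc, struct bcm9xxx_nand_soc, soc);
	u32 status = readl(priv->base);		/* assumed status reg at +0x0 */

	if (status & BCM9XXX_CTLRDY) {
		/* Write-one-to-clear behavior is assumed here */
		writel(BCM9XXX_CTLRDY, priv->base);
		return true;
	}
	return false;
}

static void bcm9xxx_nand_intc_set(struct brcmnand_soc *soc, bool en)
{
	struct bcm9xxx_nand_soc *priv =
		container_of(soc, struct bcm9xxx_nand_soc, soc);
	u32 val = readl(priv->base + 0x4);	/* assumed enable reg at +0x4 */

	if (en)
		val |= BCM9XXX_CTLRDY;
	else
		val &= ~BCM9XXX_CTLRDY;
	writel(val, priv->base + 0x4);
}

static int bcm9xxx_nand_probe(struct platform_device *pdev)
{
	struct bcm9xxx_nand_soc *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * The glue block is looked up by name so it does not collide with
	 * resource 0, which brcmnand_probe() maps as the NAND register range.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "nand-int-base");
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->soc.ctlrdy_ack = bcm9xxx_nand_intc_ack;
	priv->soc.ctlrdy_set_enabled = bcm9xxx_nand_intc_set;

	return brcmnand_probe(pdev, &priv->soc);
}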