in fsl_ddr_edac.c [476:616]
int fsl_mc_err_probe(struct platform_device *op)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[2];
        struct fsl_mc_pdata *pdata;
        struct resource r;
        u32 sdram_ctl;
        int res;
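
        /*
         * Open a devres group keyed on this function so that every
         * device-managed resource acquired below can be released as a
         * unit if the probe fails part-way through.
         */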
        if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
                return -ENOMEM;
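
        /*
         * Describe the controller geometry to the EDAC core: four chip
         * selects, each exposed as a virtual csrow, on a single channel.
         */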
        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 4;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
        mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
                            sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, fsl_mc_err_probe);
                return -ENOMEM;
        }

        pdata = mci->pvt_info;
        pdata->name = "fsl_mc_err";
        mci->pdev = &op->dev;
        pdata->edac_idx = edac_mc_idx++;
        dev_set_drvdata(mci->pdev, mci);
        mci->ctl_name = pdata->name;
        mci->dev_name = pdata->name;

        /*
         * Get the endianness of DDR controller registers.
         * Default is big endian.
         */
        little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

        res = of_address_to_resource(op->dev.of_node, 0, &r);
        if (res) {
                pr_err("%s: Unable to get resource for MC err regs\n",
                       __func__);
                goto err;
        }

        if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
                                     pdata->name)) {
                pr_err("%s: Error while requesting mem region\n",
                       __func__);
                res = -EBUSY;
                goto err;
        }

        pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
        if (!pdata->mc_vbase) {
                pr_err("%s: Unable to setup MC err regs\n", __func__);
                res = -ENOMEM;
                goto err;
        }
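
        /* Bail out if the controller was brought up without ECC enabled. */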
        sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
        if (!(sdram_ctl & DSC_ECC_EN)) {
                /* no ECC */
                pr_warn("%s: No ECC DIMMs discovered\n", __func__);
                res = -ENODEV;
                goto err;
        }
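
        /*
         * Advertise what the controller can handle: (registered) DDR
         * through DDR4 memory, protected by single-error-correct,
         * double-error-detect (SECDED) ECC.
         */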
        edac_dbg(3, "init mci\n");
        mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
                         MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
                         MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
                         MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
        mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = EDAC_MOD_STR;

        if (edac_op_state == EDAC_OPSTATE_POLL)
                mci->edac_check = fsl_mc_check;

        mci->ctl_page_to_phys = NULL;

        mci->scrub_mode = SCRUB_SW_SRC;
        fsl_ddr_init_csrows(mci);

        /* store the original error disable bits */
        orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);

        /* clear all error bits */
        ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
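
        /*
         * Register with the EDAC core, together with this driver's
         * extra sysfs attribute groups.
         */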
        res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
        if (res) {
                edac_dbg(3, "failed edac_mc_add_mc()\n");
                goto err;
        }
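
        /*
         * In interrupt mode, enable single- and multi-bit error
         * interrupts and take the controller's error IRQ.
         */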
        if (edac_op_state == EDAC_OPSTATE_INT) {
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
                          DDR_EIE_MBEE | DDR_EIE_SBEE);

                /* store the original error management threshold */
                orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
                                            FSL_MC_ERR_SBE) & 0xff0000;

                /* set threshold to 1 error per interrupt */
                ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);

                /* register interrupts */
                pdata->irq = platform_get_irq(op, 0);
                res = devm_request_irq(&op->dev, pdata->irq,
                                       fsl_mc_isr,
                                       IRQF_SHARED,
                                       "[EDAC] MC err", mci);
                if (res < 0) {
                        pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
                               __func__, pdata->irq);
                        res = -ENODEV;
                        goto err2;
                }

                pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
                        pdata->irq);
        }
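
        /*
         * Success: dissolve the devres group without releasing its
         * resources; they now stay around until the device is unbound.
         */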
        devres_remove_group(&op->dev, fsl_mc_err_probe);
        edac_dbg(3, "success\n");
        pr_info(EDAC_MOD_STR " MC err registered\n");

        return 0;
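
        /*
         * Error unwind: err2 additionally unregisters the mci from the
         * EDAC core; err releases every devm_* resource taken above and
         * frees the mci itself.
         */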
err2:
        edac_mc_del_mc(&op->dev);
err:
        devres_release_group(&op->dev, fsl_mc_err_probe);
        edac_mc_free(mci);
        return res;
}