in qcom/qcom_adm.c [740:899]
/*
 * adm_dma_probe - probe and initialize the ADM DMA controller
 * @pdev: platform device for the ADM block
 *
 * Maps registers, acquires clocks/resets, pulses the block out of reset,
 * initializes the hardware channels and CRCIs, and registers the dmaengine
 * device plus its OF translation hook.
 *
 * Returns 0 on success or a negative errno.  All allocations, the register
 * mapping, the resets and the IRQ are devm-managed; only the clocks need
 * explicit unwinding on the error paths.
 */
static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	/* The execution environment selects this EE's register view. */
	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	/* Pulse all four resets to bring the block to a known state. */
	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);
	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
		       ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/*
	 * initialize dmaengine apis
	 *
	 * directions and the addr_widths fields are bitmasks: each supported
	 * enum value contributes its own BIT().  The previous code computed
	 * BIT(DMA_DEV_TO_MEM | DMA_MEM_TO_DEV) (a single bogus bit) and
	 * assigned the raw DMA_SLAVE_BUSWIDTH_4_BYTES enum value instead of
	 * BIT() of it, advertising the wrong capabilities to clients.
	 */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);
	return ret;
}