static int brcmstb_pm_probe()

in bcm/brcmstb/pm/pm-arm.c [680:819]


static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		return PTR_ERR(base);
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
	} else {
		ctrl.aon_sram = base;
	}

	/* Clear the panic marker word in AON SRAM */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		return PTR_ERR(base);
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to get a MEMC
	 * offset, but that is the only versioned thing we can
	 * test for so far.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			/* SHIM-PHY mappings are only needed for warm boot */
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			return PTR_ERR(base);
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			pr_err("error mapping DDR Sequencer %d\n", i);
			return -ENOMEM;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			return -EINVAL;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	/* On-chip SRAM used by the low-level PM code */
	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		return -EINVAL;
	}

	ret = brcmstb_init_sram(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		return ret;
	}

	ctrl.pdev = pdev;

	/* Allocate and DMA-map the S3 parameter block used when entering suspend */
	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params)
		return -ENOMEM;
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Register the panic notifier and install the power-off and suspend hooks */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);

	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}
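
The probe above leans on a brcmstb_ioremap_match() helper that is defined earlier in the same file but not shown here. A plausible shape for it, inferred from the three call sites (match a device-tree node against a table, optionally return the per-compatible match data, then request and map the chosen register window), is sketched below; treat it as an illustration rather than a verbatim copy of the driver's helper.

static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	/* Find the first node matching the table, along with its match entry */
	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	/* Hand back the per-compatible data when the caller asked for it */
	if (ofdata)
		*ofdata = match->data;

	/* Request and map the index-th register window of that node */
	return of_io_request_and_map(dn, index, dn->full_name);
}

For completeness, a probe like this is normally hooked up as an ordinary platform driver. The sketch below shows the usual registration pattern, assuming the <linux/module.h> and <linux/platform_device.h> includes already present in such a file; the driver name and the choice of aon_ctrl_dt_ids as the match table are assumptions here, not taken from the excerpt.

/* Sketch only: name and match table are assumed, not copied from the driver */
static struct platform_driver brcmstb_pm_driver = {
	.probe	= brcmstb_pm_probe,
	.driver = {
		.name		= "brcmstb-pm",
		.of_match_table	= aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_register(&brcmstb_pm_driver);
}
module_init(brcmstb_pm_init);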