int iproc_msi_init()

in controller/pcie-iproc-msi.c [519:656]


int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
	struct iproc_msi *msi;
	int i, ret;
	unsigned int cpu;

	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
		return -ENODEV;

	if (!of_find_property(node, "msi-controller", NULL))
		return -ENODEV;

	if (pcie->msi)
		return -EBUSY;

	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	msi->pcie = pcie;
	pcie->msi = msi;
	msi->msi_addr = pcie->base_addr;
	mutex_init(&msi->bitmap_lock);
	msi->nr_cpus = num_possible_cpus();

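	/*
	 * Multi-MSI needs a contiguous block of vectors, which does not fit
	 * the way vectors are spread across per-CPU MSI groups for affinity;
	 * advertise MSI_FLAG_MULTI_PCI_MSI only when there is a single
	 * possible CPU.
	 */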
	if (msi->nr_cpus == 1)
		iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;

	msi->nr_irqs = of_irq_count(node);
	if (!msi->nr_irqs) {
		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
		return -ENODEV;
	}

	if (msi->nr_irqs > NR_HW_IRQS) {
		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
			 msi->nr_irqs);
		msi->nr_irqs = NR_HW_IRQS;
	}

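	/*
	 * MSI affinity dedicates GIC interrupts (and their event queues) to
	 * individual CPUs, so at least one interrupt per possible CPU is
	 * required and the total is trimmed to a multiple of nr_cpus.
	 */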
	if (msi->nr_irqs < msi->nr_cpus) {
		dev_err(pcie->dev,
			"not enough GIC interrupts for MSI affinity\n");
		return -EINVAL;
	}

	if (msi->nr_irqs % msi->nr_cpus != 0) {
		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
			 msi->nr_irqs);
	}

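	/*
	 * PAXB-based controllers share a single event queue/MSI register
	 * region, whereas PAXC provides one region per GIC interrupt.
	 */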
	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
	case IPROC_PCIE_PAXB:
		msi->reg_offsets = iproc_msi_reg_paxb;
		msi->nr_eq_region = 1;
		msi->nr_msi_region = 1;
		break;
	case IPROC_PCIE_PAXC:
		msi->reg_offsets = iproc_msi_reg_paxc;
		msi->nr_eq_region = msi->nr_irqs;
		msi->nr_msi_region = msi->nr_irqs;
		break;
	default:
		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
		msi->has_inten_reg = true;

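	/*
	 * Each GIC interrupt serves one event queue of EQ_LEN entries, so up
	 * to nr_irqs * EQ_LEN MSI vectors are available; the bitmap tracks
	 * which of them are in use.
	 */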
	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
				   sizeof(*msi->bitmap), GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
				 GFP_KERNEL);
	if (!msi->grps)
		return -ENOMEM;

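	/*
	 * Map each GIC interrupt from the device tree and bind it to its
	 * event queue group.
	 */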
	for (i = 0; i < msi->nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		if (!irq) {
			dev_err(pcie->dev, "unable to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}
		msi->grps[i].gic_irq = irq;
		msi->grps[i].msi = msi;
		msi->grps[i].eq = i;
	}

	/* Reserve memory for the event queues and make sure it is zeroed */
	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
					 &msi->eq_dma, GFP_KERNEL);
	if (!msi->eq_cpu) {
		ret = -ENOMEM;
		goto free_irqs;
	}

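	/* Create the IRQ domains used to allocate and map MSI vectors */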
	ret = iproc_msi_alloc_domains(node, msi);
	if (ret) {
		dev_err(pcie->dev, "failed to create MSI domains\n");
		goto free_eq_dma;
	}

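	/*
	 * Bind the GIC interrupts to their target CPUs and install the
	 * event queue handlers.
	 */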
	for_each_online_cpu(cpu) {
		ret = iproc_msi_irq_setup(msi, cpu);
		if (ret)
			goto free_msi_irq;
	}

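	/* Program the controller and start accepting MSI writes */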
	iproc_msi_enable(msi);

	return 0;

free_msi_irq:
	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);
	iproc_msi_free_domains(msi);

free_eq_dma:
	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

free_irqs:
	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
	pcie->msi = NULL;
	return ret;
}
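
For context, the sketch below shows how a host driver might hand the MSI device node to iproc_msi_init(). The helper name iproc_pcie_setup_msi() is hypothetical, and the sketch assumes the MSI controller node is referenced from the PCIe controller's node through an "msi-parent" phandle; the in-tree host driver's MSI enable path does additional work that this sketch omits.

#include <linux/of.h>

/*
 * Hypothetical caller sketch: resolve the MSI controller node referenced by
 * an "msi-parent" phandle and initialize MSI support for this iProc PCIe
 * controller.
 */
static int iproc_pcie_setup_msi(struct iproc_pcie *pcie)
{
	struct device_node *msi_node;
	int ret;

	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
	if (!msi_node)
		return -ENODEV;

	/*
	 * iproc_msi_init() rejects nodes that are not compatible with
	 * "brcm,iproc-msi" or that lack the "msi-controller" property, so
	 * pointing at a foreign MSI controller simply returns -ENODEV here.
	 */
	ret = iproc_msi_init(pcie, msi_node);

	of_node_put(msi_node);
	return ret;
}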