int armpmu_request_irq(int irq, int cpu)

in arm_pmu.c [626:692]
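
Requests the PMU interrupt for the given CPU. A regular (non-percpu_devid)
IRQ is pinned to that CPU, while a percpu_devid IRQ is requested only once
and then shared by all CPUs; in both cases a pseudo-NMI is preferred, with a
fallback to a normal interrupt, and the chosen pmu_irq_ops is recorded per CPU.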


int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	/* Some CPUs may have no PMU interrupt wired up; that's not an error */
	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
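		/*
		 * Not a percpu_devid interrupt (e.g. an SPI): it targets a
		 * single CPU, so pin its affinity there and try to request it
		 * as an NMI, falling back to a regular interrupt below.
		 */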
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
			    IRQF_NO_THREAD;

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If we can't get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
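		/*
		 * A percpu_devid interrupt (e.g. a PPI) is shared by every CPU,
		 * so only the first CPU to get here actually requests it.
		 */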
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If we can't get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* The percpu_devid IRQ was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	/* Record this CPU's IRQ and ops so later enable/free paths can find them */
	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}
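
For context, this function is driven once per supported CPU by the platform
glue. The sketch below is loosely modeled on armpmu_request_irqs() in
arm_pmu_platform.c; the struct arm_pmu field names (hw_events, supported_cpus)
follow the upstream header, but treat it as an illustration rather than the
verbatim kernel code.

static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		/* Per-CPU IRQ numbers are stashed earlier by the probe/parse code */
		int irq = per_cpu(hw_events->irq, cpu);

		if (!irq)
			continue;

		err = armpmu_request_irq(irq, cpu);
		if (err)
			break;
	}

	return err;
}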