static irqreturn_t dmc620_pmu_handle_irq()

in arm_dmc620_pmu.c [339:397]
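
Overflow interrupt handler for the DMC-620 PMU. A single interrupt line can be shared by several DMC-620 instances, so the handler walks every PMU registered on it: all counters are quiesced, each overflowed event is updated and re-armed, the overflow status registers are cleared, and any counter whose event has not been stopped is re-enabled.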


static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
{
	struct dmc620_pmu_irq *irq = data;
	struct dmc620_pmu *dmc620_pmu;
	irqreturn_t ret = IRQ_NONE;

	rcu_read_lock();
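	/*
	 * A single overflow interrupt line can be shared by several DMC-620
	 * instances; walk every PMU registered on it under RCU.
	 */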
	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
		unsigned long status;
		struct perf_event *event;
		unsigned int idx;

		/*
		 * The HW doesn't provide a control to atomically disable all
		 * counters. To prevent a race condition (an overflow occurring
		 * while the status register is being cleared), disable all
		 * events before continuing.
		 */
		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
			event = dmc620_pmu->events[idx];
			if (!event)
				continue;
			dmc620_pmu_disable_counter(event);
		}

		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
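		/*
		 * "status" now has one bit per overflowed counter: clkdiv2
		 * counters in the low bits, clk counters shifted above them.
		 */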
		if (status) {
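			/*
			 * Fold each overflowed counter into its event and
			 * re-arm it for the next period, then clear only the
			 * status register(s) that reported an overflow.
			 */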
			for_each_set_bit(idx, &status,
					DMC620_PMU_MAX_COUNTERS) {
				event = dmc620_pmu->events[idx];
				if (WARN_ON_ONCE(!event))
					continue;
				dmc620_pmu_event_update(event);
				dmc620_pmu_event_set_period(event);
			}

			if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
				writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);

			if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
				DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
				writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
		}

		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
			event = dmc620_pmu->events[idx];
			if (!event)
				continue;
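			/*
			 * Counters were mass-disabled above; restart any whose
			 * event the core has not stopped.
			 */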
			if (!(event->hw.state & PERF_HES_STOPPED))
				dmc620_pmu_enable_counter(event);
		}

		ret = IRQ_HANDLED;
	}
	rcu_read_unlock();

	return ret;
}