int csky_pmu_device_probe()

in kernel/perf_event.c [1293:1338]

int csky_pmu_device_probe(struct platform_device *pdev,
			  const struct of_device_id *of_table)
{
	struct device_node *node = pdev->dev.of_node;
	int ret;

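	/* Set up the csky_pmu descriptor and its per-CPU event state. */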
	ret = init_hw_perf_events();
	if (ret) {
		pr_notice("[perf] failed to probe PMU!\n");
		return ret;
	}

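	/*
	 * Counter width comes from the optional "count-width" DT property;
	 * it determines the largest period a counter can be programmed with.
	 */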
	if (of_property_read_u32(node, "count-width",
				 &csky_pmu.count_width)) {
		csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
	}
	csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;

	csky_pmu.plat_device = pdev;

	/* Ensure the PMU has sane values out of reset. */
	on_each_cpu(csky_pmu_reset, &csky_pmu, 1);

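	/*
	 * Without the overflow interrupt the PMU can still count but not
	 * sample, so just flag the capability and keep going.
	 */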
	ret = csky_pmu_request_irq(csky_pmu_handle_irq);
	if (ret) {
		csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pr_notice("[perf] PMU request irq fail!\n");
	}

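	/* Hotplug callbacks start/stop the counters as CPUs come and go. */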
	ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE",
				csky_pmu_starting_cpu,
				csky_pmu_dying_cpu);
	if (ret) {
		csky_pmu_free_irq();
		free_percpu(csky_pmu.hw_events);
		return ret;
	}

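	/* Everything is ready: expose the PMU to the perf core as "cpu". */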
	ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
	if (ret) {
		csky_pmu_free_irq();
		free_percpu(csky_pmu.hw_events);
	}

	return ret;
}