in arm-ccn.c [713:842]
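
/*
 * perf event_init callback for the CCN (Cache Coherent Network) PMU.
 * Validates the event's attr.config fields against the probed mesh
 * topology, rejects modes this uncore PMU cannot support (sampling,
 * branch stacks, per-task counting), remaps node watchpoints onto the
 * owning crosspoint and pins the event to the designated CPU.
 */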
static int arm_ccn_pmu_event_init(struct perf_event *event)
{
	struct arm_ccn *ccn;
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	int valid;
	int i;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	ccn = pmu_to_arm_ccn(event->pmu);
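
	/*
	 * An interconnect PMU has no task or instruction context to
	 * attribute samples to, so only counting mode is supported.
	 */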
	if (hw->sample_period) {
		dev_dbg(ccn->dev, "Sampling not supported!\n");
		return -EOPNOTSUPP;
	}

	if (has_branch_stack(event)) {
		dev_dbg(ccn->dev, "Can't provide branch data!\n");
		return -EINVAL;
	}
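
	/*
	 * Likewise there is no task context: a per-task (cpu < 0) event
	 * could not be attributed to any single thread, so system-wide
	 * counting on an explicit CPU is required.
	 */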
	if (event->cpu < 0) {
		dev_dbg(ccn->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Many perf core operations (e.g. event rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same set of events to be observed on all CPUs, but
	 * it can lead to issues for off-core PMUs like CCN, where each
	 * event could theoretically be assigned to a different CPU. To
	 * mitigate this, we enforce assignment of all events to one
	 * selected processor (the one advertised in the "cpumask"
	 * attribute).
	 */
	event->cpu = ccn->dt.cpu;
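	/*
	 * (A hotplug callback elsewhere in this driver migrates the perf
	 * context and updates dt.cpu should that processor go offline, so
	 * the assignment above remains valid.)
	 */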

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);
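
	/*
	 * These fields are packed into attr.config by the perf tool from
	 * an event string; an illustrative invocation (cf. the CCN perf
	 * documentation) would be:
	 *
	 *   perf stat -a -e ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ sleep 1
	 *
	 * port and vc are only meaningful for XP events and are
	 * range-checked against the event table below.
	 */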

	/* Validate node/xp vs topology */
	switch (type) {
	case CCN_TYPE_MN:
		if (node_xp != ccn->mn_id) {
			dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_XP:
		if (node_xp >= ccn->num_xps) {
			dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_CYCLES:
		break;
	default:
		if (node_xp >= ccn->num_nodes) {
			dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
			return -EINVAL;
		}
		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
			dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
					type, node_xp);
			return -EINVAL;
		}
		break;
	}
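
	/*
	 * Note for the cases above: the cycle counter counts for the mesh
	 * as a whole, so CCN_TYPE_CYCLES needs no ID check, while the
	 * default arm applies to ordinary device nodes (HN-F, RN-I,
	 * SBAS, ...).
	 */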

	/* Validate event ID vs available for the type */
	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
			i++) {
		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
		u32 port = CCN_CONFIG_PORT(event->attr.config);
		u32 vc = CCN_CONFIG_VC(event->attr.config);

		if (!arm_ccn_pmu_type_eq(type, e->type))
			continue;
		if (event_id != e->event)
			continue;
		if (e->num_ports && port >= e->num_ports) {
			dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
					port, node_xp);
			return -EINVAL;
		}
		if (e->num_vcs && vc >= e->num_vcs) {
			dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
					vc, node_xp);
			return -EINVAL;
		}
		valid = 1;
	}

	if (!valid) {
		dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
				event_id, node_xp);
		return -EINVAL;
	}
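
	/*
	 * (arm_ccn_pmu_events is the same table that backs this PMU's
	 * sysfs "events" directory, so exactly the advertised events pass
	 * the loop above.)
	 */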

	/* Watchpoint-based event for a node is actually set on XP */
	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
		u32 port;

		type = CCN_TYPE_XP;
		port = arm_ccn_node_to_xp_port(node_xp);
		node_xp = arm_ccn_node_to_xp(node_xp);

		arm_ccn_pmu_config_set(&event->attr.config,
				node_xp, type, port);
	}
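
	/*
	 * Illustration, assuming the usual two device ports per
	 * crosspoint (CCN_NUM_XP_PORTS == 2): a watchpoint requested on
	 * node 5 ends up programmed on XP 5 / 2 = 2, port 5 % 2 = 1.
	 */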

	/*
	 * We must NOT create groups containing mixed PMUs, although
	 * software events are acceptable (for example, a CCN group can be
	 * read periodically whenever a hrtimer, aka cpu-clock, leader
	 * triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;
	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}
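
/*
 * Example of a grouping the checks above permit (illustrative): a
 * cpu-clock software event as group leader with CCN events as its
 * siblings, so that the hrtimer tick drives periodic reads of the CCN
 * counters, which is exactly the scenario the comment above describes.
 */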