in profiler/profile.bpf.c [163:214]
int do_perf_event(struct bpf_perf_event_data *ctx)
{
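    // bpf_get_current_pid_tgid() packs the tgid (the user-visible pid)
    // in the upper 32 bits and the thread id in the lower 32 bits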
    __u64 id = bpf_get_current_pid_tgid();
    __u32 pid = id >> 32;
    __u32 tid = (__u32)id;
    __u64 *valp;
    static const __u64 zero;
    struct stack_key key = {};
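    // drop samples that do not match the configured pid/tid/idle filters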
    if (!include_idle && tid == 0)
        return 0;
    if (targ_pid != -1 && targ_pid != pid)
        return 0;
    if (targ_tid != -1 && targ_tid != tid)
        return 0;
    key.pid = pid;
    bpf_get_current_comm(&key.name, sizeof(key.name));
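    // -1 marks a side that was filtered out; bpf_get_stackid() also
    // returns a negative error code if the stack could not be captured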
    if (user_stacks_only)
        key.kern_stack_id = -1;
    else
        key.kern_stack_id = bpf_get_stackid(&ctx->regs, &stackmap, 0);
    if (kernel_stacks_only)
        key.user_stack_id = -1;
    else
        key.user_stack_id = bpf_get_stackid(&ctx->regs, &stackmap, BPF_F_USER_STACK);
    if (key.kern_stack_id >= 0)
    {
        // record the sampled instruction pointer so user space can
        // fix up the top frame of the kernel stack
        __u64 ip = PT_REGS_IP(&ctx->regs);
        if (is_kernel_addr(ip))
        {
            key.kernel_ip = ip;
        }
    }
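    // count one sample per unique stack key, initializing the slot to
    // zero the first time the key is seen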
    valp = bpf_map_lookup_or_try_init(&counts, &key, &zero);
    if (valp)
        __sync_fetch_and_add(valp, 1);
    if (!disable_lua_user_trace && (!valp || *valp <= 1))
    {
        // only capture the Lua stack the first time a new stack key is
        // seen (or when the counts map could not be updated)
        fix_lua_stack(ctx, tid, key.user_stack_id);
    }
    return 0;
}
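For reference, bpf_map_lookup_or_try_init() is not a kernel BPF helper; it is a small wrapper over bpf_map_lookup_elem()/bpf_map_update_elem(). A minimal sketch, modeled on the helper of the same name in libbpf-tools' maps.bpf.h (the copy used by this file may differ):

// sketch only; the real definition likely lives in a maps.bpf.h-style
// header included by profile.bpf.c
static __always_inline void *
bpf_map_lookup_or_try_init(void *map, const void *key, const void *init)
{
    void *val;
    long err;

    val = bpf_map_lookup_elem(map, key);
    if (val)
        return val;

    // BPF_NOEXIST: only create the entry if it is absent, so a
    // concurrent CPU inserting the same key is not treated as an error
    err = bpf_map_update_elem(map, key, init, BPF_NOEXIST);
    if (err && err != -EEXIST)
        return 0;

    return bpf_map_lookup_elem(map, key);
}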