perf: Ensure bpf_perf_link path is properly serialized

[ Upstream commit 7ed9138a72829d2035ecbd8dbd35b1bc3c137c40 ]

Ravi reported that the bpf_perf_link_attach() usage of
perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
PERF_EVENT_IOC_SET_BPF case.
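
For context, the PERF_EVENT_IOC_SET_BPF path is serialized because perf_ioctl()
wraps _perf_ioctl() in the event context lock, while bpf_perf_link_attach()
called perf_event_set_bpf_prog() directly. A trimmed sketch of perf_ioctl()
from kernel/events/core.c (the security hook and its error handling are
omitted here):

  static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  {
          struct perf_event *event = file->private_data;
          struct perf_event_context *ctx;
          long ret;

          ctx = perf_event_ctx_lock(event);   /* takes ctx->mutex */
          ret = _perf_ioctl(event, cmd, arg); /* PERF_EVENT_IOC_SET_BPF handled in here */
          perf_event_ctx_unlock(event, ctx);

          return ret;
  }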

Reported-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5913,6 +5913,9 @@ static int perf_event_set_output(struct perf_event *event,
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
                           struct perf_event_attr *attr);
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                     struct bpf_prog *prog,
+                                     u64 bpf_cookie);
 
 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
@@ -5981,7 +5984,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
                 if (IS_ERR(prog))
                         return PTR_ERR(prog);
 
-                err = perf_event_set_bpf_prog(event, prog, 0);
+                err = __perf_event_set_bpf_prog(event, prog, 0);
                 if (err) {
                         bpf_prog_put(prog);
                         return err;
@@ -10583,8 +10586,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
         return false;
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
-                            u64 bpf_cookie)
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                     struct bpf_prog *prog,
+                                     u64 bpf_cookie)
 {
         bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
 
@@ -10622,6 +10626,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
         return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
 }
 
+int perf_event_set_bpf_prog(struct perf_event *event,
+                            struct bpf_prog *prog,
+                            u64 bpf_cookie)
+{
+        struct perf_event_context *ctx;
+        int ret;
+
+        ctx = perf_event_ctx_lock(event);
+        ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
+        perf_event_ctx_unlock(event, ctx);
+
+        return ret;
+}
+
 void perf_event_free_bpf_prog(struct perf_event *event)
 {
         if (!perf_event_is_tracing(event)) {
@@ -10641,7 +10659,15 @@ static void perf_event_free_filter(struct perf_event *event)
 {
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
-                            u64 bpf_cookie)
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                     struct bpf_prog *prog,
+                                     u64 bpf_cookie)
+{
+        return -ENOENT;
+}
+
+int perf_event_set_bpf_prog(struct perf_event *event,
+                            struct bpf_prog *prog,
+                            u64 bpf_cookie)
 {
         return -ENOENT;