| | | |
|---|---|---|
| author | Alexei Starovoitov <ast@kernel.org> | 2020-01-15 11:44:51 -0800 |
| committer | Alexei Starovoitov <ast@kernel.org> | 2020-01-15 11:46:32 -0800 |
| commit | 5640a771d7971412049d1934d2d030edfdd1c28e | |
| tree | e68ec27387482ff400e4f7b7212eae6f6941e6ac /kernel | |
| parent | xsk: Support allocations of large umems | |
| parent | tools/bpf: Add self tests for bpf_send_signal_thread() | |
Merge branch 'bpf_send_signal_thread'
Yonghong Song says:
====================
Commit 8b401f9ed244 ("bpf: implement bpf_send_signal() helper")
added the helper bpf_send_signal(), which permits a bpf program to
send a signal to the current process. The signal may be delivered to
any thread of that process.
This patch set implements a new helper, bpf_send_signal_thread(),
which sends the signal to the thread corresponding to the kernel's
current task.
This helper can simplify user space code when the thread context in
which the bpf program sent the signal is needed in user space. Please
see Patch #1 for details of the use case and the kernel implementation.
Patch #2 adds bpf selftests for the new helper.
Changelog:
v2 -> v3:
- Further simplify the skeleton code by removing unneeded mmap code
  and a redundantly created tracepoint link.
v1 -> v2:
- Expand the description of the difference between bpf_send_signal()
  and bpf_send_signal_thread() in the uapi header bpf.h.
- Use a libbpf skeleton and mmap for the send_signal test (a generic
  loader sketch follows the sign-offs below).
====================
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
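For orientation only (this is not part of the merge), a minimal sketch of how the new helper might be used end to end: a BPF program that signals the exact thread entering nanosleep(), plus a user-space loader built on a libbpf skeleton with memory-mapped global data, in the spirit of what the cover letter and changelog describe. The tracepoint, the signal number, the `target_pid` filter, and the file/skeleton names (`sigthread.c`, `sigthread.skel.h`) are assumptions for the example, not names from this series.

```c
/* sigthread.c -- hypothetical BPF-side sketch, assuming a libbpf/clang
 * setup recent enough to declare bpf_send_signal_thread().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

const volatile __u32 target_pid = 0;	/* filled in by the loader */

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int send_sig_to_thread(void *ctx)
{
	__u32 tgid = bpf_get_current_pid_tgid() >> 32;

	if (tgid != target_pid)
		return 0;

	/* 10 == SIGUSR1 on most architectures. Unlike bpf_send_signal(),
	 * which may deliver to any thread of the process, this targets
	 * the very thread that hit the tracepoint.
	 */
	bpf_send_signal_thread(10);
	return 0;
}

char _license[] SEC("license") = "GPL";
```

A matching loader, assuming `bpftool gen skeleton sigthread.o` produced `sigthread.skel.h`, could look like the following; setting `skel->rodata->target_pid` directly through the skeleton is the "skeleton and mmap" usage the changelog refers to.

```c
/* Hypothetical user-space loader for the sketch above. */
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "sigthread.skel.h"

static void handler(int sig)
{
	/* Runs in the thread that entered nanosleep(), because
	 * bpf_send_signal_thread() targets that exact thread.
	 */
	const char msg[] = "SIGUSR1 delivered to the sleeping thread\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main(void)
{
	struct timespec ts = { .tv_nsec = 10 * 1000 * 1000 };
	struct sigthread *skel;
	int err = 1;

	signal(SIGUSR1, handler);

	skel = sigthread__open();
	if (!skel)
		return 1;

	/* Global data is memory-mapped by libbpf, so it can be set
	 * through the skeleton between open and load, with no explicit
	 * map update calls.
	 */
	skel->rodata->target_pid = getpid();

	if (sigthread__load(skel) || sigthread__attach(skel))
		goto out;

	/* Enter the nanosleep syscall to trigger the tracepoint. */
	syscall(SYS_nanosleep, &ts, NULL);
	err = 0;
out:
	sigthread__destroy(skel);
	return err;
}
```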
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/trace/bpf_trace.c | 27 |
1 file changed, 24 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e5ef4ae9edb5..19e793aa441a 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -703,6 +703,7 @@ struct send_signal_irq_work {
 	struct irq_work irq_work;
 	struct task_struct *task;
 	u32 sig;
+	enum pid_type type;
 };
 
 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
@@ -712,10 +713,10 @@ static void do_bpf_send_signal(struct irq_work *entry)
 	struct send_signal_irq_work *work;
 
 	work = container_of(entry, struct send_signal_irq_work, irq_work);
-	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
 }
 
-BPF_CALL_1(bpf_send_signal, u32, sig)
+static int bpf_send_signal_common(u32 sig, enum pid_type type)
 {
 	struct send_signal_irq_work *work = NULL;
 
@@ -748,11 +749,17 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
 		 */
 		work->task = current;
 		work->sig = sig;
+		work->type = type;
 		irq_work_queue(&work->irq_work);
 		return 0;
 	}
 
-	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+	return bpf_send_signal_common(sig, PIDTYPE_TGID);
 }
 
 static const struct bpf_func_proto bpf_send_signal_proto = {
@@ -762,6 +769,18 @@ static const struct bpf_func_proto bpf_send_signal_proto = {
 	.arg1_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_1(bpf_send_signal_thread, u32, sig)
+{
+	return bpf_send_signal_common(sig, PIDTYPE_PID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+	.func		= bpf_send_signal_thread,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -822,6 +841,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #endif
 	case BPF_FUNC_send_signal:
 		return &bpf_send_signal_proto;
+	case BPF_FUNC_send_signal_thread:
+		return &bpf_send_signal_thread_proto;
 	default:
 		return NULL;
 	}
```
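The only change in delivery semantics above is the pid type passed to group_send_sig_info(): PIDTYPE_TGID means process-directed, PIDTYPE_PID means thread-directed. That is the same distinction visible from user space with kill() versus pthread_kill(); the stand-alone demo below (not part of this series) makes it observable.

```c
/* Stand-alone demo (not from this series): process-directed vs
 * thread-directed delivery, mirroring PIDTYPE_TGID vs PIDTYPE_PID.
 * Build: gcc -pthread demo.c
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void handler(int sig)
{
	/* snprintf/write here is demo-only; strictly, a handler should
	 * stick to async-signal-safe functions.
	 */
	char buf[64];
	int len = snprintf(buf, sizeof(buf), "sig %d handled by tid %ld\n",
			   sig, (long)syscall(SYS_gettid));

	if (len > 0)
		write(STDOUT_FILENO, buf, len);
}

static void *worker(void *arg)
{
	pause();		/* wait until a signal arrives */
	return NULL;
}

int main(void)
{
	pthread_t thr;

	signal(SIGUSR1, handler);
	pthread_create(&thr, NULL, worker, NULL);
	sleep(1);		/* crude: let the worker reach pause() */

	/* Thread-directed, like bpf_send_signal_thread()/PIDTYPE_PID:
	 * delivered to the worker thread specifically.
	 */
	pthread_kill(thr, SIGUSR1);
	pthread_join(thr, NULL);

	/* Process-directed, like bpf_send_signal()/PIDTYPE_TGID: delivered
	 * to any thread with the signal unblocked; only the main thread
	 * remains here, but with several threads the kernel picks one.
	 */
	kill(getpid(), SIGUSR1);
	return 0;
}
```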
