From 51d157946666382e779f94c39891e8e9a020da78 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Mon, 8 Nov 2021 10:58:10 -0500
Subject: ring-buffer: Protect ring_buffer_reset() from reentrancy

The resetting of the entire ring buffer used to simply go through and
reset each individual CPU buffer, each of which had its own protection
and synchronization. But this was very slow, due to performing a
synchronization for each CPU. The code was reshuffled to do one disabling
of all CPU buffers, followed by a single RCU synchronization, and then
the resetting of each of the CPU buffers. But unfortunately, the mutex
that prevented multiple occurrences of resetting the buffer was not moved
to the upper function, so nothing protects the global reset from
reentrancy.

Take the ring buffer mutex around the global reset.

Cc: stable@vger.kernel.org
Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
Reported-by: "Tzvetomir Stoyanov (VMware)"
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/ring_buffer.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'kernel/trace')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f6520d0a4c8c..2699e9e562b1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5228,6 +5228,9 @@ void ring_buffer_reset(struct trace_buffer *buffer)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu;
 
+	/* prevent another thread from changing buffer sizes */
+	mutex_lock(&buffer->mutex);
+
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 
@@ -5246,6 +5249,8 @@ void ring_buffer_reset(struct trace_buffer *buffer)
 		atomic_dec(&cpu_buffer->record_disabled);
 		atomic_dec(&cpu_buffer->resize_disabled);
 	}
+
+	mutex_unlock(&buffer->mutex);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset);
--
cgit v1.2.3


From 2e6e9058d13a22a6fdd36a8c444ac71d9656003a Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Tue, 9 Nov 2021 12:42:17 +0100
Subject: ftrace/direct: Fix lockup in modify_ftrace_direct_multi

We can't call unregister_ftrace_function() under ftrace_lock:
unregister_ftrace_function() takes ftrace_lock itself, so calling it with
the lock already held locks up modify_ftrace_direct_multi(). Drop
ftrace_lock before the call.

Link: https://lkml.kernel.org/r/20211109114217.1645296-1-jolsa@kernel.org
Fixes: ed29271894aa ("ftrace/direct: Do not disable when switching direct callers")
Signed-off-by: Jiri Olsa
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/ftrace.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b4ed1a301232..fc49e8809a56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5602,10 +5602,11 @@ int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
 		}
 	}
 
+	mutex_unlock(&ftrace_lock);
+
 	/* Removing the tmp_ops will add the updated direct callers to the functions */
 	unregister_ftrace_function(&tmp_ops);
 
-	mutex_unlock(&ftrace_lock);
 out_direct:
 	mutex_unlock(&direct_mutex);
 	return err;
--
cgit v1.2.3
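
Below is a minimal userspace sketch of the locking mistake the second patch fixes. It is an illustration under assumptions, not the kernel code: registry_lock, unregister_entry(), and modify_entry() are hypothetical stand-ins for ftrace_lock, unregister_ftrace_function(), and modify_ftrace_direct_multi(). The point it demonstrates is that a helper which acquires a non-recursive mutex must not be called while that same mutex is already held, so the caller drops the lock first, exactly as the patch reorders the unlock.

/* Illustrative sketch only (hypothetical names); build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for unregister_ftrace_function(): it acquires the lock itself. */
static void unregister_entry(void)
{
	pthread_mutex_lock(&registry_lock);
	/* ... remove the temporary entry from the registry ... */
	pthread_mutex_unlock(&registry_lock);
}

/* Stand-in for modify_ftrace_direct_multi(). */
static void modify_entry(void)
{
	pthread_mutex_lock(&registry_lock);
	/* ... update the direct callers under the lock ... */

	/*
	 * Calling unregister_entry() here, while registry_lock is still
	 * held, would self-deadlock on the non-recursive mutex.  Mirror
	 * the fix instead: drop the lock, then call the helper that
	 * re-acquires it.
	 */
	pthread_mutex_unlock(&registry_lock);
	unregister_entry();
}

int main(void)
{
	modify_entry();
	printf("modify_entry() completed without locking up\n");
	return 0;
}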