author	Chris Redpath <chris.redpath@arm.com>	2015-02-03 17:26:54 +0900
committer	Seung-Woo Kim <sw0312.kim@samsung.com>	2016-12-14 13:41:48 +0900
commit	da7debdb62bcd558948f54404c2d4530bc10335a (patch)
tree	0e12339b3ceec29dfd554b6d9323b887ed8632bc /kernel
parent	519aaf480b302512edad375022be5d9050585ef1 (diff)
smp: smp_cross_call function pointer tracing
generic tracing for smp_cross_call function calls

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/smp.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
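Note that the patch defines CREATE_TRACE_POINTS before including <trace/events/smp.h>, so that header must supply the definitions of the smp_call_func_entry, smp_call_func_exit and smp_call_func_send events. The header itself is not part of this diff; what follows is only a minimal sketch of what it plausibly contains, inferred from the call sites below and the kernel's usual TRACE_EVENT conventions (the void * prototype and the %pf format are assumptions, not taken from the actual patch):

/* Hypothetical reconstruction of include/trace/events/smp.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smp

#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMP_H

#include <linux/tracepoint.h>

/* entry and exit share one layout, so use a single event class */
DECLARE_EVENT_CLASS(smp_call_class,
	TP_PROTO(void *func),
	TP_ARGS(func),
	TP_STRUCT__entry(
		__field(void *, func)
	),
	TP_fast_assign(
		__entry->func = func;
	),
	TP_printk("func=%pf", __entry->func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_entry,
	TP_PROTO(void *func),
	TP_ARGS(func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_exit,
	TP_PROTO(void *func),
	TP_ARGS(func)
);

/* the send event additionally records the destination CPU */
TRACE_EVENT(smp_call_func_send,
	TP_PROTO(void *func, int dest),
	TP_ARGS(func, dest),
	TP_STRUCT__entry(
		__field(void *, func)
		__field(int, dest)
	),
	TP_fast_assign(
		__entry->func = func;
		__entry->dest = dest;
	),
	TP_printk("dest=%d func=%pf", __entry->dest, __entry->func)
);

#endif /* _TRACE_SMP_H */

/* This part must be outside the multi-read protection */
#include <trace/define_trace.h>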
diff --git a/kernel/smp.c b/kernel/smp.c
index 07854477c164..8eb18ff6c523 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
 
 #include "smpboot.h"
 
@@ -153,7 +155,9 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 		 */
 		csd_unlock(csd);
 		local_irq_save(flags);
+		trace_smp_call_func_entry(func);
 		func(info);
+		trace_smp_call_func_exit(func);
 		local_irq_restore(flags);
 		return 0;
 	}
@@ -178,8 +182,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+		trace_smp_call_func_send(csd->func, cpu);
 		arch_send_call_function_single_ipi(cpu);
+	}
 
 	return 0;
 }
@@ -240,14 +246,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		smp_call_func_t func = csd->func;
 		void *info = csd->info;
-
 		/* Do we wait until *after* callback? */
 		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+			trace_smp_call_func_entry(func);
 			func(info);
+			trace_smp_call_func_exit(func);
 			csd_unlock(csd);
 		} else {
 			csd_unlock(csd);
+			trace_smp_call_func_entry(func);
 			func(info);
+			trace_smp_call_func_exit(func);
 		}
 	}
 
@@ -276,6 +285,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	int this_cpu;
 	int err;
 
+	trace_smp_call_func_send(func, cpu);
 	/*
 	 * prevent preemption and reschedule on another processor,
 	 * as well as CPU removal
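
With the patch applied, any caller of smp_call_function_single() is bracketed by the new events, and the events appear under the smp group in tracefs (on a 4.1-era kernel, /sys/kernel/debug/tracing/events/smp/). As an illustration only, a trivial module that exercises the traced path; the module name, demo_func and the CPU choice are made up for this sketch and are not part of the patch:

/* demo-smp-trace.c: hypothetical module exercising the traced path */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static void demo_func(void *info)
{
	/* Runs on the target CPU, bracketed by the new
	 * smp_call_func_entry/smp_call_func_exit tracepoints. */
	pr_info("demo_func running on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	int cpu = cpumask_first(cpu_online_mask);

	/* smp_call_function_single() now fires smp_call_func_send up
	 * front (and again before the IPI if the target queue was
	 * empty); wait=1 blocks until demo_func has completed. */
	return smp_call_function_single(cpu, demo_func, NULL, 1);
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");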