From 8f0820183056ad26dabc0202115848a92f1143fc Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Apr 2010 10:47:33 -0400 Subject: tracing: Create class struct for events This patch creates a ftrace_event_class struct that event structs point to. This class struct will be made to hold information to modify the events. Currently the class struct only holds the events system name. This patch slightly increases the size, but this change lays the ground work of other changes to make the footprint of tracepoints smaller. With 82 standard tracepoints, and 618 system call tracepoints (two tracepoints per syscall: enter and exit): text data bss dec hex filename 4913961 1088356 861512 6863829 68bbd5 vmlinux.orig 4914025 1088868 861512 6864405 68be15 vmlinux.class This patch also cleans up some stale comments in ftrace.h. v2: Fixed missing semi-colon in macro. Acked-by: Frederic Weisbecker Acked-by: Mathieu Desnoyers Acked-by: Masami Hiramatsu Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 20 ++++++++++---------- kernel/trace/trace_events_filter.c | 6 +++--- kernel/trace/trace_export.c | 6 +++++- kernel/trace/trace_kprobe.c | 12 ++++++------ kernel/trace/trace_syscalls.c | 4 ++++ 5 files changed, 28 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index c697c704334..2f54b48d363 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -176,10 +176,10 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, if (match && strcmp(match, call->name) != 0 && - strcmp(match, call->system) != 0) + strcmp(match, call->class->system) != 0) continue; - if (sub && strcmp(sub, call->system) != 0) + if (sub && strcmp(sub, call->class->system) != 0) continue; if (event && strcmp(event, call->name) != 0) @@ -355,8 +355,8 @@ static int t_show(struct seq_file *m, void *v) { struct ftrace_event_call *call = v; - if (strcmp(call->system, TRACE_SYSTEM) != 0) - seq_printf(m, "%s:", call->system); + if (strcmp(call->class->system, TRACE_SYSTEM) != 0) + seq_printf(m, "%s:", call->class->system); seq_printf(m, "%s\n", call->name); return 0; @@ -453,7 +453,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, if (!call->name || !call->regfunc) continue; - if (system && strcmp(call->system, system) != 0) + if (system && strcmp(call->class->system, system) != 0) continue; /* @@ -925,8 +925,8 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, * If the trace point header did not define TRACE_SYSTEM * then the system would be called "TRACE_SYSTEM". */ - if (strcmp(call->system, TRACE_SYSTEM) != 0) - d_events = event_subsystem_dir(call->system, d_events); + if (strcmp(call->class->system, TRACE_SYSTEM) != 0) + d_events = event_subsystem_dir(call->class->system, d_events); call->dir = debugfs_create_dir(call->name, d_events); if (!call->dir) { @@ -1041,7 +1041,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) list_del(&call->list); trace_destroy_fields(call); destroy_preds(call); - remove_subsystem_dir(call->system); + remove_subsystem_dir(call->class->system); } /* Remove an event_call */ @@ -1399,8 +1399,8 @@ static __init void event_trace_self_tests(void) * syscalls as we test. 
*/ #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS - if (call->system && - strcmp(call->system, "syscalls") == 0) + if (call->class->system && + strcmp(call->class->system, "syscalls") == 0) continue; #endif diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 88c0b6dbd7f..ca329603d0b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -628,7 +628,7 @@ static int init_subsystem_preds(struct event_subsystem *system) if (!call->define_fields) continue; - if (strcmp(call->system, system->name) != 0) + if (strcmp(call->class->system, system->name) != 0) continue; err = init_preds(call); @@ -647,7 +647,7 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) if (!call->define_fields) continue; - if (strcmp(call->system, system->name) != 0) + if (strcmp(call->class->system, system->name) != 0) continue; filter_disable_preds(call); @@ -1252,7 +1252,7 @@ static int replace_system_preds(struct event_subsystem *system, if (!call->define_fields) continue; - if (strcmp(call->system, system->name) != 0) + if (strcmp(call->class->system, system->name) != 0) continue; /* try to see if the filter can be applied */ diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index e091f64ba6c..7f16e216381 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -18,6 +18,10 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM ftrace +struct ftrace_event_class event_class_ftrace = { + .system = __stringify(TRACE_SYSTEM), +}; + /* not needed for this file */ #undef __field_struct #define __field_struct(type, item) @@ -160,7 +164,7 @@ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ .id = type, \ - .system = __stringify(TRACE_SYSTEM), \ + .class = &event_class_ftrace, \ .raw_init = ftrace_raw_init_event, \ .print_fmt = print, \ .define_fields = ftrace_define_fields_##call, \ diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1251e367bae..eda220bf206 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -332,8 +332,8 @@ static struct trace_probe *alloc_trace_probe(const char *group, goto error; } - tp->call.system = kstrdup(group, GFP_KERNEL); - if (!tp->call.system) + tp->call.class->system = kstrdup(group, GFP_KERNEL); + if (!tp->call.class->system) goto error; INIT_LIST_HEAD(&tp->list); @@ -361,7 +361,7 @@ static void free_trace_probe(struct trace_probe *tp) for (i = 0; i < tp->nr_args; i++) free_probe_arg(&tp->args[i]); - kfree(tp->call.system); + kfree(tp->call.class->system); kfree(tp->call.name); kfree(tp->symbol); kfree(tp); @@ -374,7 +374,7 @@ static struct trace_probe *find_probe_event(const char *event, list_for_each_entry(tp, &probe_list, list) if (strcmp(tp->call.name, event) == 0 && - strcmp(tp->call.system, group) == 0) + strcmp(tp->call.class->system, group) == 0) return tp; return NULL; } @@ -399,7 +399,7 @@ static int register_trace_probe(struct trace_probe *tp) mutex_lock(&probe_lock); /* register as an event */ - old_tp = find_probe_event(tp->call.name, tp->call.system); + old_tp = find_probe_event(tp->call.name, tp->call.class->system); if (old_tp) { /* delete old event */ unregister_trace_probe(old_tp); @@ -798,7 +798,7 @@ static int probes_seq_show(struct seq_file *m, void *v) char buf[MAX_ARGSTR_LEN + 1]; seq_printf(m, "%c", probe_is_return(tp) ? 
'r' : 'p'); - seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); + seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); if (!tp->symbol) seq_printf(m, " 0x%p", tp->rp.kp.addr); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 4d6d711717f..d036a74a64f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -15,6 +15,10 @@ static int sys_refcount_exit; static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); +struct ftrace_event_class event_class_syscalls = { + .system = "syscalls" +}; + extern unsigned long __start_syscalls_metadata[]; extern unsigned long __stop_syscalls_metadata[]; -- cgit v1.2.3 From 38516ab59fbc5b3bb278cf5e1fe2867c70cff32e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Apr 2010 17:04:50 -0400 Subject: tracing: Let tracepoints have data passed to tracepoint callbacks This patch adds data to be passed to tracepoint callbacks. The created functions from DECLARE_TRACE() now need a mandatory data parameter. For example: DECLARE_TRACE(mytracepoint, int value, value) Will create the register function: int register_trace_mytracepoint((void(*)(void *data, int value))probe, void *data); As the first argument, all callbacks (probes) must take a (void *data) parameter. So a callback for the above tracepoint will look like: void myprobe(void *data, int value) { } The callback may choose to ignore the data parameter. This change allows callbacks to register a private data pointer along with the function probe. void mycallback(void *data, int value); register_trace_mytracepoint(mycallback, mydata); Then the mycallback() will receive the "mydata" as the first parameter before the args. A more detailed example: DECLARE_TRACE(mytracepoint, TP_PROTO(int status), TP_ARGS(status)); /* In the C file */ DEFINE_TRACE(mytracepoint, TP_PROTO(int status), TP_ARGS(status)); [...] trace_mytracepoint(status); /* In a file registering this tracepoint */ int my_callback(void *data, int status) { struct my_struct my_data = data; [...] } [...] my_data = kmalloc(sizeof(*my_data), GFP_KERNEL); init_my_data(my_data); register_trace_mytracepoint(my_callback, my_data); The same callback can also be registered to the same tracepoint as long as the data registered is different. Note, the data must also be used to unregister the callback: unregister_trace_mytracepoint(my_callback, my_data); Because of the data parameter, tracepoints declared this way can not have no args. That is: DECLARE_TRACE(mytracepoint, TP_PROTO(void), TP_ARGS()); will cause an error. If no arguments are needed, a new macro can be used instead: DECLARE_TRACE_NOARGS(mytracepoint); Since there are no arguments, the proto and args fields are left out. This is part of a series to make the tracepoint footprint smaller: text data bss dec hex filename 4913961 1088356 861512 6863829 68bbd5 vmlinux.orig 4914025 1088868 861512 6864405 68be15 vmlinux.class 4918492 1084612 861512 6864616 68bee8 vmlinux.tracepoint Again, this patch also increases the size of the kernel, but lays the ground work for decreasing it. v5: Fixed net/core/drop_monitor.c to handle these updates. v4: Moved the DECLARE_TRACE() DECLARE_TRACE_NOARGS out of the #ifdef CONFIG_TRACE_POINTS, since the two are the same in both cases. The __DECLARE_TRACE() is what changes. Thanks to Frederic Weisbecker for pointing this out. 
v3: Made all register_* functions require data to be passed and all callbacks to take a void * parameter as its first argument. This makes the calling functions comply with C standards. Also added more comments to the modifications of DECLARE_TRACE(). v2: Made the DECLARE_TRACE() have the ability to pass arguments and added a new DECLARE_TRACE_NOARGS() for tracepoints that do not need any arguments. Acked-by: Mathieu Desnoyers Acked-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Neil Horman Cc: David S. Miller Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 95 ++++++++++++++----- include/trace/ftrace.h | 14 +-- kernel/trace/blktrace.c | 125 ++++++++++++++----------- kernel/trace/ftrace.c | 7 +- kernel/trace/kmemtrace.c | 42 +++++---- kernel/trace/trace_sched_switch.c | 20 ++-- kernel/trace/trace_sched_wakeup.c | 28 +++--- kernel/trace/trace_syscalls.c | 24 ++--- kernel/trace/trace_workqueue.c | 26 ++--- kernel/tracepoint.c | 91 ++++++++++-------- net/core/drop_monitor.c | 12 +-- samples/tracepoints/tp-samples-trace.h | 4 +- samples/tracepoints/tracepoint-probe-sample.c | 13 +-- samples/tracepoints/tracepoint-probe-sample2.c | 7 +- 14 files changed, 298 insertions(+), 210 deletions(-) (limited to 'kernel') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 8d5e4f6d96d..9a59d1f98cd 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -20,12 +20,17 @@ struct module; struct tracepoint; +struct tracepoint_func { + void *func; + void *data; +}; + struct tracepoint { const char *name; /* Tracepoint name */ int state; /* State. */ void (*regfunc)(void); void (*unregfunc)(void); - void **funcs; + struct tracepoint_func *funcs; } __attribute__((aligned(32))); /* * Aligned on 32 bytes because it is * globally visible and gcc happily @@ -37,16 +42,19 @@ struct tracepoint { * Connect a probe to a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_register(const char *name, void *probe); +extern int tracepoint_probe_register(const char *name, void *probe, void *data); /* * Disconnect a probe from a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_unregister(const char *name, void *probe); +extern int +tracepoint_probe_unregister(const char *name, void *probe, void *data); -extern int tracepoint_probe_register_noupdate(const char *name, void *probe); -extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); +extern int tracepoint_probe_register_noupdate(const char *name, void *probe, + void *data); +extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, + void *data); extern void tracepoint_probe_update_all(void); struct tracepoint_iter { @@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. + * + * Note, the proto and args passed in includes "__data" as the first parameter. + * The reason for this is to handle the "void" prototype. If a tracepoint + * has a "void" prototype, then it is invalid to declare a function + * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just + * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". 
*/ #define __DO_TRACE(tp, proto, args) \ do { \ - void **it_func; \ + struct tracepoint_func *it_func_ptr; \ + void *it_func; \ + void *__data; \ \ rcu_read_lock_sched_notrace(); \ - it_func = rcu_dereference_sched((tp)->funcs); \ - if (it_func) { \ + it_func_ptr = rcu_dereference_sched((tp)->funcs); \ + if (it_func_ptr) { \ do { \ - ((void(*)(proto))(*it_func))(args); \ - } while (*(++it_func)); \ + it_func = (it_func_ptr)->func; \ + __data = (it_func_ptr)->data; \ + ((void(*)(proto))(it_func))(args); \ + } while ((++it_func_ptr)->func); \ } \ rcu_read_unlock_sched_notrace(); \ } while (0) @@ -122,23 +140,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. */ -#define DECLARE_TRACE(name, proto, args) \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ - TP_PROTO(proto), TP_ARGS(args)); \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args)); \ } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), void *data) \ { \ - return tracepoint_probe_register(#name, (void *)probe); \ + return tracepoint_probe_register(#name, (void *)probe, \ + data); \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ - return tracepoint_probe_unregister(#name, (void *)probe);\ + return tracepoint_probe_unregister(#name, (void *)probe, \ + data); \ } \ - static inline void check_trace_callback_type_##name(void (*cb)(proto)) \ + static inline void \ + check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ } @@ -158,20 +182,22 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, EXPORT_SYMBOL(__tracepoint_##name) #else /* !CONFIG_TRACEPOINTS */ -#define DECLARE_TRACE(name, proto, args) \ - static inline void _do_trace_##name(struct tracepoint *tp, proto) \ - { } \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ } \ - static inline void check_trace_callback_type_##name(void (*cb)(proto)) \ + static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ } @@ -181,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, #define EXPORT_TRACEPOINT_SYMBOL(name) #endif /* CONFIG_TRACEPOINTS */ + +/* + * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype + * (void). "void" is a special value in a function prototype and can + * not be combined with other arguments. Since the DECLARE_TRACE() + * macro adds a data element at the beginning of the prototype, + * we need a way to differentiate "(void *data, proto)" from + * "(void *data, void)". The second prototype is invalid. 
+ * + * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype + * and "void *__data" as the callback prototype. + * + * DECLARE_TRACE() passes "proto" as the tracepoint protoype and + * "void *__data, proto" as the callback prototype. + */ +#define DECLARE_TRACE_NOARGS(name) \ + __DECLARE_TRACE(name, void, , void *__data, __data) + +#define DECLARE_TRACE(name, proto, args) \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + #endif /* DECLARE_TRACE */ #ifndef TRACE_EVENT diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 7dcdfd824aa..ba28b644f41 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -406,18 +406,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ \ -static void perf_trace_##name(proto); \ +static void perf_trace_##name(void *, proto); \ \ static notrace int \ perf_trace_enable_##name(struct ftrace_event_call *unused) \ { \ - return register_trace_##name(perf_trace_##name); \ + return register_trace_##name(perf_trace_##name, NULL); \ } \ \ static notrace void \ perf_trace_disable_##name(struct ftrace_event_call *unused) \ { \ - unregister_trace_##name(perf_trace_##name); \ + unregister_trace_##name(perf_trace_##name, NULL); \ } #undef DEFINE_EVENT_PRINT @@ -578,7 +578,7 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ \ -static notrace void ftrace_raw_event_##call(proto) \ +static notrace void ftrace_raw_event_##call(void *__ignore, proto) \ { \ ftrace_raw_event_id_##template(&event_##call, args); \ } \ @@ -586,13 +586,13 @@ static notrace void ftrace_raw_event_##call(proto) \ static notrace int \ ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \ { \ - return register_trace_##call(ftrace_raw_event_##call); \ + return register_trace_##call(ftrace_raw_event_##call, NULL); \ } \ \ static notrace void \ ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \ { \ - unregister_trace_##call(ftrace_raw_event_##call); \ + unregister_trace_##call(ftrace_raw_event_##call, NULL); \ } \ \ static struct trace_event ftrace_event_type_##call = { \ @@ -793,7 +793,7 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void perf_trace_##call(proto) \ +static notrace void perf_trace_##call(void *__ignore, proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ \ diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b3bc91a3f51..19d93f83e3c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -675,28 +675,33 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, } } -static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq) +static void blk_add_trace_rq_abort(void *ignore, + struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_ABORT); } -static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq) +static void blk_add_trace_rq_insert(void *ignore, + struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_INSERT); } -static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq) +static void blk_add_trace_rq_issue(void *ignore, + struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_ISSUE); } -static void 
blk_add_trace_rq_requeue(struct request_queue *q, +static void blk_add_trace_rq_requeue(void *ignore, + struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); } -static void blk_add_trace_rq_complete(struct request_queue *q, +static void blk_add_trace_rq_complete(void *ignore, + struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); @@ -724,34 +729,40 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); } -static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio) +static void blk_add_trace_bio_bounce(void *ignore, + struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); } -static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio) +static void blk_add_trace_bio_complete(void *ignore, + struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); } -static void blk_add_trace_bio_backmerge(struct request_queue *q, +static void blk_add_trace_bio_backmerge(void *ignore, + struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); } -static void blk_add_trace_bio_frontmerge(struct request_queue *q, +static void blk_add_trace_bio_frontmerge(void *ignore, + struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); } -static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio) +static void blk_add_trace_bio_queue(void *ignore, + struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_QUEUE); } -static void blk_add_trace_getrq(struct request_queue *q, +static void blk_add_trace_getrq(void *ignore, + struct request_queue *q, struct bio *bio, int rw) { if (bio) @@ -765,7 +776,8 @@ static void blk_add_trace_getrq(struct request_queue *q, } -static void blk_add_trace_sleeprq(struct request_queue *q, +static void blk_add_trace_sleeprq(void *ignore, + struct request_queue *q, struct bio *bio, int rw) { if (bio) @@ -779,7 +791,7 @@ static void blk_add_trace_sleeprq(struct request_queue *q, } } -static void blk_add_trace_plug(struct request_queue *q) +static void blk_add_trace_plug(void *ignore, struct request_queue *q) { struct blk_trace *bt = q->blk_trace; @@ -787,7 +799,7 @@ static void blk_add_trace_plug(struct request_queue *q) __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); } -static void blk_add_trace_unplug_io(struct request_queue *q) +static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) { struct blk_trace *bt = q->blk_trace; @@ -800,7 +812,7 @@ static void blk_add_trace_unplug_io(struct request_queue *q) } } -static void blk_add_trace_unplug_timer(struct request_queue *q) +static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q) { struct blk_trace *bt = q->blk_trace; @@ -813,7 +825,8 @@ static void blk_add_trace_unplug_timer(struct request_queue *q) } } -static void blk_add_trace_split(struct request_queue *q, struct bio *bio, +static void blk_add_trace_split(void *ignore, + struct request_queue *q, struct bio *bio, unsigned int pdu) { struct blk_trace *bt = q->blk_trace; @@ -839,8 +852,9 @@ static void blk_add_trace_split(struct request_queue *q, struct bio *bio, * it spans a stripe (or similar). Add a trace for that action. 
* **/ -static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, - dev_t dev, sector_t from) +static void blk_add_trace_remap(void *ignore, + struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; @@ -869,7 +883,8 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, * Add a trace for that action. * **/ -static void blk_add_trace_rq_remap(struct request_queue *q, +static void blk_add_trace_rq_remap(void *ignore, + struct request_queue *q, struct request *rq, dev_t dev, sector_t from) { @@ -921,64 +936,64 @@ static void blk_register_tracepoints(void) { int ret; - ret = register_trace_block_rq_abort(blk_add_trace_rq_abort); + ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); WARN_ON(ret); - ret = register_trace_block_rq_insert(blk_add_trace_rq_insert); + ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); WARN_ON(ret); - ret = register_trace_block_rq_issue(blk_add_trace_rq_issue); + ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); WARN_ON(ret); - ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue); + ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); WARN_ON(ret); - ret = register_trace_block_rq_complete(blk_add_trace_rq_complete); + ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); WARN_ON(ret); - ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce); + ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); WARN_ON(ret); - ret = register_trace_block_bio_complete(blk_add_trace_bio_complete); + ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); WARN_ON(ret); - ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); + ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); WARN_ON(ret); - ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); + ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); WARN_ON(ret); - ret = register_trace_block_bio_queue(blk_add_trace_bio_queue); + ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); WARN_ON(ret); - ret = register_trace_block_getrq(blk_add_trace_getrq); + ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); WARN_ON(ret); - ret = register_trace_block_sleeprq(blk_add_trace_sleeprq); + ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); WARN_ON(ret); - ret = register_trace_block_plug(blk_add_trace_plug); + ret = register_trace_block_plug(blk_add_trace_plug, NULL); WARN_ON(ret); - ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer); + ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); WARN_ON(ret); - ret = register_trace_block_unplug_io(blk_add_trace_unplug_io); + ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); WARN_ON(ret); - ret = register_trace_block_split(blk_add_trace_split); + ret = register_trace_block_split(blk_add_trace_split, NULL); WARN_ON(ret); - ret = register_trace_block_remap(blk_add_trace_remap); + ret = register_trace_block_remap(blk_add_trace_remap, NULL); WARN_ON(ret); - ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); + ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); WARN_ON(ret); } static void blk_unregister_tracepoints(void) { - unregister_trace_block_rq_remap(blk_add_trace_rq_remap); - unregister_trace_block_remap(blk_add_trace_remap); - 
unregister_trace_block_split(blk_add_trace_split); - unregister_trace_block_unplug_io(blk_add_trace_unplug_io); - unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer); - unregister_trace_block_plug(blk_add_trace_plug); - unregister_trace_block_sleeprq(blk_add_trace_sleeprq); - unregister_trace_block_getrq(blk_add_trace_getrq); - unregister_trace_block_bio_queue(blk_add_trace_bio_queue); - unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); - unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); - unregister_trace_block_bio_complete(blk_add_trace_bio_complete); - unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce); - unregister_trace_block_rq_complete(blk_add_trace_rq_complete); - unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue); - unregister_trace_block_rq_issue(blk_add_trace_rq_issue); - unregister_trace_block_rq_insert(blk_add_trace_rq_insert); - unregister_trace_block_rq_abort(blk_add_trace_rq_abort); + unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); + unregister_trace_block_remap(blk_add_trace_remap, NULL); + unregister_trace_block_split(blk_add_trace_split, NULL); + unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); + unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); + unregister_trace_block_plug(blk_add_trace_plug, NULL); + unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); + unregister_trace_block_getrq(blk_add_trace_getrq, NULL); + unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); + unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); + unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); + unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); + unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); + unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); + unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); + unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); + unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); + unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); tracepoint_synchronize_unregister(); } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 32837e19e3b..6d2cb14f944 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3234,7 +3234,8 @@ free: } static void -ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next) +ftrace_graph_probe_sched_switch(void *ignore, + struct task_struct *prev, struct task_struct *next) { unsigned long long timestamp; int index; @@ -3288,7 +3289,7 @@ static int start_graph_tracing(void) } while (ret == -EAGAIN); if (!ret) { - ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch); + ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); if (ret) pr_info("ftrace_graph: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); @@ -3364,7 +3365,7 @@ void unregister_ftrace_graph(void) ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(FTRACE_STOP_FUNC_RET); unregister_pm_notifier(&ftrace_suspend_notifier); - unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); + unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); out: mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index a91da69f153..7253d0c1c32 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c @@ -95,7 +95,8 @@ static inline void 
kmemtrace_free(enum kmemtrace_type_id type_id, trace_wake_up(); } -static void kmemtrace_kmalloc(unsigned long call_site, +static void kmemtrace_kmalloc(void *ignore, + unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, @@ -105,7 +106,8 @@ static void kmemtrace_kmalloc(unsigned long call_site, bytes_req, bytes_alloc, gfp_flags, -1); } -static void kmemtrace_kmem_cache_alloc(unsigned long call_site, +static void kmemtrace_kmem_cache_alloc(void *ignore, + unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, @@ -115,7 +117,8 @@ static void kmemtrace_kmem_cache_alloc(unsigned long call_site, bytes_req, bytes_alloc, gfp_flags, -1); } -static void kmemtrace_kmalloc_node(unsigned long call_site, +static void kmemtrace_kmalloc_node(void *ignore, + unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, @@ -126,7 +129,8 @@ static void kmemtrace_kmalloc_node(unsigned long call_site, bytes_req, bytes_alloc, gfp_flags, node); } -static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, +static void kmemtrace_kmem_cache_alloc_node(void *ignore, + unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, @@ -137,12 +141,14 @@ static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, bytes_req, bytes_alloc, gfp_flags, node); } -static void kmemtrace_kfree(unsigned long call_site, const void *ptr) +static void +kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr) { kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); } -static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr) +static void kmemtrace_kmem_cache_free(void *ignore, + unsigned long call_site, const void *ptr) { kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); } @@ -151,34 +157,34 @@ static int kmemtrace_start_probes(void) { int err; - err = register_trace_kmalloc(kmemtrace_kmalloc); + err = register_trace_kmalloc(kmemtrace_kmalloc, NULL); if (err) return err; - err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); + err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); if (err) return err; - err = register_trace_kmalloc_node(kmemtrace_kmalloc_node); + err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); if (err) return err; - err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); + err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); if (err) return err; - err = register_trace_kfree(kmemtrace_kfree); + err = register_trace_kfree(kmemtrace_kfree, NULL); if (err) return err; - err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free); + err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); return err; } static void kmemtrace_stop_probes(void) { - unregister_trace_kmalloc(kmemtrace_kmalloc); - unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); - unregister_trace_kmalloc_node(kmemtrace_kmalloc_node); - unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); - unregister_trace_kfree(kmemtrace_kfree); - unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free); + unregister_trace_kmalloc(kmemtrace_kmalloc, NULL); + unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); + unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); + unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); + unregister_trace_kfree(kmemtrace_kfree, NULL); + 
unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); } static int kmem_trace_init(struct trace_array *tr) diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index a55fccfede5..8f758d070c4 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -50,7 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr, } static void -probe_sched_switch(struct task_struct *prev, struct task_struct *next) +probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; unsigned long flags; @@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, } static void -probe_sched_wakeup(struct task_struct *wakee, int success) +probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) { struct trace_array_cpu *data; unsigned long flags; @@ -138,21 +138,21 @@ static int tracing_sched_register(void) { int ret; - ret = register_trace_sched_wakeup(probe_sched_wakeup); + ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return ret; } - ret = register_trace_sched_wakeup_new(probe_sched_wakeup); + ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } - ret = register_trace_sched_switch(probe_sched_switch); + ret = register_trace_sched_switch(probe_sched_switch, NULL); if (ret) { pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); @@ -161,17 +161,17 @@ static int tracing_sched_register(void) return ret; fail_deprobe_wake_new: - unregister_trace_sched_wakeup_new(probe_sched_wakeup); + unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL); fail_deprobe: - unregister_trace_sched_wakeup(probe_sched_wakeup); + unregister_trace_sched_wakeup(probe_sched_wakeup, NULL); return ret; } static void tracing_sched_unregister(void) { - unregister_trace_sched_switch(probe_sched_switch); - unregister_trace_sched_wakeup_new(probe_sched_wakeup); - unregister_trace_sched_wakeup(probe_sched_wakeup); + unregister_trace_sched_switch(probe_sched_switch, NULL); + unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL); + unregister_trace_sched_wakeup(probe_sched_wakeup, NULL); } static void tracing_start_sched_switch(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 8052446ceea..0e73bc2ef8c 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -98,7 +98,8 @@ static int report_latency(cycle_t delta) return 1; } -static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) +static void +probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) { if (task != wakeup_task) return; @@ -107,7 +108,8 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) } static void notrace -probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next) +probe_wakeup_sched_switch(void *ignore, + struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; cycle_t T0, T1, delta; @@ -199,7 +201,7 @@ static void wakeup_reset(struct trace_array *tr) } static void -probe_wakeup(struct task_struct *p, int success) +probe_wakeup(void *ignore, struct task_struct *p, int success) { struct trace_array_cpu *data; int cpu = smp_processor_id(); @@ -263,28 +265,28 @@ 
static void start_wakeup_tracer(struct trace_array *tr) { int ret; - ret = register_trace_sched_wakeup(probe_wakeup); + ret = register_trace_sched_wakeup(probe_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return; } - ret = register_trace_sched_wakeup_new(probe_wakeup); + ret = register_trace_sched_wakeup_new(probe_wakeup, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } - ret = register_trace_sched_switch(probe_wakeup_sched_switch); + ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL); if (ret) { pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); goto fail_deprobe_wake_new; } - ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task); + ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_migrate_task\n"); @@ -311,19 +313,19 @@ static void start_wakeup_tracer(struct trace_array *tr) return; fail_deprobe_wake_new: - unregister_trace_sched_wakeup_new(probe_wakeup); + unregister_trace_sched_wakeup_new(probe_wakeup, NULL); fail_deprobe: - unregister_trace_sched_wakeup(probe_wakeup); + unregister_trace_sched_wakeup(probe_wakeup, NULL); } static void stop_wakeup_tracer(struct trace_array *tr) { tracer_enabled = 0; unregister_ftrace_function(&trace_ops); - unregister_trace_sched_switch(probe_wakeup_sched_switch); - unregister_trace_sched_wakeup_new(probe_wakeup); - unregister_trace_sched_wakeup(probe_wakeup); - unregister_trace_sched_migrate_task(probe_wakeup_migrate_task); + unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); + unregister_trace_sched_wakeup_new(probe_wakeup, NULL); + unregister_trace_sched_wakeup(probe_wakeup, NULL); + unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); } static int __wakeup_tracer_init(struct trace_array *tr) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index d036a74a64f..b8d30e7ecd0 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -247,7 +247,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) return ret; } -void ftrace_syscall_enter(struct pt_regs *regs, long id) +void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; @@ -282,7 +282,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) trace_current_buffer_unlock_commit(buffer, event, 0, 0); } -void ftrace_syscall_exit(struct pt_regs *regs, long ret) +void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_trace_exit *entry; struct syscall_metadata *sys_data; @@ -324,7 +324,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) return -ENOSYS; mutex_lock(&syscall_trace_lock); if (!sys_refcount_enter) - ret = register_trace_sys_enter(ftrace_syscall_enter); + ret = register_trace_sys_enter(ftrace_syscall_enter, NULL); if (!ret) { set_bit(num, enabled_enter_syscalls); sys_refcount_enter++; @@ -344,7 +344,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) sys_refcount_enter--; clear_bit(num, enabled_enter_syscalls); if (!sys_refcount_enter) - unregister_trace_sys_enter(ftrace_syscall_enter); + unregister_trace_sys_enter(ftrace_syscall_enter, NULL); mutex_unlock(&syscall_trace_lock); } @@ -358,7 +358,7 @@ int 
reg_event_syscall_exit(struct ftrace_event_call *call) return -ENOSYS; mutex_lock(&syscall_trace_lock); if (!sys_refcount_exit) - ret = register_trace_sys_exit(ftrace_syscall_exit); + ret = register_trace_sys_exit(ftrace_syscall_exit, NULL); if (!ret) { set_bit(num, enabled_exit_syscalls); sys_refcount_exit++; @@ -378,7 +378,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) sys_refcount_exit--; clear_bit(num, enabled_exit_syscalls); if (!sys_refcount_exit) - unregister_trace_sys_exit(ftrace_syscall_exit); + unregister_trace_sys_exit(ftrace_syscall_exit, NULL); mutex_unlock(&syscall_trace_lock); } @@ -438,7 +438,7 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); static int sys_perf_refcount_enter; static int sys_perf_refcount_exit; -static void perf_syscall_enter(struct pt_regs *regs, long id) +static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; @@ -484,7 +484,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call) mutex_lock(&syscall_trace_lock); if (!sys_perf_refcount_enter) - ret = register_trace_sys_enter(perf_syscall_enter); + ret = register_trace_sys_enter(perf_syscall_enter, NULL); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); @@ -506,11 +506,11 @@ void perf_sysenter_disable(struct ftrace_event_call *call) sys_perf_refcount_enter--; clear_bit(num, enabled_perf_enter_syscalls); if (!sys_perf_refcount_enter) - unregister_trace_sys_enter(perf_syscall_enter); + unregister_trace_sys_enter(perf_syscall_enter, NULL); mutex_unlock(&syscall_trace_lock); } -static void perf_syscall_exit(struct pt_regs *regs, long ret) +static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; @@ -559,7 +559,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call) mutex_lock(&syscall_trace_lock); if (!sys_perf_refcount_exit) - ret = register_trace_sys_exit(perf_syscall_exit); + ret = register_trace_sys_exit(perf_syscall_exit, NULL); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); @@ -581,7 +581,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call) sys_perf_refcount_exit--; clear_bit(num, enabled_perf_exit_syscalls); if (!sys_perf_refcount_exit) - unregister_trace_sys_exit(perf_syscall_exit); + unregister_trace_sys_exit(perf_syscall_exit, NULL); mutex_unlock(&syscall_trace_lock); } diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index cc2d2faa7d9..a7cc3793baf 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c @@ -49,7 +49,8 @@ static void cpu_workqueue_stat_free(struct kref *kref) /* Insertion of a work */ static void -probe_workqueue_insertion(struct task_struct *wq_thread, +probe_workqueue_insertion(void *ignore, + struct task_struct *wq_thread, struct work_struct *work) { int cpu = cpumask_first(&wq_thread->cpus_allowed); @@ -70,7 +71,8 @@ found: /* Execution of a work */ static void -probe_workqueue_execution(struct task_struct *wq_thread, +probe_workqueue_execution(void *ignore, + struct task_struct *wq_thread, struct work_struct *work) { int cpu = cpumask_first(&wq_thread->cpus_allowed); @@ -90,7 +92,8 @@ found: } /* Creation of a cpu workqueue thread */ -static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) +static void probe_workqueue_creation(void *ignore, + struct task_struct *wq_thread, int cpu) { struct 
cpu_workqueue_stats *cws; unsigned long flags; @@ -114,7 +117,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) } /* Destruction of a cpu workqueue thread */ -static void probe_workqueue_destruction(struct task_struct *wq_thread) +static void +probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread) { /* Workqueue only execute on one cpu */ int cpu = cpumask_first(&wq_thread->cpus_allowed); @@ -259,19 +263,19 @@ int __init trace_workqueue_early_init(void) { int ret, cpu; - ret = register_trace_workqueue_insertion(probe_workqueue_insertion); + ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL); if (ret) goto out; - ret = register_trace_workqueue_execution(probe_workqueue_execution); + ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL); if (ret) goto no_insertion; - ret = register_trace_workqueue_creation(probe_workqueue_creation); + ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL); if (ret) goto no_execution; - ret = register_trace_workqueue_destruction(probe_workqueue_destruction); + ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL); if (ret) goto no_creation; @@ -283,11 +287,11 @@ int __init trace_workqueue_early_init(void) return 0; no_creation: - unregister_trace_workqueue_creation(probe_workqueue_creation); + unregister_trace_workqueue_creation(probe_workqueue_creation, NULL); no_execution: - unregister_trace_workqueue_execution(probe_workqueue_execution); + unregister_trace_workqueue_execution(probe_workqueue_execution, NULL); no_insertion: - unregister_trace_workqueue_insertion(probe_workqueue_insertion); + unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL); out: pr_warning("trace_workqueue: unable to trace workqueues\n"); diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index cc89be5bc0f..c77f3eceea2 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -54,7 +54,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; */ struct tracepoint_entry { struct hlist_node hlist; - void **funcs; + struct tracepoint_func *funcs; int refcount; /* Number of times armed. 0 if disarmed. */ char name[0]; }; @@ -64,12 +64,12 @@ struct tp_probes { struct rcu_head rcu; struct list_head list; } u; - void *probes[0]; + struct tracepoint_func probes[0]; }; static inline void *allocate_probes(int count) { - struct tp_probes *p = kmalloc(count * sizeof(void *) + struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) + sizeof(struct tp_probes), GFP_KERNEL); return p == NULL ? 
NULL : p->probes; } @@ -79,7 +79,7 @@ static void rcu_free_old_probes(struct rcu_head *head) kfree(container_of(head, struct tp_probes, u.rcu)); } -static inline void release_probes(void *old) +static inline void release_probes(struct tracepoint_func *old) { if (old) { struct tp_probes *tp_probes = container_of(old, @@ -95,15 +95,16 @@ static void debug_print_probes(struct tracepoint_entry *entry) if (!tracepoint_debug || !entry->funcs) return; - for (i = 0; entry->funcs[i]; i++) - printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]); + for (i = 0; entry->funcs[i].func; i++) + printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); } -static void * -tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) +static struct tracepoint_func * +tracepoint_entry_add_probe(struct tracepoint_entry *entry, + void *probe, void *data) { int nr_probes = 0; - void **old, **new; + struct tracepoint_func *old, *new; WARN_ON(!probe); @@ -111,8 +112,9 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) old = entry->funcs; if (old) { /* (N -> N+1), (N != 0, 1) probes */ - for (nr_probes = 0; old[nr_probes]; nr_probes++) - if (old[nr_probes] == probe) + for (nr_probes = 0; old[nr_probes].func; nr_probes++) + if (old[nr_probes].func == probe && + old[nr_probes].data == data) return ERR_PTR(-EEXIST); } /* + 2 : one for new probe, one for NULL func */ @@ -120,9 +122,10 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) if (new == NULL) return ERR_PTR(-ENOMEM); if (old) - memcpy(new, old, nr_probes * sizeof(void *)); - new[nr_probes] = probe; - new[nr_probes + 1] = NULL; + memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); + new[nr_probes].func = probe; + new[nr_probes].data = data; + new[nr_probes + 1].func = NULL; entry->refcount = nr_probes + 1; entry->funcs = new; debug_print_probes(entry); @@ -130,10 +133,11 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) } static void * -tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) +tracepoint_entry_remove_probe(struct tracepoint_entry *entry, + void *probe, void *data) { int nr_probes = 0, nr_del = 0, i; - void **old, **new; + struct tracepoint_func *old, *new; old = entry->funcs; @@ -142,8 +146,10 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) debug_print_probes(entry); /* (N -> M), (N > 1, M >= 0) probes */ - for (nr_probes = 0; old[nr_probes]; nr_probes++) { - if ((!probe || old[nr_probes] == probe)) + for (nr_probes = 0; old[nr_probes].func; nr_probes++) { + if (!probe || + (old[nr_probes].func == probe && + old[nr_probes].data == data)) nr_del++; } @@ -160,10 +166,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) new = allocate_probes(nr_probes - nr_del + 1); if (new == NULL) return ERR_PTR(-ENOMEM); - for (i = 0; old[i]; i++) - if ((probe && old[i] != probe)) + for (i = 0; old[i].func; i++) + if (probe && + (old[i].func != probe || old[i].data != data)) new[j++] = old[i]; - new[nr_probes - nr_del] = NULL; + new[nr_probes - nr_del].func = NULL; entry->refcount = nr_probes - nr_del; entry->funcs = new; } @@ -315,18 +322,19 @@ static void tracepoint_update_probes(void) module_update_tracepoints(); } -static void *tracepoint_add_probe(const char *name, void *probe) +static struct tracepoint_func * +tracepoint_add_probe(const char *name, void *probe, void *data) { struct tracepoint_entry *entry; - void *old; + struct tracepoint_func *old; entry = get_tracepoint(name); if 
(!entry) { entry = add_tracepoint(name); if (IS_ERR(entry)) - return entry; + return (struct tracepoint_func *)entry; } - old = tracepoint_entry_add_probe(entry, probe); + old = tracepoint_entry_add_probe(entry, probe, data); if (IS_ERR(old) && !entry->refcount) remove_tracepoint(entry); return old; @@ -340,12 +348,12 @@ static void *tracepoint_add_probe(const char *name, void *probe) * Returns 0 if ok, error value on error. * The probe address must at least be aligned on the architecture pointer size. */ -int tracepoint_probe_register(const char *name, void *probe) +int tracepoint_probe_register(const char *name, void *probe, void *data) { - void *old; + struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); - old = tracepoint_add_probe(name, probe); + old = tracepoint_add_probe(name, probe, data); mutex_unlock(&tracepoints_mutex); if (IS_ERR(old)) return PTR_ERR(old); @@ -356,15 +364,16 @@ int tracepoint_probe_register(const char *name, void *probe) } EXPORT_SYMBOL_GPL(tracepoint_probe_register); -static void *tracepoint_remove_probe(const char *name, void *probe) +static struct tracepoint_func * +tracepoint_remove_probe(const char *name, void *probe, void *data) { struct tracepoint_entry *entry; - void *old; + struct tracepoint_func *old; entry = get_tracepoint(name); if (!entry) return ERR_PTR(-ENOENT); - old = tracepoint_entry_remove_probe(entry, probe); + old = tracepoint_entry_remove_probe(entry, probe, data); if (IS_ERR(old)) return old; if (!entry->refcount) @@ -382,12 +391,12 @@ static void *tracepoint_remove_probe(const char *name, void *probe) * itself uses stop_machine(), which insures that every preempt disabled section * have finished. */ -int tracepoint_probe_unregister(const char *name, void *probe) +int tracepoint_probe_unregister(const char *name, void *probe, void *data) { - void *old; + struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); - old = tracepoint_remove_probe(name, probe); + old = tracepoint_remove_probe(name, probe, data); mutex_unlock(&tracepoints_mutex); if (IS_ERR(old)) return PTR_ERR(old); @@ -418,12 +427,13 @@ static void tracepoint_add_old_probes(void *old) * * caller must call tracepoint_probe_update_all() */ -int tracepoint_probe_register_noupdate(const char *name, void *probe) +int tracepoint_probe_register_noupdate(const char *name, void *probe, + void *data) { - void *old; + struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); - old = tracepoint_add_probe(name, probe); + old = tracepoint_add_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); @@ -441,12 +451,13 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); * * caller must call tracepoint_probe_update_all() */ -int tracepoint_probe_unregister_noupdate(const char *name, void *probe) +int tracepoint_probe_unregister_noupdate(const char *name, void *probe, + void *data) { - void *old; + struct tracepoint_func *old; mutex_lock(&tracepoints_mutex); - old = tracepoint_remove_probe(name, probe); + old = tracepoint_remove_probe(name, probe, data); if (IS_ERR(old)) { mutex_unlock(&tracepoints_mutex); return PTR_ERR(old); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index cf208d8042b..ad41529fb60 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -172,12 +172,12 @@ out: return; } -static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) +static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) { trace_drop_common(skb, location); } -static void 
trace_napi_poll_hit(struct napi_struct *napi) +static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) { struct dm_hw_stat_delta *new_stat; @@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state) switch (state) { case TRACE_ON: - rc |= register_trace_kfree_skb(trace_kfree_skb_hit); - rc |= register_trace_napi_poll(trace_napi_poll_hit); + rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); + rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); break; case TRACE_OFF: - rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); - rc |= unregister_trace_napi_poll(trace_napi_poll_hit); + rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); + rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); tracepoint_synchronize_unregister(); diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h index dffdc49878a..4d46be96596 100644 --- a/samples/tracepoints/tp-samples-trace.h +++ b/samples/tracepoints/tp-samples-trace.h @@ -7,7 +7,5 @@ DECLARE_TRACE(subsys_event, TP_PROTO(struct inode *inode, struct file *file), TP_ARGS(inode, file)); -DECLARE_TRACE(subsys_eventb, - TP_PROTO(void), - TP_ARGS()); +DECLARE_TRACE_NOARGS(subsys_eventb); #endif diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c index 9e60eb6ca2d..744c0b9652a 100644 --- a/samples/tracepoints/tracepoint-probe-sample.c +++ b/samples/tracepoints/tracepoint-probe-sample.c @@ -13,7 +13,8 @@ * Here the caller only guarantees locking for struct file and struct inode. * Locking must therefore be done in the probe to use the dentry. */ -static void probe_subsys_event(struct inode *inode, struct file *file) +static void probe_subsys_event(void *ignore, + struct inode *inode, struct file *file) { path_get(&file->f_path); dget(file->f_path.dentry); @@ -23,7 +24,7 @@ static void probe_subsys_event(struct inode *inode, struct file *file) path_put(&file->f_path); } -static void probe_subsys_eventb(void) +static void probe_subsys_eventb(void *ignore) { printk(KERN_INFO "Event B is encountered\n"); } @@ -32,9 +33,9 @@ static int __init tp_sample_trace_init(void) { int ret; - ret = register_trace_subsys_event(probe_subsys_event); + ret = register_trace_subsys_event(probe_subsys_event, NULL); WARN_ON(ret); - ret = register_trace_subsys_eventb(probe_subsys_eventb); + ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL); WARN_ON(ret); return 0; @@ -44,8 +45,8 @@ module_init(tp_sample_trace_init); static void __exit tp_sample_trace_exit(void) { - unregister_trace_subsys_eventb(probe_subsys_eventb); - unregister_trace_subsys_event(probe_subsys_event); + unregister_trace_subsys_eventb(probe_subsys_eventb, NULL); + unregister_trace_subsys_event(probe_subsys_event, NULL); tracepoint_synchronize_unregister(); } diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c index be2a960573f..9fcf990e5d4 100644 --- a/samples/tracepoints/tracepoint-probe-sample2.c +++ b/samples/tracepoints/tracepoint-probe-sample2.c @@ -12,7 +12,8 @@ * Here the caller only guarantees locking for struct file and struct inode. * Locking must therefore be done in the probe to use the dentry. 
*/ -static void probe_subsys_event(struct inode *inode, struct file *file) +static void probe_subsys_event(void *ignore, + struct inode *inode, struct file *file) { printk(KERN_INFO "Event is encountered with inode number %lu\n", inode->i_ino); @@ -22,7 +23,7 @@ static int __init tp_sample_trace_init(void) { int ret; - ret = register_trace_subsys_event(probe_subsys_event); + ret = register_trace_subsys_event(probe_subsys_event, NULL); WARN_ON(ret); return 0; @@ -32,7 +33,7 @@ module_init(tp_sample_trace_init); static void __exit tp_sample_trace_exit(void) { - unregister_trace_subsys_event(probe_subsys_event); + unregister_trace_subsys_event(probe_subsys_event, NULL); tracepoint_synchronize_unregister(); } -- cgit v1.2.3

From 2239291aeb0379fe47980b0e560e0eb9fd7e82ec Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 21 Apr 2010 12:27:06 -0400
Subject: tracing: Remove per event trace registering

This patch removes the register functions of TRACE_EVENT() used to
enable and disable tracepoints. Registering an event is now done
directly in the trace_events.c file; tracepoint_probe_register() is
now called directly.

The prototypes are no longer type checked, but this should not be an
issue since the tracepoints are created automatically by the macros.
If a prototype is incorrect in the TRACE_EVENT() macro, then other
macros will catch it.

The ftrace_event_class structure now holds the probes to be called by
the callbacks. This removes the need for each event to hold a separate
pointer to the probe.

To handle kprobes and syscalls, since they register probes in a
different manner, a "reg" field is added to the ftrace_event_class
structure. If the "reg" field is assigned, it is called to enable and
disable the probe for either ftrace or perf. To let the reg function
know what is happening, a new enum (trace_reg) is created that
describes the type of control that is needed.

With this rework, the 82 kernel events and 618 syscall events have
their footprint dramatically lowered:

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4914025 1088868  861512 6864405  68be15 vmlinux.class
4918492 1084612  861512 6864616  68bee8 vmlinux.tracepoint
4900252 1057412  861512 6819176  680d68 vmlinux.regs

The size went from 6863829 to 6819176, a savings of roughly 44K. With
tracepoints being continuously added, it is critical that their
footprint be kept minimal.

v5: Added #ifdef CONFIG_PERF_EVENTS around a reference to a
    perf-specific structure in trace_events.c.

v4: Fixed trace self tests to check the probe, because regfunc no
    longer exists.

v3: Updated to handle void *data at the beginning of the probe
    parameters. Also added the check_trace_callback_type_##call()
    tracepoint check.

v2: Changed the callback probes to pass void * and typecast the value
    within the function.
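As a rough sketch of how the new interface fits together (illustration
only, not part of the patch; example_event_register() is a made-up
name), a class-level reg() callback dispatches on the trace_reg enum
and falls back on the same three-argument tracepoint calls that
trace_events.c and trace_event_perf.c now use when no reg() callback
is set:

/* Illustrative sketch only -- example_event_register() is hypothetical. */
static int example_event_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		/* ftrace enables the event: hook the class probe */
		return tracepoint_probe_register(event->name,
						 event->class->probe, event);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(event->name,
					    event->class->probe, event);
		return 0;
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		/* perf uses its own probe, registered the same way */
		return tracepoint_probe_register(event->name,
						 event->class->perf_probe, event);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(event->name,
					    event->class->perf_probe, event);
		return 0;
#endif
	}
	return 0;
}

The kprobe_register() and syscall_enter_register()/syscall_exit_register()
handlers added below follow this shape, but call their subsystem-specific
enable/disable helpers instead of the generic tracepoint API.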
Acked-by: Mathieu Desnoyers Acked-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 19 ++++-- include/linux/syscalls.h | 29 ++------- include/trace/ftrace.h | 133 ++++++++++++---------------------------- kernel/trace/trace_event_perf.c | 15 ++++- kernel/trace/trace_events.c | 32 +++++++--- kernel/trace/trace_kprobe.c | 34 +++++++--- kernel/trace/trace_syscalls.c | 56 ++++++++++++++++- 7 files changed, 171 insertions(+), 147 deletions(-) (limited to 'kernel') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 496eea898ee..e665ed38b4b 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -113,8 +113,23 @@ void tracing_record_cmdline(struct task_struct *tsk); struct event_filter; +enum trace_reg { + TRACE_REG_REGISTER, + TRACE_REG_UNREGISTER, + TRACE_REG_PERF_REGISTER, + TRACE_REG_PERF_UNREGISTER, +}; + +struct ftrace_event_call; + struct ftrace_event_class { char *system; + void *probe; +#ifdef CONFIG_PERF_EVENTS + void *perf_probe; +#endif + int (*reg)(struct ftrace_event_call *event, + enum trace_reg type); }; struct ftrace_event_call { @@ -124,8 +139,6 @@ struct ftrace_event_call { struct dentry *dir; struct trace_event *event; int enabled; - int (*regfunc)(struct ftrace_event_call *); - void (*unregfunc)(struct ftrace_event_call *); int id; const char *print_fmt; int (*raw_init)(struct ftrace_event_call *); @@ -137,8 +150,6 @@ struct ftrace_event_call { void *data; int perf_refcount; - int (*perf_event_enable)(struct ftrace_event_call *); - void (*perf_event_disable)(struct ftrace_event_call *); }; #define PERF_MAX_TRACE_SIZE 2048 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index ac5791df250..e3348c4c22e 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -103,22 +103,6 @@ struct perf_event_attr; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) -#ifdef CONFIG_PERF_EVENTS - -#define TRACE_SYS_ENTER_PERF_INIT(sname) \ - .perf_event_enable = perf_sysenter_enable, \ - .perf_event_disable = perf_sysenter_disable, - -#define TRACE_SYS_EXIT_PERF_INIT(sname) \ - .perf_event_enable = perf_sysexit_enable, \ - .perf_event_disable = perf_sysexit_disable, -#else -#define TRACE_SYS_ENTER_PERF(sname) -#define TRACE_SYS_ENTER_PERF_INIT(sname) -#define TRACE_SYS_EXIT_PERF(sname) -#define TRACE_SYS_EXIT_PERF_INIT(sname) -#endif /* CONFIG_PERF_EVENTS */ - #ifdef CONFIG_FTRACE_SYSCALLS #define __SC_STR_ADECL1(t, a) #a #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) @@ -134,7 +118,8 @@ struct perf_event_attr; #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) #define __SC_STR_TDECL6(t, a, ...) 
#t, __SC_STR_TDECL5(__VA_ARGS__) -extern struct ftrace_event_class event_class_syscalls; +extern struct ftrace_event_class event_class_syscall_enter; +extern struct ftrace_event_class event_class_syscall_exit; #define SYSCALL_TRACE_ENTER_EVENT(sname) \ static const struct syscall_metadata __syscall_meta_##sname; \ @@ -148,14 +133,11 @@ extern struct ftrace_event_class event_class_syscalls; __attribute__((section("_ftrace_events"))) \ event_enter_##sname = { \ .name = "sys_enter"#sname, \ - .class = &event_class_syscalls, \ + .class = &event_class_syscall_enter, \ .event = &enter_syscall_print_##sname, \ .raw_init = init_syscall_trace, \ .define_fields = syscall_enter_define_fields, \ - .regfunc = reg_event_syscall_enter, \ - .unregfunc = unreg_event_syscall_enter, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -170,14 +152,11 @@ extern struct ftrace_event_class event_class_syscalls; __attribute__((section("_ftrace_events"))) \ event_exit_##sname = { \ .name = "sys_exit"#sname, \ - .class = &event_class_syscalls, \ + .class = &event_class_syscall_exit, \ .event = &exit_syscall_print_##sname, \ .raw_init = init_syscall_trace, \ .define_fields = syscall_exit_define_fields, \ - .regfunc = reg_event_syscall_exit, \ - .unregfunc = unreg_event_syscall_exit, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index ba28b644f41..26d132418f9 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -381,53 +381,6 @@ static inline notrace int ftrace_get_offsets_##call( \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#ifdef CONFIG_PERF_EVENTS - -/* - * Generate the functions needed for tracepoint perf_event support. - * - * NOTE: The insertion profile callback (ftrace_profile_) is defined later - * - * static int ftrace_profile_enable_(void) - * { - * return register_trace_(ftrace_profile_); - * } - * - * static void ftrace_profile_disable_(void) - * { - * unregister_trace_(ftrace_profile_); - * } - * - */ - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ - \ -static void perf_trace_##name(void *, proto); \ - \ -static notrace int \ -perf_trace_enable_##name(struct ftrace_event_call *unused) \ -{ \ - return register_trace_##name(perf_trace_##name, NULL); \ -} \ - \ -static notrace void \ -perf_trace_disable_##name(struct ftrace_event_call *unused) \ -{ \ - unregister_trace_##name(perf_trace_##name, NULL); \ -} - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - -#endif /* CONFIG_PERF_EVENTS */ - /* * Stage 4 of the trace events. 
* @@ -437,8 +390,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * * static struct ftrace_event_call event_; * - * static void ftrace_raw_event_(proto) + * static void ftrace_raw_event_(void *__data, proto) * { + * struct ftrace_event_call *event_call = __data; * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ring_buffer_event *event; * struct ftrace_raw_ *entry; <-- defined in stage 1 @@ -468,16 +422,6 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * event, irq_flags, pc); * } * - * static int ftrace_raw_reg_event_(struct ftrace_event_call *unused) - * { - * return register_trace_(ftrace_raw_event_); - * } - * - * static void ftrace_unreg_event_(struct ftrace_event_call *unused) - * { - * unregister_trace_(ftrace_raw_event_); - * } - * * static struct trace_event ftrace_event_type_ = { * .trace = ftrace_raw_output_, <-- stage 2 * }; @@ -504,11 +448,15 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ #ifdef CONFIG_PERF_EVENTS +#define _TRACE_PERF_PROTO(call, proto) \ + static notrace void \ + perf_trace_##call(void *__data, proto); + #define _TRACE_PERF_INIT(call) \ - .perf_event_enable = perf_trace_enable_##call, \ - .perf_event_disable = perf_trace_disable_##call, + .perf_probe = perf_trace_##call, #else +#define _TRACE_PERF_PROTO(call, proto) #define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ @@ -542,9 +490,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ \ static notrace void \ -ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ - proto) \ +ftrace_raw_event_##call(void *__data, proto) \ { \ + struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ring_buffer_event *event; \ struct ftrace_raw_##call *entry; \ @@ -574,30 +522,23 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ trace_nowake_buffer_unlock_commit(buffer, \ event, irq_flags, pc); \ } +/* + * The ftrace_test_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless it too is updated. 
+ */ #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ \ -static notrace void ftrace_raw_event_##call(void *__ignore, proto) \ -{ \ - ftrace_raw_event_id_##template(&event_##call, args); \ -} \ - \ -static notrace int \ -ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \ -{ \ - return register_trace_##call(ftrace_raw_event_##call, NULL); \ -} \ - \ -static notrace void \ -ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \ -{ \ - unregister_trace_##call(ftrace_raw_event_##call, NULL); \ -} \ - \ static struct trace_event ftrace_event_type_##call = { \ .trace = ftrace_raw_output_##call, \ -}; +}; \ + \ +static inline void ftrace_test_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(ftrace_raw_event_##template); \ +} #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ @@ -618,9 +559,12 @@ static struct trace_event ftrace_event_type_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +_TRACE_PERF_PROTO(call, PARAMS(proto)); \ static const char print_fmt_##call[] = print; \ static struct ftrace_event_class __used event_class_##call = { \ - .system = __stringify(TRACE_SYSTEM) \ + .system = __stringify(TRACE_SYSTEM), \ + .probe = ftrace_raw_event_##call, \ + _TRACE_PERF_INIT(call) \ }; #undef DEFINE_EVENT @@ -633,11 +577,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .class = &event_class_##template, \ .event = &ftrace_event_type_##call, \ .raw_init = trace_event_raw_init, \ - .regfunc = ftrace_raw_reg_event_##call, \ - .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##template, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PERF_INIT(call) \ }; #undef DEFINE_EVENT_PRINT @@ -652,11 +593,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .class = &event_class_##template, \ .event = &ftrace_event_type_##call, \ .raw_init = trace_event_raw_init, \ - .regfunc = ftrace_raw_reg_event_##call, \ - .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##call, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -756,9 +694,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -perf_trace_templ_##call(struct ftrace_event_call *event_call, \ - proto) \ +perf_trace_##call(void *__data, proto) \ { \ + struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ @@ -791,15 +729,20 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ __count, irq_flags, __regs); \ } +/* + * This part is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the + * perf probe will fail to compile unless it too is updated. 
+ */ #undef DEFINE_EVENT -#define DEFINE_EVENT(template, call, proto, args) \ -static notrace void perf_trace_##call(void *__ignore, proto) \ -{ \ - struct ftrace_event_call *event_call = &event_##call; \ - \ - perf_trace_templ_##template(event_call, args); \ +#define DEFINE_EVENT(template, call, proto, args) \ +static inline void perf_test_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(perf_trace_##template); \ + \ } + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0565bb42566..196fe9d2677 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -49,7 +49,12 @@ static int perf_trace_event_enable(struct ftrace_event_call *event) rcu_assign_pointer(perf_trace_buf_nmi, buf); } - ret = event->perf_event_enable(event); + if (event->class->reg) + ret = event->class->reg(event, TRACE_REG_PERF_REGISTER); + else + ret = tracepoint_probe_register(event->name, + event->class->perf_probe, + event); if (!ret) { total_ref_count++; return 0; @@ -75,7 +80,8 @@ int perf_trace_enable(int event_id) mutex_lock(&event_mutex); list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->perf_event_enable && + if (event->id == event_id && + event->class && event->class->perf_probe && try_module_get(event->mod)) { ret = perf_trace_event_enable(event); break; @@ -93,7 +99,10 @@ static void perf_trace_event_disable(struct ftrace_event_call *event) if (--event->perf_refcount > 0) return; - event->perf_event_disable(event); + if (event->class->reg) + event->class->reg(event, TRACE_REG_PERF_UNREGISTER); + else + tracepoint_probe_unregister(event->name, event->class->perf_probe, event); if (!--total_ref_count) { buf = perf_trace_buf; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 2f54b48d363..19d1eb0a718 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -127,13 +127,23 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, if (call->enabled) { call->enabled = 0; tracing_stop_cmdline_record(); - call->unregfunc(call); + if (call->class->reg) + call->class->reg(call, TRACE_REG_UNREGISTER); + else + tracepoint_probe_unregister(call->name, + call->class->probe, + call); } break; case 1: if (!call->enabled) { tracing_start_cmdline_record(); - ret = call->regfunc(call); + if (call->class->reg) + ret = call->class->reg(call, TRACE_REG_REGISTER); + else + ret = tracepoint_probe_register(call->name, + call->class->probe, + call); if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " @@ -171,7 +181,8 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { - if (!call->name || !call->regfunc) + if (!call->name || !call->class || + (!call->class->probe && !call->class->reg)) continue; if (match && @@ -297,7 +308,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) * The ftrace subsystem is for showing formats only. * They can not be enabled or disabled via the event files. 
*/ - if (call->regfunc) + if (call->class && (call->class->probe || call->class->reg)) return call; } @@ -450,7 +461,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); list_for_each_entry(call, &ftrace_events, list) { - if (!call->name || !call->regfunc) + if (!call->name || !call->class || + (!call->class->probe && !call->class->reg)) continue; if (system && strcmp(call->class->system, system) != 0) @@ -935,13 +947,15 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, return -1; } - if (call->regfunc) + if (call->class->probe || call->class->reg) trace_create_file("enable", 0644, call->dir, call, enable); - if (call->id && call->perf_event_enable) +#ifdef CONFIG_PERF_EVENTS + if (call->id && (call->class->perf_probe || call->class->reg)) trace_create_file("id", 0444, call->dir, call, id); +#endif if (call->define_fields) { ret = trace_define_common_fields(call); @@ -1388,8 +1402,8 @@ static __init void event_trace_self_tests(void) list_for_each_entry(call, &ftrace_events, list) { - /* Only test those that have a regfunc */ - if (!call->regfunc) + /* Only test those that have a probe */ + if (!call->class || !call->class->probe) continue; /* diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index eda220bf206..f8af21a53f0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -202,6 +202,7 @@ struct trace_probe { unsigned long nhit; unsigned int flags; /* For TP_FLAG_* */ const char *symbol; /* symbol name */ + struct ftrace_event_class class; struct ftrace_event_call call; struct trace_event event; unsigned int nr_args; @@ -323,6 +324,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, goto error; } + tp->call.class = &tp->class; tp->call.name = kstrdup(event, GFP_KERNEL); if (!tp->call.name) goto error; @@ -332,8 +334,8 @@ static struct trace_probe *alloc_trace_probe(const char *group, goto error; } - tp->call.class->system = kstrdup(group, GFP_KERNEL); - if (!tp->call.class->system) + tp->class.system = kstrdup(group, GFP_KERNEL); + if (!tp->class.system) goto error; INIT_LIST_HEAD(&tp->list); @@ -1302,6 +1304,26 @@ static void probe_perf_disable(struct ftrace_event_call *call) } #endif /* CONFIG_PERF_EVENTS */ +static __kprobes +int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) +{ + switch (type) { + case TRACE_REG_REGISTER: + return probe_event_enable(event); + case TRACE_REG_UNREGISTER: + probe_event_disable(event); + return 0; + +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: + return probe_perf_enable(event); + case TRACE_REG_PERF_UNREGISTER: + probe_perf_disable(event); + return 0; +#endif + } + return 0; +} static __kprobes int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) @@ -1355,13 +1377,7 @@ static int register_probe_event(struct trace_probe *tp) return -ENODEV; } call->enabled = 0; - call->regfunc = probe_event_enable; - call->unregfunc = probe_event_disable; - -#ifdef CONFIG_PERF_EVENTS - call->perf_event_enable = probe_perf_enable; - call->perf_event_disable = probe_perf_disable; -#endif + call->class->reg = kprobe_register; call->data = tp; ret = trace_add_event_call(call); if (ret) { diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index b8d30e7ecd0..a21d366cae4 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -15,8 +15,19 @@ static int sys_refcount_exit; static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static 
DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); -struct ftrace_event_class event_class_syscalls = { - .system = "syscalls" +static int syscall_enter_register(struct ftrace_event_call *event, + enum trace_reg type); +static int syscall_exit_register(struct ftrace_event_call *event, + enum trace_reg type); + +struct ftrace_event_class event_class_syscall_enter = { + .system = "syscalls", + .reg = syscall_enter_register +}; + +struct ftrace_event_class event_class_syscall_exit = { + .system = "syscalls", + .reg = syscall_exit_register }; extern unsigned long __start_syscalls_metadata[]; @@ -587,3 +598,44 @@ void perf_sysexit_disable(struct ftrace_event_call *call) #endif /* CONFIG_PERF_EVENTS */ +static int syscall_enter_register(struct ftrace_event_call *event, + enum trace_reg type) +{ + switch (type) { + case TRACE_REG_REGISTER: + return reg_event_syscall_enter(event); + case TRACE_REG_UNREGISTER: + unreg_event_syscall_enter(event); + return 0; + +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: + return perf_sysenter_enable(event); + case TRACE_REG_PERF_UNREGISTER: + perf_sysenter_disable(event); + return 0; +#endif + } + return 0; +} + +static int syscall_exit_register(struct ftrace_event_call *event, + enum trace_reg type) +{ + switch (type) { + case TRACE_REG_REGISTER: + return reg_event_syscall_exit(event); + case TRACE_REG_UNREGISTER: + unreg_event_syscall_exit(event); + return 0; + +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: + return perf_sysexit_enable(event); + case TRACE_REG_PERF_UNREGISTER: + perf_sysexit_disable(event); + return 0; +#endif + } + return 0; +} -- cgit v1.2.3

From 2e33af029556cb8bd22bf4f86f42d540249177ea Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Thu, 22 Apr 2010 10:35:55 -0400
Subject: tracing: Move fields from event to class structure

Move the defined fields from the event to the class structure. Since
the fields of an event are defined by the class it belongs to, it
makes sense to have the class hold the information instead of the
individual events; events of the same class would otherwise just hold
duplicate information.

After this change the size of the kernel dropped another 3K:

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4900252 1057412  861512 6819176  680d68 vmlinux.regs
4900375 1053380  861512 6815267  67fe23 vmlinux.fields

Although the text size increased, this was mainly due to the C files
having to adapt to the change. This is a constant increase; new
tracepoints will not increase the text size further. But the big drop
is in the data size (as well as in the allocations needed to hold the
fields). This will give even more savings as more tracepoints are
created.

Note: if only TRACE_EVENT()s are used, and not DECLARE_EVENT_CLASS()
with several DEFINE_EVENT()s, then the savings are lost. But we are
pushing developers to consolidate events with DEFINE_EVENT(), so this
should not be an issue.

The kprobes define a unique class for every new event, but they are
dynamic, so this should not be an issue.

The syscalls, however, have a single class, but the fields for the
individual events differ. The syscalls use metadata to define the
fields. I moved the fields list from the event to the metadata and
added a "get_fields()" function to the class; this function is used to
find the fields. For normal events and kprobes, get_fields() just
returns a pointer to the fields list_head in the class. For syscall
events, it returns the fields list_head in the metadata for the event.

v2: Fixed the syscall fields.
The syscall metadata needs a list of fields for both enter and exit. Acked-by: Frederic Weisbecker Acked-by: Mathieu Desnoyers Acked-by: Masami Hiramatsu Cc: Tom Zanussi Cc: Peter Zijlstra Signed-off-by: Steven Rostedt --- include/linux/ftrace_event.h | 5 ++-- include/linux/syscalls.h | 14 ++++++----- include/trace/ftrace.h | 11 +++++---- include/trace/syscall.h | 4 ++-- kernel/trace/trace.h | 3 +++ kernel/trace/trace_events.c | 48 ++++++++++++++++++++++++++++---------- kernel/trace/trace_events_filter.c | 10 ++++---- kernel/trace/trace_export.c | 14 +++++------ kernel/trace/trace_kprobe.c | 8 +++---- kernel/trace/trace_syscalls.c | 31 ++++++++++++++++++++---- 10 files changed, 102 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index e665ed38b4b..479c3c1876e 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -130,6 +130,9 @@ struct ftrace_event_class { #endif int (*reg)(struct ftrace_event_call *event, enum trace_reg type); + int (*define_fields)(struct ftrace_event_call *); + struct list_head *(*get_fields)(struct ftrace_event_call *); + struct list_head fields; }; struct ftrace_event_call { @@ -142,8 +145,6 @@ struct ftrace_event_call { int id; const char *print_fmt; int (*raw_init)(struct ftrace_event_call *); - int (*define_fields)(struct ftrace_event_call *); - struct list_head fields; int filter_active; struct event_filter *filter; void *mod; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e3348c4c22e..fd0f1f248cd 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -122,7 +122,7 @@ extern struct ftrace_event_class event_class_syscall_enter; extern struct ftrace_event_class event_class_syscall_exit; #define SYSCALL_TRACE_ENTER_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_enter_##sname; \ static struct trace_event enter_syscall_print_##sname = { \ @@ -136,12 +136,11 @@ extern struct ftrace_event_class event_class_syscall_exit; .class = &event_class_syscall_enter, \ .event = &enter_syscall_print_##sname, \ .raw_init = init_syscall_trace, \ - .define_fields = syscall_enter_define_fields, \ .data = (void *)&__syscall_meta_##sname,\ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_exit_##sname; \ static struct trace_event exit_syscall_print_##sname = { \ @@ -155,14 +154,13 @@ extern struct ftrace_event_class event_class_syscall_exit; .class = &event_class_syscall_exit, \ .event = &exit_syscall_print_##sname, \ .raw_init = init_syscall_trace, \ - .define_fields = syscall_exit_define_fields, \ .data = (void *)&__syscall_meta_##sname,\ } #define SYSCALL_METADATA(sname, nb) \ SYSCALL_TRACE_ENTER_EVENT(sname); \ SYSCALL_TRACE_EXIT_EVENT(sname); \ - static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta_##sname = { \ @@ -172,12 +170,14 @@ extern struct ftrace_event_class event_class_syscall_exit; .args = args_##sname, \ .enter_event = &event_enter_##sname, \ .exit_event = &event_exit_##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ + 
.exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \ }; #define SYSCALL_DEFINE0(sname) \ SYSCALL_TRACE_ENTER_EVENT(_##sname); \ SYSCALL_TRACE_EXIT_EVENT(_##sname); \ - static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta__##sname = { \ @@ -185,6 +185,8 @@ extern struct ftrace_event_class event_class_syscall_exit; .nb_args = 0, \ .enter_event = &event_enter__##sname, \ .exit_event = &event_exit__##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ + .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \ }; \ asmlinkage long sys_##sname(void) #else diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 26d132418f9..c7e3bcd5d52 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -430,6 +430,9 @@ static inline notrace int ftrace_get_offsets_##call( \ * * static struct ftrace_event_class __used event_class_