/*
* ltt/ltt-channels.c
*
* (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
*
* LTTng channel management.
*
* Author:
* Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/ltt-channels.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*
* ltt_channel_mutex may be nested inside the LTT trace mutex.
* ltt_channel_mutex may be nested inside the markers mutex.
*/
static DEFINE_MUTEX(ltt_channel_mutex);
static LIST_HEAD(ltt_channels);
/*
* Index of next channel in array. Makes sure that as long as a trace channel is
* allocated, no array index will be re-used when a channel is freed and then
* another channel is allocated. This index is cleared and the array indexes
* get reassigned when the index_kref goes back to 0, which indicates that no
* more trace channels are allocated.
*/
static unsigned int free_index;
/* index_kref is protected by both ltt_channel_mutex and lock_markers */
static struct kref index_kref; /* Keeps track of allocated trace channels */
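/*
 * For example, if channels A, B and C are registered they receive indexes 0,
 * 1 and 2. Unregistering B while a trace is still allocated (index_kref != 0)
 * leaves index 1 unused rather than reassigning it. Once the last trace is
 * freed and index_kref drops back to 0, release_channel_setting() frees the
 * settings whose refcount reached 0, renumbers the remaining ones
 * contiguously and resets their event ID counters.
 */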
static struct ltt_channel_setting *lookup_channel(const char *name)
{
struct ltt_channel_setting *iter;
list_for_each_entry(iter, &ltt_channels, list)
if (strcmp(name, iter->name) == 0)
return iter;
return NULL;
}
/*
* Must be called when channel refcount falls to 0 _and_ also when the last
* trace is freed. This function is responsible for compacting the channel and
* event IDs when no users are active.
*
* Called with lock_markers() and channels mutex held.
*/
static void release_channel_setting(struct kref *kref)
{
struct ltt_channel_setting *setting = container_of(kref,
struct ltt_channel_setting, kref);
struct ltt_channel_setting *iter;
if (atomic_read(&index_kref.refcount) == 0
&& atomic_read(&setting->kref.refcount) == 0) {
list_del(&setting->list);
kfree(setting);
free_index = 0;
list_for_each_entry(iter, &ltt_channels, list) {
iter->index = free_index++;
iter->free_event_id = 0;
}
}
}
/*
* Perform channel index compaction when the last trace channel is freed.
*
* Called with lock_markers() and channels mutex held.
*/
static void release_trace_channel(struct kref *kref)
{
struct ltt_channel_setting *iter, *n;
list_for_each_entry_safe(iter, n, &ltt_channels, list)
release_channel_setting(&iter->kref);
if (atomic_read(&index_kref.refcount) == 0)
markers_compact_event_ids();
}
/*
* ltt_channels_trace_ref - Is there an existing trace session?
*
* Must be called with lock_markers() held.
*/
int ltt_channels_trace_ref(void)
{
return !!atomic_read(&index_kref.refcount);
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_ref);
/**
* ltt_channels_register - Register a trace channel.
* @name: channel name
*
* Uses refcounting.
*/
int ltt_channels_register(const char *name)
{
struct ltt_channel_setting *setting;
int ret = 0;
mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (setting) {
if (atomic_read(&setting->kref.refcount) == 0)
goto init_kref;
else {
kref_get(&setting->kref);
goto end;
}
}
setting = kzalloc(sizeof(*setting), GFP_KERNEL);
if (!setting) {
ret = -ENOMEM;
goto end;
}
list_add(&setting->list, &ltt_channels);
strncpy(setting->name, name, PATH_MAX-1);
setting->index = free_index++;
init_kref:
kref_init(&setting->kref);
end:
mutex_unlock(&ltt_channel_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_register);
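/*
 * Example usage (illustrative sketch; "my_channel" is a made-up name). A
 * probe provider typically pairs registration with a later unregistration,
 * holding the markers mutex around the unregister call as documented below:
 *
 *	ret = ltt_channels_register("my_channel");
 *	if (ret)
 *		return ret;
 *	...
 *	lock_markers();
 *	ltt_channels_unregister("my_channel", 0);
 *	unlock_markers();
 */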
/**
* ltt_channels_unregister - Unregister a trace channel.
* @name: channel name
* @compacting: performing compaction
*
* Must be called with markers mutex held.
*/
int ltt_channels_unregister(const char *name, int compacting)
{
struct ltt_channel_setting *setting;
int ret = 0;
if (!compacting)
mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (!setting || atomic_read(&setting->kref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
kref_put(&setting->kref, release_channel_setting);
if (!compacting && atomic_read(&index_kref.refcount) == 0)
markers_compact_event_ids();
end:
if (!compacting)
mutex_unlock(&ltt_channel_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_unregister);
/**
* ltt_channels_set_default - Set channel default behavior.
* @name: default channel name
* @sb_size: size of the subbuffers
* @n_sb: number of subbuffers
*/
int ltt_channels_set_default(const char *name,
unsigned int sb_size,
unsigned int n_sb)
{
struct ltt_channel_setting *setting;
int ret = 0;
mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (!setting || atomic_read(&setting->kref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
setting->sb_size = sb_size;
setting->n_sb = n_sb;
end:
mutex_unlock(&ltt_channel_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_set_default);
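/*
 * Example usage (illustrative sketch; the name and geometry are arbitrary):
 * override the default sub-buffer geometry of a registered channel before a
 * trace allocates its buffers:
 *
 *	ret = ltt_channels_set_default("my_channel", 128 * 1024, 4);
 *	if (ret)
 *		return ret;
 *
 * -ENOENT means the channel was never registered or is already released.
 */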
/**
* ltt_channels_get_name_from_index - get channel name from channel index
* @index: channel index
*
* Allows lookup of the channel name given its index. Done to keep the name
* information outside of each trace channel instance.
*/
const char *ltt_channels_get_name_from_index(unsigned int index)
{
struct ltt_channel_setting *iter;
list_for_each_entry(iter, &ltt_channels, list)
if (iter->index == index && atomic_read(&iter->kref.refcount))
return iter->name;
return NULL;
}
EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
static struct ltt_channel_setting *
ltt_channels_get_setting_from_name(const char *name)
{
struct ltt_channel_setting *iter;
list_for_each_entry(iter, &ltt_channels, list)
if (!strcmp(iter->name, name)
&& atomic_read(&iter->kref.refcount))
return iter;
return NULL;
}
/**
* ltt_channels_get_index_from_name - get channel index from channel name
* @name: channel name
*
* Allows lookup of the channel index given its name. Done to keep the name
* information outside of each trace channel instance.
* Returns -1 if not found.
*/
int ltt_channels_get_index_from_name(const char *name)
{
struct ltt_channel_setting *setting;
setting = ltt_channels_get_setting_from_name(name);
if (setting)
return setting->index;
else
return -1;
}
EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
/**
* ltt_channels_trace_alloc - Allocate channel structures for a trace
* @nr_channels: output pointer, set to the number of channel slots allocated
* @overwrite: channel overwrite mode flag
* @active: whether the channels start out active
*
* Use the current channel list to allocate the channels for a trace.
* Called with trace lock held. Does not perform the trace buffer allocation,
* because we must let the user overwrite specific channel sizes.
*/
struct ltt_chan *ltt_channels_trace_alloc(unsigned int *nr_channels,
int overwrite, int active)
{
struct ltt_chan *chan = NULL;
struct ltt_channel_setting *iter;
lock_markers();
mutex_lock(&ltt_channel_mutex);
if (!free_index)
goto end;
if (!atomic_read(&index_kref.refcount))
kref_init(&index_kref);
else
kref_get(&index_kref);
*nr_channels = free_index;
chan = kzalloc(sizeof(struct ltt_chan) * free_index, GFP_KERNEL);
if (!chan)
goto end;
list_for_each_entry(iter, &ltt_channels, list) {
if (!atomic_read(&iter->kref.refcount))
continue;
chan[iter->index].a.sb_size = iter->sb_size;
chan[iter->index].a.n_sb = iter->n_sb;
chan[iter->index].overwrite = overwrite;
chan[iter->index].active = active;
strncpy(chan[iter->index].a.filename, iter->name, NAME_MAX - 1);
chan[iter->index].switch_timer_interval = 0;
}
end:
mutex_unlock(&ltt_channel_mutex);
unlock_markers();
return chan;
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
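/*
 * Example usage (illustrative sketch): a trace creation path pairs this
 * allocation with ltt_channels_trace_free(), keeping nr_channels around for
 * the release:
 *
 *	unsigned int nr_channels;
 *	struct ltt_chan *chans;
 *
 *	chans = ltt_channels_trace_alloc(&nr_channels, 0, 1);
 *	if (!chans)
 *		return -ENOMEM;
 *	...
 *	ltt_channels_trace_free(chans, nr_channels);
 *
 * A NULL return also happens when no channels are registered yet
 * (free_index == 0), not only on allocation failure.
 */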
/**
* ltt_channels_trace_free - Free one trace's channels
* @channels: channels to free
* @nr_channels: number of channels in the array
*
* Called with trace lock held. The actual channel buffers must be freed before
* this function is called.
*/
void ltt_channels_trace_free(struct ltt_chan *channels,
unsigned int nr_channels)
{
lock_markers();
mutex_lock(&ltt_channel_mutex);
kfree(channels);
kref_put(&index_kref, release_trace_channel);
mutex_unlock(&ltt_channel_mutex);
unlock_markers();
marker_update_probes();
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
/**
* ltt_channels_trace_set_timer - set switch timer
* @chan: channel
* @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
*/
void ltt_channels_trace_set_timer(struct ltt_chan *chan,
unsigned long interval)
{
chan->switch_timer_interval = interval;
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer);
/**
* _ltt_channels_get_event_id - get next event ID for a marker
* @channel: channel name
* @name: event name
*
* Returns a unique event ID (for this channel) or < 0 on error.
* Must be called with channels mutex held.
*/
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
struct ltt_channel_setting *setting;
int ret;
setting = ltt_channels_get_setting_from_name(channel);
if (!setting) {
ret = -ENOENT;
goto end;
}
if (strcmp(channel, "metadata") == 0) {
if (strcmp(name, "core_marker_id") == 0)
ret = 0;
else if (strcmp(name, "core_marker_format") == 0)
ret = 1;
else
ret = -ENOENT;
goto end;
}
if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
ret = -ENOSPC;
goto end;
}
ret = setting->free_event_id++;
end:
return ret;
}
/**
* ltt_channels_get_event_id - get next event ID for a marker
* @channel: channel name
* @name: event name
*
* Returns a unique event ID (for this channel) or < 0 on error.
*/
int ltt_channels_get_event_id(const char *channel, const char *name)
{
int ret;
mutex_lock(&ltt_channel_mutex);
ret = _ltt_channels_get_event_id(channel, name);
mutex_unlock(&ltt_channel_mutex);
return ret;
}
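/*
 * Example usage (illustrative sketch; channel and event names are made up):
 *
 *	int eid = ltt_channels_get_event_id("my_channel", "my_event");
 *	if (eid < 0)
 *		return eid;
 *
 * The return value is -ENOENT if the channel is unknown and -ENOSPC when the
 * per-channel event ID space is exhausted. The "metadata" channel is
 * special-cased: "core_marker_id" and "core_marker_format" always map to
 * IDs 0 and 1.
 */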
/**
* _ltt_channels_reset_event_ids - reset event IDs at compaction
*
* Called with lock_markers() and channels mutex held.
*/
void _ltt_channels_reset_event_ids(void)
{
struct ltt_channel_setting *iter;
list_for_each_entry(iter, &ltt_channels, list)
iter->free_event_id = 0;
}
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");