/*
 * ltt-channels.c
 *
 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng channel management.
 *
 * Author:
 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/slab.h>
17 #include <linux/vmalloc.h>
18 #include <linux/ltt-channels.h>
21 * ltt_channel_mutex may be nested inside the LTT trace mutex.
22 * ltt_channel_mutex mutex may be nested inside markers mutex.
24 static DEFINE_MUTEX(ltt_channel_mutex
);
25 static LIST_HEAD(ltt_channels
);
27 * Index of next channel in array. Makes sure that as long as a trace channel is
28 * allocated, no array index will be re-used when a channel is freed and then
29 * another channel is allocated. This index is cleared and the array indexeds
30 * get reassigned when the index_kref goes back to 0, which indicates that no
31 * more trace channels are allocated.
33 static unsigned int free_index
;
34 /* index_kref is protected by both ltt_channel_mutex and lock_markers */
35 static struct kref index_kref
; /* Keeps track of allocated trace channels */
37 static struct ltt_channel_setting
*lookup_channel(const char *name
)
39 struct ltt_channel_setting
*iter
;
41 list_for_each_entry(iter
, <t_channels
, list
)
42 if (strcmp(name
, iter
->name
) == 0)
48 * Must be called when channel refcount falls to 0 _and_ also when the last
49 * trace is freed. This function is responsible for compacting the channel and
50 * event IDs when no users are active.
52 * Called with lock_markers() and channels mutex held.
54 static void release_channel_setting(struct kref
*kref
)
56 struct ltt_channel_setting
*setting
= container_of(kref
,
57 struct ltt_channel_setting
, kref
);
58 struct ltt_channel_setting
*iter
;
60 if (atomic_read(&index_kref
.refcount
) == 0
61 && atomic_read(&setting
->kref
.refcount
) == 0) {
62 list_del(&setting
->list
);
66 list_for_each_entry(iter
, <t_channels
, list
) {
67 iter
->index
= free_index
++;
68 iter
->free_event_id
= 0;
74 * Perform channel index compaction when the last trace channel is freed.
76 * Called with lock_markers() and channels mutex held.
78 static void release_trace_channel(struct kref
*kref
)
80 struct ltt_channel_setting
*iter
, *n
;
82 list_for_each_entry_safe(iter
, n
, <t_channels
, list
)
83 release_channel_setting(&iter
->kref
);
84 if (atomic_read(&index_kref
.refcount
) == 0)
85 markers_compact_event_ids();
89 * ltt_channel_trace_ref : Is there an existing trace session ?
91 * Must be called with lock_markers() held.
93 int ltt_channels_trace_ref(void)
95 return !!atomic_read(&index_kref
.refcount
);
97 EXPORT_SYMBOL_GPL(ltt_channels_trace_ref
);
100 * ltt_channels_register - Register a trace channel.
101 * @name: channel name
105 int ltt_channels_register(const char *name
)
107 struct ltt_channel_setting
*setting
;
110 mutex_lock(<t_channel_mutex
);
111 setting
= lookup_channel(name
);
113 if (atomic_read(&setting
->kref
.refcount
) == 0)
116 kref_get(&setting
->kref
);
120 setting
= kzalloc(sizeof(*setting
), GFP_KERNEL
);
125 list_add(&setting
->list
, <t_channels
);
126 strncpy(setting
->name
, name
, PATH_MAX
-1);
127 setting
->index
= free_index
++;
129 kref_init(&setting
->kref
);
131 mutex_unlock(<t_channel_mutex
);
134 EXPORT_SYMBOL_GPL(ltt_channels_register
);
137 * ltt_channels_unregister - Unregister a trace channel.
138 * @name: channel name
139 * @compacting: performing compaction
141 * Must be called with markers mutex held.
143 int ltt_channels_unregister(const char *name
, int compacting
)
145 struct ltt_channel_setting
*setting
;
149 mutex_lock(<t_channel_mutex
);
150 setting
= lookup_channel(name
);
151 if (!setting
|| atomic_read(&setting
->kref
.refcount
) == 0) {
155 kref_put(&setting
->kref
, release_channel_setting
);
156 if (!compacting
&& atomic_read(&index_kref
.refcount
) == 0)
157 markers_compact_event_ids();
160 mutex_unlock(<t_channel_mutex
);
163 EXPORT_SYMBOL_GPL(ltt_channels_unregister
);
166 * ltt_channels_set_default - Set channel default behavior.
167 * @name: default channel name
168 * @sb_size: size of the subbuffers
169 * @n_sb: number of subbuffers
171 int ltt_channels_set_default(const char *name
,
172 unsigned int sb_size
,
175 struct ltt_channel_setting
*setting
;
178 mutex_lock(<t_channel_mutex
);
179 setting
= lookup_channel(name
);
180 if (!setting
|| atomic_read(&setting
->kref
.refcount
) == 0) {
184 setting
->sb_size
= sb_size
;
185 setting
->n_sb
= n_sb
;
187 mutex_unlock(<t_channel_mutex
);
190 EXPORT_SYMBOL_GPL(ltt_channels_set_default
);
193 * ltt_channels_get_name_from_index - get channel name from channel index
194 * @index: channel index
196 * Allows to lookup the channel name given its index. Done to keep the name
197 * information outside of each trace channel instance.
199 const char *ltt_channels_get_name_from_index(unsigned int index
)
201 struct ltt_channel_setting
*iter
;
203 list_for_each_entry(iter
, <t_channels
, list
)
204 if (iter
->index
== index
&& atomic_read(&iter
->kref
.refcount
))
208 EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index
);
210 static struct ltt_channel_setting
*
211 ltt_channels_get_setting_from_name(const char *name
)
213 struct ltt_channel_setting
*iter
;
215 list_for_each_entry(iter
, <t_channels
, list
)
216 if (!strcmp(iter
->name
, name
)
217 && atomic_read(&iter
->kref
.refcount
))
223 * ltt_channels_get_index_from_name - get channel index from channel name
224 * @name: channel name
226 * Allows to lookup the channel index given its name. Done to keep the name
227 * information outside of each trace channel instance.
228 * Returns -1 if not found.
230 int ltt_channels_get_index_from_name(const char *name
)
232 struct ltt_channel_setting
*setting
;
234 setting
= ltt_channels_get_setting_from_name(name
);
236 return setting
->index
;
240 EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name
);
243 * ltt_channels_trace_alloc - Allocate channel structures for a trace
244 * @sb_size: subbuffer size. 0 uses default.
245 * @n_sb: number of subbuffers per per-cpu buffers. 0 uses default.
246 * @flags: Default channel flags
248 * Use the current channel list to allocate the channels for a trace.
249 * Called with trace lock held. Does not perform the trace buffer allocation,
250 * because we must let the user overwrite specific channel sizes.
252 struct ltt_chan
*ltt_channels_trace_alloc(unsigned int *nr_channels
,
253 int overwrite
, int active
)
255 struct ltt_chan
*chan
= NULL
;
256 struct ltt_channel_setting
*iter
;
259 mutex_lock(<t_channel_mutex
);
262 if (!atomic_read(&index_kref
.refcount
))
263 kref_init(&index_kref
);
265 kref_get(&index_kref
);
266 *nr_channels
= free_index
;
267 chan
= kzalloc(sizeof(struct ltt_chan
) * free_index
, GFP_KERNEL
);
270 list_for_each_entry(iter
, <t_channels
, list
) {
271 if (!atomic_read(&iter
->kref
.refcount
))
273 chan
[iter
->index
].a
.sb_size
= iter
->sb_size
;
274 chan
[iter
->index
].a
.n_sb
= iter
->n_sb
;
275 chan
[iter
->index
].overwrite
= overwrite
;
276 chan
[iter
->index
].active
= active
;
277 strncpy(chan
[iter
->index
].a
.filename
, iter
->name
, NAME_MAX
- 1);
278 chan
[iter
->index
].switch_timer_interval
= 0;
281 mutex_unlock(<t_channel_mutex
);
285 EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc
);
288 * ltt_channels_trace_free - Free one trace's channels
289 * @channels: channels to free
291 * Called with trace lock held. The actual channel buffers must be freed before
292 * this function is called.
294 void ltt_channels_trace_free(struct ltt_chan
*channels
,
295 unsigned int nr_channels
)
298 mutex_lock(<t_channel_mutex
);
300 kref_put(&index_kref
, release_trace_channel
);
301 mutex_unlock(<t_channel_mutex
);
303 marker_update_probes();
305 EXPORT_SYMBOL_GPL(ltt_channels_trace_free
);
308 * ltt_channels_trace_set_timer - set switch timer
310 * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
313 void ltt_channels_trace_set_timer(struct ltt_chan
*chan
,
314 unsigned long interval
)
316 chan
->switch_timer_interval
= interval
;
318 EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer
);
321 * _ltt_channels_get_event_id - get next event ID for a marker
322 * @channel: channel name
325 * Returns a unique event ID (for this channel) or < 0 on error.
326 * Must be called with channels mutex held.
328 int _ltt_channels_get_event_id(const char *channel
, const char *name
)
330 struct ltt_channel_setting
*setting
;
333 setting
= ltt_channels_get_setting_from_name(channel
);
338 if (strcmp(channel
, "metadata") == 0) {
339 if (strcmp(name
, "core_marker_id") == 0)
341 else if (strcmp(name
, "core_marker_format") == 0)
347 if (setting
->free_event_id
== EVENTS_PER_CHANNEL
- 1) {
351 ret
= setting
->free_event_id
++;
357 * ltt_channels_get_event_id - get next event ID for a marker
358 * @channel: channel name
361 * Returns a unique event ID (for this channel) or < 0 on error.
363 int ltt_channels_get_event_id(const char *channel
, const char *name
)
367 mutex_lock(<t_channel_mutex
);
368 ret
= _ltt_channels_get_event_id(channel
, name
);
369 mutex_unlock(<t_channel_mutex
);
374 * ltt_channels_reset_event_ids - reset event IDs at compaction
376 * Called with lock marker and channel mutex held.
378 void _ltt_channels_reset_event_ids(void)
380 struct ltt_channel_setting
*iter
;
382 list_for_each_entry(iter
, <t_channels
, list
)
383 iter
->free_event_id
= 0;
386 MODULE_LICENSE("GPL and additional rights");
387 MODULE_AUTHOR("Mathieu Desnoyers");
388 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");