Commit | Line | Data |
---|---|---|
1c8284eb MD |
/*
 * ltt/ltt-tracer.c
 *
 * (C) Copyright 2005-2008 -
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
 * start/stop.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *  Karim Yaghmour (karim@opersys.com)
 *  Tom Zanussi (zanussi@us.ibm.com)
 *  Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *  Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *  22/09/06, Move to the marker/probes mechanism.
 *  19/10/05, Complete lockless mechanism.
 *  27/05/05, Modular redesign and rewrite.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
27 | ||
28 | #include <linux/time.h> | |
29 | #include <linux/module.h> | |
30 | #include <linux/string.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/rcupdate.h> | |
34 | #include <linux/sched.h> | |
35 | #include <linux/bitops.h> | |
36 | #include <linux/fs.h> | |
37 | #include <linux/cpu.h> | |
38 | #include <linux/kref.h> | |
39 | #include <linux/delay.h> | |
40 | #include <linux/vmalloc.h> | |
41 | #include <asm/atomic.h> | |
42 | ||
43 | #include "ltt-tracer.h" | |
44 | ||
/*
 * Wait for all currently-executing tracing probe sections to complete.
 * synchronize_sched() waits for preempt-disabled regions; on PREEMPT_RT
 * those regions are preemptible, so an additional synchronize_rcu()
 * matches the rcu_read_lock() used by the RT fast path (see async_wakeup).
 */
static void synchronize_trace(void)
{
	synchronize_sched();
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
}
52 | ||
/* Periodic timer delivering asynchronous wakeups to blocked trace readers. */
static void async_wakeup(unsigned long data);

static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
56 | ||
/* Default callbacks for modules */
/*
 * Default filter-control callback: accept every control message and do
 * nothing.  Replaced when a filter module registers via
 * ltt_module_register(LTT_FUNCTION_FILTER_CONTROL, ...).
 */
notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg,
			       struct ltt_trace *trace)
{
	return 0;
}
64 | ||
/* Default statedump callback: nothing to dump until a module registers. */
int ltt_statedump_default(struct ltt_trace *trace)
{
	return 0;
}
69 | ||
/* Callbacks for registered modules */

/* Filter-control hook and the module owning it (NULL when unset). */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* Statedump hook and the module owning it. */
int (*ltt_statedump_functor)(struct ltt_trace *trace) = ltt_statedump_default;
struct module *ltt_statedump_owner;
81 | ||
82 | struct chan_info_struct { | |
83 | const char *name; | |
84 | unsigned int def_sb_size; | |
85 | unsigned int def_n_sb; | |
86 | } chan_infos[] = { | |
87 | [LTT_CHANNEL_METADATA] = { | |
88 | LTT_METADATA_CHANNEL, | |
89 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
90 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
91 | }, | |
92 | [LTT_CHANNEL_FD_STATE] = { | |
93 | LTT_FD_STATE_CHANNEL, | |
94 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
95 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
96 | }, | |
97 | [LTT_CHANNEL_GLOBAL_STATE] = { | |
98 | LTT_GLOBAL_STATE_CHANNEL, | |
99 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
100 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
101 | }, | |
102 | [LTT_CHANNEL_IRQ_STATE] = { | |
103 | LTT_IRQ_STATE_CHANNEL, | |
104 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
105 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
106 | }, | |
107 | [LTT_CHANNEL_MODULE_STATE] = { | |
108 | LTT_MODULE_STATE_CHANNEL, | |
109 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
110 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
111 | }, | |
112 | [LTT_CHANNEL_NETIF_STATE] = { | |
113 | LTT_NETIF_STATE_CHANNEL, | |
114 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
115 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
116 | }, | |
117 | [LTT_CHANNEL_SOFTIRQ_STATE] = { | |
118 | LTT_SOFTIRQ_STATE_CHANNEL, | |
119 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
120 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
121 | }, | |
122 | [LTT_CHANNEL_SWAP_STATE] = { | |
123 | LTT_SWAP_STATE_CHANNEL, | |
124 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
125 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
126 | }, | |
127 | [LTT_CHANNEL_SYSCALL_STATE] = { | |
128 | LTT_SYSCALL_STATE_CHANNEL, | |
129 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
130 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
131 | }, | |
132 | [LTT_CHANNEL_TASK_STATE] = { | |
133 | LTT_TASK_STATE_CHANNEL, | |
134 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
135 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
136 | }, | |
137 | [LTT_CHANNEL_VM_STATE] = { | |
138 | LTT_VM_STATE_CHANNEL, | |
139 | LTT_DEFAULT_SUBBUF_SIZE_MED, | |
140 | LTT_DEFAULT_N_SUBBUFS_MED, | |
141 | }, | |
142 | [LTT_CHANNEL_FS] = { | |
143 | LTT_FS_CHANNEL, | |
144 | LTT_DEFAULT_SUBBUF_SIZE_MED, | |
145 | LTT_DEFAULT_N_SUBBUFS_MED, | |
146 | }, | |
147 | [LTT_CHANNEL_INPUT] = { | |
148 | LTT_INPUT_CHANNEL, | |
149 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
150 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
151 | }, | |
152 | [LTT_CHANNEL_IPC] = { | |
153 | LTT_IPC_CHANNEL, | |
154 | LTT_DEFAULT_SUBBUF_SIZE_LOW, | |
155 | LTT_DEFAULT_N_SUBBUFS_LOW, | |
156 | }, | |
157 | [LTT_CHANNEL_KERNEL] = { | |
158 | LTT_KERNEL_CHANNEL, | |
159 | LTT_DEFAULT_SUBBUF_SIZE_HIGH, | |
160 | LTT_DEFAULT_N_SUBBUFS_HIGH, | |
161 | }, | |
162 | [LTT_CHANNEL_MM] = { | |
163 | LTT_MM_CHANNEL, | |
164 | LTT_DEFAULT_SUBBUF_SIZE_MED, | |
165 | LTT_DEFAULT_N_SUBBUFS_MED, | |
166 | }, | |
167 | [LTT_CHANNEL_RCU] = { | |
168 | LTT_RCU_CHANNEL, | |
169 | LTT_DEFAULT_SUBBUF_SIZE_MED, | |
170 | LTT_DEFAULT_N_SUBBUFS_MED, | |
171 | }, | |
172 | [LTT_CHANNEL_DEFAULT] = { | |
173 | NULL, | |
174 | LTT_DEFAULT_SUBBUF_SIZE_MED, | |
175 | LTT_DEFAULT_N_SUBBUFS_MED, | |
176 | }, | |
177 | }; | |
178 | ||
179 | static enum ltt_channels get_channel_type_from_name(const char *name) | |
180 | { | |
181 | int i; | |
182 | ||
183 | if (!name) | |
184 | return LTT_CHANNEL_DEFAULT; | |
185 | ||
186 | for (i = 0; i < ARRAY_SIZE(chan_infos); i++) | |
187 | if (chan_infos[i].name && !strcmp(name, chan_infos[i].name)) | |
188 | return (enum ltt_channels)i; | |
189 | ||
190 | return LTT_CHANNEL_DEFAULT; | |
191 | } | |
192 | ||
193 | /** | |
194 | * ltt_module_register - LTT module registration | |
195 | * @name: module type | |
196 | * @function: callback to register | |
197 | * @owner: module which owns the callback | |
198 | * | |
199 | * The module calling this registration function must ensure that no | |
200 | * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all() | |
201 | * must be called between a vmalloc and the moment the memory is made visible to | |
202 | * "function". This registration acts as a vmalloc_sync_all. Therefore, only if | |
203 | * the module allocates virtual memory after its registration must it | |
204 | * synchronize the TLBs. | |
205 | */ | |
206 | int ltt_module_register(enum ltt_module_function name, void *function, | |
207 | struct module *owner) | |
208 | { | |
209 | int ret = 0; | |
210 | ||
211 | /* | |
212 | * Make sure no page fault can be triggered by the module about to be | |
213 | * registered. We deal with this here so we don't have to call | |
214 | * vmalloc_sync_all() in each module's init. | |
215 | */ | |
216 | vmalloc_sync_all(); | |
217 | ||
218 | switch (name) { | |
219 | case LTT_FUNCTION_RUN_FILTER: | |
220 | if (ltt_run_filter_owner != NULL) { | |
221 | ret = -EEXIST; | |
222 | goto end; | |
223 | } | |
224 | ltt_filter_register((ltt_run_filter_functor)function); | |
225 | ltt_run_filter_owner = owner; | |
226 | break; | |
227 | case LTT_FUNCTION_FILTER_CONTROL: | |
228 | if (ltt_filter_control_owner != NULL) { | |
229 | ret = -EEXIST; | |
230 | goto end; | |
231 | } | |
232 | ltt_filter_control_functor = | |
233 | (int (*)(enum ltt_filter_control_msg, | |
234 | struct ltt_trace *))function; | |
235 | ltt_filter_control_owner = owner; | |
236 | break; | |
237 | case LTT_FUNCTION_STATEDUMP: | |
238 | if (ltt_statedump_owner != NULL) { | |
239 | ret = -EEXIST; | |
240 | goto end; | |
241 | } | |
242 | ltt_statedump_functor = | |
243 | (int (*)(struct ltt_trace *))function; | |
244 | ltt_statedump_owner = owner; | |
245 | break; | |
246 | } | |
247 | ||
248 | end: | |
249 | ||
250 | return ret; | |
251 | } | |
252 | EXPORT_SYMBOL_GPL(ltt_module_register); | |
253 | ||
/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 *
 * Restores the default callback for the given type.  For the run filter,
 * waits until every tracing section possibly using the old filter has
 * finished before returning, so the owning module can safely unload.
 */
void ltt_module_unregister(enum ltt_module_function name)
{
	switch (name) {
	case LTT_FUNCTION_RUN_FILTER:
		ltt_filter_unregister();
		ltt_run_filter_owner = NULL;
		/* Wait for preempt sections to finish */
		synchronize_trace();
		break;
	case LTT_FUNCTION_FILTER_CONTROL:
		ltt_filter_control_functor = ltt_filter_control_default;
		ltt_filter_control_owner = NULL;
		break;
	case LTT_FUNCTION_STATEDUMP:
		ltt_statedump_functor = ltt_statedump_default;
		ltt_statedump_owner = NULL;
		break;
	}

}
EXPORT_SYMBOL_GPL(ltt_module_unregister);
279 | ||
/* List of registered transports, protected by the traces lock. */
static LIST_HEAD(ltt_transport_list);
281 | ||
282 | /** | |
283 | * ltt_transport_register - LTT transport registration | |
284 | * @transport: transport structure | |
285 | * | |
286 | * Registers a transport which can be used as output to extract the data out of | |
287 | * LTTng. The module calling this registration function must ensure that no | |
288 | * trap-inducing code will be executed by the transport functions. E.g. | |
289 | * vmalloc_sync_all() must be called between a vmalloc and the moment the memory | |
290 | * is made visible to the transport function. This registration acts as a | |
291 | * vmalloc_sync_all. Therefore, only if the module allocates virtual memory | |
292 | * after its registration must it synchronize the TLBs. | |
293 | */ | |
294 | void ltt_transport_register(struct ltt_transport *transport) | |
295 | { | |
296 | /* | |
297 | * Make sure no page fault can be triggered by the module about to be | |
298 | * registered. We deal with this here so we don't have to call | |
299 | * vmalloc_sync_all() in each module's init. | |
300 | */ | |
301 | vmalloc_sync_all(); | |
302 | ||
303 | ltt_lock_traces(); | |
304 | list_add_tail(&transport->node, <t_transport_list); | |
305 | ltt_unlock_traces(); | |
306 | } | |
307 | EXPORT_SYMBOL_GPL(ltt_transport_register); | |
308 | ||
/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global list under the traces lock.
 * The caller must ensure no trace is still using this transport.
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
EXPORT_SYMBOL_GPL(ltt_transport_unregister);
320 | ||
321 | static inline | |
322 | int is_channel_overwrite(enum ltt_channels chan, enum trace_mode mode) | |
323 | { | |
324 | switch (mode) { | |
325 | case LTT_TRACE_NORMAL: | |
326 | return 0; | |
327 | case LTT_TRACE_FLIGHT: | |
328 | switch (chan) { | |
329 | case LTT_CHANNEL_METADATA: | |
330 | return 0; | |
331 | default: | |
332 | return 1; | |
333 | } | |
334 | case LTT_TRACE_HYBRID: | |
335 | switch (chan) { | |
336 | case LTT_CHANNEL_KERNEL: | |
337 | case LTT_CHANNEL_FS: | |
338 | case LTT_CHANNEL_MM: | |
339 | case LTT_CHANNEL_RCU: | |
340 | case LTT_CHANNEL_IPC: | |
341 | case LTT_CHANNEL_INPUT: | |
342 | return 1; | |
343 | default: | |
344 | return 0; | |
345 | } | |
346 | default: | |
347 | return 0; | |
348 | } | |
349 | } | |
350 | ||
351 | static void trace_async_wakeup(struct ltt_trace *trace) | |
352 | { | |
353 | int i; | |
354 | struct ltt_chan *chan; | |
355 | ||
356 | /* Must check each channel for pending read wakeup */ | |
357 | for (i = 0; i < trace->nr_channels; i++) { | |
358 | chan = &trace->channels[i]; | |
359 | if (chan->active) | |
360 | trace->ops->wakeup_channel(chan); | |
361 | } | |
362 | } | |
363 | ||
364 | /* Timer to send async wakeups to the readers */ | |
365 | static void async_wakeup(unsigned long data) | |
366 | { | |
367 | struct ltt_trace *trace; | |
368 | ||
369 | /* | |
370 | * PREEMPT_RT does not allow spinlocks to be taken within preempt | |
371 | * disable sections (spinlock taken in wake_up). However, mainline won't | |
372 | * allow mutex to be taken in interrupt context. Ugly. | |
373 | * Take a standard RCU read lock for RT kernels, which imply that we | |
374 | * also have to synchronize_rcu() upon updates. | |
375 | */ | |
376 | #ifndef CONFIG_PREEMPT_RT | |
377 | rcu_read_lock_sched(); | |
378 | #else | |
379 | rcu_read_lock(); | |
380 | #endif | |
381 | list_for_each_entry_rcu(trace, <t_traces.head, list) { | |
382 | trace_async_wakeup(trace); | |
383 | } | |
384 | #ifndef CONFIG_PREEMPT_RT | |
385 | rcu_read_unlock_sched(); | |
386 | #else | |
387 | rcu_read_unlock(); | |
388 | #endif | |
389 | ||
390 | mod_timer(<t_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL); | |
391 | } | |
392 | ||
393 | /** | |
394 | * _ltt_trace_find - find a trace by given name. | |
395 | * trace_name: trace name | |
396 | * | |
397 | * Returns a pointer to the trace structure, NULL if not found. | |
398 | */ | |
399 | static struct ltt_trace *_ltt_trace_find(const char *trace_name) | |
400 | { | |
401 | struct ltt_trace *trace; | |
402 | ||
403 | list_for_each_entry(trace, <t_traces.head, list) | |
404 | if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) | |
405 | return trace; | |
406 | ||
407 | return NULL; | |
408 | } | |
409 | ||
410 | /* _ltt_trace_find_setup : | |
411 | * find a trace in setup list by given name. | |
412 | * | |
413 | * Returns a pointer to the trace structure, NULL if not found. | |
414 | */ | |
415 | struct ltt_trace *_ltt_trace_find_setup(const char *trace_name) | |
416 | { | |
417 | struct ltt_trace *trace; | |
418 | ||
419 | list_for_each_entry(trace, <t_traces.setup_head, list) | |
420 | if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) | |
421 | return trace; | |
422 | ||
423 | return NULL; | |
424 | } | |
425 | EXPORT_SYMBOL_GPL(_ltt_trace_find_setup); | |
426 | ||
/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 *
 * Final kref release handler: removes the trace directories, drops the
 * transport-module reference taken at allocation time, frees the per-trace
 * channel array, and finally frees the trace structure itself.
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace *trace = container_of(kref, struct ltt_trace, kref);

	trace->ops->remove_dirs(trace);
	module_put(trace->transport->owner);
	ltt_channels_trace_free(trace->channels, trace->nr_channels);
	kfree(trace);
}
EXPORT_SYMBOL_GPL(ltt_release_trace);
441 | ||
442 | static inline void prepare_chan_size_num(unsigned int *subbuf_size, | |
443 | unsigned int *n_subbufs) | |
444 | { | |
445 | /* Make sure the subbuffer size is larger than a page */ | |
446 | *subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE); | |
447 | ||
448 | /* round to next power of 2 */ | |
449 | *subbuf_size = 1 << get_count_order(*subbuf_size); | |
450 | *n_subbufs = 1 << get_count_order(*n_subbufs); | |
451 | ||
452 | /* Subbuf size and number must both be power of two */ | |
453 | WARN_ON(hweight32(*subbuf_size) != 1); | |
454 | WARN_ON(hweight32(*n_subbufs) != 1); | |
455 | } | |
456 | ||
457 | int _ltt_trace_setup(const char *trace_name) | |
458 | { | |
459 | int err = 0; | |
460 | struct ltt_trace *new_trace = NULL; | |
461 | int metadata_index; | |
462 | unsigned int chan; | |
463 | enum ltt_channels chantype; | |
464 | ||
465 | if (_ltt_trace_find_setup(trace_name)) { | |
466 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
467 | trace_name); | |
468 | err = -EEXIST; | |
469 | goto traces_error; | |
470 | } | |
471 | ||
472 | if (_ltt_trace_find(trace_name)) { | |
473 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
474 | trace_name); | |
475 | err = -EEXIST; | |
476 | goto traces_error; | |
477 | } | |
478 | ||
479 | new_trace = kzalloc(sizeof(struct ltt_trace), GFP_KERNEL); | |
480 | if (!new_trace) { | |
481 | printk(KERN_ERR | |
482 | "LTT : Unable to allocate memory for trace %s\n", | |
483 | trace_name); | |
484 | err = -ENOMEM; | |
485 | goto traces_error; | |
486 | } | |
487 | strncpy(new_trace->trace_name, trace_name, NAME_MAX); | |
488 | new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels, | |
489 | 0, 1); | |
490 | if (!new_trace->channels) { | |
491 | printk(KERN_ERR | |
492 | "LTT : Unable to allocate memory for chaninfo %s\n", | |
493 | trace_name); | |
494 | err = -ENOMEM; | |
495 | goto trace_free; | |
496 | } | |
497 | ||
498 | /* | |
499 | * Force metadata channel to active, no overwrite. | |
500 | */ | |
501 | metadata_index = ltt_channels_get_index_from_name("metadata"); | |
502 | WARN_ON(metadata_index < 0); | |
503 | new_trace->channels[metadata_index].overwrite = 0; | |
504 | new_trace->channels[metadata_index].active = 1; | |
505 | ||
506 | /* | |
507 | * Set hardcoded tracer defaults for some channels | |
508 | */ | |
509 | for (chan = 0; chan < new_trace->nr_channels; chan++) { | |
510 | if (!(new_trace->channels[chan].active)) | |
511 | continue; | |
512 | ||
513 | chantype = get_channel_type_from_name( | |
514 | ltt_channels_get_name_from_index(chan)); | |
515 | new_trace->channels[chan].a.sb_size = | |
516 | chan_infos[chantype].def_sb_size; | |
517 | new_trace->channels[chan].a.n_sb = | |
518 | chan_infos[chantype].def_n_sb; | |
519 | } | |
520 | ||
521 | list_add(&new_trace->list, <t_traces.setup_head); | |
522 | return 0; | |
523 | ||
524 | trace_free: | |
525 | kfree(new_trace); | |
526 | traces_error: | |
527 | return err; | |
528 | } | |
529 | EXPORT_SYMBOL_GPL(_ltt_trace_setup); | |
530 | ||
531 | ||
/*
 * ltt_trace_setup - locked wrapper around _ltt_trace_setup().
 * Returns 0 on success or a negative errno.
 */
int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_trace_setup);
541 | ||
/* must be called from within a traces lock. */
/*
 * Free a trace that is still on the setup list (buffers never allocated):
 * only the list linkage and the structure itself need to be undone.
 */
static void _ltt_trace_free(struct ltt_trace *trace)
{
	list_del(&trace->list);
	kfree(trace);
}
548 | ||
549 | int ltt_trace_set_type(const char *trace_name, const char *trace_type) | |
550 | { | |
551 | int err = 0; | |
552 | struct ltt_trace *trace; | |
553 | struct ltt_transport *tran_iter, *transport = NULL; | |
554 | ||
555 | ltt_lock_traces(); | |
556 | ||
557 | trace = _ltt_trace_find_setup(trace_name); | |
558 | if (!trace) { | |
559 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
560 | err = -ENOENT; | |
561 | goto traces_error; | |
562 | } | |
563 | ||
564 | list_for_each_entry(tran_iter, <t_transport_list, node) { | |
565 | if (!strcmp(tran_iter->name, trace_type)) { | |
566 | transport = tran_iter; | |
567 | break; | |
568 | } | |
569 | } | |
570 | if (!transport) { | |
571 | printk(KERN_ERR "LTT : Transport %s is not present.\n", | |
572 | trace_type); | |
573 | err = -EINVAL; | |
574 | goto traces_error; | |
575 | } | |
576 | ||
577 | trace->transport = transport; | |
578 | ||
579 | traces_error: | |
580 | ltt_unlock_traces(); | |
581 | return err; | |
582 | } | |
583 | EXPORT_SYMBOL_GPL(ltt_trace_set_type); | |
584 | ||
585 | int ltt_trace_set_channel_subbufsize(const char *trace_name, | |
586 | const char *channel_name, | |
587 | unsigned int size) | |
588 | { | |
589 | int err = 0; | |
590 | struct ltt_trace *trace; | |
591 | int index; | |
592 | ||
593 | ltt_lock_traces(); | |
594 | ||
595 | trace = _ltt_trace_find_setup(trace_name); | |
596 | if (!trace) { | |
597 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
598 | err = -ENOENT; | |
599 | goto traces_error; | |
600 | } | |
601 | ||
602 | index = ltt_channels_get_index_from_name(channel_name); | |
603 | if (index < 0) { | |
604 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
605 | err = -ENOENT; | |
606 | goto traces_error; | |
607 | } | |
608 | trace->channels[index].a.sb_size = size; | |
609 | ||
610 | traces_error: | |
611 | ltt_unlock_traces(); | |
612 | return err; | |
613 | } | |
614 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize); | |
615 | ||
616 | int ltt_trace_set_channel_subbufcount(const char *trace_name, | |
617 | const char *channel_name, | |
618 | unsigned int cnt) | |
619 | { | |
620 | int err = 0; | |
621 | struct ltt_trace *trace; | |
622 | int index; | |
623 | ||
624 | ltt_lock_traces(); | |
625 | ||
626 | trace = _ltt_trace_find_setup(trace_name); | |
627 | if (!trace) { | |
628 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
629 | err = -ENOENT; | |
630 | goto traces_error; | |
631 | } | |
632 | ||
633 | index = ltt_channels_get_index_from_name(channel_name); | |
634 | if (index < 0) { | |
635 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
636 | err = -ENOENT; | |
637 | goto traces_error; | |
638 | } | |
639 | trace->channels[index].a.n_sb = cnt; | |
640 | ||
641 | traces_error: | |
642 | ltt_unlock_traces(); | |
643 | return err; | |
644 | } | |
645 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount); | |
646 | ||
647 | int ltt_trace_set_channel_switch_timer(const char *trace_name, | |
648 | const char *channel_name, | |
649 | unsigned long interval) | |
650 | { | |
651 | int err = 0; | |
652 | struct ltt_trace *trace; | |
653 | int index; | |
654 | ||
655 | ltt_lock_traces(); | |
656 | ||
657 | trace = _ltt_trace_find_setup(trace_name); | |
658 | if (!trace) { | |
659 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
660 | err = -ENOENT; | |
661 | goto traces_error; | |
662 | } | |
663 | ||
664 | index = ltt_channels_get_index_from_name(channel_name); | |
665 | if (index < 0) { | |
666 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
667 | err = -ENOENT; | |
668 | goto traces_error; | |
669 | } | |
670 | ltt_channels_trace_set_timer(&trace->channels[index], interval); | |
671 | ||
672 | traces_error: | |
673 | ltt_unlock_traces(); | |
674 | return err; | |
675 | } | |
676 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_switch_timer); | |
677 | ||
678 | int ltt_trace_set_channel_enable(const char *trace_name, | |
679 | const char *channel_name, unsigned int enable) | |
680 | { | |
681 | int err = 0; | |
682 | struct ltt_trace *trace; | |
683 | int index; | |
684 | ||
685 | ltt_lock_traces(); | |
686 | ||
687 | trace = _ltt_trace_find_setup(trace_name); | |
688 | if (!trace) { | |
689 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
690 | err = -ENOENT; | |
691 | goto traces_error; | |
692 | } | |
693 | ||
694 | /* | |
695 | * Datas in metadata channel(marker info) is necessary to be able to | |
696 | * read the trace, we always enable this channel. | |
697 | */ | |
698 | if (!enable && !strcmp(channel_name, "metadata")) { | |
699 | printk(KERN_ERR "LTT : Trying to disable metadata channel\n"); | |
700 | err = -EINVAL; | |
701 | goto traces_error; | |
702 | } | |
703 | ||
704 | index = ltt_channels_get_index_from_name(channel_name); | |
705 | if (index < 0) { | |
706 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
707 | err = -ENOENT; | |
708 | goto traces_error; | |
709 | } | |
710 | ||
711 | trace->channels[index].active = enable; | |
712 | ||
713 | traces_error: | |
714 | ltt_unlock_traces(); | |
715 | return err; | |
716 | } | |
717 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable); | |
718 | ||
719 | int ltt_trace_set_channel_overwrite(const char *trace_name, | |
720 | const char *channel_name, | |
721 | unsigned int overwrite) | |
722 | { | |
723 | int err = 0; | |
724 | struct ltt_trace *trace; | |
725 | int index; | |
726 | ||
727 | ltt_lock_traces(); | |
728 | ||
729 | trace = _ltt_trace_find_setup(trace_name); | |
730 | if (!trace) { | |
731 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
732 | err = -ENOENT; | |
733 | goto traces_error; | |
734 | } | |
735 | ||
736 | /* | |
737 | * Always put the metadata channel in non-overwrite mode : | |
738 | * This is a very low traffic channel and it can't afford to have its | |
739 | * data overwritten : this data (marker info) is necessary to be | |
740 | * able to read the trace. | |
741 | */ | |
742 | if (overwrite && !strcmp(channel_name, "metadata")) { | |
743 | printk(KERN_ERR "LTT : Trying to set metadata channel to " | |
744 | "overwrite mode\n"); | |
745 | err = -EINVAL; | |
746 | goto traces_error; | |
747 | } | |
748 | ||
749 | index = ltt_channels_get_index_from_name(channel_name); | |
750 | if (index < 0) { | |
751 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
752 | err = -ENOENT; | |
753 | goto traces_error; | |
754 | } | |
755 | ||
756 | trace->channels[index].overwrite = overwrite; | |
757 | ||
758 | traces_error: | |
759 | ltt_unlock_traces(); | |
760 | return err; | |
761 | } | |
762 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite); | |
763 | ||
764 | int ltt_trace_alloc(const char *trace_name) | |
765 | { | |
766 | int err = 0; | |
767 | struct ltt_trace *trace; | |
768 | int sb_size, n_sb; | |
769 | unsigned long flags; | |
770 | int chan; | |
771 | const char *channel_name; | |
772 | ||
773 | ltt_lock_traces(); | |
774 | ||
775 | trace = _ltt_trace_find_setup(trace_name); | |
776 | if (!trace) { | |
777 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
778 | err = -ENOENT; | |
779 | goto traces_error; | |
780 | } | |
781 | ||
782 | kref_init(&trace->kref); | |
783 | init_waitqueue_head(&trace->kref_wq); | |
784 | trace->active = 0; | |
785 | get_trace_clock(); | |
786 | trace->freq_scale = trace_clock_freq_scale(); | |
787 | ||
788 | if (!trace->transport) { | |
789 | printk(KERN_ERR "LTT : Transport is not set.\n"); | |
790 | err = -EINVAL; | |
791 | goto transport_error; | |
792 | } | |
793 | if (!try_module_get(trace->transport->owner)) { | |
794 | printk(KERN_ERR "LTT : Can't lock transport module.\n"); | |
795 | err = -ENODEV; | |
796 | goto transport_error; | |
797 | } | |
798 | trace->ops = &trace->transport->ops; | |
799 | ||
800 | err = trace->ops->create_dirs(trace); | |
801 | if (err) { | |
802 | printk(KERN_ERR "LTT : Can't create dir for trace %s.\n", | |
803 | trace_name); | |
804 | goto dirs_error; | |
805 | } | |
806 | ||
807 | local_irq_save(flags); | |
808 | trace->start_freq = trace_clock_frequency(); | |
809 | trace->start_tsc = trace_clock_read64(); | |
810 | do_gettimeofday(&trace->start_time); | |
811 | local_irq_restore(flags); | |
812 | ||
813 | for (chan = 0; chan < trace->nr_channels; chan++) { | |
814 | if (!(trace->channels[chan].active)) | |
815 | continue; | |
816 | ||
817 | channel_name = ltt_channels_get_name_from_index(chan); | |
818 | WARN_ON(!channel_name); | |
819 | /* | |
820 | * note: sb_size and n_sb will be overwritten with updated | |
821 | * values by channel creation. | |
822 | */ | |
823 | sb_size = trace->channels[chan].a.sb_size; | |
824 | n_sb = trace->channels[chan].a.n_sb; | |
825 | prepare_chan_size_num(&sb_size, &n_sb); | |
826 | err = trace->ops->create_channel(channel_name, | |
827 | &trace->channels[chan], | |
828 | trace->dentry.trace_root, | |
829 | sb_size, n_sb, | |
830 | trace->channels[chan].overwrite, trace); | |
831 | if (err != 0) { | |
832 | printk(KERN_ERR "LTT : Can't create channel %s.\n", | |
833 | channel_name); | |
834 | goto create_channel_error; | |
835 | } | |
836 | } | |
837 | ||
838 | list_del(&trace->list); | |
839 | if (list_empty(<t_traces.head)) { | |
840 | mod_timer(<t_async_wakeup_timer, | |
841 | jiffies + LTT_PERCPU_TIMER_INTERVAL); | |
842 | set_kernel_trace_flag_all_tasks(); | |
843 | } | |
844 | list_add_rcu(&trace->list, <t_traces.head); | |
845 | synchronize_trace(); | |
846 | ||
847 | ltt_unlock_traces(); | |
848 | ||
849 | return 0; | |
850 | ||
851 | create_channel_error: | |
852 | for (chan--; chan >= 0; chan--) { | |
853 | if (trace->channels[chan].active) { | |
854 | struct ltt_chan *chanp = &trace->channels[chan]; | |
855 | trace->ops->remove_channel_files(chanp); | |
856 | kref_put(&chanp->a.kref, trace->ops->remove_channel); | |
857 | } | |
858 | } | |
859 | trace->ops->remove_dirs(trace); | |
860 | ||
861 | dirs_error: | |
862 | module_put(trace->transport->owner); | |
863 | transport_error: | |
864 | put_trace_clock(); | |
865 | traces_error: | |
866 | ltt_unlock_traces(); | |
867 | return err; | |
868 | } | |
869 | EXPORT_SYMBOL_GPL(ltt_trace_alloc); | |
870 | ||
871 | /* | |
872 | * It is worked as a wrapper for current version of ltt_control.ko. | |
873 | * We will make a new ltt_control based on debugfs, and control each channel's | |
874 | * buffer. | |
875 | */ | |
876 | static | |
877 | int ltt_trace_create(const char *trace_name, const char *trace_type, | |
878 | enum trace_mode mode, | |
879 | unsigned int subbuf_size_low, unsigned int n_subbufs_low, | |
880 | unsigned int subbuf_size_med, unsigned int n_subbufs_med, | |
881 | unsigned int subbuf_size_high, unsigned int n_subbufs_high) | |
882 | { | |
883 | int err = 0; | |
884 | ||
885 | err = ltt_trace_setup(trace_name); | |
886 | if (IS_ERR_VALUE(err)) | |
887 | return err; | |
888 | ||
889 | err = ltt_trace_set_type(trace_name, trace_type); | |
890 | if (IS_ERR_VALUE(err)) | |
891 | return err; | |
892 | ||
893 | err = ltt_trace_alloc(trace_name); | |
894 | if (IS_ERR_VALUE(err)) | |
895 | return err; | |
896 | ||
897 | return err; | |
898 | } | |
899 | ||
900 | /* Must be called while sure that trace is in the list. */ | |
901 | static int _ltt_trace_destroy(struct ltt_trace *trace) | |
902 | { | |
903 | int err = -EPERM; | |
904 | ||
905 | if (trace == NULL) { | |
906 | err = -ENOENT; | |
907 | goto traces_error; | |
908 | } | |
909 | if (trace->active) { | |
910 | printk(KERN_ERR | |
911 | "LTT : Can't destroy trace %s : tracer is active\n", | |
912 | trace->trace_name); | |
913 | err = -EBUSY; | |
914 | goto active_error; | |
915 | } | |
916 | /* Everything went fine */ | |
917 | list_del_rcu(&trace->list); | |
918 | synchronize_trace(); | |
919 | if (list_empty(<t_traces.head)) { | |
920 | clear_kernel_trace_flag_all_tasks(); | |
921 | /* | |
922 | * We stop the asynchronous delivery of reader wakeup, but | |
923 | * we must make one last check for reader wakeups pending | |
924 | * later in __ltt_trace_destroy. | |
925 | */ | |
926 | del_timer_sync(<t_async_wakeup_timer); | |
927 | } | |
928 | return 0; | |
929 | ||
930 | /* error handling */ | |
931 | active_error: | |
932 | traces_error: | |
933 | return err; | |
934 | } | |
935 | ||
/* Sleepable part of the destroy */
/*
 * Tear down a trace already removed from the active list: stop the
 * channels, deliver the final sub-buffers, wait for readers, then drop
 * the last trace reference (which frees it via ltt_release_trace).
 * Called with the traces lock held; temporarily releases it while
 * waiting for readers.
 */
static void __ltt_trace_destroy(struct ltt_trace *trace)
{
	int i;
	struct ltt_chan *chan;

	/* Stop buffer switching on every active channel. */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	/* Let any deferred channel work complete before teardown. */
	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	/* Drop channel file references; channels free through their kref. */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active) {
			trace->ops->remove_channel_files(chan);
			kref_put(&chan->a.kref,
				 trace->ops->remove_channel);
		}
	}

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
	if (atomic_read(&trace->kref.refcount) > 1) {
		int ret = 0;
		/*
		 * Unlock traces and CPU hotplug while we wait for lttd to
		 * release the files.
		 */
		ltt_unlock_traces();
		__wait_event_interruptible(trace->kref_wq,
			(atomic_read(&trace->kref.refcount) == 1), ret);
		ltt_lock_traces();
	}

	/* Drop the initial reference; last put runs ltt_release_trace(). */
	kref_put(&trace->kref, ltt_release_trace);
}
984 | ||
985 | int ltt_trace_destroy(const char *trace_name) | |
986 | { | |
987 | int err = 0; | |
988 | struct ltt_trace *trace; | |
989 | ||
990 | ltt_lock_traces(); | |
991 | ||
992 | trace = _ltt_trace_find(trace_name); | |
993 | if (trace) { | |
994 | err = _ltt_trace_destroy(trace); | |
995 | if (err) | |
996 | goto error; | |
997 | ||
998 | __ltt_trace_destroy(trace); | |
999 | ltt_unlock_traces(); | |
1000 | put_trace_clock(); | |
1001 | ||
1002 | return 0; | |
1003 | } | |
1004 | ||
1005 | trace = _ltt_trace_find_setup(trace_name); | |
1006 | if (trace) { | |
1007 | _ltt_trace_free(trace); | |
1008 | ltt_unlock_traces(); | |
1009 | return 0; | |
1010 | } | |
1011 | ||
1012 | err = -ENOENT; | |
1013 | ||
1014 | /* Error handling */ | |
1015 | error: | |
1016 | ltt_unlock_traces(); | |
1017 | return err; | |
1018 | } | |
1019 | EXPORT_SYMBOL_GPL(ltt_trace_destroy); | |
1020 | ||
1021 | /* | |
1022 | * called with trace lock held. | |
1023 | */ | |
1024 | static | |
1025 | void ltt_channels_trace_start_timer(struct ltt_chan *channels, | |
1026 | unsigned int nr_channels) | |
1027 | { | |
1028 | int i; | |
1029 | ||
1030 | for (i = 0; i < nr_channels; i++) { | |
1031 | struct ltt_chan *chan = &channels[i]; | |
1032 | chan->a.trace->ops->start_switch_timer(chan); | |
1033 | } | |
1034 | } | |
1035 | ||
1036 | /* | |
1037 | * called with trace lock held. | |
1038 | */ | |
1039 | static | |
1040 | void ltt_channels_trace_stop_timer(struct ltt_chan *channels, | |
1041 | unsigned int nr_channels) | |
1042 | { | |
1043 | int i; | |
1044 | ||
1045 | for (i = 0; i < nr_channels; i++) { | |
1046 | struct ltt_chan *chan = &channels[i]; | |
1047 | chan->a.trace->ops->stop_switch_timer(chan); | |
1048 | } | |
1049 | } | |
1050 | ||
1051 | /* must be called from within a traces lock. */ | |
1052 | static int _ltt_trace_start(struct ltt_trace *trace) | |
1053 | { | |
1054 | int err = 0; | |
1055 | ||
1056 | if (trace == NULL) { | |
1057 | err = -ENOENT; | |
1058 | goto traces_error; | |
1059 | } | |
1060 | if (trace->active) | |
1061 | printk(KERN_INFO "LTT : Tracing already active for trace %s\n", | |
1062 | trace->trace_name); | |
1063 | if (!try_module_get(ltt_run_filter_owner)) { | |
1064 | err = -ENODEV; | |
1065 | printk(KERN_ERR "LTT : Can't lock filter module.\n"); | |
1066 | goto get_ltt_run_filter_error; | |
1067 | } | |
1068 | ltt_channels_trace_start_timer(trace->channels, trace->nr_channels); | |
1069 | trace->active = 1; | |
1070 | /* Read by trace points without protection : be careful */ | |
1071 | ltt_traces.num_active_traces++; | |
1072 | return err; | |
1073 | ||
1074 | /* error handling */ | |
1075 | get_ltt_run_filter_error: | |
1076 | traces_error: | |
1077 | return err; | |
1078 | } | |
1079 | ||
1080 | int ltt_trace_start(const char *trace_name) | |
1081 | { | |
1082 | int err = 0; | |
1083 | struct ltt_trace *trace; | |
1084 | ||
1085 | ltt_lock_traces(); | |
1086 | ||
1087 | trace = _ltt_trace_find(trace_name); | |
1088 | err = _ltt_trace_start(trace); | |
1089 | if (err) | |
1090 | goto no_trace; | |
1091 | ||
1092 | ltt_unlock_traces(); | |
1093 | ||
1094 | /* | |
1095 | * Call the kernel state dump. | |
1096 | * Events will be mixed with real kernel events, it's ok. | |
1097 | * Notice that there is no protection on the trace : that's exactly | |
1098 | * why we iterate on the list and check for trace equality instead of | |
1099 | * directly using this trace handle inside the logging function. | |
1100 | */ | |
1101 | ||
1102 | ltt_dump_marker_state(trace); | |
1103 | ||
1104 | if (!try_module_get(ltt_statedump_owner)) { | |
1105 | err = -ENODEV; | |
1106 | printk(KERN_ERR | |
1107 | "LTT : Can't lock state dump module.\n"); | |
1108 | } else { | |
1109 | ltt_statedump_functor(trace); | |
1110 | module_put(ltt_statedump_owner); | |
1111 | } | |
1112 | ||
1113 | return err; | |
1114 | ||
1115 | /* Error handling */ | |
1116 | no_trace: | |
1117 | ltt_unlock_traces(); | |
1118 | return err; | |
1119 | } | |
1120 | EXPORT_SYMBOL_GPL(ltt_trace_start); | |
1121 | ||
1122 | /* must be called from within traces lock */ | |
1123 | static int _ltt_trace_stop(struct ltt_trace *trace) | |
1124 | { | |
1125 | int err = -EPERM; | |
1126 | ||
1127 | if (trace == NULL) { | |
1128 | err = -ENOENT; | |
1129 | goto traces_error; | |
1130 | } | |
1131 | if (!trace->active) | |
1132 | printk(KERN_INFO "LTT : Tracing not active for trace %s\n", | |
1133 | trace->trace_name); | |
1134 | if (trace->active) { | |
1135 | ltt_channels_trace_stop_timer(trace->channels, | |
1136 | trace->nr_channels); | |
1137 | trace->active = 0; | |
1138 | ltt_traces.num_active_traces--; | |
1139 | synchronize_trace(); /* Wait for each tracing to be finished */ | |
1140 | } | |
1141 | module_put(ltt_run_filter_owner); | |
1142 | /* Everything went fine */ | |
1143 | return 0; | |
1144 | ||
1145 | /* Error handling */ | |
1146 | traces_error: | |
1147 | return err; | |
1148 | } | |
1149 | ||
/*
 * ltt_trace_stop - stop tracing on a trace by name.
 * Returns 0 on success, or a negative error from _ltt_trace_stop().
 */
int ltt_trace_stop(const char *trace_name)
{
	struct ltt_trace *trace;
	int ret;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ret = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_trace_stop);
1162 | ||
1163 | /** | |
1164 | * ltt_control - Trace control in-kernel API | |
1165 | * @msg: Action to perform | |
1166 | * @trace_name: Trace on which the action must be done | |
1167 | * @trace_type: Type of trace (normal, flight, hybrid) | |
1168 | * @args: Arguments specific to the action | |
1169 | */ | |
1170 | int ltt_control(enum ltt_control_msg msg, const char *trace_name, | |
1171 | const char *trace_type, union ltt_control_args args) | |
1172 | { | |
1173 | int err = -EPERM; | |
1174 | ||
1175 | printk(KERN_ALERT "ltt_control : trace %s\n", trace_name); | |
1176 | switch (msg) { | |
1177 | case LTT_CONTROL_START: | |
1178 | printk(KERN_DEBUG "Start tracing %s\n", trace_name); | |
1179 | err = ltt_trace_start(trace_name); | |
1180 | break; | |
1181 | case LTT_CONTROL_STOP: | |
1182 | printk(KERN_DEBUG "Stop tracing %s\n", trace_name); | |
1183 | err = ltt_trace_stop(trace_name); | |
1184 | break; | |
1185 | case LTT_CONTROL_CREATE_TRACE: | |
1186 | printk(KERN_DEBUG "Creating trace %s\n", trace_name); | |
1187 | err = ltt_trace_create(trace_name, trace_type, | |
1188 | args.new_trace.mode, | |
1189 | args.new_trace.subbuf_size_low, | |
1190 | args.new_trace.n_subbufs_low, | |
1191 | args.new_trace.subbuf_size_med, | |
1192 | args.new_trace.n_subbufs_med, | |
1193 | args.new_trace.subbuf_size_high, | |
1194 | args.new_trace.n_subbufs_high); | |
1195 | break; | |
1196 | case LTT_CONTROL_DESTROY_TRACE: | |
1197 | printk(KERN_DEBUG "Destroying trace %s\n", trace_name); | |
1198 | err = ltt_trace_destroy(trace_name); | |
1199 | break; | |
1200 | } | |
1201 | return err; | |
1202 | } | |
1203 | EXPORT_SYMBOL_GPL(ltt_control); | |
1204 | ||
1205 | /** | |
1206 | * ltt_filter_control - Trace filter control in-kernel API | |
1207 | * @msg: Action to perform on the filter | |
1208 | * @trace_name: Trace on which the action must be done | |
1209 | */ | |
1210 | int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name) | |
1211 | { | |
1212 | int err; | |
1213 | struct ltt_trace *trace; | |
1214 | ||
1215 | printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name); | |
1216 | ltt_lock_traces(); | |
1217 | trace = _ltt_trace_find(trace_name); | |
1218 | if (trace == NULL) { | |
1219 | printk(KERN_ALERT | |
1220 | "Trace does not exist. Cannot proxy control request\n"); | |
1221 | err = -ENOENT; | |
1222 | goto trace_error; | |
1223 | } | |
1224 | if (!try_module_get(ltt_filter_control_owner)) { | |
1225 | err = -ENODEV; | |
1226 | goto get_module_error; | |
1227 | } | |
1228 | switch (msg) { | |
1229 | case LTT_FILTER_DEFAULT_ACCEPT: | |
1230 | printk(KERN_DEBUG | |
1231 | "Proxy filter default accept %s\n", trace_name); | |
1232 | err = (*ltt_filter_control_functor)(msg, trace); | |
1233 | break; | |
1234 | case LTT_FILTER_DEFAULT_REJECT: | |
1235 | printk(KERN_DEBUG | |
1236 | "Proxy filter default reject %s\n", trace_name); | |
1237 | err = (*ltt_filter_control_functor)(msg, trace); | |
1238 | break; | |
1239 | default: | |
1240 | err = -EPERM; | |
1241 | } | |
1242 | module_put(ltt_filter_control_owner); | |
1243 | ||
1244 | get_module_error: | |
1245 | trace_error: | |
1246 | ltt_unlock_traces(); | |
1247 | return err; | |
1248 | } | |
1249 | EXPORT_SYMBOL_GPL(ltt_filter_control); | |
1250 | ||
1251 | int __init ltt_init(void) | |
1252 | { | |
1253 | /* Make sure no page fault can be triggered by this module */ | |
1254 | vmalloc_sync_all(); | |
1255 | init_timer_deferrable(<t_async_wakeup_timer); | |
1256 | return 0; | |
1257 | } | |
1258 | ||
1259 | module_init(ltt_init) | |
1260 | ||
1261 | static void __exit ltt_exit(void) | |
1262 | { | |
1263 | struct ltt_trace *trace; | |
1264 | struct list_head *pos, *n; | |
1265 | ||
1266 | ltt_lock_traces(); | |
1267 | /* Stop each trace, currently being read by RCU read-side */ | |
1268 | list_for_each_entry_rcu(trace, <t_traces.head, list) | |
1269 | _ltt_trace_stop(trace); | |
1270 | /* Wait for quiescent state. Readers have preemption disabled. */ | |
1271 | synchronize_trace(); | |
1272 | /* Safe iteration is now permitted. It does not have to be RCU-safe | |
1273 | * because no readers are left. */ | |
1274 | list_for_each_safe(pos, n, <t_traces.head) { | |
1275 | trace = container_of(pos, struct ltt_trace, list); | |
1276 | /* _ltt_trace_destroy does a synchronize_trace() */ | |
1277 | _ltt_trace_destroy(trace); | |
1278 | __ltt_trace_destroy(trace); | |
1279 | } | |
1280 | /* free traces in pre-alloc status */ | |
1281 | list_for_each_safe(pos, n, <t_traces.setup_head) { | |
1282 | trace = container_of(pos, struct ltt_trace, list); | |
1283 | _ltt_trace_free(trace); | |
1284 | } | |
1285 | ||
1286 | ltt_unlock_traces(); | |
1287 | } | |
1288 | ||
1289 | module_exit(ltt_exit) | |
1290 | ||
/* Module metadata */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");