Commit | Line | Data |
---|---|---|
9dad1eb8 PMF |
1 | /* |
2 | * ltt/ltt-tracer.c | |
3 | * | |
4 | * (C) Copyright 2005-2008 - | |
5 | * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) | |
6 | * | |
7 | * Tracing management internal kernel API. Trace buffer allocation/free, tracing | |
8 | * start/stop. | |
9 | * | |
10 | * Author: | |
11 | * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) | |
12 | * | |
13 | * Inspired from LTT : | |
14 | * Karim Yaghmour (karim@opersys.com) | |
15 | * Tom Zanussi (zanussi@us.ibm.com) | |
16 | * Bob Wisniewski (bob@watson.ibm.com) | |
17 | * And from K42 : | |
18 | * Bob Wisniewski (bob@watson.ibm.com) | |
19 | * | |
20 | * Changelog: | |
21 | * 22/09/06, Move to the marker/probes mechanism. | |
22 | * 19/10/05, Complete lockless mechanism. | |
23 | * 27/05/05, Modular redesign and rewrite. | |
24 | */ | |
25 | ||
26 | #include <linux/time.h> | |
27 | #include <linux/ltt-tracer.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/string.h> | |
30 | #include <linux/slab.h> | |
31 | #include <linux/init.h> | |
32 | #include <linux/rcupdate.h> | |
33 | #include <linux/sched.h> | |
34 | #include <linux/bitops.h> | |
35 | #include <linux/fs.h> | |
36 | #include <linux/cpu.h> | |
37 | #include <linux/kref.h> | |
38 | #include <linux/delay.h> | |
39 | #include <linux/vmalloc.h> | |
40 | #include <asm/atomic.h> | |
41 | ||
static void async_wakeup(unsigned long data);

/* Periodic timer delivering reader wakeups; re-armed by async_wakeup(). */
static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
45 | ||
46 | /* Default callbacks for modules */ | |
/*
 * Default filter-control callback: accepts every message without acting
 * on it.  Installed until a filter module registers its own handler
 * through ltt_module_register().
 */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}
52 | ||
/*
 * Default state-dump callback: no-op placeholder used until a statedump
 * module registers its own implementation.
 */
int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}
57 | ||
/* Callbacks for registered modules */

/*
 * Filter-control dispatch pointer: points at the default no-op handler
 * until a filter-control module registers via ltt_module_register().
 */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State-dump hook: replaced when a statedump module registers. */
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;
70 | ||
/*
 * Per-channel-type defaults: debugfs channel name plus default
 * sub-buffer geometry.  Indexed by enum ltt_channels; the
 * LTT_CHANNEL_DEFAULT entry (NULL name) supplies the fallback used for
 * channels not matched by name (see get_channel_type_from_name()).
 */
struct chan_info_struct {
	const char *name;		/* channel file name, NULL for default */
	unsigned int def_subbufsize;	/* default sub-buffer size (bytes) */
	unsigned int def_subbufcount;	/* default number of sub-buffers */
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_FD_STATE] = {
		LTT_FD_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_GLOBAL_STATE] = {
		LTT_GLOBAL_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_IRQ_STATE] = {
		LTT_IRQ_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_MODULE_STATE] = {
		LTT_MODULE_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_NETIF_STATE] = {
		LTT_NETIF_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_SOFTIRQ_STATE] = {
		LTT_SOFTIRQ_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_SWAP_STATE] = {
		LTT_SWAP_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_SYSCALL_STATE] = {
		LTT_SYSCALL_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_TASK_STATE] = {
		LTT_TASK_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_VM_STATE] = {
		LTT_VM_STATE_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_FS] = {
		LTT_FS_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_MED,
		LTT_DEFAULT_N_SUBBUFS_MED,
	},
	[LTT_CHANNEL_INPUT] = {
		LTT_INPUT_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_IPC] = {
		LTT_IPC_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_KERNEL] = {
		LTT_KERNEL_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
	[LTT_CHANNEL_MM] = {
		LTT_MM_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_MED,
		LTT_DEFAULT_N_SUBBUFS_MED,
	},
	[LTT_CHANNEL_RCU] = {
		LTT_RCU_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_MED,
		LTT_DEFAULT_N_SUBBUFS_MED,
	},
	[LTT_CHANNEL_DEFAULT] = {
		NULL,
		LTT_DEFAULT_SUBBUF_SIZE_MED,
		LTT_DEFAULT_N_SUBBUFS_MED,
	},
};
167 | ||
168 | static enum ltt_channels get_channel_type_from_name(const char *name) | |
169 | { | |
170 | int i; | |
171 | ||
172 | if (!name) | |
173 | return LTT_CHANNEL_DEFAULT; | |
174 | ||
175 | for (i = 0; i < ARRAY_SIZE(chan_infos); i++) | |
176 | if (chan_infos[i].name && !strcmp(name, chan_infos[i].name)) | |
177 | return (enum ltt_channels)i; | |
178 | ||
179 | return LTT_CHANNEL_DEFAULT; | |
180 | } | |
181 | ||
/**
 * ltt_module_register - LTT module registration
 * @name: module type
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * Returns 0 on success, -EEXIST if a callback of this type is already
 * registered.
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible to
 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
 * the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
int ltt_module_register(enum ltt_module_function name, void *function,
		struct module *owner)
{
	int ret = 0;

	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	vmalloc_sync_all();

	switch (name) {
	case LTT_FUNCTION_RUN_FILTER:
		/* Only one run-filter provider may be registered at a time. */
		if (ltt_run_filter_owner != NULL) {
			ret = -EEXIST;
			goto end;
		}
		ltt_filter_register((ltt_run_filter_functor)function);
		ltt_run_filter_owner = owner;
		break;
	case LTT_FUNCTION_FILTER_CONTROL:
		if (ltt_filter_control_owner != NULL) {
			ret = -EEXIST;
			goto end;
		}
		ltt_filter_control_functor =
			(int (*)(enum ltt_filter_control_msg,
			struct ltt_trace_struct *))function;
		ltt_filter_control_owner = owner;
		break;
	case LTT_FUNCTION_STATEDUMP:
		if (ltt_statedump_owner != NULL) {
			ret = -EEXIST;
			goto end;
		}
		ltt_statedump_functor =
			(int (*)(struct ltt_trace_struct *))function;
		ltt_statedump_owner = owner;
		break;
	}

end:

	return ret;
}
EXPORT_SYMBOL_GPL(ltt_module_register);
242 | ||
/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 *
 * Restores the default callback for @name and clears the owner so a new
 * module may register.
 */
void ltt_module_unregister(enum ltt_module_function name)
{
	switch (name) {
	case LTT_FUNCTION_RUN_FILTER:
		ltt_filter_unregister();
		ltt_run_filter_owner = NULL;
		/* Wait for preempt sections to finish */
		synchronize_sched();
		break;
	case LTT_FUNCTION_FILTER_CONTROL:
		ltt_filter_control_functor = ltt_filter_control_default;
		ltt_filter_control_owner = NULL;
		break;
	case LTT_FUNCTION_STATEDUMP:
		ltt_statedump_functor = ltt_statedump_default;
		ltt_statedump_owner = NULL;
		break;
	}

}
EXPORT_SYMBOL_GPL(ltt_module_unregister);
268 | ||
269 | static LIST_HEAD(ltt_transport_list); | |
270 | ||
271 | /** | |
272 | * ltt_transport_register - LTT transport registration | |
273 | * @transport: transport structure | |
274 | * | |
275 | * Registers a transport which can be used as output to extract the data out of | |
276 | * LTTng. The module calling this registration function must ensure that no | |
277 | * trap-inducing code will be executed by the transport functions. E.g. | |
278 | * vmalloc_sync_all() must be called between a vmalloc and the moment the memory | |
279 | * is made visible to the transport function. This registration acts as a | |
280 | * vmalloc_sync_all. Therefore, only if the module allocates virtual memory | |
281 | * after its registration must it synchronize the TLBs. | |
282 | */ | |
283 | void ltt_transport_register(struct ltt_transport *transport) | |
284 | { | |
285 | /* | |
286 | * Make sure no page fault can be triggered by the module about to be | |
287 | * registered. We deal with this here so we don't have to call | |
288 | * vmalloc_sync_all() in each module's init. | |
289 | */ | |
290 | vmalloc_sync_all(); | |
291 | ||
292 | ltt_lock_traces(); | |
293 | list_add_tail(&transport->node, <t_transport_list); | |
294 | ltt_unlock_traces(); | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(ltt_transport_register); | |
297 | ||
/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global transport list under the traces
 * lock.
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
EXPORT_SYMBOL_GPL(ltt_transport_unregister);
309 | ||
310 | static inline int is_channel_overwrite(enum ltt_channels chan, | |
311 | enum trace_mode mode) | |
312 | { | |
313 | switch (mode) { | |
314 | case LTT_TRACE_NORMAL: | |
315 | return 0; | |
316 | case LTT_TRACE_FLIGHT: | |
317 | switch (chan) { | |
318 | case LTT_CHANNEL_METADATA: | |
319 | return 0; | |
320 | default: | |
321 | return 1; | |
322 | } | |
323 | case LTT_TRACE_HYBRID: | |
324 | switch (chan) { | |
325 | case LTT_CHANNEL_KERNEL: | |
326 | case LTT_CHANNEL_FS: | |
327 | case LTT_CHANNEL_MM: | |
328 | case LTT_CHANNEL_RCU: | |
329 | case LTT_CHANNEL_IPC: | |
330 | case LTT_CHANNEL_INPUT: | |
331 | return 1; | |
332 | default: | |
333 | return 0; | |
334 | } | |
335 | default: | |
336 | return 0; | |
337 | } | |
338 | } | |
339 | ||
/**
 * ltt_write_trace_header - Write trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 *
 * Fills the sub-buffer header with the tracer magic/version, the word
 * size and alignment of this kernel, and the trace start time/clock
 * parameters recorded at ltt_trace_alloc() time.
 */
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	/* Lets the viewer decode pointer-sized fields from this kernel. */
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}
EXPORT_SYMBOL_GPL(ltt_write_trace_header);
359 | ||
/*
 * Wake up the readers of every active channel of @trace, via the
 * transport's wakeup_channel() operation.
 */
static void trace_async_wakeup(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}
372 | ||
373 | /* Timer to send async wakeups to the readers */ | |
374 | static void async_wakeup(unsigned long data) | |
375 | { | |
376 | struct ltt_trace_struct *trace; | |
377 | ||
378 | /* | |
379 | * PREEMPT_RT does not allow spinlocks to be taken within preempt | |
380 | * disable sections (spinlock taken in wake_up). However, mainline won't | |
381 | * allow mutex to be taken in interrupt context. Ugly. | |
382 | * A proper way to do this would be to turn the timer into a | |
383 | * periodically woken up thread, but it adds to the footprint. | |
384 | */ | |
385 | #ifndef CONFIG_PREEMPT_RT | |
386 | rcu_read_lock_sched(); | |
387 | #else | |
388 | ltt_lock_traces(); | |
389 | #endif | |
390 | list_for_each_entry_rcu(trace, <t_traces.head, list) { | |
391 | trace_async_wakeup(trace); | |
392 | } | |
393 | #ifndef CONFIG_PREEMPT_RT | |
394 | rcu_read_unlock_sched(); | |
395 | #else | |
396 | ltt_unlock_traces(); | |
397 | #endif | |
398 | ||
399 | mod_timer(<t_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL); | |
400 | } | |
401 | ||
402 | /** | |
403 | * _ltt_trace_find - find a trace by given name. | |
404 | * trace_name: trace name | |
405 | * | |
406 | * Returns a pointer to the trace structure, NULL if not found. | |
407 | */ | |
408 | static struct ltt_trace_struct *_ltt_trace_find(const char *trace_name) | |
409 | { | |
410 | struct ltt_trace_struct *trace; | |
411 | ||
412 | list_for_each_entry(trace, <t_traces.head, list) | |
413 | if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) | |
414 | return trace; | |
415 | ||
416 | return NULL; | |
417 | } | |
418 | ||
419 | /* _ltt_trace_find_setup : | |
420 | * find a trace in setup list by given name. | |
421 | * | |
422 | * Returns a pointer to the trace structure, NULL if not found. | |
423 | */ | |
424 | struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name) | |
425 | { | |
426 | struct ltt_trace_struct *trace; | |
427 | ||
428 | list_for_each_entry(trace, <t_traces.setup_head, list) | |
429 | if (!strncmp(trace->trace_name, trace_name, NAME_MAX)) | |
430 | return trace; | |
431 | ||
432 | return NULL; | |
433 | } | |
434 | EXPORT_SYMBOL_GPL(_ltt_trace_find_setup); | |
435 | ||
/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 *
 * kref release callback: removes the trace's transport directories once
 * the last transport reference is dropped.
 */
void ltt_release_transport(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, ltt_transport_kref);
	trace->ops->remove_dirs(trace);
}
EXPORT_SYMBOL_GPL(ltt_release_transport);
447 | ||
/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 *
 * kref release callback: frees the per-trace channel array and the trace
 * structure itself once the last reference (e.g. lttd readers) is gone.
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}
EXPORT_SYMBOL_GPL(ltt_release_trace);
460 | ||
/*
 * Round the requested sub-buffer size and count up to the next power of
 * two, in place.  The WARN_ONs assert the power-of-two invariant the
 * buffer code relies on.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	unsigned int size_order = get_count_order(*subbuf_size);
	unsigned int count_order = get_count_order(*n_subbufs);

	*subbuf_size = 1 << size_order;
	*n_subbufs = 1 << count_order;

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
471 | ||
472 | int _ltt_trace_setup(const char *trace_name) | |
473 | { | |
474 | int err = 0; | |
475 | struct ltt_trace_struct *new_trace = NULL; | |
476 | int metadata_index; | |
477 | unsigned int chan; | |
478 | enum ltt_channels chantype; | |
479 | ||
480 | if (_ltt_trace_find_setup(trace_name)) { | |
481 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
482 | trace_name); | |
483 | err = -EEXIST; | |
484 | goto traces_error; | |
485 | } | |
486 | ||
487 | if (_ltt_trace_find(trace_name)) { | |
488 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
489 | trace_name); | |
490 | err = -EEXIST; | |
491 | goto traces_error; | |
492 | } | |
493 | ||
494 | new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL); | |
495 | if (!new_trace) { | |
496 | printk(KERN_ERR | |
497 | "LTT : Unable to allocate memory for trace %s\n", | |
498 | trace_name); | |
499 | err = -ENOMEM; | |
500 | goto traces_error; | |
501 | } | |
502 | strncpy(new_trace->trace_name, trace_name, NAME_MAX); | |
503 | new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels, | |
504 | 0, 1); | |
505 | if (!new_trace->channels) { | |
506 | printk(KERN_ERR | |
507 | "LTT : Unable to allocate memory for chaninfo %s\n", | |
508 | trace_name); | |
509 | err = -ENOMEM; | |
510 | goto trace_free; | |
511 | } | |
512 | ||
513 | /* | |
514 | * Force metadata channel to active, no overwrite. | |
515 | */ | |
516 | metadata_index = ltt_channels_get_index_from_name("metadata"); | |
517 | WARN_ON(metadata_index < 0); | |
518 | new_trace->channels[metadata_index].overwrite = 0; | |
519 | new_trace->channels[metadata_index].active = 1; | |
520 | ||
521 | /* | |
522 | * Set hardcoded tracer defaults for some channels | |
523 | */ | |
524 | for (chan = 0; chan < new_trace->nr_channels; chan++) { | |
525 | if (!(new_trace->channels[chan].active)) | |
526 | continue; | |
527 | ||
528 | chantype = get_channel_type_from_name( | |
529 | ltt_channels_get_name_from_index(chan)); | |
530 | new_trace->channels[chan].subbuf_size = | |
531 | chan_infos[chantype].def_subbufsize; | |
532 | new_trace->channels[chan].subbuf_cnt = | |
533 | chan_infos[chantype].def_subbufcount; | |
534 | } | |
535 | ||
536 | list_add(&new_trace->list, <t_traces.setup_head); | |
537 | return 0; | |
538 | ||
539 | trace_free: | |
540 | kfree(new_trace); | |
541 | traces_error: | |
542 | return err; | |
543 | } | |
544 | EXPORT_SYMBOL_GPL(_ltt_trace_setup); | |
545 | ||
546 | ||
/*
 * ltt_trace_setup - locked wrapper around _ltt_trace_setup().
 * Returns whatever _ltt_trace_setup() returns.
 */
int ltt_trace_setup(const char *trace_name)
{
	int err;

	ltt_lock_traces();
	err = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();

	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_setup);
556 | ||
/*
 * must be called from within a traces lock.
 * Unlinks a setup-list trace and frees its descriptor.
 */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	list_del(&trace->list);
	kfree(trace);
}
563 | ||
564 | int ltt_trace_set_type(const char *trace_name, const char *trace_type) | |
565 | { | |
566 | int err = 0; | |
567 | struct ltt_trace_struct *trace; | |
568 | struct ltt_transport *tran_iter, *transport = NULL; | |
569 | ||
570 | ltt_lock_traces(); | |
571 | ||
572 | trace = _ltt_trace_find_setup(trace_name); | |
573 | if (!trace) { | |
574 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
575 | err = -ENOENT; | |
576 | goto traces_error; | |
577 | } | |
578 | ||
579 | list_for_each_entry(tran_iter, <t_transport_list, node) { | |
580 | if (!strcmp(tran_iter->name, trace_type)) { | |
581 | transport = tran_iter; | |
582 | break; | |
583 | } | |
584 | } | |
585 | if (!transport) { | |
586 | printk(KERN_ERR "LTT : Transport %s is not present.\n", | |
587 | trace_type); | |
588 | err = -EINVAL; | |
589 | goto traces_error; | |
590 | } | |
591 | ||
592 | trace->transport = transport; | |
593 | ||
594 | traces_error: | |
595 | ltt_unlock_traces(); | |
596 | return err; | |
597 | } | |
598 | EXPORT_SYMBOL_GPL(ltt_trace_set_type); | |
599 | ||
600 | int ltt_trace_set_channel_subbufsize(const char *trace_name, | |
601 | const char *channel_name, unsigned int size) | |
602 | { | |
603 | int err = 0; | |
604 | struct ltt_trace_struct *trace; | |
605 | int index; | |
606 | ||
607 | ltt_lock_traces(); | |
608 | ||
609 | trace = _ltt_trace_find_setup(trace_name); | |
610 | if (!trace) { | |
611 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
612 | err = -ENOENT; | |
613 | goto traces_error; | |
614 | } | |
615 | ||
616 | index = ltt_channels_get_index_from_name(channel_name); | |
617 | if (index < 0) { | |
618 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
619 | err = -ENOENT; | |
620 | goto traces_error; | |
621 | } | |
622 | trace->channels[index].subbuf_size = size; | |
623 | ||
624 | traces_error: | |
625 | ltt_unlock_traces(); | |
626 | return err; | |
627 | } | |
628 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize); | |
629 | ||
630 | int ltt_trace_set_channel_subbufcount(const char *trace_name, | |
631 | const char *channel_name, unsigned int cnt) | |
632 | { | |
633 | int err = 0; | |
634 | struct ltt_trace_struct *trace; | |
635 | int index; | |
636 | ||
637 | ltt_lock_traces(); | |
638 | ||
639 | trace = _ltt_trace_find_setup(trace_name); | |
640 | if (!trace) { | |
641 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
642 | err = -ENOENT; | |
643 | goto traces_error; | |
644 | } | |
645 | ||
646 | index = ltt_channels_get_index_from_name(channel_name); | |
647 | if (index < 0) { | |
648 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
649 | err = -ENOENT; | |
650 | goto traces_error; | |
651 | } | |
652 | trace->channels[index].subbuf_cnt = cnt; | |
653 | ||
654 | traces_error: | |
655 | ltt_unlock_traces(); | |
656 | return err; | |
657 | } | |
658 | EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount); | |
659 | ||
/*
 * ltt_trace_set_channel_enable - enable or disable one channel of a
 * trace still on the setup list.  The metadata channel can never be
 * disabled.
 * Returns 0 on success, -EINVAL for a metadata disable attempt, -ENOENT
 * if the trace or channel is unknown.
 */
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The metadata channel (marker info) is required to be able to
	 * read the trace, so it must always stay enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
700 | ||
/*
 * ltt_trace_set_channel_overwrite - set the overwrite (flight-recorder)
 * flag of one channel of a trace still on the setup list.  The metadata
 * channel may never be put in overwrite mode.
 * Returns 0 on success, -EINVAL for a metadata overwrite attempt,
 * -ENOENT if the trace or channel is unknown.
 */
int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode :
	 * This is a very low traffic channel and it can't afford to have its
	 * data overwritten : this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
744 | ||
745 | int ltt_trace_alloc(const char *trace_name) | |
746 | { | |
747 | int err = 0; | |
748 | struct ltt_trace_struct *trace; | |
749 | int subbuf_size, subbuf_cnt; | |
750 | unsigned long flags; | |
751 | int chan; | |
752 | const char *channel_name; | |
753 | ||
754 | ltt_lock_traces(); | |
755 | ||
756 | trace = _ltt_trace_find_setup(trace_name); | |
757 | if (!trace) { | |
758 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
759 | err = -ENOENT; | |
760 | goto traces_error; | |
761 | } | |
762 | ||
763 | kref_init(&trace->kref); | |
764 | kref_init(&trace->ltt_transport_kref); | |
765 | init_waitqueue_head(&trace->kref_wq); | |
766 | trace->active = 0; | |
767 | get_trace_clock(); | |
768 | trace->freq_scale = trace_clock_freq_scale(); | |
769 | ||
770 | if (!trace->transport) { | |
771 | printk(KERN_ERR "LTT : Transport is not set.\n"); | |
772 | err = -EINVAL; | |
773 | goto transport_error; | |
774 | } | |
775 | if (!try_module_get(trace->transport->owner)) { | |
776 | printk(KERN_ERR "LTT : Can't lock transport module.\n"); | |
777 | err = -ENODEV; | |
778 | goto transport_error; | |
779 | } | |
780 | trace->ops = &trace->transport->ops; | |
781 | ||
782 | err = trace->ops->create_dirs(trace); | |
783 | if (err) { | |
784 | printk(KERN_ERR "LTT : Can't create dir for trace %s.\n", | |
785 | trace_name); | |
786 | goto dirs_error; | |
787 | } | |
788 | ||
789 | local_irq_save(flags); | |
790 | trace->start_freq = trace_clock_frequency(); | |
791 | trace->start_tsc = trace_clock_read64(); | |
792 | do_gettimeofday(&trace->start_time); | |
793 | local_irq_restore(flags); | |
794 | ||
795 | for (chan = 0; chan < trace->nr_channels; chan++) { | |
796 | if (!(trace->channels[chan].active)) | |
797 | continue; | |
798 | ||
799 | channel_name = ltt_channels_get_name_from_index(chan); | |
800 | WARN_ON(!channel_name); | |
801 | subbuf_size = trace->channels[chan].subbuf_size; | |
802 | subbuf_cnt = trace->channels[chan].subbuf_cnt; | |
803 | prepare_chan_size_num(&subbuf_size, &subbuf_cnt); | |
804 | err = trace->ops->create_channel(trace_name, trace, | |
805 | trace->dentry.trace_root, | |
806 | channel_name, | |
807 | &trace->channels[chan], | |
808 | subbuf_size, | |
809 | subbuf_cnt, | |
810 | trace->channels[chan].overwrite); | |
811 | if (err != 0) { | |
812 | printk(KERN_ERR "LTT : Can't create channel %s.\n", | |
813 | channel_name); | |
814 | goto create_channel_error; | |
815 | } | |
816 | } | |
817 | ||
818 | list_del(&trace->list); | |
819 | if (list_empty(<t_traces.head)) { | |
820 | mod_timer(<t_async_wakeup_timer, | |
821 | jiffies + LTT_PERCPU_TIMER_INTERVAL); | |
822 | set_kernel_trace_flag_all_tasks(); | |
823 | } | |
824 | list_add_rcu(&trace->list, <t_traces.head); | |
825 | synchronize_sched(); | |
826 | ||
827 | ltt_unlock_traces(); | |
828 | ||
829 | return 0; | |
830 | ||
831 | create_channel_error: | |
832 | for (chan--; chan >= 0; chan--) | |
833 | if (trace->channels[chan].active) | |
834 | trace->ops->remove_channel(&trace->channels[chan]); | |
835 | ||
836 | dirs_error: | |
837 | module_put(trace->transport->owner); | |
838 | transport_error: | |
839 | put_trace_clock(); | |
840 | traces_error: | |
841 | ltt_unlock_traces(); | |
842 | return err; | |
843 | } | |
844 | EXPORT_SYMBOL_GPL(ltt_trace_alloc); | |
845 | ||
846 | /* | |
847 | * It is worked as a wrapper for current version of ltt_control.ko. | |
848 | * We will make a new ltt_control based on debugfs, and control each channel's | |
849 | * buffer. | |
850 | */ | |
851 | static int ltt_trace_create(const char *trace_name, const char *trace_type, | |
852 | enum trace_mode mode, | |
853 | unsigned int subbuf_size_low, unsigned int n_subbufs_low, | |
854 | unsigned int subbuf_size_med, unsigned int n_subbufs_med, | |
855 | unsigned int subbuf_size_high, unsigned int n_subbufs_high) | |
856 | { | |
857 | int err = 0; | |
858 | ||
859 | err = ltt_trace_setup(trace_name); | |
860 | if (IS_ERR_VALUE(err)) | |
861 | return err; | |
862 | ||
863 | err = ltt_trace_set_type(trace_name, trace_type); | |
864 | if (IS_ERR_VALUE(err)) | |
865 | return err; | |
866 | ||
867 | err = ltt_trace_alloc(trace_name); | |
868 | if (IS_ERR_VALUE(err)) | |
869 | return err; | |
870 | ||
871 | return err; | |
872 | } | |
873 | ||
874 | /* Must be called while sure that trace is in the list. */ | |
875 | static int _ltt_trace_destroy(struct ltt_trace_struct *trace) | |
876 | { | |
877 | int err = -EPERM; | |
878 | ||
879 | if (trace == NULL) { | |
880 | err = -ENOENT; | |
881 | goto traces_error; | |
882 | } | |
883 | if (trace->active) { | |
884 | printk(KERN_ERR | |
885 | "LTT : Can't destroy trace %s : tracer is active\n", | |
886 | trace->trace_name); | |
887 | err = -EBUSY; | |
888 | goto active_error; | |
889 | } | |
890 | /* Everything went fine */ | |
891 | list_del_rcu(&trace->list); | |
892 | synchronize_sched(); | |
893 | if (list_empty(<t_traces.head)) { | |
894 | clear_kernel_trace_flag_all_tasks(); | |
895 | /* | |
896 | * We stop the asynchronous delivery of reader wakeup, but | |
897 | * we must make one last check for reader wakeups pending | |
898 | * later in __ltt_trace_destroy. | |
899 | */ | |
900 | del_timer_sync(<t_async_wakeup_timer); | |
901 | } | |
902 | return 0; | |
903 | ||
904 | /* error handling */ | |
905 | active_error: | |
906 | traces_error: | |
907 | return err; | |
908 | } | |
909 | ||
/*
 * Sleepable part of the destroy: runs after the trace has been unlinked
 * from the active list by _ltt_trace_destroy(), outside the traces lock.
 * Finishes and removes every active channel, waits for readers to drain
 * the last sub-buffers, then drops the final trace reference.
 */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Flush/finalize each active channel before removal. */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
	if (atomic_read(&trace->kref.refcount) > 1) {
		int ret = 0;
		__wait_event_interruptible(trace->kref_wq,
			(atomic_read(&trace->kref.refcount) == 1), ret);
	}
	/* Drop our own reference; frees the trace when readers are gone. */
	kref_put(&trace->kref, ltt_release_trace);
}
952 | ||
/*
 * ltt_trace_destroy - destroy a trace by name.
 * Handles both cases: a trace on the active list (unlink under the lock,
 * then sleepable teardown outside it) and a trace still on the setup
 * list (simply freed).  Returns 0 on success, -EBUSY if still active,
 * -ENOENT if no trace with that name exists.
 */
int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		/* Sleepable teardown must run without the traces lock. */
		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_destroy);
989 | ||
990 | /* must be called from within a traces lock. */ | |
991 | static int _ltt_trace_start(struct ltt_trace_struct *trace) | |
992 | { | |
993 | int err = 0; | |
994 | ||
995 | if (trace == NULL) { | |
996 | err = -ENOENT; | |
997 | goto traces_error; | |
998 | } | |
999 | if (trace->active) | |
1000 | printk(KERN_INFO "LTT : Tracing already active for trace %s\n", | |
1001 | trace->trace_name); | |
1002 | if (!try_module_get(ltt_run_filter_owner)) { | |
1003 | err = -ENODEV; | |
1004 | printk(KERN_ERR "LTT : Can't lock filter module.\n"); | |
1005 | goto get_ltt_run_filter_error; | |
1006 | } | |
1007 | trace->active = 1; | |
1008 | /* Read by trace points without protection : be careful */ | |
1009 | ltt_traces.num_active_traces++; | |
1010 | return err; | |
1011 | ||
1012 | /* error handling */ | |
1013 | get_ltt_run_filter_error: | |
1014 | traces_error: | |
1015 | return err; | |
1016 | } | |
1017 | ||
1018 | int ltt_trace_start(const char *trace_name) | |
1019 | { | |
1020 | int err = 0; | |
1021 | struct ltt_trace_struct *trace; | |
1022 | ||
1023 | ltt_lock_traces(); | |
1024 | ||
1025 | trace = _ltt_trace_find(trace_name); | |
1026 | err = _ltt_trace_start(trace); | |
1027 | if (err) | |
1028 | goto no_trace; | |
1029 | ||
1030 | ltt_unlock_traces(); | |
1031 | ||
1032 | /* | |
1033 | * Call the kernel state dump. | |
1034 | * Events will be mixed with real kernel events, it's ok. | |
1035 | * Notice that there is no protection on the trace : that's exactly | |
1036 | * why we iterate on the list and check for trace equality instead of | |
1037 | * directly using this trace handle inside the logging function. | |
1038 | */ | |
1039 | ||
1040 | ltt_dump_marker_state(trace); | |
1041 | ||
1042 | if (!try_module_get(ltt_statedump_owner)) { | |
1043 | err = -ENODEV; | |
1044 | printk(KERN_ERR | |
1045 | "LTT : Can't lock state dump module.\n"); | |
1046 | } else { | |
1047 | ltt_statedump_functor(trace); | |
1048 | module_put(ltt_statedump_owner); | |
1049 | } | |
1050 | ||
1051 | return err; | |
1052 | ||
1053 | /* Error handling */ | |
1054 | no_trace: | |
1055 | ltt_unlock_traces(); | |
1056 | return err; | |
1057 | } | |
1058 | EXPORT_SYMBOL_GPL(ltt_trace_start); | |
1059 | ||
1060 | /* must be called from within traces lock */ | |
1061 | static int _ltt_trace_stop(struct ltt_trace_struct *trace) | |
1062 | { | |
1063 | int err = -EPERM; | |
1064 | ||
1065 | if (trace == NULL) { | |
1066 | err = -ENOENT; | |
1067 | goto traces_error; | |
1068 | } | |
1069 | if (!trace->active) | |
1070 | printk(KERN_INFO "LTT : Tracing not active for trace %s\n", | |
1071 | trace->trace_name); | |
1072 | if (trace->active) { | |
1073 | trace->active = 0; | |
1074 | ltt_traces.num_active_traces--; | |
1075 | synchronize_sched(); /* Wait for each tracing to be finished */ | |
1076 | } | |
1077 | module_put(ltt_run_filter_owner); | |
1078 | /* Everything went fine */ | |
1079 | return 0; | |
1080 | ||
1081 | /* Error handling */ | |
1082 | traces_error: | |
1083 | return err; | |
1084 | } | |
1085 | ||
/*
 * ltt_trace_stop - stop tracing on a named trace.
 * @trace_name: name of the trace to stop.
 *
 * Looks up the trace and stops it under the traces lock.
 * Returns 0 on success, -ENOENT when the trace does not exist.
 */
int ltt_trace_stop(const char *trace_name)
{
	struct ltt_trace_struct *trace;
	int ret;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ret = _ltt_trace_stop(trace);
	ltt_unlock_traces();

	return ret;
}
1097 | EXPORT_SYMBOL_GPL(ltt_trace_stop); | |
1098 | ||
1099 | /** | |
1100 | * ltt_control - Trace control in-kernel API | |
1101 | * @msg: Action to perform | |
1102 | * @trace_name: Trace on which the action must be done | |
1103 | * @trace_type: Type of trace (normal, flight, hybrid) | |
1104 | * @args: Arguments specific to the action | |
1105 | */ | |
1106 | int ltt_control(enum ltt_control_msg msg, const char *trace_name, | |
1107 | const char *trace_type, union ltt_control_args args) | |
1108 | { | |
1109 | int err = -EPERM; | |
1110 | ||
1111 | printk(KERN_ALERT "ltt_control : trace %s\n", trace_name); | |
1112 | switch (msg) { | |
1113 | case LTT_CONTROL_START: | |
1114 | printk(KERN_DEBUG "Start tracing %s\n", trace_name); | |
1115 | err = ltt_trace_start(trace_name); | |
1116 | break; | |
1117 | case LTT_CONTROL_STOP: | |
1118 | printk(KERN_DEBUG "Stop tracing %s\n", trace_name); | |
1119 | err = ltt_trace_stop(trace_name); | |
1120 | break; | |
1121 | case LTT_CONTROL_CREATE_TRACE: | |
1122 | printk(KERN_DEBUG "Creating trace %s\n", trace_name); | |
1123 | err = ltt_trace_create(trace_name, trace_type, | |
1124 | args.new_trace.mode, | |
1125 | args.new_trace.subbuf_size_low, | |
1126 | args.new_trace.n_subbufs_low, | |
1127 | args.new_trace.subbuf_size_med, | |
1128 | args.new_trace.n_subbufs_med, | |
1129 | args.new_trace.subbuf_size_high, | |
1130 | args.new_trace.n_subbufs_high); | |
1131 | break; | |
1132 | case LTT_CONTROL_DESTROY_TRACE: | |
1133 | printk(KERN_DEBUG "Destroying trace %s\n", trace_name); | |
1134 | err = ltt_trace_destroy(trace_name); | |
1135 | break; | |
1136 | } | |
1137 | return err; | |
1138 | } | |
1139 | EXPORT_SYMBOL_GPL(ltt_control); | |
1140 | ||
1141 | /** | |
1142 | * ltt_filter_control - Trace filter control in-kernel API | |
1143 | * @msg: Action to perform on the filter | |
1144 | * @trace_name: Trace on which the action must be done | |
1145 | */ | |
1146 | int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name) | |
1147 | { | |
1148 | int err; | |
1149 | struct ltt_trace_struct *trace; | |
1150 | ||
1151 | printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name); | |
1152 | ltt_lock_traces(); | |
1153 | trace = _ltt_trace_find(trace_name); | |
1154 | if (trace == NULL) { | |
1155 | printk(KERN_ALERT | |
1156 | "Trace does not exist. Cannot proxy control request\n"); | |
1157 | err = -ENOENT; | |
1158 | goto trace_error; | |
1159 | } | |
1160 | if (!try_module_get(ltt_filter_control_owner)) { | |
1161 | err = -ENODEV; | |
1162 | goto get_module_error; | |
1163 | } | |
1164 | switch (msg) { | |
1165 | case LTT_FILTER_DEFAULT_ACCEPT: | |
1166 | printk(KERN_DEBUG | |
1167 | "Proxy filter default accept %s\n", trace_name); | |
1168 | err = (*ltt_filter_control_functor)(msg, trace); | |
1169 | break; | |
1170 | case LTT_FILTER_DEFAULT_REJECT: | |
1171 | printk(KERN_DEBUG | |
1172 | "Proxy filter default reject %s\n", trace_name); | |
1173 | err = (*ltt_filter_control_functor)(msg, trace); | |
1174 | break; | |
1175 | default: | |
1176 | err = -EPERM; | |
1177 | } | |
1178 | module_put(ltt_filter_control_owner); | |
1179 | ||
1180 | get_module_error: | |
1181 | trace_error: | |
1182 | ltt_unlock_traces(); | |
1183 | return err; | |
1184 | } | |
1185 | EXPORT_SYMBOL_GPL(ltt_filter_control); | |
1186 | ||
/*
 * Module initialization: no state to set up beyond pre-faulting vmalloc
 * mappings. Always succeeds.
 */
int __init ltt_init(void)
{
	/* Make sure no page fault can be triggered by this module */
	vmalloc_sync_all();
	return 0;
}

module_init(ltt_init)
1195 | ||
/*
 * Module exit: stop and destroy every remaining trace, then free the
 * traces still in pre-allocated (setup) state. Statement order matters:
 * stop all traces and wait for a quiescent state before any list entry
 * is destroyed.
 */
static void __exit ltt_exit(void)
{
	struct ltt_trace_struct *trace;
	struct list_head *pos, *n;

	ltt_lock_traces();
	/* Stop each trace, currently being read by RCU read-side */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
		_ltt_trace_stop(trace);
	/* Wait for quiescent state. Readers have preemption disabled. */
	synchronize_sched();
	/* Safe iteration is now permitted. It does not have to be RCU-safe
	 * because no readers are left. */
	list_for_each_safe(pos, n, &ltt_traces.head) {
		trace = container_of(pos, struct ltt_trace_struct, list);
		/* _ltt_trace_destroy does a synchronize_sched() */
		_ltt_trace_destroy(trace);
		__ltt_trace_destroy(trace);
	}
	/* free traces in pre-alloc status */
	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
		trace = container_of(pos, struct ltt_trace_struct, list);
		_ltt_trace_free(trace);
	}

	ltt_unlock_traces();
}

module_exit(ltt_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");