Commit | Line | Data |
---|---|---|
9dad1eb8 PMF |
1 | /* |
2 | * ltt/ltt-tracer.c | |
3 | * | |
4 | * (C) Copyright 2005-2008 - | |
5 | * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) | |
6 | * | |
7 | * Tracing management internal kernel API. Trace buffer allocation/free, tracing | |
8 | * start/stop. | |
9 | * | |
10 | * Author: | |
11 | * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca) | |
12 | * | |
13 | * Inspired from LTT : | |
14 | * Karim Yaghmour (karim@opersys.com) | |
15 | * Tom Zanussi (zanussi@us.ibm.com) | |
16 | * Bob Wisniewski (bob@watson.ibm.com) | |
17 | * And from K42 : | |
18 | * Bob Wisniewski (bob@watson.ibm.com) | |
19 | * | |
20 | * Changelog: | |
21 | * 22/09/06, Move to the marker/probes mechanism. | |
22 | * 19/10/05, Complete lockless mechanism. | |
23 | * 27/05/05, Modular redesign and rewrite. | |
24 | */ | |
25 | ||
b6bf28ec PMF |
26 | //ust// #include <linux/time.h> |
27 | //ust// #include <linux/ltt-tracer.h> | |
28 | //ust// #include <linux/module.h> | |
29 | //ust// #include <linux/string.h> | |
30 | //ust// #include <linux/slab.h> | |
31 | //ust// #include <linux/init.h> | |
32 | //ust// #include <linux/rcupdate.h> | |
33 | //ust// #include <linux/sched.h> | |
34 | //ust// #include <linux/bitops.h> | |
35 | //ust// #include <linux/fs.h> | |
36 | //ust// #include <linux/cpu.h> | |
37 | //ust// #include <linux/kref.h> | |
38 | //ust// #include <linux/delay.h> | |
39 | //ust// #include <linux/vmalloc.h> | |
40 | //ust// #include <asm/atomic.h> | |
4268fdcd PMF |
41 | #include <kcompat/rculist.h> |
42 | ||
1ae7f074 | 43 | #include "kernelcompat.h" |
b6bf28ec PMF |
44 | #include "tracercore.h" |
45 | #include "tracer.h" | |
b6bf28ec PMF |
46 | #include "usterr.h" |
47 | ||
48 | //ust// static void async_wakeup(unsigned long data); | |
49 | //ust// | |
50 | //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0); | |
9dad1eb8 PMF |
51 | |
52 | /* Default callbacks for modules */ | |
/*
 * Default filter-control callback installed until a module registers its own:
 * accepts every message and does nothing.  Returns 0 (success) always.
 */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}
58 | ||
/*
 * Default state-dump callback: no-op placeholder used until a real
 * statedump module registers.  Returns 0 (success) always.
 */
int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}
63 | ||
/* Callbacks for registered modules */

/*
 * Filter-control hook; starts out pointing at the accept-all default above.
 * Replaced via the (currently commented-out) ltt_module_register() path.
 */
int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
	ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State-dump hook; defaults to the no-op above. */
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
	ltt_statedump_default;
struct module *ltt_statedump_owner;
76 | ||
/*
 * Per-channel-type defaults: channel name plus default sub-buffer size and
 * count.  Indexed by enum ltt_channels (designated initializers below).
 */
struct chan_info_struct {
	const char *name;		/* canonical channel name */
	unsigned int def_subbufsize;	/* default sub-buffer size (bytes) */
	unsigned int def_subbufcount;	/* default number of sub-buffers */
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};
93 | ||
94 | static enum ltt_channels get_channel_type_from_name(const char *name) | |
95 | { | |
96 | int i; | |
97 | ||
98 | if (!name) | |
b6bf28ec | 99 | return LTT_CHANNEL_UST; |
9dad1eb8 PMF |
100 | |
101 | for (i = 0; i < ARRAY_SIZE(chan_infos); i++) | |
102 | if (chan_infos[i].name && !strcmp(name, chan_infos[i].name)) | |
103 | return (enum ltt_channels)i; | |
104 | ||
b6bf28ec | 105 | return LTT_CHANNEL_UST; |
9dad1eb8 PMF |
106 | } |
107 | ||
108 | /** | |
109 | * ltt_module_register - LTT module registration | |
110 | * @name: module type | |
111 | * @function: callback to register | |
112 | * @owner: module which owns the callback | |
113 | * | |
114 | * The module calling this registration function must ensure that no | |
115 | * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all() | |
116 | * must be called between a vmalloc and the moment the memory is made visible to | |
117 | * "function". This registration acts as a vmalloc_sync_all. Therefore, only if | |
118 | * the module allocates virtual memory after its registration must it | |
119 | * synchronize the TLBs. | |
120 | */ | |
b6bf28ec PMF |
121 | //ust// int ltt_module_register(enum ltt_module_function name, void *function, |
122 | //ust// struct module *owner) | |
123 | //ust// { | |
124 | //ust// int ret = 0; | |
125 | //ust// | |
126 | //ust// /* | |
127 | //ust// * Make sure no page fault can be triggered by the module about to be | |
128 | //ust// * registered. We deal with this here so we don't have to call | |
129 | //ust// * vmalloc_sync_all() in each module's init. | |
130 | //ust// */ | |
131 | //ust// vmalloc_sync_all(); | |
132 | //ust// | |
133 | //ust// switch (name) { | |
134 | //ust// case LTT_FUNCTION_RUN_FILTER: | |
135 | //ust// if (ltt_run_filter_owner != NULL) { | |
136 | //ust// ret = -EEXIST; | |
137 | //ust// goto end; | |
138 | //ust// } | |
139 | //ust// ltt_filter_register((ltt_run_filter_functor)function); | |
140 | //ust// ltt_run_filter_owner = owner; | |
141 | //ust// break; | |
142 | //ust// case LTT_FUNCTION_FILTER_CONTROL: | |
143 | //ust// if (ltt_filter_control_owner != NULL) { | |
144 | //ust// ret = -EEXIST; | |
145 | //ust// goto end; | |
146 | //ust// } | |
147 | //ust// ltt_filter_control_functor = | |
148 | //ust// (int (*)(enum ltt_filter_control_msg, | |
149 | //ust// struct ltt_trace_struct *))function; | |
150 | //ust// ltt_filter_control_owner = owner; | |
151 | //ust// break; | |
152 | //ust// case LTT_FUNCTION_STATEDUMP: | |
153 | //ust// if (ltt_statedump_owner != NULL) { | |
154 | //ust// ret = -EEXIST; | |
155 | //ust// goto end; | |
156 | //ust// } | |
157 | //ust// ltt_statedump_functor = | |
158 | //ust// (int (*)(struct ltt_trace_struct *))function; | |
159 | //ust// ltt_statedump_owner = owner; | |
160 | //ust// break; | |
161 | //ust// } | |
162 | //ust// | |
163 | //ust// end: | |
164 | //ust// | |
165 | //ust// return ret; | |
166 | //ust// } | |
167 | //ust// EXPORT_SYMBOL_GPL(ltt_module_register); | |
9dad1eb8 PMF |
168 | |
169 | /** | |
170 | * ltt_module_unregister - LTT module unregistration | |
171 | * @name: module type | |
172 | */ | |
b6bf28ec PMF |
173 | //ust// void ltt_module_unregister(enum ltt_module_function name) |
174 | //ust// { | |
175 | //ust// switch (name) { | |
176 | //ust// case LTT_FUNCTION_RUN_FILTER: | |
177 | //ust// ltt_filter_unregister(); | |
178 | //ust// ltt_run_filter_owner = NULL; | |
179 | //ust// /* Wait for preempt sections to finish */ | |
180 | //ust// synchronize_sched(); | |
181 | //ust// break; | |
182 | //ust// case LTT_FUNCTION_FILTER_CONTROL: | |
183 | //ust// ltt_filter_control_functor = ltt_filter_control_default; | |
184 | //ust// ltt_filter_control_owner = NULL; | |
185 | //ust// break; | |
186 | //ust// case LTT_FUNCTION_STATEDUMP: | |
187 | //ust// ltt_statedump_functor = ltt_statedump_default; | |
188 | //ust// ltt_statedump_owner = NULL; | |
189 | //ust// break; | |
190 | //ust// } | |
191 | //ust// | |
192 | //ust// } | |
193 | //ust// EXPORT_SYMBOL_GPL(ltt_module_unregister); | |
9dad1eb8 PMF |
194 | |
195 | static LIST_HEAD(ltt_transport_list); | |
196 | ||
197 | /** | |
198 | * ltt_transport_register - LTT transport registration | |
199 | * @transport: transport structure | |
200 | * | |
201 | * Registers a transport which can be used as output to extract the data out of | |
202 | * LTTng. The module calling this registration function must ensure that no | |
203 | * trap-inducing code will be executed by the transport functions. E.g. | |
204 | * vmalloc_sync_all() must be called between a vmalloc and the moment the memory | |
205 | * is made visible to the transport function. This registration acts as a | |
206 | * vmalloc_sync_all. Therefore, only if the module allocates virtual memory | |
207 | * after its registration must it synchronize the TLBs. | |
208 | */ | |
5f54827b PMF |
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
//ust//	vmalloc_sync_all();

	/* The transport list is walked under the traces lock; serialize. */
	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
b6bf28ec | 222 | //ust// EXPORT_SYMBOL_GPL(ltt_transport_register); |
9dad1eb8 PMF |
223 | |
224 | /** | |
225 | * ltt_transport_unregister - LTT transport unregistration | |
226 | * @transport: transport structure | |
227 | */ | |
bb07823d PMF |
void ltt_transport_unregister(struct ltt_transport *transport)
{
	/* Unlink under the traces lock, mirroring ltt_transport_register(). */
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
b6bf28ec | 234 | //ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister); |
9dad1eb8 PMF |
235 | |
236 | static inline int is_channel_overwrite(enum ltt_channels chan, | |
237 | enum trace_mode mode) | |
238 | { | |
239 | switch (mode) { | |
240 | case LTT_TRACE_NORMAL: | |
241 | return 0; | |
242 | case LTT_TRACE_FLIGHT: | |
243 | switch (chan) { | |
244 | case LTT_CHANNEL_METADATA: | |
245 | return 0; | |
246 | default: | |
247 | return 1; | |
248 | } | |
249 | case LTT_TRACE_HYBRID: | |
250 | switch (chan) { | |
b6bf28ec | 251 | case LTT_CHANNEL_METADATA: |
9dad1eb8 | 252 | return 0; |
b6bf28ec PMF |
253 | default: |
254 | return 1; | |
9dad1eb8 PMF |
255 | } |
256 | default: | |
257 | return 0; | |
258 | } | |
259 | } | |
260 | ||
261 | /** | |
262 | * ltt_write_trace_header - Write trace header | |
263 | * @trace: Trace information | |
264 | * @header: Memory address where the information must be written to | |
265 | */ | |
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	/* Identification fields used by readers to validate the buffer. */
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	/* Word size of the traced process; readers need it to decode events. */
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	/* Wall-clock start time and clock frequency snapshot of the trace. */
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}
b6bf28ec | 279 | //ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header); |
9dad1eb8 PMF |
280 | |
281 | static void trace_async_wakeup(struct ltt_trace_struct *trace) | |
282 | { | |
283 | int i; | |
284 | struct ltt_channel_struct *chan; | |
285 | ||
286 | /* Must check each channel for pending read wakeup */ | |
287 | for (i = 0; i < trace->nr_channels; i++) { | |
288 | chan = &trace->channels[i]; | |
289 | if (chan->active) | |
290 | trace->ops->wakeup_channel(chan); | |
291 | } | |
292 | } | |
293 | ||
b6bf28ec PMF |
294 | //ust// /* Timer to send async wakeups to the readers */ |
295 | //ust// static void async_wakeup(unsigned long data) | |
296 | //ust// { | |
297 | //ust// struct ltt_trace_struct *trace; | |
298 | //ust// | |
299 | //ust// /* | |
300 | //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt | |
301 | //ust// * disable sections (spinlock taken in wake_up). However, mainline won't | |
302 | //ust// * allow mutex to be taken in interrupt context. Ugly. | |
303 | //ust// * A proper way to do this would be to turn the timer into a | |
304 | //ust// * periodically woken up thread, but it adds to the footprint. | |
305 | //ust// */ | |
306 | //ust// #ifndef CONFIG_PREEMPT_RT | |
307 | //ust// rcu_read_lock_sched(); | |
308 | //ust// #else | |
309 | //ust// ltt_lock_traces(); | |
310 | //ust// #endif | |
311 | //ust// list_for_each_entry_rcu(trace, <t_traces.head, list) { | |
312 | //ust// trace_async_wakeup(trace); | |
313 | //ust// } | |
314 | //ust// #ifndef CONFIG_PREEMPT_RT | |
315 | //ust// rcu_read_unlock_sched(); | |
316 | //ust// #else | |
317 | //ust// ltt_unlock_traces(); | |
318 | //ust// #endif | |
319 | //ust// | |
320 | //ust// mod_timer(<t_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL); | |
321 | //ust// } | |
9dad1eb8 PMF |
322 | |
323 | /** | |
324 | * _ltt_trace_find - find a trace by given name. | |
325 | * trace_name: trace name | |
326 | * | |
327 | * Returns a pointer to the trace structure, NULL if not found. | |
328 | */ | |
struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	/*
	 * Linear scan of the active-trace list; caller must hold the traces
	 * lock (list is modified under it elsewhere in this file).
	 * Bounded compare: trace_name fields may be exactly NAME_MAX bytes.
	 */
	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
339 | ||
340 | /* _ltt_trace_find_setup : | |
341 | * find a trace in setup list by given name. | |
342 | * | |
343 | * Returns a pointer to the trace structure, NULL if not found. | |
344 | */ | |
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	/*
	 * Same as _ltt_trace_find() but scans the setup list (traces created
	 * but not yet allocated/started).  Caller holds the traces lock.
	 */
	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
b6bf28ec | 355 | //ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup); |
9dad1eb8 PMF |
356 | |
357 | /** | |
358 | * ltt_release_transport - Release an LTT transport | |
359 | * @kref : reference count on the transport | |
360 | */ | |
void ltt_release_transport(struct kref *kref)
{
	/*
	 * kref release callback: fires when the last reference to the
	 * trace's transport usage is dropped.  Directory teardown is
	 * disabled in the userspace port (see //ust// line below).
	 */
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}
b6bf28ec | 367 | //ust// EXPORT_SYMBOL_GPL(ltt_release_transport); |
9dad1eb8 PMF |
368 | |
369 | /** | |
370 | * ltt_release_trace - Release a LTT trace | |
371 | * @kref : reference count on the trace | |
372 | */ | |
void ltt_release_trace(struct kref *kref)
{
	/*
	 * kref release callback for the trace itself: frees the channel
	 * array then the trace structure.  Must only run once the last
	 * reference is gone (enforced by kref_put callers).
	 */
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}
b6bf28ec | 380 | //ust// EXPORT_SYMBOL_GPL(ltt_release_trace); |
9dad1eb8 PMF |
381 | |
/*
 * Round the requested sub-buffer size and count up to the next power of two
 * (in place), as required by the lockless buffer scheme.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
392 | ||
393 | int _ltt_trace_setup(const char *trace_name) | |
394 | { | |
395 | int err = 0; | |
396 | struct ltt_trace_struct *new_trace = NULL; | |
397 | int metadata_index; | |
398 | unsigned int chan; | |
399 | enum ltt_channels chantype; | |
400 | ||
401 | if (_ltt_trace_find_setup(trace_name)) { | |
402 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
403 | trace_name); | |
404 | err = -EEXIST; | |
405 | goto traces_error; | |
406 | } | |
407 | ||
408 | if (_ltt_trace_find(trace_name)) { | |
409 | printk(KERN_ERR "LTT : Trace name %s already used.\n", | |
410 | trace_name); | |
411 | err = -EEXIST; | |
412 | goto traces_error; | |
413 | } | |
414 | ||
415 | new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL); | |
416 | if (!new_trace) { | |
417 | printk(KERN_ERR | |
418 | "LTT : Unable to allocate memory for trace %s\n", | |
419 | trace_name); | |
420 | err = -ENOMEM; | |
421 | goto traces_error; | |
422 | } | |
423 | strncpy(new_trace->trace_name, trace_name, NAME_MAX); | |
424 | new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels, | |
425 | 0, 1); | |
426 | if (!new_trace->channels) { | |
427 | printk(KERN_ERR | |
428 | "LTT : Unable to allocate memory for chaninfo %s\n", | |
429 | trace_name); | |
430 | err = -ENOMEM; | |
431 | goto trace_free; | |
432 | } | |
433 | ||
434 | /* | |
435 | * Force metadata channel to active, no overwrite. | |
436 | */ | |
437 | metadata_index = ltt_channels_get_index_from_name("metadata"); | |
438 | WARN_ON(metadata_index < 0); | |
439 | new_trace->channels[metadata_index].overwrite = 0; | |
440 | new_trace->channels[metadata_index].active = 1; | |
441 | ||
442 | /* | |
443 | * Set hardcoded tracer defaults for some channels | |
444 | */ | |
445 | for (chan = 0; chan < new_trace->nr_channels; chan++) { | |
446 | if (!(new_trace->channels[chan].active)) | |
447 | continue; | |
448 | ||
449 | chantype = get_channel_type_from_name( | |
450 | ltt_channels_get_name_from_index(chan)); | |
451 | new_trace->channels[chan].subbuf_size = | |
452 | chan_infos[chantype].def_subbufsize; | |
453 | new_trace->channels[chan].subbuf_cnt = | |
454 | chan_infos[chantype].def_subbufcount; | |
455 | } | |
456 | ||
457 | list_add(&new_trace->list, <t_traces.setup_head); | |
458 | return 0; | |
459 | ||
460 | trace_free: | |
461 | kfree(new_trace); | |
462 | traces_error: | |
463 | return err; | |
464 | } | |
b6bf28ec | 465 | //ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup); |
9dad1eb8 PMF |
466 | |
467 | ||
/*
 * Public, locking wrapper around _ltt_trace_setup().  Returns its result.
 */
int ltt_trace_setup(const char *trace_name)
{
	int result;

	ltt_lock_traces();
	result = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();

	return result;
}
b6bf28ec | 476 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_setup); |
9dad1eb8 PMF |
477 | |
478 | /* must be called from within a traces lock. */ | |
/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	/* Unlink from whichever list holds it (setup list at call sites). */
	list_del(&trace->list);
	kfree(trace);
}
484 | ||
485 | int ltt_trace_set_type(const char *trace_name, const char *trace_type) | |
486 | { | |
487 | int err = 0; | |
488 | struct ltt_trace_struct *trace; | |
489 | struct ltt_transport *tran_iter, *transport = NULL; | |
490 | ||
491 | ltt_lock_traces(); | |
492 | ||
493 | trace = _ltt_trace_find_setup(trace_name); | |
494 | if (!trace) { | |
495 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
496 | err = -ENOENT; | |
497 | goto traces_error; | |
498 | } | |
499 | ||
500 | list_for_each_entry(tran_iter, <t_transport_list, node) { | |
501 | if (!strcmp(tran_iter->name, trace_type)) { | |
502 | transport = tran_iter; | |
503 | break; | |
504 | } | |
505 | } | |
506 | if (!transport) { | |
507 | printk(KERN_ERR "LTT : Transport %s is not present.\n", | |
508 | trace_type); | |
509 | err = -EINVAL; | |
510 | goto traces_error; | |
511 | } | |
512 | ||
513 | trace->transport = transport; | |
514 | ||
515 | traces_error: | |
516 | ltt_unlock_traces(); | |
517 | return err; | |
518 | } | |
b6bf28ec | 519 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type); |
9dad1eb8 PMF |
520 | |
521 | int ltt_trace_set_channel_subbufsize(const char *trace_name, | |
522 | const char *channel_name, unsigned int size) | |
523 | { | |
524 | int err = 0; | |
525 | struct ltt_trace_struct *trace; | |
526 | int index; | |
527 | ||
528 | ltt_lock_traces(); | |
529 | ||
530 | trace = _ltt_trace_find_setup(trace_name); | |
531 | if (!trace) { | |
532 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
533 | err = -ENOENT; | |
534 | goto traces_error; | |
535 | } | |
536 | ||
537 | index = ltt_channels_get_index_from_name(channel_name); | |
538 | if (index < 0) { | |
539 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
540 | err = -ENOENT; | |
541 | goto traces_error; | |
542 | } | |
543 | trace->channels[index].subbuf_size = size; | |
544 | ||
545 | traces_error: | |
546 | ltt_unlock_traces(); | |
547 | return err; | |
548 | } | |
b6bf28ec | 549 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize); |
9dad1eb8 PMF |
550 | |
551 | int ltt_trace_set_channel_subbufcount(const char *trace_name, | |
552 | const char *channel_name, unsigned int cnt) | |
553 | { | |
554 | int err = 0; | |
555 | struct ltt_trace_struct *trace; | |
556 | int index; | |
557 | ||
558 | ltt_lock_traces(); | |
559 | ||
560 | trace = _ltt_trace_find_setup(trace_name); | |
561 | if (!trace) { | |
562 | printk(KERN_ERR "LTT : Trace not found %s\n", trace_name); | |
563 | err = -ENOENT; | |
564 | goto traces_error; | |
565 | } | |
566 | ||
567 | index = ltt_channels_get_index_from_name(channel_name); | |
568 | if (index < 0) { | |
569 | printk(KERN_ERR "LTT : Channel %s not found\n", channel_name); | |
570 | err = -ENOENT; | |
571 | goto traces_error; | |
572 | } | |
573 | trace->channels[index].subbuf_cnt = cnt; | |
574 | ||
575 | traces_error: | |
576 | ltt_unlock_traces(); | |
577 | return err; | |
578 | } | |
b6bf28ec | 579 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount); |
9dad1eb8 PMF |
580 | |
/*
 * Enable or disable one channel of a trace still on the setup list.
 * The metadata channel can never be disabled: its marker information is
 * required to read the trace.  Returns 0, -EINVAL on a metadata-disable
 * attempt, or -ENOENT if trace or channel is unknown.
 */
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Datas in metadata channel(marker info) is necessary to be able to
	 * read the trace, we always enable this channel.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
b6bf28ec | 620 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable); |
9dad1eb8 PMF |
621 | |
/*
 * Set overwrite (flight-recorder) mode for one channel of a trace still on
 * the setup list.  The metadata channel is always non-overwrite: its data is
 * required to decode the trace.  Returns 0, -EINVAL on a metadata-overwrite
 * attempt, or -ENOENT if trace or channel is unknown.
 */
int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode :
	 * This is a very low traffic channel and it can't afford to have its
	 * data overwritten : this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
b6bf28ec | 664 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite); |
9dad1eb8 PMF |
665 | |
/*
 * Allocate buffers for a trace previously created with ltt_trace_setup() and
 * bound to a transport with ltt_trace_set_type(), then move it from the
 * setup list to the active-trace list.  On any channel-creation failure the
 * already-created channels are torn down in reverse order.
 *
 * Returns 0 on success, -ENOENT if the trace is not on the setup list,
 * -EINVAL if no transport was set, or the transport's create_channel error.
 */
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	unsigned int subbuf_size, subbuf_cnt;
	/*
	 * NOTE(review): flags is unused in the userspace port because the
	 * local_irq_save/restore pair below is commented out; kept to match
	 * the kernel original.
	 */
	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		printk(KERN_ERR "LTT : Transport is not set.\n");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		printk(KERN_ERR "LTT : Can't lock transport module.\n");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
//ust//			trace_name);
//ust//		goto dirs_error;
//ust//	}

	/* Snapshot the trace clock and wall-clock start time. */
//ust//	local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed
//ust//	local_irq_restore(flags);

	/* Create a buffer for every active channel. */
	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		/* Round both values up to powers of two before use. */
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				trace->dentry.trace_root,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			printk(KERN_ERR "LTT : Can't create channel %s.\n",
				channel_name);
			goto create_channel_error;
		}
	}

	/* Move the trace from the setup list to the active list. */
	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	/* Tear down the channels created before the failure, newest first. */
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
b6bf28ec | 765 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc); |
9dad1eb8 PMF |
766 | |
767 | /* | |
768 | * It is worked as a wrapper for current version of ltt_control.ko. | |
769 | * We will make a new ltt_control based on debugfs, and control each channel's | |
770 | * buffer. | |
771 | */ | |
/*
 * It is worked as a wrapper for current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 *
 * Convenience wrapper: setup + set transport type + allocate, stopping at
 * the first failing step.  The size/count parameters are currently unused
 * here (defaults from chan_infos apply); kept for API compatibility.
 */
static int ltt_trace_create(const char *trace_name, const char *trace_type,
		enum trace_mode mode,
		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
{
	int err = 0;

	err = ltt_trace_setup(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_set_type(trace_name, trace_type);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_alloc(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	return err;
}
794 | ||
795 | /* Must be called while sure that trace is in the list. */ | |
/* Must be called while sure that trace is in the list. */
/*
 * Non-sleeping part of trace destruction: validates that the trace exists
 * and is not active.  Returns 0, -ENOENT for a NULL trace, or -EBUSY when
 * tracing is still active.  The sleepable teardown is __ltt_trace_destroy().
 */
static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		printk(KERN_ERR
			"LTT : Can't destroy trace %s : tracer is active\n",
			trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
//ust//	list_del_rcu(&trace->list);
//ust//	synchronize_sched();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
830 | ||
831 | /* Sleepable part of the destroy */ | |
/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Flush every active channel through the transport. */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * NOTE(review): everything below the early return above is dead code
	 * in the userspace port — channels are never removed and the krefs
	 * are never dropped here.  Kept verbatim pending the FIXME.
	 */
	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}
874 | ||
/*
 * Destroy a trace by name.  An active-list trace goes through the two-phase
 * destroy (_ltt_trace_destroy under the lock, then the sleepable
 * __ltt_trace_destroy outside it); a setup-list trace is simply freed.
 * Returns 0, -ENOENT if the name matches neither list, or the error from
 * _ltt_trace_destroy (e.g. -EBUSY while tracing is active).
 */
int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		/* Drop the lock before the sleepable teardown. */
		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
//ust//	put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
b6bf28ec | 910 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy); |
9dad1eb8 PMF |
911 | |
912 | /* must be called from within a traces lock. */ | |
913 | static int _ltt_trace_start(struct ltt_trace_struct *trace) | |
914 | { | |
915 | int err = 0; | |
916 | ||
917 | if (trace == NULL) { | |
918 | err = -ENOENT; | |
919 | goto traces_error; | |
920 | } | |
921 | if (trace->active) | |
922 | printk(KERN_INFO "LTT : Tracing already active for trace %s\n", | |
923 | trace->trace_name); | |
b6bf28ec PMF |
924 | //ust// if (!try_module_get(ltt_run_filter_owner)) { |
925 | //ust// err = -ENODEV; | |
926 | //ust// printk(KERN_ERR "LTT : Can't lock filter module.\n"); | |
927 | //ust// goto get_ltt_run_filter_error; | |
928 | //ust// } | |
9dad1eb8 PMF |
929 | trace->active = 1; |
930 | /* Read by trace points without protection : be careful */ | |
931 | ltt_traces.num_active_traces++; | |
932 | return err; | |
933 | ||
934 | /* error handling */ | |
935 | get_ltt_run_filter_error: | |
936 | traces_error: | |
937 | return err; | |
938 | } | |
939 | ||
/*
 * ltt_trace_start - activate the trace named @trace_name and dump state.
 * @trace_name: name of the trace to start.
 *
 * Looks the trace up under the traces lock, activates it, then (without the
 * lock) records the marker state and runs the state-dump functor so the
 * trace begins with a snapshot of current state.
 *
 * Returns 0 on success, -ENOENT if the trace does not exist.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	/* _ltt_trace_start() tolerates a NULL trace and returns -ENOENT. */
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

	/*
	 * NOTE(review): in the kernel version the functor call below sat in
	 * the "else" branch of the try_module_get() check; with the module
	 * refcounting commented out for UST it now runs unconditionally.
	 */
//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		printk(KERN_ERR
//ust//			"LTT : Can't lock state dump module.\n");
//ust//	} else {
		ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
b6bf28ec | 980 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_start); |
9dad1eb8 PMF |
981 | |
982 | /* must be called from within traces lock */ | |
983 | static int _ltt_trace_stop(struct ltt_trace_struct *trace) | |
984 | { | |
985 | int err = -EPERM; | |
986 | ||
987 | if (trace == NULL) { | |
988 | err = -ENOENT; | |
989 | goto traces_error; | |
990 | } | |
991 | if (!trace->active) | |
992 | printk(KERN_INFO "LTT : Tracing not active for trace %s\n", | |
993 | trace->trace_name); | |
994 | if (trace->active) { | |
995 | trace->active = 0; | |
996 | ltt_traces.num_active_traces--; | |
b6bf28ec | 997 | //ust// synchronize_sched(); /* Wait for each tracing to be finished */ |
9dad1eb8 | 998 | } |
b6bf28ec | 999 | //ust// module_put(ltt_run_filter_owner); |
9dad1eb8 PMF |
1000 | /* Everything went fine */ |
1001 | return 0; | |
1002 | ||
1003 | /* Error handling */ | |
1004 | traces_error: | |
1005 | return err; | |
1006 | } | |
1007 | ||
/*
 * ltt_trace_stop - stop tracing on the trace named @trace_name.
 * @trace_name: name of the trace to stop.
 *
 * Returns 0 on success, -ENOENT if the trace does not exist.
 */
int ltt_trace_stop(const char *trace_name)
{
	struct ltt_trace_struct *trace;
	int ret;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ret = _ltt_trace_stop(trace);
	ltt_unlock_traces();

	return ret;
}
b6bf28ec | 1019 | //ust// EXPORT_SYMBOL_GPL(ltt_trace_stop); |
9dad1eb8 PMF |
1020 | |
1021 | /** | |
1022 | * ltt_control - Trace control in-kernel API | |
1023 | * @msg: Action to perform | |
1024 | * @trace_name: Trace on which the action must be done | |
1025 | * @trace_type: Type of trace (normal, flight, hybrid) | |
1026 | * @args: Arguments specific to the action | |
1027 | */ | |
b6bf28ec PMF |
1028 | //ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name, |
1029 | //ust// const char *trace_type, union ltt_control_args args) | |
1030 | //ust// { | |
1031 | //ust// int err = -EPERM; | |
1032 | //ust// | |
1033 | //ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name); | |
1034 | //ust// switch (msg) { | |
1035 | //ust// case LTT_CONTROL_START: | |
1036 | //ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name); | |
1037 | //ust// err = ltt_trace_start(trace_name); | |
1038 | //ust// break; | |
1039 | //ust// case LTT_CONTROL_STOP: | |
1040 | //ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name); | |
1041 | //ust// err = ltt_trace_stop(trace_name); | |
1042 | //ust// break; | |
1043 | //ust// case LTT_CONTROL_CREATE_TRACE: | |
1044 | //ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name); | |
1045 | //ust// err = ltt_trace_create(trace_name, trace_type, | |
1046 | //ust// args.new_trace.mode, | |
1047 | //ust// args.new_trace.subbuf_size_low, | |
1048 | //ust// args.new_trace.n_subbufs_low, | |
1049 | //ust// args.new_trace.subbuf_size_med, | |
1050 | //ust// args.new_trace.n_subbufs_med, | |
1051 | //ust// args.new_trace.subbuf_size_high, | |
1052 | //ust// args.new_trace.n_subbufs_high); | |
1053 | //ust// break; | |
1054 | //ust// case LTT_CONTROL_DESTROY_TRACE: | |
1055 | //ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name); | |
1056 | //ust// err = ltt_trace_destroy(trace_name); | |
1057 | //ust// break; | |
1058 | //ust// } | |
1059 | //ust// return err; | |
1060 | //ust// } | |
1061 | //ust// EXPORT_SYMBOL_GPL(ltt_control); | |
9dad1eb8 PMF |
1062 | |
/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 *
 * Proxies the filter request to the registered filter-control functor.
 * Returns the functor's result, -ENOENT if the trace does not exist, or
 * -EPERM for an unrecognized @msg.
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace_struct *trace;

	printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		printk(KERN_ALERT
			"Trace does not exist. Cannot proxy control request\n");
		err = -ENOENT;
		goto trace_error;
	}
//ust//	if (!try_module_get(ltt_filter_control_owner)) {
//ust//		err = -ENODEV;
//ust//		goto get_module_error;
//ust//	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		printk(KERN_DEBUG
			"Proxy filter default accept %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		printk(KERN_DEBUG
			"Proxy filter default reject %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
//ust//	module_put(ltt_filter_control_owner);

	/*
	 * NOTE(review): get_module_error is only reachable from the
	 * commented-out try_module_get() path above; while that code stays
	 * disabled the label is unused and may draw a -Wunused-label warning.
	 */
get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
b6bf28ec PMF |
1107 | //ust// EXPORT_SYMBOL_GPL(ltt_filter_control); |
1108 | ||
1109 | //ust// int __init ltt_init(void) | |
1110 | //ust// { | |
1111 | //ust// /* Make sure no page fault can be triggered by this module */ | |
1112 | //ust// vmalloc_sync_all(); | |
1113 | //ust// return 0; | |
1114 | //ust// } | |
1115 | ||
1116 | //ust// module_init(ltt_init) | |
1117 | ||
1118 | //ust// static void __exit ltt_exit(void) | |
1119 | //ust// { | |
1120 | //ust// struct ltt_trace_struct *trace; | |
1121 | //ust// struct list_head *pos, *n; | |
1122 | //ust// | |
1123 | //ust// ltt_lock_traces(); | |
1124 | //ust// /* Stop each trace, currently being read by RCU read-side */ | |
1125 | //ust// list_for_each_entry_rcu(trace, <t_traces.head, list) | |
1126 | //ust// _ltt_trace_stop(trace); | |
1127 | //ust// /* Wait for quiescent state. Readers have preemption disabled. */ | |
1128 | //ust// synchronize_sched(); | |
1129 | //ust// /* Safe iteration is now permitted. It does not have to be RCU-safe | |
1130 | //ust// * because no readers are left. */ | |
1131 | //ust// list_for_each_safe(pos, n, <t_traces.head) { | |
1132 | //ust// trace = container_of(pos, struct ltt_trace_struct, list); | |
1133 | //ust// /* _ltt_trace_destroy does a synchronize_sched() */ | |
1134 | //ust// _ltt_trace_destroy(trace); | |
1135 | //ust// __ltt_trace_destroy(trace); | |
1136 | //ust// } | |
1137 | //ust// /* free traces in pre-alloc status */ | |
1138 | //ust// list_for_each_safe(pos, n, <t_traces.setup_head) { | |
1139 | //ust// trace = container_of(pos, struct ltt_trace_struct, list); | |
1140 | //ust// _ltt_trace_free(trace); | |
1141 | //ust// } | |
1142 | //ust// | |
1143 | //ust// ltt_unlock_traces(); | |
1144 | //ust// } | |
1145 | ||
1146 | //ust// module_exit(ltt_exit) | |
1147 | ||
1148 | //ust// MODULE_LICENSE("GPL"); | |
1149 | //ust// MODULE_AUTHOR("Mathieu Desnoyers"); | |
1150 | //ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API"); |