New naming convention for internal macros
[lttng-ust.git] / libust / tracer.c
1 /*
2 * tracer.c
3 *
4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 *
21 * Inspired from LTT :
22 * Karim Yaghmour (karim@opersys.com)
23 * Tom Zanussi (zanussi@us.ibm.com)
24 * Bob Wisniewski (bob@watson.ibm.com)
25 * And from K42 :
26 * Bob Wisniewski (bob@watson.ibm.com)
27 *
28 * Changelog:
29 * 22/09/06, Move to the marker/probes mechanism.
30 * 19/10/05, Complete lockless mechanism.
31 * 27/05/05, Modular redesign and rewrite.
32 */
33
34 #include <urcu-bp.h>
35 #include <urcu/rculist.h>
36
37 #include <ust/clock.h>
38
39 #include "tracercore.h"
40 #include "tracer.h"
41 #include "usterr.h"
42
43 //ust// static void async_wakeup(unsigned long data);
44 //ust//
45 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
46
47 /* Default callbacks for modules */
/*
 * Default filter control callback, installed when no filter control
 * module is registered: ignores the message and reports success.
 */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ust_trace *trace)
{
	return 0;
}
53
/*
 * Default state dump callback, installed when no statedump module is
 * registered: dumps nothing and reports success.
 */
int ltt_statedump_default(struct ust_trace *trace)
{
	return 0;
}
58
59 /* Callbacks for registered modules */
60
/* Filter control hook; modules may override the default no-op callback. */
int (*ltt_filter_control_functor)
		(enum ltt_filter_control_msg msg, struct ust_trace *trace) =
		ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
/* State dump hook; modules may override the default no-op callback. */
int (*ltt_statedump_functor)(struct ust_trace *trace) =
		ltt_statedump_default;
struct module *ltt_statedump_owner;
71
/*
 * Per-channel-type defaults (name, default subbuffer size, default
 * subbuffer count), indexed by enum ltt_channels.
 */
struct chan_info_struct chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};
84
85 static enum ltt_channels get_channel_type_from_name(const char *name)
86 {
87 int i;
88
89 if (!name)
90 return LTT_CHANNEL_UST;
91
92 for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
93 if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
94 return (enum ltt_channels)i;
95
96 return LTT_CHANNEL_UST;
97 }
98
99 /**
100 * ltt_module_register - LTT module registration
101 * @name: module type
102 * @function: callback to register
103 * @owner: module which owns the callback
104 *
105 * The module calling this registration function must ensure that no
106 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
107 * must be called between a vmalloc and the moment the memory is made visible to
108 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
109 * the module allocates virtual memory after its registration must it
110 * synchronize the TLBs.
111 */
112 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
113 //ust// struct module *owner)
114 //ust// {
115 //ust// int ret = 0;
116 //ust//
117 //ust// /*
118 //ust// * Make sure no page fault can be triggered by the module about to be
119 //ust// * registered. We deal with this here so we don't have to call
120 //ust// * vmalloc_sync_all() in each module's init.
121 //ust// */
122 //ust// vmalloc_sync_all();
123 //ust//
124 //ust// switch (name) {
125 //ust// case LTT_FUNCTION_RUN_FILTER:
126 //ust// if (ltt_run_filter_owner != NULL) {
127 //ust// ret = -EEXIST;
128 //ust// goto end;
129 //ust// }
130 //ust// ltt_filter_register((ltt_run_filter_functor)function);
131 //ust// ltt_run_filter_owner = owner;
132 //ust// break;
133 //ust// case LTT_FUNCTION_FILTER_CONTROL:
134 //ust// if (ltt_filter_control_owner != NULL) {
135 //ust// ret = -EEXIST;
136 //ust// goto end;
137 //ust// }
138 //ust// ltt_filter_control_functor =
139 //ust// (int (*)(enum ltt_filter_control_msg,
140 //ust// struct ust_trace *))function;
141 //ust// ltt_filter_control_owner = owner;
142 //ust// break;
143 //ust// case LTT_FUNCTION_STATEDUMP:
144 //ust// if (ltt_statedump_owner != NULL) {
145 //ust// ret = -EEXIST;
146 //ust// goto end;
147 //ust// }
148 //ust// ltt_statedump_functor =
149 //ust// (int (*)(struct ust_trace *))function;
150 //ust// ltt_statedump_owner = owner;
151 //ust// break;
152 //ust// }
153 //ust//
154 //ust// end:
155 //ust//
156 //ust// return ret;
157 //ust// }
158
159 /**
160 * ltt_module_unregister - LTT module unregistration
161 * @name: module type
162 */
163 //ust// void ltt_module_unregister(enum ltt_module_function name)
164 //ust// {
165 //ust// switch (name) {
166 //ust// case LTT_FUNCTION_RUN_FILTER:
167 //ust// ltt_filter_unregister();
168 //ust// ltt_run_filter_owner = NULL;
169 //ust// /* Wait for preempt sections to finish */
170 //ust// synchronize_sched();
171 //ust// break;
172 //ust// case LTT_FUNCTION_FILTER_CONTROL:
173 //ust// ltt_filter_control_functor = ltt_filter_control_default;
174 //ust// ltt_filter_control_owner = NULL;
175 //ust// break;
176 //ust// case LTT_FUNCTION_STATEDUMP:
177 //ust// ltt_statedump_functor = ltt_statedump_default;
178 //ust// ltt_statedump_owner = NULL;
179 //ust// break;
180 //ust// }
181 //ust//
182 //ust// }
183
184 static LIST_HEAD(ltt_transport_list);
185
186 /**
187 * ltt_transport_register - LTT transport registration
188 * @transport: transport structure
189 *
190 * Registers a transport which can be used as output to extract the data out of
191 * LTTng. The module calling this registration function must ensure that no
192 * trap-inducing code will be executed by the transport functions. E.g.
193 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
194 * is made visible to the transport function. This registration acts as a
195 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
196 * after its registration must it synchronize the TLBs.
197 */
198 void ltt_transport_register(struct ltt_transport *transport)
199 {
200 /*
201 * Make sure no page fault can be triggered by the module about to be
202 * registered. We deal with this here so we don't have to call
203 * vmalloc_sync_all() in each module's init.
204 */
205 //ust// vmalloc_sync_all();
206
207 ltt_lock_traces();
208 list_add_tail(&transport->node, &ltt_transport_list);
209 ltt_unlock_traces();
210 }
211
212 /**
213 * ltt_transport_unregister - LTT transport unregistration
214 * @transport: transport structure
215 */
216 void ltt_transport_unregister(struct ltt_transport *transport)
217 {
218 ltt_lock_traces();
219 list_del(&transport->node);
220 ltt_unlock_traces();
221 }
222
223 static inline int is_channel_overwrite(enum ltt_channels chan,
224 enum trace_mode mode)
225 {
226 switch (mode) {
227 case LTT_TRACE_NORMAL:
228 return 0;
229 case LTT_TRACE_FLIGHT:
230 switch (chan) {
231 case LTT_CHANNEL_METADATA:
232 return 0;
233 default:
234 return 1;
235 }
236 case LTT_TRACE_HYBRID:
237 switch (chan) {
238 case LTT_CHANNEL_METADATA:
239 return 0;
240 default:
241 return 1;
242 }
243 default:
244 return 0;
245 }
246 }
247
248 static void trace_async_wakeup(struct ust_trace *trace)
249 {
250 int i;
251 struct ust_channel *chan;
252
253 /* Must check each channel for pending read wakeup */
254 for (i = 0; i < trace->nr_channels; i++) {
255 chan = &trace->channels[i];
256 if (chan->active)
257 trace->ops->wakeup_channel(chan);
258 }
259 }
260
261 //ust// /* Timer to send async wakeups to the readers */
262 //ust// static void async_wakeup(unsigned long data)
263 //ust// {
264 //ust// struct ust_trace *trace;
265 //ust//
266 //ust// /*
267 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
268 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
269 //ust// * allow mutex to be taken in interrupt context. Ugly.
270 //ust// * A proper way to do this would be to turn the timer into a
271 //ust// * periodically woken up thread, but it adds to the footprint.
272 //ust// */
273 //ust// #ifndef CONFIG_PREEMPT_RT
274 //ust// rcu_read_lock_sched();
275 //ust// #else
276 //ust// ltt_lock_traces();
277 //ust// #endif
278 //ust// list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
279 //ust// trace_async_wakeup(trace);
280 //ust// }
281 //ust// #ifndef CONFIG_PREEMPT_RT
282 //ust// rcu_read_unlock_sched();
283 //ust// #else
284 //ust// ltt_unlock_traces();
285 //ust// #endif
286 //ust//
287 //ust// mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
288 //ust// }
289
290 /**
291 * _ltt_trace_find - find a trace by given name.
292 * trace_name: trace name
293 *
294 * Returns a pointer to the trace structure, NULL if not found.
295 */
296 struct ust_trace *_ltt_trace_find(const char *trace_name)
297 {
298 struct ust_trace *trace;
299
300 list_for_each_entry(trace, &ltt_traces.head, list)
301 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
302 return trace;
303
304 return NULL;
305 }
306
307 /* _ltt_trace_find_setup :
308 * find a trace in setup list by given name.
309 *
310 * Returns a pointer to the trace structure, NULL if not found.
311 */
312 struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
313 {
314 struct ust_trace *trace;
315
316 list_for_each_entry(trace, &ltt_traces.setup_head, list)
317 if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
318 return trace;
319
320 return NULL;
321 }
322
/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 *
 * kref release callback.  The directory-removal step of the kernel
 * implementation is disabled in UST, so this is currently a no-op.
 */
void ltt_release_transport(struct kref *kref)
{
//ust//	struct ust_trace *trace = container_of(kref,
//ust//			struct ust_trace, ltt_transport_kref);
//ust//	trace->ops->remove_dirs(trace);
}
333
334 /**
335 * ltt_release_trace - Release a LTT trace
336 * @kref : reference count on the trace
337 */
338 void ltt_release_trace(struct kref *kref)
339 {
340 struct ust_trace *trace = _ust_container_of(kref,
341 struct ust_trace, kref);
342 ltt_channels_trace_free(trace->channels);
343 free(trace);
344 }
345
/*
 * Normalize a requested subbuffer geometry in place: enforce a minimum
 * of one page per subbuffer and round both size and count up to the
 * next power of two, as the ring buffer requires.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	/* Make sure the subbuffer size is larger than a page */
	*subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);

	/* round to next power of 2 */
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
360
361 int _ltt_trace_setup(const char *trace_name)
362 {
363 int err = 0;
364 struct ust_trace *new_trace = NULL;
365 int metadata_index;
366 unsigned int chan;
367 enum ltt_channels chantype;
368
369 if (_ltt_trace_find_setup(trace_name)) {
370 ERR("Trace name %s already used", trace_name);
371 err = -EEXIST;
372 goto traces_error;
373 }
374
375 if (_ltt_trace_find(trace_name)) {
376 ERR("Trace name %s already used", trace_name);
377 err = -EEXIST;
378 goto traces_error;
379 }
380
381 new_trace = zmalloc(sizeof(struct ust_trace));
382 if (!new_trace) {
383 ERR("Unable to allocate memory for trace %s", trace_name);
384 err = -ENOMEM;
385 goto traces_error;
386 }
387 strncpy(new_trace->trace_name, trace_name, NAME_MAX);
388 new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
389 ust_channels_overwrite_by_default,
390 ust_channels_request_collection_by_default, 1);
391 if (!new_trace->channels) {
392 ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
393 err = -ENOMEM;
394 goto trace_free;
395 }
396
397 /*
398 * Force metadata channel to active, no overwrite.
399 */
400 metadata_index = ltt_channels_get_index_from_name("metadata");
401 WARN_ON(metadata_index < 0);
402 new_trace->channels[metadata_index].overwrite = 0;
403 new_trace->channels[metadata_index].active = 1;
404
405 /*
406 * Set hardcoded tracer defaults for some channels
407 */
408 for (chan = 0; chan < new_trace->nr_channels; chan++) {
409 if (!(new_trace->channels[chan].active))
410 continue;
411
412 chantype = get_channel_type_from_name(
413 ltt_channels_get_name_from_index(chan));
414 new_trace->channels[chan].subbuf_size =
415 chan_infos[chantype].def_subbufsize;
416 new_trace->channels[chan].subbuf_cnt =
417 chan_infos[chantype].def_subbufcount;
418 }
419
420 list_add(&new_trace->list, &ltt_traces.setup_head);
421 return 0;
422
423 trace_free:
424 free(new_trace);
425 traces_error:
426 return err;
427 }
428
429
/*
 * ltt_trace_setup - public entry point: _ltt_trace_setup() under the
 * traces lock.
 */
int ltt_trace_setup(const char *trace_name)
{
	int err;

	ltt_lock_traces();
	err = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return err;
}
438
439 /* must be called from within a traces lock. */
440 static void _ltt_trace_free(struct ust_trace *trace)
441 {
442 list_del(&trace->list);
443 free(trace);
444 }
445
446 int ltt_trace_set_type(const char *trace_name, const char *trace_type)
447 {
448 int err = 0;
449 struct ust_trace *trace;
450 struct ltt_transport *tran_iter, *transport = NULL;
451
452 ltt_lock_traces();
453
454 trace = _ltt_trace_find_setup(trace_name);
455 if (!trace) {
456 ERR("Trace not found %s", trace_name);
457 err = -ENOENT;
458 goto traces_error;
459 }
460
461 list_for_each_entry(tran_iter, &ltt_transport_list, node) {
462 if (!strcmp(tran_iter->name, trace_type)) {
463 transport = tran_iter;
464 break;
465 }
466 }
467 if (!transport) {
468 ERR("Transport %s is not present", trace_type);
469 err = -EINVAL;
470 goto traces_error;
471 }
472
473 trace->transport = transport;
474
475 traces_error:
476 ltt_unlock_traces();
477 return err;
478 }
479
480 int ltt_trace_set_channel_subbufsize(const char *trace_name,
481 const char *channel_name, unsigned int size)
482 {
483 int err = 0;
484 struct ust_trace *trace;
485 int index;
486
487 ltt_lock_traces();
488
489 trace = _ltt_trace_find_setup(trace_name);
490 if (!trace) {
491 ERR("Trace not found %s", trace_name);
492 err = -ENOENT;
493 goto traces_error;
494 }
495
496 index = ltt_channels_get_index_from_name(channel_name);
497 if (index < 0) {
498 ERR("Channel %s not found", channel_name);
499 err = -ENOENT;
500 goto traces_error;
501 }
502 trace->channels[index].subbuf_size = size;
503
504 traces_error:
505 ltt_unlock_traces();
506 return err;
507 }
508
509 int ltt_trace_set_channel_subbufcount(const char *trace_name,
510 const char *channel_name, unsigned int cnt)
511 {
512 int err = 0;
513 struct ust_trace *trace;
514 int index;
515
516 ltt_lock_traces();
517
518 trace = _ltt_trace_find_setup(trace_name);
519 if (!trace) {
520 ERR("Trace not found %s", trace_name);
521 err = -ENOENT;
522 goto traces_error;
523 }
524
525 index = ltt_channels_get_index_from_name(channel_name);
526 if (index < 0) {
527 ERR("Channel %s not found", channel_name);
528 err = -ENOENT;
529 goto traces_error;
530 }
531 trace->channels[index].subbuf_cnt = cnt;
532
533 traces_error:
534 ltt_unlock_traces();
535 return err;
536 }
537
538 int ltt_trace_set_channel_enable(const char *trace_name,
539 const char *channel_name, unsigned int enable)
540 {
541 int err = 0;
542 struct ust_trace *trace;
543 int index;
544
545 ltt_lock_traces();
546
547 trace = _ltt_trace_find_setup(trace_name);
548 if (!trace) {
549 ERR("Trace not found %s", trace_name);
550 err = -ENOENT;
551 goto traces_error;
552 }
553
554 /*
555 * Datas in metadata channel(marker info) is necessary to be able to
556 * read the trace, we always enable this channel.
557 */
558 if (!enable && !strcmp(channel_name, "metadata")) {
559 ERR("Trying to disable metadata channel");
560 err = -EINVAL;
561 goto traces_error;
562 }
563
564 index = ltt_channels_get_index_from_name(channel_name);
565 if (index < 0) {
566 ERR("Channel %s not found", channel_name);
567 err = -ENOENT;
568 goto traces_error;
569 }
570
571 trace->channels[index].active = enable;
572
573 traces_error:
574 ltt_unlock_traces();
575 return err;
576 }
577
578 int ltt_trace_set_channel_overwrite(const char *trace_name,
579 const char *channel_name, unsigned int overwrite)
580 {
581 int err = 0;
582 struct ust_trace *trace;
583 int index;
584
585 ltt_lock_traces();
586
587 trace = _ltt_trace_find_setup(trace_name);
588 if (!trace) {
589 ERR("Trace not found %s", trace_name);
590 err = -ENOENT;
591 goto traces_error;
592 }
593
594 /*
595 * Always put the metadata channel in non-overwrite mode :
596 * This is a very low traffic channel and it can't afford to have its
597 * data overwritten : this data (marker info) is necessary to be
598 * able to read the trace.
599 */
600 if (overwrite && !strcmp(channel_name, "metadata")) {
601 ERR("Trying to set metadata channel to overwrite mode");
602 err = -EINVAL;
603 goto traces_error;
604 }
605
606 index = ltt_channels_get_index_from_name(channel_name);
607 if (index < 0) {
608 ERR("Channel %s not found", channel_name);
609 err = -ENOENT;
610 goto traces_error;
611 }
612
613 trace->channels[index].overwrite = overwrite;
614
615 traces_error:
616 ltt_unlock_traces();
617 return err;
618 }
619
/*
 * ltt_trace_alloc - allocate the buffers of a trace in setup state.
 * @trace_name: name of the trace to allocate
 *
 * Creates one ring buffer per active channel through the bound
 * transport, then moves the trace from the setup list to the published
 * (RCU) trace list.  A transport must have been set beforehand with
 * ltt_trace_set_type().
 * Returns 0 on success, 1 when the trace is already allocated, a
 * negative errno value on error.
 */
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;
	unsigned int subbuf_size, subbuf_cnt;
//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	if (_ltt_trace_find(trace_name)) { /* Trace already allocated */
		err = 1;
		goto traces_error;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/* One reference for the tracer, one for the transport. */
	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		ERR("Transport is not set");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		ERR("Can't lock transport module");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		ERR("Can't create dir for trace %s", trace_name);
//ust//		goto dirs_error;
//ust//	}

//ust//	local_irq_save(flags);
	/* Snapshot clock state at trace start for timestamp correlation. */
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
//ust//	local_irq_restore(flags);

	/* Create one buffer per active channel, geometry normalized first. */
	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			ERR("Cannot create channel %s", channel_name);
			goto create_channel_error;
		}
	}

	/* Publish: unlink from the setup list, add to the RCU trace list. */
	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//				jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	/* Roll back the channels created before the failure. */
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

//ust//	dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
721
722 /*
723 * It is worked as a wrapper for current version of ltt_control.ko.
724 * We will make a new ltt_control based on debugfs, and control each channel's
725 * buffer.
726 */
727 //ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
728 //ust// enum trace_mode mode,
729 //ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
730 //ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
731 //ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
732 //ust// {
733 //ust// int err = 0;
734 //ust//
735 //ust// err = ltt_trace_setup(trace_name);
736 //ust// if (IS_ERR_VALUE(err))
737 //ust// return err;
738 //ust//
739 //ust// err = ltt_trace_set_type(trace_name, trace_type);
740 //ust// if (IS_ERR_VALUE(err))
741 //ust// return err;
742 //ust//
743 //ust// err = ltt_trace_alloc(trace_name);
744 //ust// if (IS_ERR_VALUE(err))
745 //ust// return err;
746 //ust//
747 //ust// return err;
748 //ust// }
749
/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ust_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		/* An active trace must be stopped before destruction. */
		ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	/* Unpublish the trace and wait for in-flight RCU readers. */
	list_del_rcu(&trace->list);
	synchronize_rcu();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
783
/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ust_trace *trace, int drop)
{
	int i;
	struct ust_channel *chan;

	/*
	 * Unless the remaining buffer content is to be dropped, flush
	 * each active channel so readers can consume what is left.
	 */
	if(!drop) {
		for (i = 0; i < trace->nr_channels; i++) {
			chan = &trace->channels[i];
			if (chan->active)
				trace->ops->finish_channel(chan);
		}
	}

	/*
	 * NOTE(review): everything below this return is dead code in UST,
	 * kept from the kernel implementation — channel removal and the
	 * final kref releases never run here.
	 */
	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}
829
830 int ltt_trace_destroy(const char *trace_name, int drop)
831 {
832 int err = 0;
833 struct ust_trace *trace;
834
835 ltt_lock_traces();
836
837 trace = _ltt_trace_find(trace_name);
838 if (trace) {
839 err = _ltt_trace_destroy(trace);
840 if (err)
841 goto error;
842
843 ltt_unlock_traces();
844
845 __ltt_trace_destroy(trace, drop);
846 //ust// put_trace_clock();
847
848 return 0;
849 }
850
851 trace = _ltt_trace_find_setup(trace_name);
852 if (trace) {
853 _ltt_trace_free(trace);
854 ltt_unlock_traces();
855 return 0;
856 }
857
858 err = -ENOENT;
859
860 /* Error handling */
861 error:
862 ltt_unlock_traces();
863 return err;
864 }
865
866 /* must be called from within a traces lock. */
867 static int _ltt_trace_start(struct ust_trace *trace)
868 {
869 int err = 0;
870
871 if (trace == NULL) {
872 err = -ENOENT;
873 goto traces_error;
874 }
875 if (trace->active)
876 DBG("Tracing already active for trace %s", trace->trace_name);
877 //ust// if (!try_module_get(ltt_run_filter_owner)) {
878 //ust// err = -ENODEV;
879 //ust// ERR("Cannot lock filter module");
880 //ust// goto get_ltt_run_filter_error;
881 //ust// }
882 trace->active = 1;
883 /* Read by trace points without protection : be careful */
884 ltt_traces.num_active_traces++;
885 return err;
886
887 /* error handling */
888 //ust// get_ltt_run_filter_error:
889 traces_error:
890 return err;
891 }
892
/*
 * ltt_trace_start - activate a trace by name and dump initial state.
 * @trace_name: trace to start
 *
 * Marks the trace active under the traces lock, then (after unlocking)
 * dumps marker state and invokes the statedump callback.
 * Returns 0 on success, -ENOENT when the trace does not exist.
 */
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
		ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
932
933 /* must be called from within traces lock */
934 static int _ltt_trace_stop(struct ust_trace *trace)
935 {
936 int err = -EPERM;
937
938 if (trace == NULL) {
939 err = -ENOENT;
940 goto traces_error;
941 }
942 if (!trace->active)
943 DBG("LTT : Tracing not active for trace %s", trace->trace_name);
944 if (trace->active) {
945 trace->active = 0;
946 ltt_traces.num_active_traces--;
947 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
948 }
949 //ust// module_put(ltt_run_filter_owner);
950 /* Everything went fine */
951 return 0;
952
953 /* Error handling */
954 traces_error:
955 return err;
956 }
957
/*
 * ltt_trace_stop - deactivate a trace by name, under the traces lock.
 * Returns 0 on success, -ENOENT when the trace does not exist.
 */
int ltt_trace_stop(const char *trace_name)
{
	int err;

	ltt_lock_traces();
	err = _ltt_trace_stop(_ltt_trace_find(trace_name));
	ltt_unlock_traces();
	return err;
}
969
970 /**
971 * ltt_filter_control - Trace filter control in-kernel API
972 * @msg: Action to perform on the filter
973 * @trace_name: Trace on which the action must be done
974 */
975 int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
976 {
977 int err;
978 struct ust_trace *trace;
979
980 DBG("ltt_filter_control : trace %s", trace_name);
981 ltt_lock_traces();
982 trace = _ltt_trace_find(trace_name);
983 if (trace == NULL) {
984 ERR("Trace does not exist. Cannot proxy control request");
985 err = -ENOENT;
986 goto trace_error;
987 }
988 //ust// if (!try_module_get(ltt_filter_control_owner)) {
989 //ust// err = -ENODEV;
990 //ust// goto get_module_error;
991 //ust// }
992 switch (msg) {
993 case LTT_FILTER_DEFAULT_ACCEPT:
994 DBG("Proxy filter default accept %s", trace_name);
995 err = (*ltt_filter_control_functor)(msg, trace);
996 break;
997 case LTT_FILTER_DEFAULT_REJECT:
998 DBG("Proxy filter default reject %s", trace_name);
999 err = (*ltt_filter_control_functor)(msg, trace);
1000 break;
1001 default:
1002 err = -EPERM;
1003 }
1004 //ust// module_put(ltt_filter_control_owner);
1005
1006 //ust// get_module_error:
1007 trace_error:
1008 ltt_unlock_traces();
1009 return err;
1010 }
This page took 0.069232 seconds and 4 git commands to generate.