4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
11 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
14 * Karim Yaghmour (karim@opersys.com)
15 * Tom Zanussi (zanussi@us.ibm.com)
16 * Bob Wisniewski (bob@watson.ibm.com)
18 * Bob Wisniewski (bob@watson.ibm.com)
21 * 22/09/06, Move to the marker/probes mechanism.
22 * 19/10/05, Complete lockless mechanism.
23 * 27/05/05, Modular redesign and rewrite.
26 //ust// #include <linux/time.h>
27 //ust// #include <linux/ltt-tracer.h>
28 //ust// #include <linux/module.h>
29 //ust// #include <linux/string.h>
30 //ust// #include <linux/slab.h>
31 //ust// #include <linux/init.h>
32 //ust// #include <linux/rcupdate.h>
33 //ust// #include <linux/sched.h>
34 //ust// #include <linux/bitops.h>
35 //ust// #include <linux/fs.h>
36 //ust// #include <linux/cpu.h>
37 //ust// #include <linux/kref.h>
38 //ust// #include <linux/delay.h>
39 //ust// #include <linux/vmalloc.h>
40 //ust// #include <asm/atomic.h>
41 #include <kcompat/rculist.h>
43 #include "kernelcompat.h"
44 #include "tracercore.h"
48 //ust// static void async_wakeup(unsigned long data);
50 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
52 /* Default callbacks for modules */
53 notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg
,
54 struct ltt_trace_struct
*trace
)
/*
 * Default state-dump callback, installed until a statedump module
 * registers its own. Dumps nothing and reports success.
 */
int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}
64 /* Callbacks for registered modules */
66 int (*ltt_filter_control_functor
)
67 (enum ltt_filter_control_msg msg
, struct ltt_trace_struct
*trace
) =
68 ltt_filter_control_default
;
69 struct module
*ltt_filter_control_owner
;
71 /* These function pointers are protected by a trace activation check */
72 struct module
*ltt_run_filter_owner
;
73 int (*ltt_statedump_functor
)(struct ltt_trace_struct
*trace
) =
74 ltt_statedump_default
;
75 struct module
*ltt_statedump_owner
;
/*
 * chan_info_struct / chan_infos[]: per-channel-type default sub-buffer
 * size and count, indexed by enum ltt_channels.
 * NOTE(review): extraction-damaged fragment. The struct's `name` member
 * (dereferenced below as chan_infos[i].name), the array declarator, the
 * name initializers, and the LTT_CHANNEL_UST designator appear to have
 * been lost — restore from the upstream file before building.
 */
77 struct chan_info_struct
{
79 unsigned int def_subbufsize
;
80 unsigned int def_subbufcount
;
/* Low-rate defaults for the metadata channel. */
82 [LTT_CHANNEL_METADATA
] = {
84 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
85 LTT_DEFAULT_N_SUBBUFS_LOW
,
/* High-rate defaults — presumably the LTT_CHANNEL_UST entry; confirm. */
89 LTT_DEFAULT_SUBBUF_SIZE_HIGH
,
90 LTT_DEFAULT_N_SUBBUFS_HIGH
,
94 static enum ltt_channels
get_channel_type_from_name(const char *name
)
99 return LTT_CHANNEL_UST
;
101 for (i
= 0; i
< ARRAY_SIZE(chan_infos
); i
++)
102 if (chan_infos
[i
].name
&& !strcmp(name
, chan_infos
[i
].name
))
103 return (enum ltt_channels
)i
;
105 return LTT_CHANNEL_UST
;
109 * ltt_module_register - LTT module registration
111 * @function: callback to register
112 * @owner: module which owns the callback
114 * The module calling this registration function must ensure that no
115 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
116 * must be called between a vmalloc and the moment the memory is made visible to
117 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
118 * the module allocates virtual memory after its registration must it
119 * synchronize the TLBs.
121 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
122 //ust// struct module *owner)
127 //ust// * Make sure no page fault can be triggered by the module about to be
128 //ust// * registered. We deal with this here so we don't have to call
129 //ust// * vmalloc_sync_all() in each module's init.
131 //ust// vmalloc_sync_all();
133 //ust// switch (name) {
134 //ust// case LTT_FUNCTION_RUN_FILTER:
135 //ust// if (ltt_run_filter_owner != NULL) {
136 //ust// ret = -EEXIST;
139 //ust// ltt_filter_register((ltt_run_filter_functor)function);
140 //ust// ltt_run_filter_owner = owner;
142 //ust// case LTT_FUNCTION_FILTER_CONTROL:
143 //ust// if (ltt_filter_control_owner != NULL) {
144 //ust// ret = -EEXIST;
147 //ust// ltt_filter_control_functor =
148 //ust// (int (*)(enum ltt_filter_control_msg,
149 //ust// struct ltt_trace_struct *))function;
150 //ust// ltt_filter_control_owner = owner;
152 //ust// case LTT_FUNCTION_STATEDUMP:
153 //ust// if (ltt_statedump_owner != NULL) {
154 //ust// ret = -EEXIST;
157 //ust// ltt_statedump_functor =
158 //ust// (int (*)(struct ltt_trace_struct *))function;
159 //ust// ltt_statedump_owner = owner;
167 //ust// EXPORT_SYMBOL_GPL(ltt_module_register);
170 * ltt_module_unregister - LTT module unregistration
173 //ust// void ltt_module_unregister(enum ltt_module_function name)
175 //ust// switch (name) {
176 //ust// case LTT_FUNCTION_RUN_FILTER:
177 //ust// ltt_filter_unregister();
178 //ust// ltt_run_filter_owner = NULL;
179 //ust// /* Wait for preempt sections to finish */
180 //ust// synchronize_sched();
182 //ust// case LTT_FUNCTION_FILTER_CONTROL:
183 //ust// ltt_filter_control_functor = ltt_filter_control_default;
184 //ust// ltt_filter_control_owner = NULL;
186 //ust// case LTT_FUNCTION_STATEDUMP:
187 //ust// ltt_statedump_functor = ltt_statedump_default;
188 //ust// ltt_statedump_owner = NULL;
193 //ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);
/* List of output transports registered via ltt_transport_register(). */
static LIST_HEAD(ltt_transport_list);
198 * ltt_transport_register - LTT transport registration
199 * @transport: transport structure
201 * Registers a transport which can be used as output to extract the data out of
202 * LTTng. The module calling this registration function must ensure that no
203 * trap-inducing code will be executed by the transport functions. E.g.
204 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
205 * is made visible to the transport function. This registration acts as a
206 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
207 * after its registration must it synchronize the TLBs.
209 void ltt_transport_register(struct ltt_transport
*transport
)
212 * Make sure no page fault can be triggered by the module about to be
213 * registered. We deal with this here so we don't have to call
214 * vmalloc_sync_all() in each module's init.
216 //ust// vmalloc_sync_all();
219 list_add_tail(&transport
->node
, <t_transport_list
);
222 //ust// EXPORT_SYMBOL_GPL(ltt_transport_register);
225 * ltt_transport_unregister - LTT transport unregistration
226 * @transport: transport structure
228 void ltt_transport_unregister(struct ltt_transport
*transport
)
231 list_del(&transport
->node
);
234 //ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
/*
 * is_channel_overwrite - decide whether a channel runs in overwrite
 * (flight-recorder) mode for a given trace mode.
 * NOTE(review): extraction-damaged fragment — the switch bodies and
 * return statements were lost. Visible structure suggests an outer
 * switch on the trace mode with an inner switch on the channel type
 * (metadata is handled specially); restore from upstream.
 */
236 static inline int is_channel_overwrite(enum ltt_channels chan
,
237 enum trace_mode mode
)
240 case LTT_TRACE_NORMAL
:
242 case LTT_TRACE_FLIGHT
:
244 case LTT_CHANNEL_METADATA
:
249 case LTT_TRACE_HYBRID
:
251 case LTT_CHANNEL_METADATA
:
262 * ltt_write_trace_header - Write trace header
263 * @trace: Trace information
264 * @header: Memory address where the information must be written to
266 void notrace
ltt_write_trace_header(struct ltt_trace_struct
*trace
,
267 struct ltt_subbuffer_header
*header
)
269 header
->magic_number
= LTT_TRACER_MAGIC_NUMBER
;
270 header
->major_version
= LTT_TRACER_VERSION_MAJOR
;
271 header
->minor_version
= LTT_TRACER_VERSION_MINOR
;
272 header
->arch_size
= sizeof(void *);
273 header
->alignment
= ltt_get_alignment();
274 header
->start_time_sec
= trace
->start_time
.tv_sec
;
275 header
->start_time_usec
= trace
->start_time
.tv_usec
;
276 header
->start_freq
= trace
->start_freq
;
277 header
->freq_scale
= trace
->freq_scale
;
279 //ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);
281 static void trace_async_wakeup(struct ltt_trace_struct
*trace
)
284 struct ltt_channel_struct
*chan
;
286 /* Must check each channel for pending read wakeup */
287 for (i
= 0; i
< trace
->nr_channels
; i
++) {
288 chan
= &trace
->channels
[i
];
290 trace
->ops
->wakeup_channel(chan
);
294 //ust// /* Timer to send async wakeups to the readers */
295 //ust// static void async_wakeup(unsigned long data)
297 //ust// struct ltt_trace_struct *trace;
300 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
301 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
302 //ust// * allow mutex to be taken in interrupt context. Ugly.
303 //ust// * A proper way to do this would be to turn the timer into a
304 //ust// * periodically woken up thread, but it adds to the footprint.
306 //ust// #ifndef CONFIG_PREEMPT_RT
307 //ust// rcu_read_lock_sched();
309 //ust// ltt_lock_traces();
311 //ust// list_for_each_entry_rcu(trace, <t_traces.head, list) {
312 //ust// trace_async_wakeup(trace);
314 //ust// #ifndef CONFIG_PREEMPT_RT
315 //ust// rcu_read_unlock_sched();
317 //ust// ltt_unlock_traces();
320 //ust// mod_timer(<t_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
324 * _ltt_trace_find - find a trace by given name.
325 * trace_name: trace name
327 * Returns a pointer to the trace structure, NULL if not found.
329 struct ltt_trace_struct
*_ltt_trace_find(const char *trace_name
)
331 struct ltt_trace_struct
*trace
;
333 list_for_each_entry(trace
, <t_traces
.head
, list
)
334 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
340 /* _ltt_trace_find_setup :
341 * find a trace in setup list by given name.
343 * Returns a pointer to the trace structure, NULL if not found.
345 struct ltt_trace_struct
*_ltt_trace_find_setup(const char *trace_name
)
347 struct ltt_trace_struct
*trace
;
349 list_for_each_entry(trace
, <t_traces
.setup_head
, list
)
350 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
355 //ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);
358 * ltt_release_transport - Release an LTT transport
359 * @kref : reference count on the transport
361 void ltt_release_transport(struct kref
*kref
)
363 struct ltt_trace_struct
*trace
= container_of(kref
,
364 struct ltt_trace_struct
, ltt_transport_kref
);
365 //ust// trace->ops->remove_dirs(trace);
367 //ust// EXPORT_SYMBOL_GPL(ltt_release_transport);
370 * ltt_release_trace - Release a LTT trace
371 * @kref : reference count on the trace
373 void ltt_release_trace(struct kref
*kref
)
375 struct ltt_trace_struct
*trace
= container_of(kref
,
376 struct ltt_trace_struct
, kref
);
377 ltt_channels_trace_free(trace
->channels
);
380 //ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
/*
 * prepare_chan_size_num - round sub-buffer size and count up to powers of two.
 * @subbuf_size: in/out sub-buffer size in bytes.
 * @n_subbufs: in/out number of sub-buffers.
 */
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
/*
 * _ltt_trace_setup - allocate a trace and place it on the setup list.
 * Rejects duplicate names (setup and active lists), kzalloc's the trace,
 * allocates its channel array, forces the metadata channel active and
 * non-overwrite, applies per-type sub-buffer defaults, then list_add()s
 * the trace onto ltt_traces.setup_head.
 * NOTE(review): extraction-damaged fragment — braces, error gotos/labels,
 * return statements and some declarations were lost; `<t_traces` below is
 * an HTML-entity corruption of `&ltt_traces`. Restore from upstream.
 */
393 int _ltt_trace_setup(const char *trace_name
)
396 struct ltt_trace_struct
*new_trace
= NULL
;
399 enum ltt_channels chantype
;
/* Duplicate-name checks against both the setup and active lists. */
401 if (_ltt_trace_find_setup(trace_name
)) {
402 printk(KERN_ERR
"LTT : Trace name %s already used.\n",
408 if (_ltt_trace_find(trace_name
)) {
409 printk(KERN_ERR
"LTT : Trace name %s already used.\n",
415 new_trace
= kzalloc(sizeof(struct ltt_trace_struct
), GFP_KERNEL
);
418 "LTT : Unable to allocate memory for trace %s\n",
423 strncpy(new_trace
->trace_name
, trace_name
, NAME_MAX
);
424 new_trace
->channels
= ltt_channels_trace_alloc(&new_trace
->nr_channels
,
426 if (!new_trace
->channels
) {
428 "LTT : Unable to allocate memory for chaninfo %s\n",
435 * Force metadata channel to active, no overwrite.
437 metadata_index
= ltt_channels_get_index_from_name("metadata");
438 WARN_ON(metadata_index
< 0);
439 new_trace
->channels
[metadata_index
].overwrite
= 0;
440 new_trace
->channels
[metadata_index
].active
= 1;
443 * Set hardcoded tracer defaults for some channels
445 for (chan
= 0; chan
< new_trace
->nr_channels
; chan
++) {
446 if (!(new_trace
->channels
[chan
].active
))
449 chantype
= get_channel_type_from_name(
450 ltt_channels_get_name_from_index(chan
));
451 new_trace
->channels
[chan
].subbuf_size
=
452 chan_infos
[chantype
].def_subbufsize
;
453 new_trace
->channels
[chan
].subbuf_cnt
=
454 chan_infos
[chantype
].def_subbufcount
;
457 list_add(&new_trace
->list
, <t_traces
.setup_head
);
465 //ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);
/*
 * ltt_trace_setup - public wrapper around _ltt_trace_setup().
 * Returns the result of the internal setup call.
 */
int ltt_trace_setup(const char *trace_name)
{
	int ret;

	/* NOTE(review): upstream brackets this call with ltt_lock_traces()/
	 * ltt_unlock_traces(); the lock lines appear lost in extraction —
	 * confirm and restore. */
	ret = _ltt_trace_setup(trace_name);
	return ret;
}
476 //ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);
478 /* must be called from within a traces lock. */
479 static void _ltt_trace_free(struct ltt_trace_struct
*trace
)
481 list_del(&trace
->list
);
485 int ltt_trace_set_type(const char *trace_name
, const char *trace_type
)
488 struct ltt_trace_struct
*trace
;
489 struct ltt_transport
*tran_iter
, *transport
= NULL
;
493 trace
= _ltt_trace_find_setup(trace_name
);
495 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
500 list_for_each_entry(tran_iter
, <t_transport_list
, node
) {
501 if (!strcmp(tran_iter
->name
, trace_type
)) {
502 transport
= tran_iter
;
507 printk(KERN_ERR
"LTT : Transport %s is not present.\n",
513 trace
->transport
= transport
;
519 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);
521 int ltt_trace_set_channel_subbufsize(const char *trace_name
,
522 const char *channel_name
, unsigned int size
)
525 struct ltt_trace_struct
*trace
;
530 trace
= _ltt_trace_find_setup(trace_name
);
532 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
537 index
= ltt_channels_get_index_from_name(channel_name
);
539 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
543 trace
->channels
[index
].subbuf_size
= size
;
549 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);
551 int ltt_trace_set_channel_subbufcount(const char *trace_name
,
552 const char *channel_name
, unsigned int cnt
)
555 struct ltt_trace_struct
*trace
;
560 trace
= _ltt_trace_find_setup(trace_name
);
562 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
567 index
= ltt_channels_get_index_from_name(channel_name
);
569 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
573 trace
->channels
[index
].subbuf_cnt
= cnt
;
579 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
581 int ltt_trace_set_channel_enable(const char *trace_name
,
582 const char *channel_name
, unsigned int enable
)
585 struct ltt_trace_struct
*trace
;
590 trace
= _ltt_trace_find_setup(trace_name
);
592 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
598 * Datas in metadata channel(marker info) is necessary to be able to
599 * read the trace, we always enable this channel.
601 if (!enable
&& !strcmp(channel_name
, "metadata")) {
602 printk(KERN_ERR
"LTT : Trying to disable metadata channel\n");
607 index
= ltt_channels_get_index_from_name(channel_name
);
609 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
614 trace
->channels
[index
].active
= enable
;
620 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);
622 int ltt_trace_set_channel_overwrite(const char *trace_name
,
623 const char *channel_name
, unsigned int overwrite
)
626 struct ltt_trace_struct
*trace
;
631 trace
= _ltt_trace_find_setup(trace_name
);
633 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
639 * Always put the metadata channel in non-overwrite mode :
640 * This is a very low traffic channel and it can't afford to have its
641 * data overwritten : this data (marker info) is necessary to be
642 * able to read the trace.
644 if (overwrite
&& !strcmp(channel_name
, "metadata")) {
645 printk(KERN_ERR
"LTT : Trying to set metadata channel to "
651 index
= ltt_channels_get_index_from_name(channel_name
);
653 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
658 trace
->channels
[index
].overwrite
= overwrite
;
664 //ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
/*
 * ltt_trace_alloc - allocate buffers and activate a trace that was set up.
 * Finds the trace on the setup list, initializes its krefs and clock info,
 * checks a transport was chosen, records start time/frequency, creates one
 * buffer-backed channel per active channel (sizes rounded by
 * prepare_chan_size_num), then moves the trace from the setup list onto
 * the active list (RCU add). On channel-creation failure, tears down the
 * already-created channels in reverse order.
 * NOTE(review): extraction-damaged fragment — braces, error labels/returns
 * and several declarations were lost; `<t_traces` is an HTML-entity
 * corruption of `&ltt_traces`. Restore from upstream.
 */
666 int ltt_trace_alloc(const char *trace_name
)
669 struct ltt_trace_struct
*trace
;
670 unsigned int subbuf_size
, subbuf_cnt
;
673 const char *channel_name
;
677 trace
= _ltt_trace_find_setup(trace_name
);
679 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
684 kref_init(&trace
->kref
);
685 kref_init(&trace
->ltt_transport_kref
);
686 //ust// init_waitqueue_head(&trace->kref_wq);
688 //ust// get_trace_clock();
689 trace
->freq_scale
= trace_clock_freq_scale();
/* A transport must have been selected via ltt_trace_set_type(). */
691 if (!trace
->transport
) {
692 printk(KERN_ERR
"LTT : Transport is not set.\n");
694 goto transport_error
;
696 //ust// if (!try_module_get(trace->transport->owner)) {
697 //ust// printk(KERN_ERR "LTT : Can't lock transport module.\n");
698 //ust// err = -ENODEV;
699 //ust// goto transport_error;
701 trace
->ops
= &trace
->transport
->ops
;
703 //ust// err = trace->ops->create_dirs(trace);
705 //ust// printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
707 //ust// goto dirs_error;
710 //ust// local_irq_save(flags);
/* Snapshot the trace start clock state for the sub-buffer headers. */
711 trace
->start_freq
= trace_clock_frequency();
712 trace
->start_tsc
= trace_clock_read64();
713 gettimeofday(&trace
->start_time
, NULL
); //ust// changed
714 //ust// local_irq_restore(flags);
/* Create one buffered channel per active channel. */
716 for (chan
= 0; chan
< trace
->nr_channels
; chan
++) {
717 if (!(trace
->channels
[chan
].active
))
720 channel_name
= ltt_channels_get_name_from_index(chan
);
721 WARN_ON(!channel_name
);
722 subbuf_size
= trace
->channels
[chan
].subbuf_size
;
723 subbuf_cnt
= trace
->channels
[chan
].subbuf_cnt
;
724 prepare_chan_size_num(&subbuf_size
, &subbuf_cnt
);
725 err
= trace
->ops
->create_channel(trace_name
, trace
,
726 trace
->dentry
.trace_root
,
728 &trace
->channels
[chan
],
731 trace
->channels
[chan
].overwrite
);
733 printk(KERN_ERR
"LTT : Can't create channel %s.\n",
735 goto create_channel_error
;
/* Move from the setup list to the active (RCU-protected) list. */
739 list_del(&trace
->list
);
740 //ust// if (list_empty(<t_traces.head)) {
741 //ust// mod_timer(<t_async_wakeup_timer,
742 //ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
743 //ust// set_kernel_trace_flag_all_tasks();
745 list_add_rcu(&trace
->list
, <t_traces
.head
);
746 //ust// synchronize_sched();
/* Unwind already-created channels in reverse order on failure. */
752 create_channel_error
:
753 for (chan
--; chan
>= 0; chan
--)
754 if (trace
->channels
[chan
].active
)
755 trace
->ops
->remove_channel(&trace
->channels
[chan
]);
758 //ust// module_put(trace->transport->owner);
760 //ust// put_trace_clock();
765 //ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);
768 * It is worked as a wrapper for current version of ltt_control.ko.
769 * We will make a new ltt_control based on debugfs, and control each channel's
772 static int ltt_trace_create(const char *trace_name
, const char *trace_type
,
773 enum trace_mode mode
,
774 unsigned int subbuf_size_low
, unsigned int n_subbufs_low
,
775 unsigned int subbuf_size_med
, unsigned int n_subbufs_med
,
776 unsigned int subbuf_size_high
, unsigned int n_subbufs_high
)
780 err
= ltt_trace_setup(trace_name
);
781 if (IS_ERR_VALUE(err
))
784 err
= ltt_trace_set_type(trace_name
, trace_type
);
785 if (IS_ERR_VALUE(err
))
788 err
= ltt_trace_alloc(trace_name
);
789 if (IS_ERR_VALUE(err
))
795 /* Must be called while sure that trace is in the list. */
/*
 * _ltt_trace_destroy - first (locked) phase of trace destruction: refuse
 * to destroy an actively-tracing trace, then detach it from the active
 * list. Timer/flag management from the kernel version is disabled here.
 * NOTE(review): extraction-damaged fragment — the active-tracer check,
 * braces and returns were lost; `<t_traces` is a corruption of
 * `&ltt_traces`. Restore from upstream.
 */
796 static int _ltt_trace_destroy(struct ltt_trace_struct
*trace
)
806 "LTT : Can't destroy trace %s : tracer is active\n",
811 /* Everything went fine */
812 //ust// list_del_rcu(&trace->list);
813 //ust// synchronize_sched();
814 if (list_empty(<t_traces
.head
)) {
815 //ust// clear_kernel_trace_flag_all_tasks();
817 * We stop the asynchronous delivery of reader wakeup, but
818 * we must make one last check for reader wakeups pending
819 * later in __ltt_trace_destroy.
821 //ust// del_timer_sync(<t_async_wakeup_timer);
831 /* Sleepable part of the destroy */
/*
 * __ltt_trace_destroy - second (sleepable) phase of trace destruction:
 * finish each channel, deliver last pending wakeups, remove the channels,
 * then drop the transport and trace krefs (which trigger the release
 * callbacks above). The early `return` marks the remainder as disabled
 * for UST.
 * NOTE(review): extraction-damaged fragment — braces and the `int i;`
 * declaration were lost; restore from upstream.
 */
832 static void __ltt_trace_destroy(struct ltt_trace_struct
*trace
)
835 struct ltt_channel_struct
*chan
;
837 for (i
= 0; i
< trace
->nr_channels
; i
++) {
838 chan
= &trace
->channels
[i
];
840 trace
->ops
->finish_channel(chan
);
843 return; /* FIXME: temporary for ust */
844 //ust// flush_scheduled_work();
847 * The currently destroyed trace is not in the trace list anymore,
848 * so it's safe to call the async wakeup ourself. It will deliver
849 * the last subbuffers.
851 trace_async_wakeup(trace
);
853 for (i
= 0; i
< trace
->nr_channels
; i
++) {
854 chan
= &trace
->channels
[i
];
856 trace
->ops
->remove_channel(chan
);
859 kref_put(&trace
->ltt_transport_kref
, ltt_release_transport
);
861 //ust// module_put(trace->transport->owner);
864 * Wait for lttd readers to release the files, therefore making sure
865 * the last subbuffers have been read.
867 //ust// if (atomic_read(&trace->kref.refcount) > 1) {
869 //ust// __wait_event_interruptible(trace->kref_wq,
870 //ust// (atomic_read(&trace->kref.refcount) == 1), ret);
872 kref_put(&trace
->kref
, ltt_release_trace
);
/*
 * ltt_trace_destroy - public destroy entry point: if the trace is active,
 * run both destroy phases; otherwise, if it is still on the setup list,
 * just free it with _ltt_trace_free().
 * NOTE(review): extraction-damaged fragment — locking, null checks,
 * gotos/labels and returns were lost; restore from upstream.
 */
875 int ltt_trace_destroy(const char *trace_name
)
878 struct ltt_trace_struct
*trace
;
882 trace
= _ltt_trace_find(trace_name
);
884 err
= _ltt_trace_destroy(trace
);
890 __ltt_trace_destroy(trace
);
891 //ust// put_trace_clock();
/* Fallback: the trace may never have been allocated (setup state). */
896 trace
= _ltt_trace_find_setup(trace_name
);
898 _ltt_trace_free(trace
);
910 //ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);
912 /* must be called from within a traces lock. */
/*
 * _ltt_trace_start - mark a trace active: warn if already active, then
 * bump the global active-trace counter read by the tracepoint fast path.
 * NOTE(review): extraction-damaged fragment — null check, `trace->active`
 * assignment, braces and returns were lost; restore from upstream.
 */
913 static int _ltt_trace_start(struct ltt_trace_struct
*trace
)
922 printk(KERN_INFO
"LTT : Tracing already active for trace %s\n",
924 //ust// if (!try_module_get(ltt_run_filter_owner)) {
925 //ust// err = -ENODEV;
926 //ust// printk(KERN_ERR "LTT : Can't lock filter module.\n");
927 //ust// goto get_ltt_run_filter_error;
930 /* Read by trace points without protection : be careful */
931 ltt_traces
.num_active_traces
++;
935 get_ltt_run_filter_error
:
/*
 * ltt_trace_start - public start entry point: locate the trace, activate
 * it via _ltt_trace_start(), then dump marker state and run the
 * statedump functor so the trace is self-describing.
 * NOTE(review): extraction-damaged fragment — locking, error checks and
 * returns were lost; restore from upstream.
 */
940 int ltt_trace_start(const char *trace_name
)
943 struct ltt_trace_struct
*trace
;
947 trace
= _ltt_trace_find(trace_name
);
948 err
= _ltt_trace_start(trace
);
955 * Call the kernel state dump.
956 * Events will be mixed with real kernel events, it's ok.
957 * Notice that there is no protection on the trace : that's exactly
958 * why we iterate on the list and check for trace equality instead of
959 * directly using this trace handle inside the logging function.
962 ltt_dump_marker_state(trace
);
964 //ust// if (!try_module_get(ltt_statedump_owner)) {
965 //ust// err = -ENODEV;
966 //ust// printk(KERN_ERR
967 //ust// "LTT : Can't lock state dump module.\n");
969 ltt_statedump_functor(trace
);
970 //ust// module_put(ltt_statedump_owner);
980 //ust// EXPORT_SYMBOL_GPL(ltt_trace_start);
982 /* must be called from within traces lock */
/*
 * _ltt_trace_stop - mark a trace inactive: warn if not active, then
 * decrement the global active-trace counter.
 * NOTE(review): extraction-damaged fragment — null check, `trace->active`
 * handling, braces and returns were lost; restore from upstream.
 */
983 static int _ltt_trace_stop(struct ltt_trace_struct
*trace
)
992 printk(KERN_INFO
"LTT : Tracing not active for trace %s\n",
996 ltt_traces
.num_active_traces
--;
997 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
999 //ust// module_put(ltt_run_filter_owner);
1000 /* Everything went fine */
1003 /* Error handling */
/*
 * ltt_trace_stop - public stop entry point: locate the trace under the
 * traces lock and deactivate it via _ltt_trace_stop().
 * NOTE(review): the matching ltt_lock_traces() and the trace-found guard
 * were reconstructed — the unlock call is visible in the fragment, so a
 * paired lock must exist; verify against upstream.
 */
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace)
		err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
1019 //ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);
1022 * ltt_control - Trace control in-kernel API
1023 * @msg: Action to perform
1024 * @trace_name: Trace on which the action must be done
1025 * @trace_type: Type of trace (normal, flight, hybrid)
1026 * @args: Arguments specific to the action
1028 //ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
1029 //ust// const char *trace_type, union ltt_control_args args)
1031 //ust// int err = -EPERM;
1033 //ust// printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
1034 //ust// switch (msg) {
1035 //ust// case LTT_CONTROL_START:
1036 //ust// printk(KERN_DEBUG "Start tracing %s\n", trace_name);
1037 //ust// err = ltt_trace_start(trace_name);
1039 //ust// case LTT_CONTROL_STOP:
1040 //ust// printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
1041 //ust// err = ltt_trace_stop(trace_name);
1043 //ust// case LTT_CONTROL_CREATE_TRACE:
1044 //ust// printk(KERN_DEBUG "Creating trace %s\n", trace_name);
1045 //ust// err = ltt_trace_create(trace_name, trace_type,
1046 //ust// args.new_trace.mode,
1047 //ust// args.new_trace.subbuf_size_low,
1048 //ust// args.new_trace.n_subbufs_low,
1049 //ust// args.new_trace.subbuf_size_med,
1050 //ust// args.new_trace.n_subbufs_med,
1051 //ust// args.new_trace.subbuf_size_high,
1052 //ust// args.new_trace.n_subbufs_high);
1054 //ust// case LTT_CONTROL_DESTROY_TRACE:
1055 //ust// printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
1056 //ust// err = ltt_trace_destroy(trace_name);
1061 //ust// EXPORT_SYMBOL_GPL(ltt_control);
/*
 * ltt_filter_control - Trace filter control in-kernel API: look up the
 * trace under the traces lock and proxy the accept/reject request to the
 * installed ltt_filter_control_functor.
 * NOTE(review): extraction-damaged fragment — the `/**` docblock opener,
 * switch statement, braces, error paths and returns were lost; restore
 * from upstream.
 */
1064 * ltt_filter_control - Trace filter control in-kernel API
1065 * @msg: Action to perform on the filter
1066 * @trace_name: Trace on which the action must be done
1068 int ltt_filter_control(enum ltt_filter_control_msg msg
, const char *trace_name
)
1071 struct ltt_trace_struct
*trace
;
1073 printk(KERN_DEBUG
"ltt_filter_control : trace %s\n", trace_name
);
1075 trace
= _ltt_trace_find(trace_name
);
1076 if (trace
== NULL
) {
1078 "Trace does not exist. Cannot proxy control request\n");
1082 //ust// if (!try_module_get(ltt_filter_control_owner)) {
1083 //ust// err = -ENODEV;
1084 //ust// goto get_module_error;
/* Dispatch both accept and reject through the registered functor. */
1087 case LTT_FILTER_DEFAULT_ACCEPT
:
1089 "Proxy filter default accept %s\n", trace_name
);
1090 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1092 case LTT_FILTER_DEFAULT_REJECT
:
1094 "Proxy filter default reject %s\n", trace_name
);
1095 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1100 //ust// module_put(ltt_filter_control_owner);
1104 ltt_unlock_traces();
1107 //ust// EXPORT_SYMBOL_GPL(ltt_filter_control);
1109 //ust// int __init ltt_init(void)
1111 //ust// /* Make sure no page fault can be triggered by this module */
1112 //ust// vmalloc_sync_all();
1116 //ust// module_init(ltt_init)
1118 //ust// static void __exit ltt_exit(void)
1120 //ust// struct ltt_trace_struct *trace;
1121 //ust// struct list_head *pos, *n;
1123 //ust// ltt_lock_traces();
1124 //ust// /* Stop each trace, currently being read by RCU read-side */
1125 //ust// list_for_each_entry_rcu(trace, <t_traces.head, list)
1126 //ust// _ltt_trace_stop(trace);
1127 //ust// /* Wait for quiescent state. Readers have preemption disabled. */
1128 //ust// synchronize_sched();
1129 //ust// /* Safe iteration is now permitted. It does not have to be RCU-safe
1130 //ust// * because no readers are left. */
1131 //ust// list_for_each_safe(pos, n, <t_traces.head) {
1132 //ust// trace = container_of(pos, struct ltt_trace_struct, list);
1133 //ust// /* _ltt_trace_destroy does a synchronize_sched() */
1134 //ust// _ltt_trace_destroy(trace);
1135 //ust// __ltt_trace_destroy(trace);
1137 //ust// /* free traces in pre-alloc status */
1138 //ust// list_for_each_safe(pos, n, <t_traces.setup_head) {
1139 //ust// trace = container_of(pos, struct ltt_trace_struct, list);
1140 //ust// _ltt_trace_free(trace);
1143 //ust// ltt_unlock_traces();
1146 //ust// module_exit(ltt_exit)
1148 //ust// MODULE_LICENSE("GPL");
1149 //ust// MODULE_AUTHOR("Mathieu Desnoyers");
1150 //ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");