/*
 * (C) Copyright 2005-2008 -
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
 * start/stop.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *	Karim Yaghmour (karim@opersys.com)
 *	Tom Zanussi (zanussi@us.ibm.com)
 *	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *	22/09/06, Move to the marker/probes mechanism.
 *	19/10/05, Complete lockless mechanism.
 *	27/05/05, Modular redesign and rewrite.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/time.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/kref.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/atomic.h>

#include "ltt-tracer.h"
/*
 * Wait for all tracing code currently executing in preempt-disabled
 * (or RCU read-side, on PREEMPT_RT) sections to finish.
 */
static void synchronize_trace(void)
{
	synchronize_sched();
#ifdef CONFIG_PREEMPT_RT
	/* RT readers use standard RCU read-side locks; see async_wakeup(). */
	synchronize_rcu();
#endif
}
53 static void async_wakeup(unsigned long data
);
55 static DEFINE_TIMER(ltt_async_wakeup_timer
, async_wakeup
, 0, 0);
57 /* Default callbacks for modules */
59 int ltt_filter_control_default(enum ltt_filter_control_msg msg
,
60 struct ltt_trace
*trace
)
65 int ltt_statedump_default(struct ltt_trace
*trace
)
70 /* Callbacks for registered modules */
72 int (*ltt_filter_control_functor
)
73 (enum ltt_filter_control_msg msg
, struct ltt_trace
*trace
) =
74 ltt_filter_control_default
;
75 struct module
*ltt_filter_control_owner
;
77 /* These function pointers are protected by a trace activation check */
78 struct module
*ltt_run_filter_owner
;
79 int (*ltt_statedump_functor
)(struct ltt_trace
*trace
) = ltt_statedump_default
;
80 struct module
*ltt_statedump_owner
;
82 struct chan_info_struct
{
84 unsigned int def_sb_size
;
85 unsigned int def_n_sb
;
87 [LTT_CHANNEL_METADATA
] = {
89 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
90 LTT_DEFAULT_N_SUBBUFS_LOW
,
92 [LTT_CHANNEL_FD_STATE
] = {
94 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
95 LTT_DEFAULT_N_SUBBUFS_LOW
,
97 [LTT_CHANNEL_GLOBAL_STATE
] = {
98 LTT_GLOBAL_STATE_CHANNEL
,
99 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
100 LTT_DEFAULT_N_SUBBUFS_LOW
,
102 [LTT_CHANNEL_IRQ_STATE
] = {
103 LTT_IRQ_STATE_CHANNEL
,
104 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
105 LTT_DEFAULT_N_SUBBUFS_LOW
,
107 [LTT_CHANNEL_MODULE_STATE
] = {
108 LTT_MODULE_STATE_CHANNEL
,
109 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
110 LTT_DEFAULT_N_SUBBUFS_LOW
,
112 [LTT_CHANNEL_NETIF_STATE
] = {
113 LTT_NETIF_STATE_CHANNEL
,
114 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
115 LTT_DEFAULT_N_SUBBUFS_LOW
,
117 [LTT_CHANNEL_SOFTIRQ_STATE
] = {
118 LTT_SOFTIRQ_STATE_CHANNEL
,
119 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
120 LTT_DEFAULT_N_SUBBUFS_LOW
,
122 [LTT_CHANNEL_SWAP_STATE
] = {
123 LTT_SWAP_STATE_CHANNEL
,
124 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
125 LTT_DEFAULT_N_SUBBUFS_LOW
,
127 [LTT_CHANNEL_SYSCALL_STATE
] = {
128 LTT_SYSCALL_STATE_CHANNEL
,
129 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
130 LTT_DEFAULT_N_SUBBUFS_LOW
,
132 [LTT_CHANNEL_TASK_STATE
] = {
133 LTT_TASK_STATE_CHANNEL
,
134 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
135 LTT_DEFAULT_N_SUBBUFS_LOW
,
137 [LTT_CHANNEL_VM_STATE
] = {
138 LTT_VM_STATE_CHANNEL
,
139 LTT_DEFAULT_SUBBUF_SIZE_MED
,
140 LTT_DEFAULT_N_SUBBUFS_MED
,
144 LTT_DEFAULT_SUBBUF_SIZE_MED
,
145 LTT_DEFAULT_N_SUBBUFS_MED
,
147 [LTT_CHANNEL_INPUT
] = {
149 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
150 LTT_DEFAULT_N_SUBBUFS_LOW
,
152 [LTT_CHANNEL_IPC
] = {
154 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
155 LTT_DEFAULT_N_SUBBUFS_LOW
,
157 [LTT_CHANNEL_KERNEL
] = {
159 LTT_DEFAULT_SUBBUF_SIZE_HIGH
,
160 LTT_DEFAULT_N_SUBBUFS_HIGH
,
164 LTT_DEFAULT_SUBBUF_SIZE_MED
,
165 LTT_DEFAULT_N_SUBBUFS_MED
,
167 [LTT_CHANNEL_RCU
] = {
169 LTT_DEFAULT_SUBBUF_SIZE_MED
,
170 LTT_DEFAULT_N_SUBBUFS_MED
,
172 [LTT_CHANNEL_DEFAULT
] = {
174 LTT_DEFAULT_SUBBUF_SIZE_MED
,
175 LTT_DEFAULT_N_SUBBUFS_MED
,
179 static enum ltt_channels
get_channel_type_from_name(const char *name
)
184 return LTT_CHANNEL_DEFAULT
;
186 for (i
= 0; i
< ARRAY_SIZE(chan_infos
); i
++)
187 if (chan_infos
[i
].name
&& !strcmp(name
, chan_infos
[i
].name
))
188 return (enum ltt_channels
)i
;
190 return LTT_CHANNEL_DEFAULT
;
194 * ltt_module_register - LTT module registration
196 * @function: callback to register
197 * @owner: module which owns the callback
199 * The module calling this registration function must ensure that no
200 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
201 * must be called between a vmalloc and the moment the memory is made visible to
202 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
203 * the module allocates virtual memory after its registration must it
204 * synchronize the TLBs.
206 int ltt_module_register(enum ltt_module_function name
, void *function
,
207 struct module
*owner
)
212 * Make sure no page fault can be triggered by the module about to be
213 * registered. We deal with this here so we don't have to call
214 * vmalloc_sync_all() in each module's init.
219 case LTT_FUNCTION_RUN_FILTER
:
220 if (ltt_run_filter_owner
!= NULL
) {
224 ltt_filter_register((ltt_run_filter_functor
)function
);
225 ltt_run_filter_owner
= owner
;
227 case LTT_FUNCTION_FILTER_CONTROL
:
228 if (ltt_filter_control_owner
!= NULL
) {
232 ltt_filter_control_functor
=
233 (int (*)(enum ltt_filter_control_msg
,
234 struct ltt_trace
*))function
;
235 ltt_filter_control_owner
= owner
;
237 case LTT_FUNCTION_STATEDUMP
:
238 if (ltt_statedump_owner
!= NULL
) {
242 ltt_statedump_functor
=
243 (int (*)(struct ltt_trace
*))function
;
244 ltt_statedump_owner
= owner
;
252 EXPORT_SYMBOL_GPL(ltt_module_register
);
255 * ltt_module_unregister - LTT module unregistration
258 void ltt_module_unregister(enum ltt_module_function name
)
261 case LTT_FUNCTION_RUN_FILTER
:
262 ltt_filter_unregister();
263 ltt_run_filter_owner
= NULL
;
264 /* Wait for preempt sections to finish */
267 case LTT_FUNCTION_FILTER_CONTROL
:
268 ltt_filter_control_functor
= ltt_filter_control_default
;
269 ltt_filter_control_owner
= NULL
;
271 case LTT_FUNCTION_STATEDUMP
:
272 ltt_statedump_functor
= ltt_statedump_default
;
273 ltt_statedump_owner
= NULL
;
278 EXPORT_SYMBOL_GPL(ltt_module_unregister
);
280 static LIST_HEAD(ltt_transport_list
);
283 * ltt_transport_register - LTT transport registration
284 * @transport: transport structure
286 * Registers a transport which can be used as output to extract the data out of
287 * LTTng. The module calling this registration function must ensure that no
288 * trap-inducing code will be executed by the transport functions. E.g.
289 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
290 * is made visible to the transport function. This registration acts as a
291 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
292 * after its registration must it synchronize the TLBs.
294 void ltt_transport_register(struct ltt_transport
*transport
)
297 * Make sure no page fault can be triggered by the module about to be
298 * registered. We deal with this here so we don't have to call
299 * vmalloc_sync_all() in each module's init.
304 list_add_tail(&transport
->node
, <t_transport_list
);
307 EXPORT_SYMBOL_GPL(ltt_transport_register
);
310 * ltt_transport_unregister - LTT transport unregistration
311 * @transport: transport structure
313 void ltt_transport_unregister(struct ltt_transport
*transport
)
316 list_del(&transport
->node
);
319 EXPORT_SYMBOL_GPL(ltt_transport_unregister
);
322 int is_channel_overwrite(enum ltt_channels chan
, enum trace_mode mode
)
325 case LTT_TRACE_NORMAL
:
327 case LTT_TRACE_FLIGHT
:
329 case LTT_CHANNEL_METADATA
:
334 case LTT_TRACE_HYBRID
:
336 case LTT_CHANNEL_KERNEL
:
339 case LTT_CHANNEL_RCU
:
340 case LTT_CHANNEL_IPC
:
341 case LTT_CHANNEL_INPUT
:
351 static void trace_async_wakeup(struct ltt_trace
*trace
)
354 struct ltt_chan
*chan
;
356 /* Must check each channel for pending read wakeup */
357 for (i
= 0; i
< trace
->nr_channels
; i
++) {
358 chan
= &trace
->channels
[i
];
360 trace
->ops
->wakeup_channel(chan
);
364 /* Timer to send async wakeups to the readers */
365 static void async_wakeup(unsigned long data
)
367 struct ltt_trace
*trace
;
370 * PREEMPT_RT does not allow spinlocks to be taken within preempt
371 * disable sections (spinlock taken in wake_up). However, mainline won't
372 * allow mutex to be taken in interrupt context. Ugly.
373 * Take a standard RCU read lock for RT kernels, which imply that we
374 * also have to synchronize_rcu() upon updates.
376 #ifndef CONFIG_PREEMPT_RT
377 rcu_read_lock_sched();
381 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
382 trace_async_wakeup(trace
);
384 #ifndef CONFIG_PREEMPT_RT
385 rcu_read_unlock_sched();
390 mod_timer(<t_async_wakeup_timer
, jiffies
+ LTT_PERCPU_TIMER_INTERVAL
);
394 * _ltt_trace_find - find a trace by given name.
395 * trace_name: trace name
397 * Returns a pointer to the trace structure, NULL if not found.
399 static struct ltt_trace
*_ltt_trace_find(const char *trace_name
)
401 struct ltt_trace
*trace
;
403 list_for_each_entry(trace
, <t_traces
.head
, list
)
404 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
410 /* _ltt_trace_find_setup :
411 * find a trace in setup list by given name.
413 * Returns a pointer to the trace structure, NULL if not found.
415 struct ltt_trace
*_ltt_trace_find_setup(const char *trace_name
)
417 struct ltt_trace
*trace
;
419 list_for_each_entry(trace
, <t_traces
.setup_head
, list
)
420 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
425 EXPORT_SYMBOL_GPL(_ltt_trace_find_setup
);
428 * ltt_release_trace - Release a LTT trace
429 * @kref : reference count on the trace
431 void ltt_release_trace(struct kref
*kref
)
433 struct ltt_trace
*trace
= container_of(kref
, struct ltt_trace
, kref
);
435 trace
->ops
->remove_dirs(trace
);
436 module_put(trace
->transport
->owner
);
437 ltt_channels_trace_free(trace
->channels
, trace
->nr_channels
);
440 EXPORT_SYMBOL_GPL(ltt_release_trace
);
442 static inline void prepare_chan_size_num(unsigned int *subbuf_size
,
443 unsigned int *n_subbufs
)
445 /* Make sure the subbuffer size is larger than a page */
446 *subbuf_size
= max_t(unsigned int, *subbuf_size
, PAGE_SIZE
);
448 /* round to next power of 2 */
449 *subbuf_size
= 1 << get_count_order(*subbuf_size
);
450 *n_subbufs
= 1 << get_count_order(*n_subbufs
);
452 /* Subbuf size and number must both be power of two */
453 WARN_ON(hweight32(*subbuf_size
) != 1);
454 WARN_ON(hweight32(*n_subbufs
) != 1);
457 int _ltt_trace_setup(const char *trace_name
)
460 struct ltt_trace
*new_trace
= NULL
;
463 enum ltt_channels chantype
;
465 if (_ltt_trace_find_setup(trace_name
)) {
466 printk(KERN_ERR
"LTT : Trace name %s already used.\n",
472 if (_ltt_trace_find(trace_name
)) {
473 printk(KERN_ERR
"LTT : Trace name %s already used.\n",
479 new_trace
= kzalloc(sizeof(struct ltt_trace
), GFP_KERNEL
);
482 "LTT : Unable to allocate memory for trace %s\n",
487 strncpy(new_trace
->trace_name
, trace_name
, NAME_MAX
);
488 new_trace
->channels
= ltt_channels_trace_alloc(&new_trace
->nr_channels
,
490 if (!new_trace
->channels
) {
492 "LTT : Unable to allocate memory for chaninfo %s\n",
499 * Force metadata channel to active, no overwrite.
501 metadata_index
= ltt_channels_get_index_from_name("metadata");
502 WARN_ON(metadata_index
< 0);
503 new_trace
->channels
[metadata_index
].overwrite
= 0;
504 new_trace
->channels
[metadata_index
].active
= 1;
507 * Set hardcoded tracer defaults for some channels
509 for (chan
= 0; chan
< new_trace
->nr_channels
; chan
++) {
510 if (!(new_trace
->channels
[chan
].active
))
513 chantype
= get_channel_type_from_name(
514 ltt_channels_get_name_from_index(chan
));
515 new_trace
->channels
[chan
].a
.sb_size
=
516 chan_infos
[chantype
].def_sb_size
;
517 new_trace
->channels
[chan
].a
.n_sb
=
518 chan_infos
[chantype
].def_n_sb
;
521 list_add(&new_trace
->list
, <t_traces
.setup_head
);
529 EXPORT_SYMBOL_GPL(_ltt_trace_setup
);
/* Locked wrapper around _ltt_trace_setup(). */
int ltt_trace_setup(const char *trace_name)
{
	int ret;

	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();

	return ret;
}
EXPORT_SYMBOL_GPL(ltt_trace_setup);
542 /* must be called from within a traces lock. */
543 static void _ltt_trace_free(struct ltt_trace
*trace
)
545 list_del(&trace
->list
);
549 int ltt_trace_set_type(const char *trace_name
, const char *trace_type
)
552 struct ltt_trace
*trace
;
553 struct ltt_transport
*tran_iter
, *transport
= NULL
;
557 trace
= _ltt_trace_find_setup(trace_name
);
559 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
564 list_for_each_entry(tran_iter
, <t_transport_list
, node
) {
565 if (!strcmp(tran_iter
->name
, trace_type
)) {
566 transport
= tran_iter
;
571 printk(KERN_ERR
"LTT : Transport %s is not present.\n",
577 trace
->transport
= transport
;
583 EXPORT_SYMBOL_GPL(ltt_trace_set_type
);
585 int ltt_trace_set_channel_subbufsize(const char *trace_name
,
586 const char *channel_name
,
590 struct ltt_trace
*trace
;
595 trace
= _ltt_trace_find_setup(trace_name
);
597 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
602 index
= ltt_channels_get_index_from_name(channel_name
);
604 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
608 trace
->channels
[index
].a
.sb_size
= size
;
614 EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize
);
616 int ltt_trace_set_channel_subbufcount(const char *trace_name
,
617 const char *channel_name
,
621 struct ltt_trace
*trace
;
626 trace
= _ltt_trace_find_setup(trace_name
);
628 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
633 index
= ltt_channels_get_index_from_name(channel_name
);
635 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
639 trace
->channels
[index
].a
.n_sb
= cnt
;
645 EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount
);
647 int ltt_trace_set_channel_switch_timer(const char *trace_name
,
648 const char *channel_name
,
649 unsigned long interval
)
652 struct ltt_trace
*trace
;
657 trace
= _ltt_trace_find_setup(trace_name
);
659 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
664 index
= ltt_channels_get_index_from_name(channel_name
);
666 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
670 ltt_channels_trace_set_timer(&trace
->channels
[index
], interval
);
676 EXPORT_SYMBOL_GPL(ltt_trace_set_channel_switch_timer
);
678 int ltt_trace_set_channel_enable(const char *trace_name
,
679 const char *channel_name
, unsigned int enable
)
682 struct ltt_trace
*trace
;
687 trace
= _ltt_trace_find_setup(trace_name
);
689 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
695 * Datas in metadata channel(marker info) is necessary to be able to
696 * read the trace, we always enable this channel.
698 if (!enable
&& !strcmp(channel_name
, "metadata")) {
699 printk(KERN_ERR
"LTT : Trying to disable metadata channel\n");
704 index
= ltt_channels_get_index_from_name(channel_name
);
706 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
711 trace
->channels
[index
].active
= enable
;
717 EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable
);
719 int ltt_trace_set_channel_overwrite(const char *trace_name
,
720 const char *channel_name
,
721 unsigned int overwrite
)
724 struct ltt_trace
*trace
;
729 trace
= _ltt_trace_find_setup(trace_name
);
731 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
737 * Always put the metadata channel in non-overwrite mode :
738 * This is a very low traffic channel and it can't afford to have its
739 * data overwritten : this data (marker info) is necessary to be
740 * able to read the trace.
742 if (overwrite
&& !strcmp(channel_name
, "metadata")) {
743 printk(KERN_ERR
"LTT : Trying to set metadata channel to "
749 index
= ltt_channels_get_index_from_name(channel_name
);
751 printk(KERN_ERR
"LTT : Channel %s not found\n", channel_name
);
756 trace
->channels
[index
].overwrite
= overwrite
;
762 EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite
);
764 int ltt_trace_alloc(const char *trace_name
)
767 struct ltt_trace
*trace
;
771 const char *channel_name
;
775 trace
= _ltt_trace_find_setup(trace_name
);
777 printk(KERN_ERR
"LTT : Trace not found %s\n", trace_name
);
782 kref_init(&trace
->kref
);
783 init_waitqueue_head(&trace
->kref_wq
);
786 trace
->freq_scale
= trace_clock_freq_scale();
788 if (!trace
->transport
) {
789 printk(KERN_ERR
"LTT : Transport is not set.\n");
791 goto transport_error
;
793 if (!try_module_get(trace
->transport
->owner
)) {
794 printk(KERN_ERR
"LTT : Can't lock transport module.\n");
796 goto transport_error
;
798 trace
->ops
= &trace
->transport
->ops
;
800 err
= trace
->ops
->create_dirs(trace
);
802 printk(KERN_ERR
"LTT : Can't create dir for trace %s.\n",
807 local_irq_save(flags
);
808 trace
->start_freq
= trace_clock_frequency();
809 trace
->start_tsc
= trace_clock_read64();
810 do_gettimeofday(&trace
->start_time
);
811 local_irq_restore(flags
);
813 for (chan
= 0; chan
< trace
->nr_channels
; chan
++) {
814 if (!(trace
->channels
[chan
].active
))
817 channel_name
= ltt_channels_get_name_from_index(chan
);
818 WARN_ON(!channel_name
);
820 * note: sb_size and n_sb will be overwritten with updated
821 * values by channel creation.
823 sb_size
= trace
->channels
[chan
].a
.sb_size
;
824 n_sb
= trace
->channels
[chan
].a
.n_sb
;
825 prepare_chan_size_num(&sb_size
, &n_sb
);
826 err
= trace
->ops
->create_channel(channel_name
,
827 &trace
->channels
[chan
],
828 trace
->dentry
.trace_root
,
830 trace
->channels
[chan
].overwrite
, trace
);
832 printk(KERN_ERR
"LTT : Can't create channel %s.\n",
834 goto create_channel_error
;
838 list_del(&trace
->list
);
839 if (list_empty(<t_traces
.head
)) {
840 mod_timer(<t_async_wakeup_timer
,
841 jiffies
+ LTT_PERCPU_TIMER_INTERVAL
);
842 set_kernel_trace_flag_all_tasks();
844 list_add_rcu(&trace
->list
, <t_traces
.head
);
851 create_channel_error
:
852 for (chan
--; chan
>= 0; chan
--) {
853 if (trace
->channels
[chan
].active
) {
854 struct ltt_chan
*chanp
= &trace
->channels
[chan
];
855 trace
->ops
->remove_channel_files(chanp
);
856 kref_put(&chanp
->a
.kref
, trace
->ops
->remove_channel
);
859 trace
->ops
->remove_dirs(trace
);
862 module_put(trace
->transport
->owner
);
869 EXPORT_SYMBOL_GPL(ltt_trace_alloc
);
872 * It is worked as a wrapper for current version of ltt_control.ko.
873 * We will make a new ltt_control based on debugfs, and control each channel's
877 int ltt_trace_create(const char *trace_name
, const char *trace_type
,
878 enum trace_mode mode
,
879 unsigned int subbuf_size_low
, unsigned int n_subbufs_low
,
880 unsigned int subbuf_size_med
, unsigned int n_subbufs_med
,
881 unsigned int subbuf_size_high
, unsigned int n_subbufs_high
)
885 err
= ltt_trace_setup(trace_name
);
886 if (IS_ERR_VALUE(err
))
889 err
= ltt_trace_set_type(trace_name
, trace_type
);
890 if (IS_ERR_VALUE(err
))
893 err
= ltt_trace_alloc(trace_name
);
894 if (IS_ERR_VALUE(err
))
900 /* Must be called while sure that trace is in the list. */
901 static int _ltt_trace_destroy(struct ltt_trace
*trace
)
911 "LTT : Can't destroy trace %s : tracer is active\n",
916 /* Everything went fine */
917 list_del_rcu(&trace
->list
);
919 if (list_empty(<t_traces
.head
)) {
920 clear_kernel_trace_flag_all_tasks();
922 * We stop the asynchronous delivery of reader wakeup, but
923 * we must make one last check for reader wakeups pending
924 * later in __ltt_trace_destroy.
926 del_timer_sync(<t_async_wakeup_timer
);
936 /* Sleepable part of the destroy */
937 static void __ltt_trace_destroy(struct ltt_trace
*trace
)
940 struct ltt_chan
*chan
;
942 for (i
= 0; i
< trace
->nr_channels
; i
++) {
943 chan
= &trace
->channels
[i
];
945 trace
->ops
->finish_channel(chan
);
948 flush_scheduled_work();
951 * The currently destroyed trace is not in the trace list anymore,
952 * so it's safe to call the async wakeup ourself. It will deliver
953 * the last subbuffers.
955 trace_async_wakeup(trace
);
957 for (i
= 0; i
< trace
->nr_channels
; i
++) {
958 chan
= &trace
->channels
[i
];
960 trace
->ops
->remove_channel_files(chan
);
961 kref_put(&chan
->a
.kref
,
962 trace
->ops
->remove_channel
);
967 * Wait for lttd readers to release the files, therefore making sure
968 * the last subbuffers have been read.
970 if (atomic_read(&trace
->kref
.refcount
) > 1) {
973 * Unlock traces and CPU hotplug while we wait for lttd to
977 __wait_event_interruptible(trace
->kref_wq
,
978 (atomic_read(&trace
->kref
.refcount
) == 1), ret
);
982 kref_put(&trace
->kref
, ltt_release_trace
);
985 int ltt_trace_destroy(const char *trace_name
)
988 struct ltt_trace
*trace
;
992 trace
= _ltt_trace_find(trace_name
);
994 err
= _ltt_trace_destroy(trace
);
998 __ltt_trace_destroy(trace
);
1005 trace
= _ltt_trace_find_setup(trace_name
);
1007 _ltt_trace_free(trace
);
1008 ltt_unlock_traces();
1014 /* Error handling */
1016 ltt_unlock_traces();
1019 EXPORT_SYMBOL_GPL(ltt_trace_destroy
);
1022 * called with trace lock held.
1025 void ltt_channels_trace_start_timer(struct ltt_chan
*channels
,
1026 unsigned int nr_channels
)
1030 for (i
= 0; i
< nr_channels
; i
++) {
1031 struct ltt_chan
*chan
= &channels
[i
];
1032 chan
->a
.trace
->ops
->start_switch_timer(chan
);
1037 * called with trace lock held.
1040 void ltt_channels_trace_stop_timer(struct ltt_chan
*channels
,
1041 unsigned int nr_channels
)
1045 for (i
= 0; i
< nr_channels
; i
++) {
1046 struct ltt_chan
*chan
= &channels
[i
];
1047 chan
->a
.trace
->ops
->stop_switch_timer(chan
);
1051 /* must be called from within a traces lock. */
1052 static int _ltt_trace_start(struct ltt_trace
*trace
)
1056 if (trace
== NULL
) {
1061 printk(KERN_INFO
"LTT : Tracing already active for trace %s\n",
1063 if (!try_module_get(ltt_run_filter_owner
)) {
1065 printk(KERN_ERR
"LTT : Can't lock filter module.\n");
1066 goto get_ltt_run_filter_error
;
1068 ltt_channels_trace_start_timer(trace
->channels
, trace
->nr_channels
);
1070 /* Read by trace points without protection : be careful */
1071 ltt_traces
.num_active_traces
++;
1074 /* error handling */
1075 get_ltt_run_filter_error
:
1080 int ltt_trace_start(const char *trace_name
)
1083 struct ltt_trace
*trace
;
1087 trace
= _ltt_trace_find(trace_name
);
1088 err
= _ltt_trace_start(trace
);
1092 ltt_unlock_traces();
1095 * Call the kernel state dump.
1096 * Events will be mixed with real kernel events, it's ok.
1097 * Notice that there is no protection on the trace : that's exactly
1098 * why we iterate on the list and check for trace equality instead of
1099 * directly using this trace handle inside the logging function.
1102 ltt_dump_marker_state(trace
);
1104 if (!try_module_get(ltt_statedump_owner
)) {
1107 "LTT : Can't lock state dump module.\n");
1109 ltt_statedump_functor(trace
);
1110 module_put(ltt_statedump_owner
);
1115 /* Error handling */
1117 ltt_unlock_traces();
1120 EXPORT_SYMBOL_GPL(ltt_trace_start
);
1122 /* must be called from within traces lock */
1123 static int _ltt_trace_stop(struct ltt_trace
*trace
)
1127 if (trace
== NULL
) {
1132 printk(KERN_INFO
"LTT : Tracing not active for trace %s\n",
1134 if (trace
->active
) {
1135 ltt_channels_trace_stop_timer(trace
->channels
,
1136 trace
->nr_channels
);
1138 ltt_traces
.num_active_traces
--;
1139 synchronize_trace(); /* Wait for each tracing to be finished */
1141 module_put(ltt_run_filter_owner
);
1142 /* Everything went fine */
1145 /* Error handling */
/* Locked wrapper around _ltt_trace_stop(). */
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
EXPORT_SYMBOL_GPL(ltt_trace_stop);
1164 * ltt_control - Trace control in-kernel API
1165 * @msg: Action to perform
1166 * @trace_name: Trace on which the action must be done
1167 * @trace_type: Type of trace (normal, flight, hybrid)
1168 * @args: Arguments specific to the action
1170 int ltt_control(enum ltt_control_msg msg
, const char *trace_name
,
1171 const char *trace_type
, union ltt_control_args args
)
1175 printk(KERN_ALERT
"ltt_control : trace %s\n", trace_name
);
1177 case LTT_CONTROL_START
:
1178 printk(KERN_DEBUG
"Start tracing %s\n", trace_name
);
1179 err
= ltt_trace_start(trace_name
);
1181 case LTT_CONTROL_STOP
:
1182 printk(KERN_DEBUG
"Stop tracing %s\n", trace_name
);
1183 err
= ltt_trace_stop(trace_name
);
1185 case LTT_CONTROL_CREATE_TRACE
:
1186 printk(KERN_DEBUG
"Creating trace %s\n", trace_name
);
1187 err
= ltt_trace_create(trace_name
, trace_type
,
1188 args
.new_trace
.mode
,
1189 args
.new_trace
.subbuf_size_low
,
1190 args
.new_trace
.n_subbufs_low
,
1191 args
.new_trace
.subbuf_size_med
,
1192 args
.new_trace
.n_subbufs_med
,
1193 args
.new_trace
.subbuf_size_high
,
1194 args
.new_trace
.n_subbufs_high
);
1196 case LTT_CONTROL_DESTROY_TRACE
:
1197 printk(KERN_DEBUG
"Destroying trace %s\n", trace_name
);
1198 err
= ltt_trace_destroy(trace_name
);
1203 EXPORT_SYMBOL_GPL(ltt_control
);
1206 * ltt_filter_control - Trace filter control in-kernel API
1207 * @msg: Action to perform on the filter
1208 * @trace_name: Trace on which the action must be done
1210 int ltt_filter_control(enum ltt_filter_control_msg msg
, const char *trace_name
)
1213 struct ltt_trace
*trace
;
1215 printk(KERN_DEBUG
"ltt_filter_control : trace %s\n", trace_name
);
1217 trace
= _ltt_trace_find(trace_name
);
1218 if (trace
== NULL
) {
1220 "Trace does not exist. Cannot proxy control request\n");
1224 if (!try_module_get(ltt_filter_control_owner
)) {
1226 goto get_module_error
;
1229 case LTT_FILTER_DEFAULT_ACCEPT
:
1231 "Proxy filter default accept %s\n", trace_name
);
1232 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1234 case LTT_FILTER_DEFAULT_REJECT
:
1236 "Proxy filter default reject %s\n", trace_name
);
1237 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1242 module_put(ltt_filter_control_owner
);
1246 ltt_unlock_traces();
1249 EXPORT_SYMBOL_GPL(ltt_filter_control
);
1251 int __init
ltt_init(void)
1253 /* Make sure no page fault can be triggered by this module */
1255 init_timer_deferrable(<t_async_wakeup_timer
);
1259 module_init(ltt_init
)
1261 static void __exit
ltt_exit(void)
1263 struct ltt_trace
*trace
;
1264 struct list_head
*pos
, *n
;
1267 /* Stop each trace, currently being read by RCU read-side */
1268 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
)
1269 _ltt_trace_stop(trace
);
1270 /* Wait for quiescent state. Readers have preemption disabled. */
1271 synchronize_trace();
1272 /* Safe iteration is now permitted. It does not have to be RCU-safe
1273 * because no readers are left. */
1274 list_for_each_safe(pos
, n
, <t_traces
.head
) {
1275 trace
= container_of(pos
, struct ltt_trace
, list
);
1276 /* _ltt_trace_destroy does a synchronize_trace() */
1277 _ltt_trace_destroy(trace
);
1278 __ltt_trace_destroy(trace
);
1280 /* free traces in pre-alloc status */
1281 list_for_each_safe(pos
, n
, <t_traces
.setup_head
) {
1282 trace
= container_of(pos
, struct ltt_trace
, list
);
1283 _ltt_trace_free(trace
);
1286 ltt_unlock_traces();
1289 module_exit(ltt_exit
)
1291 MODULE_LICENSE("GPL and additional rights");
1292 MODULE_AUTHOR("Mathieu Desnoyers");
1293 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");