4 * (C) Copyright 2005-2008 -
5 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * Karim Yaghmour (karim@opersys.com)
23 * Tom Zanussi (zanussi@us.ibm.com)
24 * Bob Wisniewski (bob@watson.ibm.com)
26 * Bob Wisniewski (bob@watson.ibm.com)
29 * 22/09/06, Move to the marker/probes mechanism.
30 * 19/10/05, Complete lockless mechanism.
31 * 27/05/05, Modular redesign and rewrite.
35 #include <urcu/rculist.h>
37 #include <ust/kernelcompat.h>
38 #include "tracercore.h"
42 //ust// static void async_wakeup(unsigned long data);
44 //ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
46 /* Default callbacks for modules */
/*
 * ltt_filter_control_default - placeholder filter-control callback, installed
 * in ltt_filter_control_functor until a filtering module registers its own.
 * NOTE(review): this chunk is mangled and the function body is not visible;
 * presumably it is a no-op returning 0 — confirm against the pristine file.
 */
47 notrace
int ltt_filter_control_default(enum ltt_filter_control_msg msg
,
48 struct ltt_trace_struct
*trace
)
/*
 * ltt_statedump_default - placeholder state-dump callback, installed in
 * ltt_statedump_functor until a state-dump module registers its own.
 * NOTE(review): body not visible in this mangled chunk; presumably a no-op
 * returning 0 — confirm against the pristine file.
 */
53 int ltt_statedump_default(struct ltt_trace_struct
*trace
)
58 /* Callbacks for registered modules */
/*
 * Filter-control entry point: initialized to the default no-op above and
 * swapped by the (commented-out) ltt_module_register machinery below.
 * Called through by ltt_filter_control() at the bottom of this file.
 */
60 int (*ltt_filter_control_functor
)
61 (enum ltt_filter_control_msg msg
, struct ltt_trace_struct
*trace
) =
62 ltt_filter_control_default
;
/* Module owning the currently registered filter-control callback. */
63 struct module
*ltt_filter_control_owner
;
65 /* These function pointers are protected by a trace activation check */
66 struct module
*ltt_run_filter_owner
;
/*
 * State-dump entry point, defaults to the no-op above; invoked from
 * ltt_trace_start() after a trace becomes active.
 */
67 int (*ltt_statedump_functor
)(struct ltt_trace_struct
*trace
) =
68 ltt_statedump_default
;
/* Module owning the currently registered state-dump callback. */
69 struct module
*ltt_statedump_owner
;
/*
 * Per-channel-type default sub-buffer geometry, indexed by enum ltt_channels
 * and consumed by _ltt_trace_setup() below.
 * NOTE(review): this span is truncated by the mangling — the struct presumably
 * also declares a "name" member (get_channel_type_from_name() dereferences
 * chan_infos[i].name), and the "} chan_infos[] = {" line, the channel-name
 * initializers, and the closing braces of the array are all missing.  Restore
 * from the pristine file before compiling.
 */
71 struct chan_info_struct
{
73 unsigned int def_subbufsize
;
74 unsigned int def_subbufcount
;
/* Metadata channel: low-throughput, small sub-buffers. */
76 [LTT_CHANNEL_METADATA
] = {
78 LTT_DEFAULT_SUBBUF_SIZE_LOW
,
79 LTT_DEFAULT_N_SUBBUFS_LOW
,
/* High-throughput defaults (presumably the LTT_CHANNEL_UST entry). */
83 LTT_DEFAULT_SUBBUF_SIZE_HIGH
,
84 LTT_DEFAULT_N_SUBBUFS_HIGH
,
/*
 * get_channel_type_from_name - map a channel name to its enum ltt_channels
 * value by scanning the chan_infos table; unknown names fall back to
 * LTT_CHANNEL_UST.
 * NOTE(review): the first "return LTT_CHANNEL_UST" below has lost its guarding
 * condition in this mangled chunk — most likely "if (!name)"; the loop
 * counter declaration ("int i;") is also missing.  Confirm against the
 * pristine file.
 */
88 static enum ltt_channels
get_channel_type_from_name(const char *name
)
93 return LTT_CHANNEL_UST
;
95 for (i
= 0; i
< ARRAY_SIZE(chan_infos
); i
++)
96 if (chan_infos
[i
].name
&& !strcmp(name
, chan_infos
[i
].name
))
97 return (enum ltt_channels
)i
;
/* No table entry matched: treat it as a generic UST channel. */
99 return LTT_CHANNEL_UST
;
103 * ltt_module_register - LTT module registration
105 * @function: callback to register
106 * @owner: module which owns the callback
108 * The module calling this registration function must ensure that no
109 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
110 * must be called between a vmalloc and the moment the memory is made visible to
111 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
112 * the module allocates virtual memory after its registration must it
113 * synchronize the TLBs.
115 //ust// int ltt_module_register(enum ltt_module_function name, void *function,
116 //ust// struct module *owner)
121 //ust// * Make sure no page fault can be triggered by the module about to be
122 //ust// * registered. We deal with this here so we don't have to call
123 //ust// * vmalloc_sync_all() in each module's init.
125 //ust// vmalloc_sync_all();
127 //ust// switch (name) {
128 //ust// case LTT_FUNCTION_RUN_FILTER:
129 //ust// if (ltt_run_filter_owner != NULL) {
130 //ust// ret = -EEXIST;
133 //ust// ltt_filter_register((ltt_run_filter_functor)function);
134 //ust// ltt_run_filter_owner = owner;
136 //ust// case LTT_FUNCTION_FILTER_CONTROL:
137 //ust// if (ltt_filter_control_owner != NULL) {
138 //ust// ret = -EEXIST;
141 //ust// ltt_filter_control_functor =
142 //ust// (int (*)(enum ltt_filter_control_msg,
143 //ust// struct ltt_trace_struct *))function;
144 //ust// ltt_filter_control_owner = owner;
146 //ust// case LTT_FUNCTION_STATEDUMP:
147 //ust// if (ltt_statedump_owner != NULL) {
148 //ust// ret = -EEXIST;
151 //ust// ltt_statedump_functor =
152 //ust// (int (*)(struct ltt_trace_struct *))function;
153 //ust// ltt_statedump_owner = owner;
163 * ltt_module_unregister - LTT module unregistration
166 //ust// void ltt_module_unregister(enum ltt_module_function name)
168 //ust// switch (name) {
169 //ust// case LTT_FUNCTION_RUN_FILTER:
170 //ust// ltt_filter_unregister();
171 //ust// ltt_run_filter_owner = NULL;
172 //ust// /* Wait for preempt sections to finish */
173 //ust// synchronize_sched();
175 //ust// case LTT_FUNCTION_FILTER_CONTROL:
176 //ust// ltt_filter_control_functor = ltt_filter_control_default;
177 //ust// ltt_filter_control_owner = NULL;
179 //ust// case LTT_FUNCTION_STATEDUMP:
180 //ust// ltt_statedump_functor = ltt_statedump_default;
181 //ust// ltt_statedump_owner = NULL;
/*
 * List of registered output transports; populated by
 * ltt_transport_register() and searched by ltt_trace_set_type().
 */
187 static LIST_HEAD(ltt_transport_list
);
190 * ltt_transport_register - LTT transport registration
191 * @transport: transport structure
193 * Registers a transport which can be used as output to extract the data out of
194 * LTTng. The module calling this registration function must ensure that no
195 * trap-inducing code will be executed by the transport functions. E.g.
196 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
197 * is made visible to the transport function. This registration acts as a
198 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
199 * after its registration must it synchronize the TLBs.
201 void ltt_transport_register(struct ltt_transport
*transport
)
204 * Make sure no page fault can be triggered by the module about to be
205 * registered. We deal with this here so we don't have to call
206 * vmalloc_sync_all() in each module's init.
208 //ust// vmalloc_sync_all();
/*
 * NOTE(review): "<t_transport_list" below is an HTML-unescape artifact of
 * "&ltt_transport_list" (the "&lt" prefix was decoded to "<").  The locking
 * calls that presumably bracket this list_add_tail() are also missing from
 * this mangled chunk — restore both from the pristine file.
 */
211 list_add_tail(&transport
->node
, <t_transport_list
);
216 * ltt_transport_unregister - LTT transport unregistration
217 * @transport: transport structure
/*
 * Unlinks @transport from ltt_transport_list.
 * NOTE(review): any locking around the list_del() is not visible in this
 * mangled chunk — confirm against the pristine file.
 */
219 void ltt_transport_unregister(struct ltt_transport
*transport
)
222 list_del(&transport
->node
);
/*
 * is_channel_overwrite - decide whether channel @chan runs in overwrite
 * (flight-recorder) mode under trace mode @mode.  The flight and hybrid
 * modes special-case LTT_CHANNEL_METADATA, which must never be overwritten
 * (its marker info is required to decode the trace — see the setters below).
 * NOTE(review): the switch bodies and return statements are missing from this
 * mangled chunk; the exact per-mode results cannot be confirmed from here.
 */
226 static inline int is_channel_overwrite(enum ltt_channels chan
,
227 enum trace_mode mode
)
230 case LTT_TRACE_NORMAL
:
232 case LTT_TRACE_FLIGHT
:
234 case LTT_CHANNEL_METADATA
:
239 case LTT_TRACE_HYBRID
:
241 case LTT_CHANNEL_METADATA
:
252 * ltt_write_trace_header - Write trace header
253 * @trace: Trace information
254 * @header: Memory address where the information must be written to
/*
 * Fills @header with the tracer magic number and version, the producer's
 * pointer width and alignment, and the trace's start time / clock frequency
 * so readers can decode timestamps.
 */
256 void notrace
ltt_write_trace_header(struct ltt_trace_struct
*trace
,
257 struct ltt_subbuffer_header
*header
)
259 header
->magic_number
= LTT_TRACER_MAGIC_NUMBER
;
260 header
->major_version
= LTT_TRACER_VERSION_MAJOR
;
261 header
->minor_version
= LTT_TRACER_VERSION_MINOR
;
/* Pointer width of the producer, needed by cross-word-size readers. */
262 header
->arch_size
= sizeof(void *);
263 header
->alignment
= ltt_get_alignment();
264 header
->start_time_sec
= trace
->start_time
.tv_sec
;
265 header
->start_time_usec
= trace
->start_time
.tv_usec
;
/* start_freq/freq_scale convert raw timestamps to wall-clock time. */
266 header
->start_freq
= trace
->start_freq
;
267 header
->freq_scale
= trace
->freq_scale
;
/*
 * trace_async_wakeup - wake up readers on each channel of @trace via the
 * transport's wakeup_channel op.
 * NOTE(review): this mangled chunk is missing the loop counter declaration
 * ("int i;") and the condition guarding wakeup_channel() — presumably a
 * per-channel "active/pending data" check; confirm against the pristine file.
 */
270 static void trace_async_wakeup(struct ltt_trace_struct
*trace
)
273 struct ust_channel
*chan
;
275 /* Must check each channel for pending read wakeup */
276 for (i
= 0; i
< trace
->nr_channels
; i
++) {
277 chan
= &trace
->channels
[i
];
279 trace
->ops
->wakeup_channel(chan
);
283 //ust// /* Timer to send async wakeups to the readers */
284 //ust// static void async_wakeup(unsigned long data)
286 //ust// struct ltt_trace_struct *trace;
289 //ust// * PREEMPT_RT does not allow spinlocks to be taken within preempt
290 //ust// * disable sections (spinlock taken in wake_up). However, mainline won't
291 //ust// * allow mutex to be taken in interrupt context. Ugly.
292 //ust// * A proper way to do this would be to turn the timer into a
293 //ust// * periodically woken up thread, but it adds to the footprint.
295 //ust// #ifndef CONFIG_PREEMPT_RT
296 //ust// rcu_read_lock_sched();
298 //ust// ltt_lock_traces();
300 //ust// list_for_each_entry_rcu(trace, <t_traces.head, list) {
301 //ust// trace_async_wakeup(trace);
303 //ust// #ifndef CONFIG_PREEMPT_RT
304 //ust// rcu_read_unlock_sched();
306 //ust// ltt_unlock_traces();
309 //ust// mod_timer(<t_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
313 * _ltt_trace_find - find a trace by given name.
314 * trace_name: trace name
316 * Returns a pointer to the trace structure, NULL if not found.
/*
 * NOTE(review): walks the active-trace list (ltt_traces.head), so it must be
 * called with the traces lock held.  "<t_traces" below is an HTML-unescape
 * artifact of "&ltt_traces".  The "return trace;" / "return NULL;" lines are
 * missing from this mangled chunk.
 */
318 struct ltt_trace_struct
*_ltt_trace_find(const char *trace_name
)
320 struct ltt_trace_struct
*trace
;
322 list_for_each_entry(trace
, <t_traces
.head
, list
)
323 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
329 /* _ltt_trace_find_setup :
330 * find a trace in setup list by given name.
332 * Returns a pointer to the trace structure, NULL if not found.
/*
 * NOTE(review): same shape as _ltt_trace_find() but over the setup list
 * (ltt_traces.setup_head); "<t_traces" below is an HTML-unescape artifact of
 * "&ltt_traces", and the return statements are missing from this chunk.
 */
334 struct ltt_trace_struct
*_ltt_trace_find_setup(const char *trace_name
)
336 struct ltt_trace_struct
*trace
;
338 list_for_each_entry(trace
, <t_traces
.setup_head
, list
)
339 if (!strncmp(trace
->trace_name
, trace_name
, NAME_MAX
))
346 * ltt_release_transport - Release an LTT transport
347 * @kref : reference count on the transport
/*
 * kref release callback for trace->ltt_transport_kref (see kref_put in
 * __ltt_trace_destroy).  The original directory teardown is compiled out
 * for UST below, so this is effectively a no-op here.
 */
349 void ltt_release_transport(struct kref
*kref
)
351 //ust// struct ltt_trace_struct *trace = container_of(kref,
352 //ust// struct ltt_trace_struct, ltt_transport_kref);
353 //ust// trace->ops->remove_dirs(trace);
357 * ltt_release_trace - Release a LTT trace
358 * @kref : reference count on the trace
/*
 * kref release callback for trace->kref: frees the per-trace channel array.
 * NOTE(review): the pristine file presumably also frees the trace structure
 * itself after this — that line is missing from the mangled chunk.
 */
360 void ltt_release_trace(struct kref
*kref
)
362 struct ltt_trace_struct
*trace
= container_of(kref
,
363 struct ltt_trace_struct
, kref
);
364 ltt_channels_trace_free(trace
->channels
);
/*
 * prepare_chan_size_num - round the requested sub-buffer size and count up
 * to the next power of two (1 << get_count_order(x)), then sanity-check:
 * hweight32(x) == 1 means exactly one bit set, i.e. a power of two.
 */
368 static inline void prepare_chan_size_num(unsigned int *subbuf_size
,
369 unsigned int *n_subbufs
)
371 *subbuf_size
= 1 << get_count_order(*subbuf_size
);
372 *n_subbufs
= 1 << get_count_order(*n_subbufs
);
374 /* Subbuf size and number must both be power of two */
375 WARN_ON(hweight32(*subbuf_size
) != 1);
376 WARN_ON(hweight32(*n_subbufs
) != 1);
/*
 * _ltt_trace_setup - allocate a new trace descriptor on the setup list.
 * Rejects a @trace_name already present in either the setup list or the
 * active list, zero-allocates the descriptor, allocates its channel array,
 * forces the metadata channel active and non-overwrite, applies the
 * per-channel-type sub-buffer defaults from chan_infos, and links the trace
 * into ltt_traces.setup_head.
 * NOTE(review): this mangled chunk has lost several interior lines — the
 * "int err", "int metadata_index", "unsigned int chan" declarations, the
 * error assignments/gotos after each ERR(), the NULL check after kzalloc,
 * the "continue;" under the !active test, and the error labels/returns at
 * the end.  "<t_traces" near the bottom is an HTML-unescape artifact of
 * "&ltt_traces".  Restore all of these from the pristine file.
 */
379 int _ltt_trace_setup(const char *trace_name
)
382 struct ltt_trace_struct
*new_trace
= NULL
;
385 enum ltt_channels chantype
;
/* Duplicate-name check against traces still being configured. */
387 if (_ltt_trace_find_setup(trace_name
)) {
388 ERR("Trace name %s already used", trace_name
);
/* Duplicate-name check against already-active traces. */
393 if (_ltt_trace_find(trace_name
)) {
394 ERR("Trace name %s already used", trace_name
);
399 new_trace
= kzalloc(sizeof(struct ltt_trace_struct
), GFP_KERNEL
);
401 ERR("Unable to allocate memory for trace %s", trace_name
);
/* NOTE(review): strncpy may leave trace_name unterminated at NAME_MAX. */
405 strncpy(new_trace
->trace_name
, trace_name
, NAME_MAX
);
406 new_trace
->channels
= ltt_channels_trace_alloc(&new_trace
->nr_channels
,
408 if (!new_trace
->channels
) {
409 ERR("Unable to allocate memory for chaninfo %s\n", trace_name
);
415 * Force metadata channel to active, no overwrite.
417 metadata_index
= ltt_channels_get_index_from_name("metadata");
418 WARN_ON(metadata_index
< 0);
419 new_trace
->channels
[metadata_index
].overwrite
= 0;
420 new_trace
->channels
[metadata_index
].active
= 1;
423 * Set hardcoded tracer defaults for some channels
425 for (chan
= 0; chan
< new_trace
->nr_channels
; chan
++) {
426 if (!(new_trace
->channels
[chan
].active
))
429 chantype
= get_channel_type_from_name(
430 ltt_channels_get_name_from_index(chan
));
431 new_trace
->channels
[chan
].subbuf_size
=
432 chan_infos
[chantype
].def_subbufsize
;
433 new_trace
->channels
[chan
].subbuf_cnt
=
434 chan_infos
[chantype
].def_subbufcount
;
/* Trace stays on the setup list until ltt_trace_alloc() activates it. */
437 list_add(&new_trace
->list
, <t_traces
.setup_head
);
/*
 * ltt_trace_setup - public wrapper around _ltt_trace_setup().
 * NOTE(review): the traces lock/unlock presumably bracketing this call, and
 * the "return ret;", are missing from this mangled chunk.
 */
447 int ltt_trace_setup(const char *trace_name
)
451 ret
= _ltt_trace_setup(trace_name
);
456 /* must be called from within a traces lock. */
/*
 * _ltt_trace_free - unlink a setup-stage trace from its list.
 * NOTE(review): the pristine file presumably also frees the trace after the
 * list_del — that line is missing from this mangled chunk.
 */
457 static void _ltt_trace_free(struct ltt_trace_struct
*trace
)
459 list_del(&trace
->list
);
/*
 * ltt_trace_set_type - bind a setup-stage trace to a registered transport,
 * found by name in ltt_transport_list.
 * NOTE(review): error assignments/returns after the ERR() calls, the loop
 * "break", the "if (!transport)" check, and the locking are all missing from
 * this mangled chunk; "<t_transport_list" is an HTML-unescape artifact of
 * "&ltt_transport_list".
 */
463 int ltt_trace_set_type(const char *trace_name
, const char *trace_type
)
466 struct ltt_trace_struct
*trace
;
467 struct ltt_transport
*tran_iter
, *transport
= NULL
;
/* Only traces still on the setup list may change transport. */
471 trace
= _ltt_trace_find_setup(trace_name
);
473 ERR("Trace not found %s", trace_name
);
478 list_for_each_entry(tran_iter
, <t_transport_list
, node
) {
479 if (!strcmp(tran_iter
->name
, trace_type
)) {
480 transport
= tran_iter
;
485 ERR("Transport %s is not present", trace_type
);
490 trace
->transport
= transport
;
/*
 * ltt_trace_set_channel_subbufsize - set the per-channel sub-buffer size on
 * a setup-stage trace (later rounded to a power of two by
 * prepare_chan_size_num() in ltt_trace_alloc()).
 * NOTE(review): the "int index"/"int err" declarations, error returns after
 * the ERR() calls, and the locking are missing from this mangled chunk.
 */
497 int ltt_trace_set_channel_subbufsize(const char *trace_name
,
498 const char *channel_name
, unsigned int size
)
501 struct ltt_trace_struct
*trace
;
506 trace
= _ltt_trace_find_setup(trace_name
);
508 ERR("Trace not found %s", trace_name
);
513 index
= ltt_channels_get_index_from_name(channel_name
);
515 ERR("Channel %s not found", channel_name
);
519 trace
->channels
[index
].subbuf_size
= size
;
/*
 * ltt_trace_set_channel_subbufcount - set the per-channel sub-buffer count
 * on a setup-stage trace; mirrors ltt_trace_set_channel_subbufsize above.
 * NOTE(review): declarations, error returns, and locking are missing from
 * this mangled chunk.
 */
526 int ltt_trace_set_channel_subbufcount(const char *trace_name
,
527 const char *channel_name
, unsigned int cnt
)
530 struct ltt_trace_struct
*trace
;
535 trace
= _ltt_trace_find_setup(trace_name
);
537 ERR("Trace not found %s", trace_name
);
542 index
= ltt_channels_get_index_from_name(channel_name
);
544 ERR("Channel %s not found", channel_name
);
548 trace
->channels
[index
].subbuf_cnt
= cnt
;
/*
 * ltt_trace_set_channel_enable - enable/disable a channel on a setup-stage
 * trace.  The metadata channel may never be disabled: its marker info is
 * required to read the trace at all.
 * NOTE(review): declarations, error returns after the ERR() calls, and
 * locking are missing from this mangled chunk.
 */
555 int ltt_trace_set_channel_enable(const char *trace_name
,
556 const char *channel_name
, unsigned int enable
)
559 struct ltt_trace_struct
*trace
;
564 trace
= _ltt_trace_find_setup(trace_name
);
566 ERR("Trace not found %s", trace_name
);
572 * Datas in metadata channel(marker info) is necessary to be able to
573 * read the trace, we always enable this channel.
575 if (!enable
&& !strcmp(channel_name
, "metadata")) {
576 ERR("Trying to disable metadata channel");
581 index
= ltt_channels_get_index_from_name(channel_name
);
583 ERR("Channel %s not found", channel_name
);
588 trace
->channels
[index
].active
= enable
;
/*
 * ltt_trace_set_channel_overwrite - set a channel's overwrite
 * (flight-recorder) mode on a setup-stage trace.  The metadata channel must
 * stay non-overwrite, for the reason spelled out in the original comment
 * below.
 * NOTE(review): declarations, error returns after the ERR() calls, and
 * locking are missing from this mangled chunk.
 */
595 int ltt_trace_set_channel_overwrite(const char *trace_name
,
596 const char *channel_name
, unsigned int overwrite
)
599 struct ltt_trace_struct
*trace
;
604 trace
= _ltt_trace_find_setup(trace_name
);
606 ERR("Trace not found %s", trace_name
);
612 * Always put the metadata channel in non-overwrite mode :
613 * This is a very low traffic channel and it can't afford to have its
614 * data overwritten : this data (marker info) is necessary to be
615 * able to read the trace.
617 if (overwrite
&& !strcmp(channel_name
, "metadata")) {
618 ERR("Trying to set metadata channel to overwrite mode");
623 index
= ltt_channels_get_index_from_name(channel_name
);
625 ERR("Channel %s not found", channel_name
);
630 trace
->channels
[index
].overwrite
= overwrite
;
/*
 * ltt_trace_alloc - activate a setup-stage trace: initialize its refcounts,
 * record the start clock state, create every active channel through the
 * bound transport's ops, then move the trace from the setup list to the
 * active RCU list (ltt_traces.head).  On channel-creation failure, unwinds
 * the channels created so far.
 * NOTE(review): this mangled chunk is missing the "int err"/"int chan"
 * declarations, the locking, the NULL check after _ltt_trace_find_setup,
 * the err assignments before the gotos, the create_channel() trailing
 * arguments (subbuf_size/subbuf_cnt), the "continue;" under the !active
 * test, the success return, and the final error labels/returns.  The
 * "<t_traces" occurrences are HTML-unescape artifacts of "&ltt_traces".
 * Restore all of these from the pristine file.
 */
637 int ltt_trace_alloc(const char *trace_name
)
640 struct ltt_trace_struct
*trace
;
641 unsigned int subbuf_size
, subbuf_cnt
;
642 //ust// unsigned long flags;
644 const char *channel_name
;
/* Only a trace still on the setup list can be allocated/activated. */
648 trace
= _ltt_trace_find_setup(trace_name
);
650 ERR("Trace not found %s", trace_name
);
/* Two refcounts: one for the trace, one pinning the transport. */
655 kref_init(&trace
->kref
);
656 kref_init(&trace
->ltt_transport_kref
);
657 //ust// init_waitqueue_head(&trace->kref_wq);
659 //ust// get_trace_clock();
660 trace
->freq_scale
= trace_clock_freq_scale();
/* A transport must have been bound via ltt_trace_set_type() first. */
662 if (!trace
->transport
) {
663 ERR("Transport is not set");
665 goto transport_error
;
667 //ust// if (!try_module_get(trace->transport->owner)) {
668 //ust// ERR("Can't lock transport module");
669 //ust// err = -ENODEV;
670 //ust// goto transport_error;
672 trace
->ops
= &trace
->transport
->ops
;
674 //ust// err = trace->ops->create_dirs(trace);
676 //ust// ERR("Can't create dir for trace %s", trace_name);
677 //ust// goto dirs_error;
680 //ust// local_irq_save(flags);
/* Snapshot the clock state used by ltt_write_trace_header(). */
681 trace
->start_freq
= trace_clock_frequency();
682 trace
->start_tsc
= trace_clock_read64();
683 gettimeofday(&trace
->start_time
, NULL
); //ust// changed
684 //ust// local_irq_restore(flags);
/* Create the buffers of every active channel via the transport. */
686 for (chan
= 0; chan
< trace
->nr_channels
; chan
++) {
687 if (!(trace
->channels
[chan
].active
))
690 channel_name
= ltt_channels_get_name_from_index(chan
);
691 WARN_ON(!channel_name
);
692 subbuf_size
= trace
->channels
[chan
].subbuf_size
;
693 subbuf_cnt
= trace
->channels
[chan
].subbuf_cnt
;
/* Geometry is normalized to powers of two before channel creation. */
694 prepare_chan_size_num(&subbuf_size
, &subbuf_cnt
);
695 err
= trace
->ops
->create_channel(trace_name
, trace
,
697 &trace
->channels
[chan
],
700 trace
->channels
[chan
].overwrite
);
702 ERR("Cannot create channel %s", channel_name
);
703 goto create_channel_error
;
/* Move the trace from the setup list to the active (RCU) list. */
707 list_del(&trace
->list
);
708 //ust// if (list_empty(<t_traces.head)) {
709 //ust// mod_timer(<t_async_wakeup_timer,
710 //ust// jiffies + LTT_PERCPU_TIMER_INTERVAL);
711 //ust// set_kernel_trace_flag_all_tasks();
713 list_add_rcu(&trace
->list
, <t_traces
.head
);
714 //ust// synchronize_sched();
/*
 * Error unwind: tear down the channels created before the failure.
 * NOTE(review): "chan--" before the test requires chan to be a signed int
 * in the (missing) declaration — confirm against the pristine file.
 */
720 create_channel_error
:
721 for (chan
--; chan
>= 0; chan
--)
722 if (trace
->channels
[chan
].active
)
723 trace
->ops
->remove_channel(&trace
->channels
[chan
]);
726 //ust// module_put(trace->transport->owner);
728 //ust// put_trace_clock();
735 * It is worked as a wrapper for current version of ltt_control.ko.
736 * We will make a new ltt_control based on debugfs, and control each channel's
739 //ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
740 //ust// enum trace_mode mode,
741 //ust// unsigned int subbuf_size_low, unsigned int n_subbufs_low,
742 //ust// unsigned int subbuf_size_med, unsigned int n_subbufs_med,
743 //ust// unsigned int subbuf_size_high, unsigned int n_subbufs_high)
747 //ust// err = ltt_trace_setup(trace_name);
748 //ust// if (IS_ERR_VALUE(err))
751 //ust// err = ltt_trace_set_type(trace_name, trace_type);
752 //ust// if (IS_ERR_VALUE(err))
755 //ust// err = ltt_trace_alloc(trace_name);
756 //ust// if (IS_ERR_VALUE(err))
762 /* Must be called while sure that trace is in the list. */
/*
 * _ltt_trace_destroy - non-sleeping part of trace teardown: refuses to
 * destroy an active trace, then removes it from the active RCU list.
 * NOTE(review): the active-check condition guarding the ERR() below, the
 * error return, and the function's tail are missing from this mangled
 * chunk; "<t_traces" is an HTML-unescape artifact of "&ltt_traces".
 */
763 static int _ltt_trace_destroy(struct ltt_trace_struct
*trace
)
772 ERR("Can't destroy trace %s : tracer is active", trace
->trace_name
);
776 /* Everything went fine */
777 list_del_rcu(&trace
->list
);
/* Last trace gone: stop the (compiled-out) async reader wakeup timer. */
779 if (list_empty(<t_traces
.head
)) {
780 //ust// clear_kernel_trace_flag_all_tasks();
782 * We stop the asynchronous delivery of reader wakeup, but
783 * we must make one last check for reader wakeups pending
784 * later in __ltt_trace_destroy.
786 //ust// del_timer_sync(<t_async_wakeup_timer);
796 /* Sleepable part of the destroy */
/*
 * __ltt_trace_destroy - sleepable part of trace teardown: finish each
 * channel, then (in the pristine flow) deliver the final wakeup, remove the
 * channels, and drop both refcounts.
 * NOTE(review): the bare "return;" marked FIXME below makes everything after
 * the first loop dead code in this UST port — the final wakeup, channel
 * removal, and kref_put calls never run here.  The "int i;" declaration is
 * also missing from this mangled chunk.
 */
797 static void __ltt_trace_destroy(struct ltt_trace_struct
*trace
)
800 struct ust_channel
*chan
;
802 for (i
= 0; i
< trace
->nr_channels
; i
++) {
803 chan
= &trace
->channels
[i
];
805 trace
->ops
->finish_channel(chan
);
/* Early exit: the remainder of this function is currently unreachable. */
808 return; /* FIXME: temporary for ust */
809 //ust// flush_scheduled_work();
812 * The currently destroyed trace is not in the trace list anymore,
813 * so it's safe to call the async wakeup ourself. It will deliver
814 * the last subbuffers.
816 trace_async_wakeup(trace
);
818 for (i
= 0; i
< trace
->nr_channels
; i
++) {
819 chan
= &trace
->channels
[i
];
821 trace
->ops
->remove_channel(chan
);
/* Drop the transport pin; ltt_release_transport runs at refcount zero. */
824 kref_put(&trace
->ltt_transport_kref
, ltt_release_transport
);
826 //ust// module_put(trace->transport->owner);
829 * Wait for lttd readers to release the files, therefore making sure
830 * the last subbuffers have been read.
832 //ust// if (atomic_read(&trace->kref.refcount) > 1) {
834 //ust// __wait_event_interruptible(trace->kref_wq,
835 //ust// (atomic_read(&trace->kref.refcount) == 1), ret);
/* Drop the trace reference; ltt_release_trace frees the channels. */
837 kref_put(&trace
->kref
, ltt_release_trace
);
/*
 * ltt_trace_destroy - public teardown entry point: destroy an active trace
 * (non-sleeping then sleepable phases), or, failing that, free a trace that
 * is still on the setup list.
 * NOTE(review): the declarations, locking, NULL checks on both lookups, and
 * the returns between these calls are missing from this mangled chunk —
 * restore from the pristine file.
 */
840 int ltt_trace_destroy(const char *trace_name
)
843 struct ltt_trace_struct
*trace
;
847 trace
= _ltt_trace_find(trace_name
);
849 err
= _ltt_trace_destroy(trace
);
855 __ltt_trace_destroy(trace
);
856 //ust// put_trace_clock();
/* Not active: fall back to freeing a setup-stage trace of that name. */
861 trace
= _ltt_trace_find_setup(trace_name
);
863 _ltt_trace_free(trace
);
876 /* must be called from within a traces lock. */
/*
 * _ltt_trace_start - mark a trace active and bump the global active-trace
 * counter that tracepoints read without protection.
 * NOTE(review): the NULL/already-active checks guarding the DBG() below, the
 * "trace->active = 1" style assignment, and the returns are missing from
 * this mangled chunk.
 */
877 static int _ltt_trace_start(struct ltt_trace_struct
*trace
)
886 DBG("Tracing already active for trace %s", trace
->trace_name
);
887 //ust// if (!try_module_get(ltt_run_filter_owner)) {
888 //ust// err = -ENODEV;
889 //ust// ERR("Cannot lock filter module");
890 //ust// goto get_ltt_run_filter_error;
893 /* Read by trace points without protection : be careful */
894 ltt_traces
.num_active_traces
++;
898 //ust// get_ltt_run_filter_error:
/*
 * ltt_trace_start - public start entry point: find the active-list trace,
 * start it, then dump the marker state and invoke the registered state-dump
 * functor so the trace is self-describing.
 * NOTE(review): the declarations, locking, NULL check after the lookup, and
 * the returns are missing from this mangled chunk.
 */
903 int ltt_trace_start(const char *trace_name
)
906 struct ltt_trace_struct
*trace
;
910 trace
= _ltt_trace_find(trace_name
);
911 err
= _ltt_trace_start(trace
);
918 * Call the kernel state dump.
919 * Events will be mixed with real kernel events, it's ok.
920 * Notice that there is no protection on the trace : that's exactly
921 * why we iterate on the list and check for trace equality instead of
922 * directly using this trace handle inside the logging function.
925 ltt_dump_marker_state(trace
);
927 //ust// if (!try_module_get(ltt_statedump_owner)) {
928 //ust// err = -ENODEV;
929 //ust// ERR("Cannot lock state dump module");
931 ltt_statedump_functor(trace
);
932 //ust// module_put(ltt_statedump_owner);
943 /* must be called from within traces lock */
/*
 * _ltt_trace_stop - mark a trace inactive and decrement the global
 * active-trace counter; counterpart of _ltt_trace_start above.
 * NOTE(review): the NULL/not-active checks guarding the DBG() below, the
 * "trace->active = 0" style assignment, and the returns are missing from
 * this mangled chunk.
 */
944 static int _ltt_trace_stop(struct ltt_trace_struct
*trace
)
953 DBG("LTT : Tracing not active for trace %s", trace
->trace_name
);
956 ltt_traces
.num_active_traces
--;
957 //ust// synchronize_sched(); /* Wait for each tracing to be finished */
959 //ust// module_put(ltt_run_filter_owner);
960 /* Everything went fine */
/*
 * ltt_trace_stop - public stop entry point; mirrors ltt_trace_start.
 * NOTE(review): declarations, locking, NULL check after the lookup, and the
 * return are missing from this mangled chunk.
 */
968 int ltt_trace_stop(const char *trace_name
)
971 struct ltt_trace_struct
*trace
;
974 trace
= _ltt_trace_find(trace_name
);
975 err
= _ltt_trace_stop(trace
);
981 * ltt_filter_control - Trace filter control in-kernel API
982 * @msg: Action to perform on the filter
983 * @trace_name: Trace on which the action must be done
/*
 * Proxies filter-control requests to the registered
 * ltt_filter_control_functor for the named active trace.
 * NOTE(review): the declarations, the lock acquisition matching the
 * ltt_unlock_traces() at the bottom, the NULL-trace error path, the switch
 * header, the "break"s/default case, and the final return are all missing
 * from this mangled chunk — restore from the pristine file.
 */
985 int ltt_filter_control(enum ltt_filter_control_msg msg
, const char *trace_name
)
988 struct ltt_trace_struct
*trace
;
990 DBG("ltt_filter_control : trace %s", trace_name
);
992 trace
= _ltt_trace_find(trace_name
);
994 ERR("Trace does not exist. Cannot proxy control request");
998 //ust// if (!try_module_get(ltt_filter_control_owner)) {
999 //ust// err = -ENODEV;
1000 //ust// goto get_module_error;
/* Both cases currently forward identically to the registered functor. */
1003 case LTT_FILTER_DEFAULT_ACCEPT
:
1004 DBG("Proxy filter default accept %s", trace_name
);
1005 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1007 case LTT_FILTER_DEFAULT_REJECT
:
1008 DBG("Proxy filter default reject %s", trace_name
);
1009 err
= (*ltt_filter_control_functor
)(msg
, trace
);
1014 //ust// module_put(ltt_filter_control_owner);
1016 //ust// get_module_error:
1018 ltt_unlock_traces();