/*
 * tracer.c
 *
 * (C) Copyright 2005-2008 -
 * 		Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Inspired from LTT :
 *	Karim Yaghmour (karim@opersys.com)
 *	Tom Zanussi (zanussi@us.ibm.com)
 *	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *	22/09/06, Move to the marker/probes mechanism.
 *	19/10/05, Complete lockless mechanism.
 *	27/05/05, Modular redesign and rewrite.
 */

#include <urcu-bp.h>
#include <urcu/rculist.h>

#include <ust/kernelcompat.h>
#include "tracercore.h"
#include "tracer.h"
#include "usterr.h"

//ust// static void async_wakeup(unsigned long data);
//ust//
//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);

/* Default callbacks for modules */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ust_trace *trace)
{
	return 0;
}

int ltt_statedump_default(struct ust_trace *trace)
{
	return 0;
}

/* Callbacks for registered modules */

int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ust_trace *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
int (*ltt_statedump_functor)(struct ust_trace *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;

struct chan_info_struct {
	const char *name;
	unsigned int def_subbufsize;
	unsigned int def_subbufcount;
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};

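/*
 * Map a channel name to its entry in chan_infos[].  NULL or unknown names
 * fall back to the general-purpose UST channel.
 */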
static enum ltt_channels get_channel_type_from_name(const char *name)
{
	int i;

	if (!name)
		return LTT_CHANNEL_UST;

	for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
		if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
			return (enum ltt_channels)i;

	return LTT_CHANNEL_UST;
}

/**
 * ltt_module_register - LTT module registration
 * @name: module type
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible to
 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
 * the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
//ust// int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 		struct module *owner)
//ust// {
//ust// 	int ret = 0;
//ust//
//ust// 	/*
//ust// 	 * Make sure no page fault can be triggered by the module about to be
//ust// 	 * registered. We deal with this here so we don't have to call
//ust// 	 * vmalloc_sync_all() in each module's init.
//ust// 	 */
//ust// 	vmalloc_sync_all();
//ust//
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		if (ltt_run_filter_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_register((ltt_run_filter_functor)function);
//ust// 		ltt_run_filter_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		if (ltt_filter_control_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_control_functor =
//ust// 			(int (*)(enum ltt_filter_control_msg,
//ust// 			struct ust_trace *))function;
//ust// 		ltt_filter_control_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		if (ltt_statedump_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_statedump_functor =
//ust// 			(int (*)(struct ust_trace *))function;
//ust// 		ltt_statedump_owner = owner;
//ust// 		break;
//ust// 	}
//ust//
//ust// end:
//ust//
//ust// 	return ret;
//ust// }

/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 */
//ust// void ltt_module_unregister(enum ltt_module_function name)
//ust// {
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		ltt_filter_unregister();
//ust// 		ltt_run_filter_owner = NULL;
//ust// 		/* Wait for preempt sections to finish */
//ust// 		synchronize_sched();
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		ltt_filter_control_functor = ltt_filter_control_default;
//ust// 		ltt_filter_control_owner = NULL;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		ltt_statedump_functor = ltt_statedump_default;
//ust// 		ltt_statedump_owner = NULL;
//ust// 		break;
//ust// 	}
//ust//
//ust// }

static LIST_HEAD(ltt_transport_list);

/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	//ust// vmalloc_sync_all();

	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
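
/*
 * Usage sketch (not part of this file): a transport typically declares a
 * static struct ltt_transport with at least its .name and .ops filled in
 * and registers it once at init time, e.g.:
 *
 *	static struct ltt_transport my_transport = {
 *		.name = "my-transport",
 *		.ops  = { ... transport callbacks ... },
 *	};
 *	ltt_transport_register(&my_transport);
 *
 * The .name field is what ltt_trace_set_type() matches against; the
 * transport name and field initializers above are only illustrative.
 */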

/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}

static inline int is_channel_overwrite(enum ltt_channels chan,
	enum trace_mode mode)
{
	switch (mode) {
	case LTT_TRACE_NORMAL:
		return 0;
	case LTT_TRACE_FLIGHT:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	case LTT_TRACE_HYBRID:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	default:
		return 0;
	}
}

static void trace_async_wakeup(struct ust_trace *trace)
{
	int i;
	struct ust_channel *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}

//ust// /* Timer to send async wakeups to the readers */
//ust// static void async_wakeup(unsigned long data)
//ust// {
//ust// 	struct ust_trace *trace;
//ust//
//ust// 	/*
//ust// 	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
//ust// 	 * disable sections (spinlock taken in wake_up). However, mainline won't
//ust// 	 * allow mutex to be taken in interrupt context. Ugly.
//ust// 	 * A proper way to do this would be to turn the timer into a
//ust// 	 * periodically woken up thread, but it adds to the footprint.
//ust// 	 */
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_lock_sched();
//ust// #else
//ust// 	ltt_lock_traces();
//ust// #endif
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
//ust// 		trace_async_wakeup(trace);
//ust// 	}
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_unlock_sched();
//ust// #else
//ust// 	ltt_unlock_traces();
//ust// #endif
//ust//
//ust// 	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// }

/**
 * _ltt_trace_find - find a trace by given name.
 * trace_name: trace name
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ust_trace *_ltt_trace_find(const char *trace_name)
{
	struct ust_trace *trace;

	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}

/* _ltt_trace_find_setup :
 * find a trace in setup list by given name.
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
{
	struct ust_trace *trace;

	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
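
/*
 * Note: both lookup helpers above are expected to run with the traces lock
 * held; every caller in this file takes ltt_lock_traces() first, since the
 * lists they walk are modified under that lock.
 */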

/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
void ltt_release_transport(struct kref *kref)
{
//ust// 	struct ust_trace *trace = container_of(kref,
//ust// 			struct ust_trace, ltt_transport_kref);
//ust// 	trace->ops->remove_dirs(trace);
}

/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
void ltt_release_trace(struct kref *kref)
{
	struct ust_trace *trace = container_of(kref,
			struct ust_trace, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}

static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	/* Make sure the subbuffer size is at least a page */
	*subbuf_size = max_t(unsigned int, *subbuf_size, PAGE_SIZE);

	/* round to next power of 2 */
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
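
/*
 * Example (assuming 4 KiB pages): a request for a 5000-byte subbuffer with
 * 3 subbuffers becomes an 8192-byte subbuffer (next power of two above 5000)
 * with 4 subbuffers; a 1000-byte request is first bumped to PAGE_SIZE and
 * ends up at exactly 4096 bytes.
 */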

int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ust_trace *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	if (_ltt_trace_find_setup(trace_name)) {
		ERR("Trace name %s already used", trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		ERR("Trace name %s already used", trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ust_trace), GFP_KERNEL);
	if (!new_trace) {
		ERR("Unable to allocate memory for trace %s", trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						0, 1);
	if (!new_trace->channels) {
		ERR("Unable to allocate memory for chaninfo %s\n", trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].subbuf_size =
			chan_infos[chantype].def_subbufsize;
		new_trace->channels[chan].subbuf_cnt =
			chan_infos[chantype].def_subbufcount;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}


int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
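
/*
 * Typical control sequence (sketch; uses only functions defined in this
 * file, error handling omitted, and the trace name, transport name, channel
 * name and buffer geometry are purely illustrative values):
 *
 *	ltt_trace_setup("auto");
 *	ltt_trace_set_type("auto", "ustrelay");
 *	ltt_trace_set_channel_subbufsize("auto", "ust", 16384);
 *	ltt_trace_set_channel_subbufcount("auto", "ust", 8);
 *	ltt_trace_alloc("auto");
 *	ltt_trace_start("auto");
 *	...
 *	ltt_trace_stop("auto");
 *	ltt_trace_destroy("auto");
 */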

/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ust_trace *trace)
{
	list_del(&trace->list);
	kfree(trace);
}

int ltt_trace_set_type(const char *trace_name, const char *trace_type)
{
	int err = 0;
	struct ust_trace *trace;
	struct ltt_transport *tran_iter, *transport = NULL;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
		if (!strcmp(tran_iter->name, trace_type)) {
			transport = tran_iter;
			break;
		}
	}
	if (!transport) {
		ERR("Transport %s is not present", trace_type);
		err = -EINVAL;
		goto traces_error;
	}

	trace->transport = transport;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size)
{
	int err = 0;
	struct ust_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_size = size;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt)
{
	int err = 0;
	struct ust_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_cnt = cnt;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ust_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The data in the metadata channel (marker info) is necessary to be
	 * able to read the trace, so we always keep this channel enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		ERR("Trying to disable metadata channel");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ust_trace *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode: this is a
	 * very low-traffic channel and it cannot afford to have its data
	 * overwritten, since this data (marker info) is necessary to be able
	 * to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		ERR("Trying to set metadata channel to overwrite mode");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		ERR("Channel %s not found", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}

int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;
	unsigned int subbuf_size, subbuf_cnt;
//ust//	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	if (_ltt_trace_find(trace_name)) { /* Trace already allocated */
		err = 1;
		goto traces_error;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		ERR("Trace not found %s", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
//ust//	init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
//ust//	get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		ERR("Transport is not set");
		err = -EINVAL;
		goto transport_error;
	}
//ust//	if (!try_module_get(trace->transport->owner)) {
//ust//		ERR("Can't lock transport module");
//ust//		err = -ENODEV;
//ust//		goto transport_error;
//ust//	}
	trace->ops = &trace->transport->ops;

//ust//	err = trace->ops->create_dirs(trace);
//ust//	if (err) {
//ust//		ERR("Can't create dir for trace %s", trace_name);
//ust//		goto dirs_error;
//ust//	}

//ust//	local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed /* FIXME: is this ok? */
//ust//	local_irq_restore(flags);

	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			ERR("Cannot create channel %s", channel_name);
			goto create_channel_error;
		}
	}

	list_del(&trace->list);
//ust//	if (list_empty(&ltt_traces.head)) {
//ust//		mod_timer(&ltt_async_wakeup_timer,
//ust//			jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust//		set_kernel_trace_flag_all_tasks();
//ust//	}
	list_add_rcu(&trace->list, &ltt_traces.head);
//ust//	synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

//ust// dirs_error:
//ust//	module_put(trace->transport->owner);
transport_error:
//ust//	put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}

/*
 * This acts as a wrapper for the current version of ltt_control.ko.
 * We will write a new ltt_control based on debugfs and control each
 * channel's buffer.
 */
//ust// static int ltt_trace_create(const char *trace_name, const char *trace_type,
//ust// 		enum trace_mode mode,
//ust// 		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
//ust// 		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
//ust// 		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
//ust// {
//ust// 	int err = 0;
//ust//
//ust// 	err = ltt_trace_setup(trace_name);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
//ust//
//ust// 	err = ltt_trace_set_type(trace_name, trace_type);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
//ust//
//ust// 	err = ltt_trace_alloc(trace_name);
//ust// 	if (IS_ERR_VALUE(err))
//ust// 		return err;
//ust//
//ust// 	return err;
//ust// }

/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ust_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		ERR("Can't destroy trace %s : tracer is active", trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	list_del_rcu(&trace->list);
	synchronize_rcu();
	if (list_empty(&ltt_traces.head)) {
//ust//		clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
//ust//		del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}
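
/*
 * Note on the removal above: the trace is unlinked with list_del_rcu() and
 * synchronize_rcu() waits for any RCU read-side critical sections that may
 * still be traversing ltt_traces.head to complete before the sleepable
 * teardown in __ltt_trace_destroy() proceeds.
 */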

/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ust_trace *trace)
{
	int i;
	struct ust_channel *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	return; /* FIXME: temporary for ust */
//ust//	flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

//ust//	module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
//ust//	if (atomic_read(&trace->kref.refcount) > 1) {
//ust//		int ret = 0;
//ust//		__wait_event_interruptible(trace->kref_wq,
//ust//			(atomic_read(&trace->kref.refcount) == 1), ret);
//ust//	}
	kref_put(&trace->kref, ltt_release_trace);
}

int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
//ust//		put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}

/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ust_trace *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		DBG("Tracing already active for trace %s", trace->trace_name);
//ust//	if (!try_module_get(ltt_run_filter_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock filter module");
//ust//		goto get_ltt_run_filter_error;
//ust//	}
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
//ust// get_ltt_run_filter_error:
traces_error:
	return err;
}

int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events; that's OK.
	 * Notice that there is no protection on the trace: that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

//ust//	if (!try_module_get(ltt_statedump_owner)) {
//ust//		err = -ENODEV;
//ust//		ERR("Cannot lock state dump module");
//ust//	} else {
	ltt_statedump_functor(trace);
//ust//		module_put(ltt_statedump_owner);
//ust//	}

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}

/* must be called from within traces lock */
static int _ltt_trace_stop(struct ust_trace *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		DBG("LTT : Tracing not active for trace %s", trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
//ust//		synchronize_sched(); /* Wait for each tracing to be finished */
	}
//ust//	module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}

int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ust_trace *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}

/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ust_trace *trace;

	DBG("ltt_filter_control : trace %s", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		ERR("Trace does not exist. Cannot proxy control request");
		err = -ENOENT;
		goto trace_error;
	}
//ust//	if (!try_module_get(ltt_filter_control_owner)) {
//ust//		err = -ENODEV;
//ust//		goto get_module_error;
//ust//	}
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		DBG("Proxy filter default accept %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		DBG("Proxy filter default reject %s", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
//ust//	module_put(ltt_filter_control_owner);

//ust// get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
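
/*
 * Usage sketch (the trace name is only an illustrative value):
 *
 *	if (ltt_filter_control(LTT_FILTER_DEFAULT_ACCEPT, "auto"))
 *		ERR("Could not set default-accept filter on trace %s", "auto");
 */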