[lttng-ust.git] / libtracing / tracer.h
1 /*
2 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
3 *
4 * This contains the definitions for the Linux Trace Toolkit tracer.
5 */
6
7 #ifndef _LTT_TRACER_H
8 #define _LTT_TRACER_H
9
10 #include <stdarg.h>
11 #include <linux/types.h>
12 #include <linux/limits.h>
13 #include <linux/list.h>
14 #include <linux/cache.h>
15 #include <linux/kernel.h>
16 #include <linux/timex.h>
17 #include <linux/wait.h>
18 #include <linux/ltt-relay.h>
19 #include <linux/ltt-channels.h>
20 #include <linux/ltt-core.h>
21 #include <linux/marker.h>
22 #include <linux/trace-clock.h>
23 #include <asm/atomic.h>
24 #include <asm/local.h>
25
26 /* Number of bytes to log with a read/write event */
27 #define LTT_LOG_RW_SIZE 32L
28
29 /* Interval (in jiffies) at which the LTT per-CPU timer fires */
30 #define LTT_PERCPU_TIMER_INTERVAL 1
31
32 #ifndef LTT_ARCH_TYPE
33 #define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
34 #endif
35
36 #ifndef LTT_ARCH_VARIANT
37 #define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
38 #endif
39
40 struct ltt_active_marker;
41
42 /* Maximum number of callbacks per marker */
43 #define LTT_NR_CALLBACKS 10
44
45 struct ltt_serialize_closure;
46 struct ltt_probe_private_data;
47
48 /* Serialization callback '%k' */
49 typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
50 struct ltt_serialize_closure *closure,
51 void *serialize_private, int *largest_align,
52 const char *fmt, va_list *args);
53
54 struct ltt_serialize_closure {
55 ltt_serialize_cb *callbacks;
56 long cb_args[LTT_NR_CALLBACKS];
57 unsigned int cb_idx;
58 };
59
60 size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
61 struct ltt_serialize_closure *closure,
62 void *serialize_private,
63 int *largest_align, const char *fmt, va_list *args);
64
65 struct ltt_available_probe {
66 const char *name; /* probe name */
67 const char *format;
68 marker_probe_func *probe_func;
69 ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
70 struct list_head node; /* registered probes list */
71 };
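/*
 * Example (illustrative sketch, not part of the original header): registering
 * an available probe that relies on the generic serializer, then connecting
 * it to a marker. The channel and marker names are placeholders, and the
 * layout mirrors what a "default" probe would look like.
 *
 *	static struct ltt_available_probe default_probe = {
 *		.name = "default",
 *		.format = NULL,
 *		.probe_func = ltt_vtrace,
 *		.callbacks[0] = ltt_serialize_data,
 *	};
 *
 *	err = ltt_probe_register(&default_probe);
 *	if (!err)
 *		err = ltt_marker_connect("kernel", "some_event", "default");
 */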
72
73 struct ltt_probe_private_data {
74 struct ltt_trace_struct *trace; /*
75 * Target trace, for metadata
76 * or statedump.
77 */
78 ltt_serialize_cb serializer; /*
79 * Serialization function override.
80 */
81 void *serialize_private; /*
82 * Private data for serialization
83 * functions.
84 */
85 };
86
87 enum ltt_channels {
88 LTT_CHANNEL_METADATA,
89 LTT_CHANNEL_FD_STATE,
90 LTT_CHANNEL_GLOBAL_STATE,
91 LTT_CHANNEL_IRQ_STATE,
92 LTT_CHANNEL_MODULE_STATE,
93 LTT_CHANNEL_NETIF_STATE,
94 LTT_CHANNEL_SOFTIRQ_STATE,
95 LTT_CHANNEL_SWAP_STATE,
96 LTT_CHANNEL_SYSCALL_STATE,
97 LTT_CHANNEL_TASK_STATE,
98 LTT_CHANNEL_VM_STATE,
99 LTT_CHANNEL_FS,
100 LTT_CHANNEL_INPUT,
101 LTT_CHANNEL_IPC,
102 LTT_CHANNEL_KERNEL,
103 LTT_CHANNEL_MM,
104 LTT_CHANNEL_RCU,
105 LTT_CHANNEL_DEFAULT,
106 };
107
108 struct ltt_active_marker {
109 struct list_head node; /* active markers list */
110 const char *channel;
111 const char *name;
112 const char *format;
113 struct ltt_available_probe *probe;
114 };
115
116 extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
117 void *call_data, const char *fmt, va_list *args);
118 extern void ltt_trace(const struct marker *mdata, void *probe_data,
119 void *call_data, const char *fmt, ...);
120
121 /*
122 * Unique ID assigned to each registered probe.
123 */
124 enum marker_id {
125 MARKER_ID_SET_MARKER_ID = 0, /* Static IDs available (range 0-7) */
126 MARKER_ID_SET_MARKER_FORMAT,
127 MARKER_ID_COMPACT, /* Compact IDs (range: 8-127) */
128 MARKER_ID_DYNAMIC, /* Dynamic IDs (range: 128-65535) */
129 };
130
131 /* static ids 0-1 reserved for internal use. */
132 #define MARKER_CORE_IDS 2
133 static inline enum marker_id marker_id_type(uint16_t id)
134 {
135 if (id < MARKER_CORE_IDS)
136 return (enum marker_id)id;
137 else
138 return MARKER_ID_DYNAMIC;
139 }
140
141 #ifdef CONFIG_LTT
142
143 struct user_dbg_data {
144 unsigned long avail_size;
145 unsigned long write;
146 unsigned long read;
147 };
148
149 struct ltt_trace_ops {
150 /* First 32 bytes cache-hot cacheline */
151 int (*reserve_slot) (struct ltt_trace_struct *trace,
152 struct ltt_channel_struct *channel,
153 void **transport_data, size_t data_size,
154 size_t *slot_size, long *buf_offset, u64 *tsc,
155 unsigned int *rflags,
156 int largest_align,
157 int cpu);
158 void (*commit_slot) (struct ltt_channel_struct *channel,
159 void **transport_data, long buf_offset,
160 size_t slot_size);
161 void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
162 int (*user_blocking) (struct ltt_trace_struct *trace,
163 unsigned int index, size_t data_size,
164 struct user_dbg_data *dbg);
165 /* End of first 32 bytes cacheline */
166 int (*create_dirs) (struct ltt_trace_struct *new_trace);
167 void (*remove_dirs) (struct ltt_trace_struct *new_trace);
168 int (*create_channel) (const char *trace_name,
169 struct ltt_trace_struct *trace,
170 struct dentry *dir, const char *channel_name,
171 struct ltt_channel_struct *ltt_chan,
172 unsigned int subbuf_size,
173 unsigned int n_subbufs, int overwrite);
174 void (*finish_channel) (struct ltt_channel_struct *channel);
175 void (*remove_channel) (struct ltt_channel_struct *channel);
176 void (*user_errors) (struct ltt_trace_struct *trace,
177 unsigned int index, size_t data_size,
178 struct user_dbg_data *dbg, int cpu);
179 #ifdef CONFIG_HOTPLUG_CPU
180 int (*handle_cpuhp) (struct notifier_block *nb,
181 unsigned long action, void *hcpu,
182 struct ltt_trace_struct *trace);
183 #endif
184 } ____cacheline_aligned;
185
186 struct ltt_transport {
187 char *name;
188 struct module *owner;
189 struct list_head node;
190 struct ltt_trace_ops ops;
191 };
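/*
 * Example (illustrative sketch, not part of the original header): how a
 * transport module would describe itself to the tracer core. The "relay"
 * name and the my_*() callbacks are hypothetical, and only a subset of the
 * ops is shown.
 *
 *	static struct ltt_transport my_transport = {
 *		.name = "relay",
 *		.owner = THIS_MODULE,
 *		.ops = {
 *			.reserve_slot = my_reserve_slot,
 *			.commit_slot = my_commit_slot,
 *			.create_channel = my_create_channel,
 *			.remove_channel = my_remove_channel,
 *		},
 *	};
 *
 *	ltt_transport_register(&my_transport);
 *	...
 *	ltt_transport_unregister(&my_transport);
 */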
192
193 enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
194
195 #define CHANNEL_FLAG_ENABLE (1U<<0)
196 #define CHANNEL_FLAG_OVERWRITE (1U<<1)
197
198 /* Per-trace information - each trace/flight recorder is represented by one */
199 struct ltt_trace_struct {
200 /* First 32 bytes cache-hot cacheline */
201 struct list_head list;
202 struct ltt_trace_ops *ops;
203 int active;
204 /* Second 32 bytes cache-hot cacheline */
205 struct ltt_channel_struct *channels;
206 unsigned int nr_channels;
207 u32 freq_scale;
208 u64 start_freq;
209 u64 start_tsc;
210 unsigned long long start_monotonic;
211 struct timeval start_time;
212 struct ltt_channel_setting *settings;
213 struct {
214 struct dentry *trace_root;
215 } dentry;
216 struct rchan_callbacks callbacks;
217 struct kref kref; /* Each channel has a kref of the trace struct */
218 struct ltt_transport *transport;
219 struct kref ltt_transport_kref;
220 wait_queue_head_t kref_wq; /* Place for ltt_trace_destroy to sleep */
221 char trace_name[NAME_MAX];
222 } ____cacheline_aligned;
223
224 /* Hardcoded event headers
225  *
226  * Event header for a trace with an active heartbeat: 27-bit timestamps.
227  *
228  * Headers are 32-bit aligned. To ensure this alignment, a dynamic per-trace
229  * alignment value must be computed.
230  *
231  * Remember that the C compiler aligns each member on a boundary equal to its
232  * own size.
233  *
234  * As relay subbuffers are page-aligned, they are guaranteed to be 4- and
235  * 8-byte aligned, so the buffer header and trace header are aligned.
236  *
237  * Event headers are aligned depending on the trace alignment option.
238  *
239  * Note: C structure bitfields are not used, because of cross-endianness and
240  * portability concerns.
241  */
242
243 #define LTT_RESERVED_EVENTS 3
244 #define LTT_EVENT_BITS 5
245 #define LTT_FREE_EVENTS ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
246 #define LTT_TSC_BITS 27
247 #define LTT_TSC_MASK ((1 << LTT_TSC_BITS) - 1)
248
249 struct ltt_event_header {
250 u32 id_time; /* 5 bits event id (MSB); 27 bits time (LSB) */
251 };
252
253 /* Reservation flags */
254 #define LTT_RFLAG_ID (1 << 0)
255 #define LTT_RFLAG_ID_SIZE (1 << 1)
256 #define LTT_RFLAG_ID_SIZE_TSC (1 << 2)
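/*
 * Worked example (not from the original header): id_time packing for an
 * event with id 9 logged at tsc 0x12345678, when no reservation flag
 * applies (the default case in ltt_write_event_header() below):
 *
 *	9 << LTT_TSC_BITS          = 0x48000000
 *	0x12345678 & LTT_TSC_MASK  = 0x02345678
 *	id_time                    = 0x4a345678
 *
 * The ids 29, 30 and 31 are reserved to announce the extended headers
 * selected by LTT_RFLAG_ID_SIZE_TSC, LTT_RFLAG_ID_SIZE and LTT_RFLAG_ID
 * respectively.
 */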
257
258 /*
259  * We use the cpu_khz/HZ variables from asm/timex.h here: we may have to deal
260  * specifically with CPU frequency scaling someday, so interpolating between
261  * the start- and end-of-buffer values is not flexible enough. Recording an
262  * immediate frequency value makes it possible to compute directly the times
263  * for the parts of a buffer that precede a frequency change.
264  *
265  * Keep the natural field alignment for _each field_ within this structure if
266  * you ever add/remove a field from this header. The packed attribute is not
267  * used because gcc generates poor code on at least powerpc and mips. Don't
268  * ever let gcc add padding between the structure elements.
269  */
270 struct ltt_subbuffer_header {
271 uint64_t cycle_count_begin; /* Cycle count at subbuffer start */
272 uint64_t cycle_count_end; /* Cycle count at subbuffer end */
273 uint32_t magic_number; /*
274 * Trace magic number.
275 * contains endianness information.
276 */
277 uint8_t major_version;
278 uint8_t minor_version;
279 uint8_t arch_size; /* Architecture pointer size */
280 uint8_t alignment; /* LTT data alignment */
281 uint64_t start_time_sec; /* NTP-corrected start time */
282 uint64_t start_time_usec;
283 uint64_t start_freq; /*
284 * Frequency at trace start,
285 * used all along the trace.
286 */
287 uint32_t freq_scale; /* Frequency scaling (divisor) */
288 uint32_t lost_size; /* Size unused at end of subbuffer */
289 uint32_t buf_size; /* Size of this subbuffer */
290 uint32_t events_lost; /*
291 * Events lost in this subbuffer since
292 * the beginning of the trace.
293 * (may overflow)
294 */
295 uint32_t subbuf_corrupt; /*
296 * Corrupted (lost) subbuffers since
297  * the beginning of the trace.
298 * (may overflow)
299 */
300 uint8_t header_end[0]; /* End of header */
301 };
302
303 /**
304 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
305 *
306  * Returns the header size, without padding after the structure. A packed
307  * structure is not used because gcc generates inefficient code on some
308  * architectures (powerpc, mips, ...).
309 */
310 static inline size_t ltt_subbuffer_header_size(void)
311 {
312 return offsetof(struct ltt_subbuffer_header, header_end);
313 }
314
315 /*
316 * ltt_get_header_size
317 *
318 * Calculate alignment offset to 32-bits. This is the alignment offset of the
319 * event header.
320 *
321 * Important note :
322  * The event header must be 32 bits wide. The total offset calculated here :
323 *
324 * Alignment of header struct on 32 bits (min arch size, header size)
325 * + sizeof(header struct) (32-bits)
326 * + (opt) u16 (ext. event id)
327 * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
328 * + (opt) u32 (ext. event size)
329 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
330 *
331 * The payload must itself determine its own alignment from the biggest type it
332 * contains.
333  */
334 static inline unsigned char ltt_get_header_size(
335 struct ltt_channel_struct *channel,
336 size_t offset,
337 size_t data_size,
338 size_t *before_hdr_pad,
339 unsigned int rflags)
340 {
341 size_t orig_offset = offset;
342 size_t padding;
343
344 BUILD_BUG_ON(sizeof(struct ltt_event_header) != sizeof(u32));
345
346 padding = ltt_align(offset, sizeof(struct ltt_event_header));
347 offset += padding;
348 offset += sizeof(struct ltt_event_header);
349
350 switch (rflags) {
351 case LTT_RFLAG_ID_SIZE_TSC:
352 offset += sizeof(u16) + sizeof(u16);
353 if (data_size >= 0xFFFFU)
354 offset += sizeof(u32);
355 offset += ltt_align(offset, sizeof(u64));
356 offset += sizeof(u64);
357 break;
358 case LTT_RFLAG_ID_SIZE:
359 offset += sizeof(u16) + sizeof(u16);
360 if (data_size >= 0xFFFFU)
361 offset += sizeof(u32);
362 break;
363 case LTT_RFLAG_ID:
364 offset += sizeof(u16);
365 break;
366 }
367
368 *before_hdr_pad = padding;
369 return offset - orig_offset;
370 }
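/*
 * Worked example (not from the original header): an LTT_RFLAG_ID_SIZE_TSC
 * event with data_size = 100, reserved at offset 6:
 *
 *	padding to the 32-bit header boundary     2   (6  -> 8)
 *	sizeof(struct ltt_event_header)           4   (8  -> 12)
 *	u16 event id + u16 event size             4   (12 -> 16)
 *	no u32 extended size (100 < 0xFFFF)       0
 *	padding to the 64-bit TSC boundary        0   (16 already aligned)
 *	u64 full TSC                              8   (16 -> 24)
 *
 * The function returns 24 - 6 = 18 and sets *before_hdr_pad to 2.
 */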
371
372 /*
373 * ltt_write_event_header
374 *
375 * Writes the event header to the offset (already aligned on 32-bits).
376 *
377 * @trace : trace to write to.
378  * @channel : pointer to the channel structure.
379 * @buf : buffer to write to.
380 * @buf_offset : buffer offset to write to (aligned on 32 bits).
381 * @eID : event ID
382 * @event_size : size of the event, excluding the event header.
383 * @tsc : time stamp counter.
384 * @rflags : reservation flags.
385 *
386 * returns : offset where the event data must be written.
387 */
388 static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
389 struct ltt_channel_struct *channel,
390 struct rchan_buf *buf, long buf_offset,
391 u16 eID, size_t event_size,
392 u64 tsc, unsigned int rflags)
393 {
394 struct ltt_event_header header;
395 size_t small_size;
396
397 switch (rflags) {
398 case LTT_RFLAG_ID_SIZE_TSC:
399 header.id_time = 29 << LTT_TSC_BITS;
400 break;
401 case LTT_RFLAG_ID_SIZE:
402 header.id_time = 30 << LTT_TSC_BITS;
403 break;
404 case LTT_RFLAG_ID:
405 header.id_time = 31 << LTT_TSC_BITS;
406 break;
407 default:
408 header.id_time = eID << LTT_TSC_BITS;
409 break;
410 }
411 header.id_time |= (u32)tsc & LTT_TSC_MASK;
412 ltt_relay_write(buf, buf_offset, &header, sizeof(header));
413 buf_offset += sizeof(header);
414
415 switch (rflags) {
416 case LTT_RFLAG_ID_SIZE_TSC:
417 small_size = min_t(size_t, event_size, 0xFFFFU);
418 ltt_relay_write(buf, buf_offset,
419 (u16[]){ (u16)eID }, sizeof(u16));
420 buf_offset += sizeof(u16);
421 ltt_relay_write(buf, buf_offset,
422 (u16[]){ (u16)small_size }, sizeof(u16));
423 buf_offset += sizeof(u16);
424 if (small_size == 0xFFFFU) {
425 ltt_relay_write(buf, buf_offset,
426 (u32[]){ (u32)event_size }, sizeof(u32));
427 buf_offset += sizeof(u32);
428 }
429 buf_offset += ltt_align(buf_offset, sizeof(u64));
430 ltt_relay_write(buf, buf_offset,
431 (u64[]){ (u64)tsc }, sizeof(u64));
432 buf_offset += sizeof(u64);
433 break;
434 case LTT_RFLAG_ID_SIZE:
435 small_size = min_t(size_t, event_size, 0xFFFFU);
436 ltt_relay_write(buf, buf_offset,
437 (u16[]){ (u16)eID }, sizeof(u16));
438 buf_offset += sizeof(u16);
439 ltt_relay_write(buf, buf_offset,
440 (u16[]){ (u16)small_size }, sizeof(u16));
441 buf_offset += sizeof(u16);
442 if (small_size == 0xFFFFU) {
443 ltt_relay_write(buf, buf_offset,
444 (u32[]){ (u32)event_size }, sizeof(u32));
445 buf_offset += sizeof(u32);
446 }
447 break;
448 case LTT_RFLAG_ID:
449 ltt_relay_write(buf, buf_offset,
450 (u16[]){ (u16)eID }, sizeof(u16));
451 buf_offset += sizeof(u16);
452 break;
453 default:
454 break;
455 }
456
457 return buf_offset;
458 }
459
460 /* Lockless LTTng */
461
462 /* Buffer offset macros */
463
464 /*
465 * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
466 * the offset, which leaves only the buffer number.
467 */
468 #define BUFFER_TRUNC(offset, chan) \
469 ((offset) & (~((chan)->alloc_size-1)))
470 #define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
471 #define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
472 #define SUBBUF_ALIGN(offset, chan) \
473 (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
474 #define SUBBUF_TRUNC(offset, chan) \
475 ((offset) & (~((chan)->subbuf_size - 1)))
476 #define SUBBUF_INDEX(offset, chan) \
477 (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
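/*
 * Worked example (not from the original header), for a channel with
 * subbuf_size = 0x10000 (subbuf_size_order = 16) and two sub-buffers per
 * buffer (alloc_size = 0x20000), at offset = 0x2abcd:
 *
 *	BUFFER_TRUNC   = 0x20000   (start of the current buffer)
 *	BUFFER_OFFSET  = 0x0abcd   (offset within the buffer)
 *	SUBBUF_OFFSET  = 0x0abcd   (offset within the sub-buffer)
 *	SUBBUF_INDEX   = 0         (first sub-buffer of the buffer)
 *	SUBBUF_TRUNC   = 0x20000   (start of the current sub-buffer)
 *	SUBBUF_ALIGN   = 0x30000   (start of the next sub-buffer)
 */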
478
479 /*
480 * ltt_reserve_slot
481 *
482  * Atomic slot reservation in an LTTng buffer. It will take care of
483 * sub-buffer switching.
484 *
485 * Parameters:
486 *
487 * @trace : the trace structure to log to.
488  * @channel : the channel to reserve space into.
489 * @transport_data : specific transport data.
490 * @data_size : size of the variable length data to log.
491 * @slot_size : pointer to total size of the slot (out)
492 * @buf_offset : pointer to reserve offset (out)
493 * @tsc : pointer to the tsc at the slot reservation (out)
494 * @rflags : reservation flags (header specificity)
495 * @cpu : cpu id
496 *
497 * Return : -ENOSPC if not enough space, else 0.
498 */
499 static inline int ltt_reserve_slot(
500 struct ltt_trace_struct *trace,
501 struct ltt_channel_struct *channel,
502 void **transport_data,
503 size_t data_size,
504 size_t *slot_size,
505 long *buf_offset,
506 u64 *tsc,
507 unsigned int *rflags,
508 int largest_align,
509 int cpu)
510 {
511 return trace->ops->reserve_slot(trace, channel, transport_data,
512 data_size, slot_size, buf_offset, tsc, rflags,
513 largest_align, cpu);
514 }
515
516
517 /*
518 * ltt_commit_slot
519 *
520 * Atomic unordered slot commit. Increments the commit count in the
521 * specified sub-buffer, and delivers it if necessary.
522 *
523 * Parameters:
524 *
525  * @channel : the channel to commit the reserved slot into.
526 * @transport_data : specific transport data.
527 * @buf_offset : offset of beginning of reserved slot
528 * @slot_size : size of the reserved slot.
529 */
530 static inline void ltt_commit_slot(
531 struct ltt_channel_struct *channel,
532 void **transport_data,
533 long buf_offset,
534 size_t slot_size)
535 {
536 struct ltt_trace_struct *trace = channel->trace;
537
538 trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
539 }
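/*
 * Example (illustrative sketch, not part of the original header): the logging
 * sequence a probe typically follows around these two helpers. The buf, eID
 * and payload variables are placeholders (buf would be the per-cpu relay
 * buffer provided by the transport), and error handling is minimal.
 *
 *	void *transport_data;
 *	size_t slot_size;
 *	long reserved_offset, buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	u32 payload = 42;
 *
 *	if (ltt_reserve_slot(trace, channel, &transport_data, sizeof(payload),
 *			     &slot_size, &reserved_offset, &tsc, &rflags,
 *			     sizeof(u32), smp_processor_id()))
 *		return;		(no space left: the event is dropped)
 *
 *	buf_offset = ltt_write_event_header(trace, channel, buf,
 *					    reserved_offset, eID,
 *					    sizeof(payload), tsc, rflags);
 *	ltt_relay_write(buf, buf_offset, &payload, sizeof(payload));
 *	ltt_commit_slot(channel, &transport_data, reserved_offset, slot_size);
 */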
540
541 /*
542 * Control channels :
543 * control/metadata
544 * control/interrupts
545 * control/...
546 *
547 * cpu channel :
548 * cpu
549 */
550 #define LTT_RELAY_ROOT "ltt"
551 #define LTT_RELAY_LOCKED_ROOT "ltt-locked"
552
553 #define LTT_METADATA_CHANNEL "metadata_state"
554 #define LTT_FD_STATE_CHANNEL "fd_state"
555 #define LTT_GLOBAL_STATE_CHANNEL "global_state"
556 #define LTT_IRQ_STATE_CHANNEL "irq_state"
557 #define LTT_MODULE_STATE_CHANNEL "module_state"
558 #define LTT_NETIF_STATE_CHANNEL "netif_state"
559 #define LTT_SOFTIRQ_STATE_CHANNEL "softirq_state"
560 #define LTT_SWAP_STATE_CHANNEL "swap_state"
561 #define LTT_SYSCALL_STATE_CHANNEL "syscall_state"
562 #define LTT_TASK_STATE_CHANNEL "task_state"
563 #define LTT_VM_STATE_CHANNEL "vm_state"
564 #define LTT_FS_CHANNEL "fs"
565 #define LTT_INPUT_CHANNEL "input"
566 #define LTT_IPC_CHANNEL "ipc"
567 #define LTT_KERNEL_CHANNEL "kernel"
568 #define LTT_MM_CHANNEL "mm"
569 #define LTT_RCU_CHANNEL "rcu"
570
571 #define LTT_FLIGHT_PREFIX "flight-"
572
573 /* Tracer properties */
574 #define LTT_DEFAULT_SUBBUF_SIZE_LOW 65536
575 #define LTT_DEFAULT_N_SUBBUFS_LOW 2
576 #define LTT_DEFAULT_SUBBUF_SIZE_MED 262144
577 #define LTT_DEFAULT_N_SUBBUFS_MED 2
578 #define LTT_DEFAULT_SUBBUF_SIZE_HIGH 1048576
579 #define LTT_DEFAULT_N_SUBBUFS_HIGH 2
580 #define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
581 #define LTT_TRACER_VERSION_MAJOR 2
582 #define LTT_TRACER_VERSION_MINOR 3
583
584 /*
585  * Size reserved for high-priority events (interrupts, NMI, BH) at the end of a
586  * nearly full buffer. User space won't use this last amount of space when in
587  * blocking mode. This space also accounts for the event header that the
588  * blocked user-space event would have written.
589 */
590 #define LTT_RESERVE_CRITICAL 4096
591
592 /* Register and unregister function pointers */
593
594 enum ltt_module_function {
595 LTT_FUNCTION_RUN_FILTER,
596 LTT_FUNCTION_FILTER_CONTROL,
597 LTT_FUNCTION_STATEDUMP
598 };
599
600 extern int ltt_module_register(enum ltt_module_function name, void *function,
601 struct module *owner);
602 extern void ltt_module_unregister(enum ltt_module_function name);
603
604 void ltt_transport_register(struct ltt_transport *transport);
605 void ltt_transport_unregister(struct ltt_transport *transport);
606
607 /* Exported control function */
608
609 enum ltt_control_msg {
610 LTT_CONTROL_START,
611 LTT_CONTROL_STOP,
612 LTT_CONTROL_CREATE_TRACE,
613 LTT_CONTROL_DESTROY_TRACE
614 };
615
616 union ltt_control_args {
617 struct {
618 enum trace_mode mode;
619 unsigned int subbuf_size_low;
620 unsigned int n_subbufs_low;
621 unsigned int subbuf_size_med;
622 unsigned int n_subbufs_med;
623 unsigned int subbuf_size_high;
624 unsigned int n_subbufs_high;
625 } new_trace;
626 };
627
628 int _ltt_trace_setup(const char *trace_name);
629 int ltt_trace_setup(const char *trace_name);
630 struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
631 int ltt_trace_set_type(const char *trace_name, const char *trace_type);
632 int ltt_trace_set_channel_subbufsize(const char *trace_name,
633 const char *channel_name, unsigned int size);
634 int ltt_trace_set_channel_subbufcount(const char *trace_name,
635 const char *channel_name, unsigned int cnt);
636 int ltt_trace_set_channel_enable(const char *trace_name,
637 const char *channel_name, unsigned int enable);
638 int ltt_trace_set_channel_overwrite(const char *trace_name,
639 const char *channel_name, unsigned int overwrite);
640 int ltt_trace_alloc(const char *trace_name);
641 int ltt_trace_destroy(const char *trace_name);
642 int ltt_trace_start(const char *trace_name);
643 int ltt_trace_stop(const char *trace_name);
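/*
 * Example (illustrative sketch, not part of the original header): typical
 * trace lifecycle driven through the functions above. The trace name, the
 * "relay" transport type and the channel sizing are placeholders, and the
 * return codes should be checked at every step.
 *
 *	ltt_trace_setup("trace1");
 *	ltt_trace_set_type("trace1", "relay");
 *	ltt_trace_set_channel_subbufsize("trace1", "kernel", 262144);
 *	ltt_trace_set_channel_subbufcount("trace1", "kernel", 2);
 *	ltt_trace_alloc("trace1");
 *	ltt_trace_start("trace1");
 *		(tracing runs...)
 *	ltt_trace_stop("trace1");
 *	ltt_trace_destroy("trace1");
 */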
644
645 extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
646 const char *trace_type, union ltt_control_args args);
647
648 enum ltt_filter_control_msg {
649 LTT_FILTER_DEFAULT_ACCEPT,
650 LTT_FILTER_DEFAULT_REJECT
651 };
652
653 extern int ltt_filter_control(enum ltt_filter_control_msg msg,
654 const char *trace_name);
655
656 extern struct dentry *get_filter_root(void);
657
658 void ltt_write_trace_header(struct ltt_trace_struct *trace,
659 struct ltt_subbuffer_header *header);
660 extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);
661
662 void ltt_core_register(int (*function)(u8, void *));
663
664 void ltt_core_unregister(void);
665
666 void ltt_release_trace(struct kref *kref);
667 void ltt_release_transport(struct kref *kref);
668
669 extern int ltt_probe_register(struct ltt_available_probe *pdata);
670 extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
671 extern int ltt_marker_connect(const char *channel, const char *mname,
672 const char *pname);
673 extern int ltt_marker_disconnect(const char *channel, const char *mname,
674 const char *pname);
675 extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);
676
677 void ltt_lock_traces(void);
678 void ltt_unlock_traces(void);
679
680 extern void ltt_dump_softirq_vec(void *call_data);
681
682 #ifdef CONFIG_HAVE_LTT_DUMP_TABLES
683 extern void ltt_dump_sys_call_table(void *call_data);
684 extern void ltt_dump_idt_table(void *call_data);
685 #else
686 static inline void ltt_dump_sys_call_table(void *call_data)
687 {
688 }
689
690 static inline void ltt_dump_idt_table(void *call_data)
691 {
692 }
693 #endif
694
695 #ifdef CONFIG_LTT_KPROBES
696 extern void ltt_dump_kprobes_table(void *call_data);
697 #else
698 static inline void ltt_dump_kprobes_table(void *call_data)
699 {
700 }
701 #endif
702
703 /* Relay IOCTL */
704
705 /* Get the next sub buffer that can be read. */
706 #define RELAY_GET_SUBBUF _IOR(0xF5, 0x00, __u32)
707 /* Release the oldest reserved (by "get") sub buffer. */
708 #define RELAY_PUT_SUBBUF _IOW(0xF5, 0x01, __u32)
709 /* Returns the number of sub buffers in the per cpu channel. */
710 #define RELAY_GET_N_SUBBUFS _IOR(0xF5, 0x02, __u32)
711 /* Returns the size of the sub buffers. */
712 #define RELAY_GET_SUBBUF_SIZE _IOR(0xF5, 0x03, __u32)
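/*
 * Example (illustrative sketch, not part of the original header): skeleton of
 * a user-space consumer draining one per-cpu channel file with the ioctls
 * above (needs <fcntl.h> and <sys/ioctl.h>). The debugfs path is a
 * placeholder, the exact meaning of the value returned by RELAY_GET_SUBBUF
 * is transport-specific, and error handling is omitted.
 *
 *	__u32 n_subbufs, subbuf_size, consumed;
 *	int fd = open("/mnt/debugfs/ltt/trace1/kernel_0", O_RDONLY);
 *
 *	ioctl(fd, RELAY_GET_N_SUBBUFS, &n_subbufs);
 *	ioctl(fd, RELAY_GET_SUBBUF_SIZE, &subbuf_size);
 *
 *	while (ioctl(fd, RELAY_GET_SUBBUF, &consumed) == 0) {
 *		(copy the reserved sub-buffer out of the mmapped channel,
 *		 using "consumed" to locate it, then hand it back)
 *		ioctl(fd, RELAY_PUT_SUBBUF, &consumed);
 *	}
 */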
713
714 #endif /* CONFIG_LTT */
715
716 #endif /* _LTT_TRACER_H */