/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

//ust// #include <stdarg.h>
//ust// #include <linux/types.h>
//ust// #include <linux/limits.h>
//ust// #include <linux/list.h>
//ust// #include <linux/cache.h>
//ust// #include <linux/kernel.h>
//ust// #include <linux/timex.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/ltt-core.h>
//ust// #include <linux/marker.h>
//ust// #include <linux/trace-clock.h>
//ust// #include <asm/atomic.h>
//ust// #include <asm/local.h>
#include <sys/types.h>
#include <stdarg.h>
#include "relay.h"
#include "list.h"
#include "kernelcompat.h"
#include "channels.h"

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE                 32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL       1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE                   LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT                LTT_ARCH_VARIANT_NONE
#endif

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS        10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

/* Serialization callback '%k' */
typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
                struct ltt_serialize_closure *closure,
                void *serialize_private, int *largest_align,
                const char *fmt, va_list *args);

struct ltt_serialize_closure {
        ltt_serialize_cb *callbacks;
        long cb_args[LTT_NR_CALLBACKS];
        unsigned int cb_idx;
};

size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
                struct ltt_serialize_closure *closure,
                void *serialize_private,
                int *largest_align, const char *fmt, va_list *args);
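
/*
 * Hedged usage sketch (illustration only, not part of the original header):
 * one way a caller might drive the serialization closure, assuming slot 0 of
 * the callback array holds the generic ltt_serialize_data entry point and
 * that cb_idx starts at 0.  The variables buf, buf_offset, fmt and args are
 * assumed to come from the surrounding probe code.
 */
#if 0   /* example only */
        ltt_serialize_cb cbs[LTT_NR_CALLBACKS] = { ltt_serialize_data };
        struct ltt_serialize_closure closure = {
                .callbacks = cbs,
                .cb_args = { 0 },
                .cb_idx = 0,
        };
        int largest_align = 1;  /* smallest alignment constraint seen so far */
        va_list args_copy;

        va_copy(args_copy, *args);
        /* Serialize every argument described by fmt into the channel buffer. */
        buf_offset = cbs[0](buf, buf_offset, &closure, NULL,
                        &largest_align, fmt, &args_copy);
        va_end(args_copy);
#endif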

//ust// struct ltt_available_probe {
//ust//         const char *name;               /* probe name */
//ust//         const char *format;
//ust//         marker_probe_func *probe_func;
//ust//         ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
//ust//         struct list_head node;          /* registered probes list */
//ust// };

struct ltt_probe_private_data {
        struct ltt_trace_struct *trace; /*
                                         * Target trace, for metadata
                                         * or statedump.
                                         */
        ltt_serialize_cb serializer;    /*
                                         * Serialization function override.
                                         */
        void *serialize_private;        /*
                                         * Private data for serialization
                                         * functions.
                                         */
};

enum ltt_channels {
        LTT_CHANNEL_METADATA,
        LTT_CHANNEL_UST,
};

struct ltt_active_marker {
        struct list_head node;          /* active markers list */
        const char *channel;
        const char *name;
        const char *format;
        struct ltt_available_probe *probe;
};

struct marker; //ust//
extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
                void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
                void *call_data, const char *fmt, ...);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
        MARKER_ID_SET_MARKER_ID = 0,    /* Static IDs available (range 0-7) */
        MARKER_ID_SET_MARKER_FORMAT,
        MARKER_ID_COMPACT,              /* Compact IDs (range: 8-127) */
        MARKER_ID_DYNAMIC,              /* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS         2
static inline enum marker_id marker_id_type(uint16_t id)
{
        if (id < MARKER_CORE_IDS)
                return (enum marker_id)id;
        else
                return MARKER_ID_DYNAMIC;
}

//ust// #ifdef CONFIG_LTT

struct user_dbg_data {
        unsigned long avail_size;
        unsigned long write;
        unsigned long read;
};

struct ltt_trace_ops {
        /* First 32 bytes cache-hot cacheline */
        int (*reserve_slot) (struct ltt_trace_struct *trace,
                        struct ltt_channel_struct *channel,
                        void **transport_data, size_t data_size,
                        size_t *slot_size, long *buf_offset, u64 *tsc,
                        unsigned int *rflags,
                        int largest_align,
                        int cpu);
        void (*commit_slot) (struct ltt_channel_struct *channel,
                        void **transport_data, long buf_offset,
                        size_t slot_size);
        void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
        int (*user_blocking) (struct ltt_trace_struct *trace,
                        unsigned int index, size_t data_size,
                        struct user_dbg_data *dbg);
//ust//         /* End of first 32 bytes cacheline */
//ust//         int (*create_dirs) (struct ltt_trace_struct *new_trace);
//ust//         void (*remove_dirs) (struct ltt_trace_struct *new_trace);
        int (*create_channel) (const char *trace_name,
                        struct ltt_trace_struct *trace,
                        struct dentry *dir, const char *channel_name,
                        struct ltt_channel_struct *ltt_chan,
                        unsigned int subbuf_size,
                        unsigned int n_subbufs, int overwrite);
        void (*finish_channel) (struct ltt_channel_struct *channel);
        void (*remove_channel) (struct ltt_channel_struct *channel);
        void (*user_errors) (struct ltt_trace_struct *trace,
                        unsigned int index, size_t data_size,
                        struct user_dbg_data *dbg, int cpu);
//ust// #ifdef CONFIG_HOTPLUG_CPU
//ust//         int (*handle_cpuhp) (struct notifier_block *nb,
//ust//                         unsigned long action, void *hcpu,
//ust//                         struct ltt_trace_struct *trace);
//ust// #endif
} ____cacheline_aligned;

struct ltt_transport {
        char *name;
        struct module *owner;
        struct list_head node;
        struct ltt_trace_ops ops;
};
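
/*
 * Hedged sketch (illustration only, not part of the original header): what a
 * minimal transport registration might look like.  The callback
 * implementations (my_reserve_slot, ...), the transport name "example-relay"
 * and the availability of THIS_MODULE through the compat layer are
 * assumptions made for the example.
 */
#if 0   /* example only */
static struct ltt_transport example_transport = {
        .name   = "example-relay",
        .owner  = THIS_MODULE,
        .ops    = {
                .reserve_slot   = my_reserve_slot,
                .commit_slot    = my_commit_slot,
                .wakeup_channel = my_wakeup_channel,
                .user_blocking  = my_user_blocking,
                .create_channel = my_create_channel,
                .finish_channel = my_finish_channel,
                .remove_channel = my_remove_channel,
                .user_errors    = my_user_errors,
        },
};

static void example_transport_init(void)
{
        /* Makes the transport selectable by name when a trace is set up. */
        ltt_transport_register(&example_transport);
}
#endif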

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE     (1U<<0)
#define CHANNEL_FLAG_OVERWRITE  (1U<<1)

/* Per-trace information - each trace/flight recorder represented by one */
struct ltt_trace_struct {
        /* First 32 bytes cache-hot cacheline */
        struct list_head list;
        struct ltt_trace_ops *ops;
        int active;
        /* Second 32 bytes cache-hot cacheline */
        struct ltt_channel_struct *channels;
        unsigned int nr_channels;
        u32 freq_scale;
        u64 start_freq;
        u64 start_tsc;
        unsigned long long start_monotonic;
        struct timeval start_time;
        struct ltt_channel_setting *settings;
        struct {
                struct dentry *trace_root;
        } dentry;
//ust// struct rchan_callbacks callbacks;
        struct kref kref;       /* Each channel has a kref of the trace struct */
        struct ltt_transport *transport;
        struct kref ltt_transport_kref;
//ust// wait_queue_head_t kref_wq; /* Place for ltt_trace_destroy to sleep */
        char trace_name[NAME_MAX];
} ____cacheline_aligned;

/* Hardcoded event headers
 *
 * Event header for a trace with active heartbeat: 27-bit timestamps.
 *
 * Headers are 32-bit aligned. To ensure such alignment, a dynamic per-trace
 * alignment value must be computed.
 *
 * Remember that the C compiler aligns each member on a boundary equal to its
 * own size.
 *
 * As relay subbuffers are aligned on pages, we are sure that they are 4- and
 * 8-byte aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note using C structure bitfields for cross-endianness and portability
 * concerns.
 */

#define LTT_RESERVED_EVENTS     3
#define LTT_EVENT_BITS          5
#define LTT_FREE_EVENTS         ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS            27
#define LTT_TSC_MASK            ((1 << LTT_TSC_BITS) - 1)

struct ltt_event_header {
        u32 id_time;            /* 5 bits event id (MSB); 27 bits time (LSB) */
};
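
/*
 * Hedged sketch (illustration only, not part of the original header): how a
 * reader might unpack id_time, given the 5-bit event id / 27-bit timestamp
 * split defined above.  The helper names are hypothetical.
 */
#if 0   /* example only */
static inline u8 example_header_id(const struct ltt_event_header *header)
{
        return header->id_time >> LTT_TSC_BITS;         /* top 5 bits */
}

static inline u32 example_header_tsc(const struct ltt_event_header *header)
{
        return header->id_time & LTT_TSC_MASK;          /* low 27 bits */
}
#endif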

/* Reservation flags */
#define LTT_RFLAG_ID            (1 << 0)
#define LTT_RFLAG_ID_SIZE       (1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC   (1 << 2)

/*
 * We use the cpu_khz/HZ variables from asm/timex.h here: we might have to
 * deal specifically with CPU frequency scaling someday, so an interpolation
 * between the start and end of buffer values is not flexible enough. Using an
 * immediate frequency value lets us compute directly the times for the parts
 * of a buffer that precede a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. The packed attribute is not
 * used because gcc generates poor code on at least powerpc and mips. Don't
 * ever let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
        uint64_t cycle_count_begin;     /* Cycle count at subbuffer start */
        uint64_t cycle_count_end;       /* Cycle count at subbuffer end */
        uint32_t magic_number;          /*
                                         * Trace magic number.
                                         * contains endianness information.
                                         */
        uint8_t major_version;
        uint8_t minor_version;
        uint8_t arch_size;              /* Architecture pointer size */
        uint8_t alignment;              /* LTT data alignment */
        uint64_t start_time_sec;        /* NTP-corrected start time */
        uint64_t start_time_usec;
        uint64_t start_freq;            /*
                                         * Frequency at trace start,
                                         * used all along the trace.
                                         */
        uint32_t freq_scale;            /* Frequency scaling (divisor) */
        uint32_t lost_size;             /* Size unused at end of subbuffer */
        uint32_t buf_size;              /* Size of this subbuffer */
        uint32_t events_lost;           /*
                                         * Events lost in this subbuffer since
                                         * the beginning of the trace.
                                         * (may overflow)
                                         */
        uint32_t subbuf_corrupt;        /*
                                         * Corrupted (lost) subbuffers since
                                         * the beginning of the trace.
                                         * (may overflow)
                                         */
        uint8_t header_end[0];          /* End of header */
};

/**
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static inline size_t ltt_subbuffer_header_size(void)
{
        return offsetof(struct ltt_subbuffer_header, header_end);
}

/*
 * ltt_get_header_size
 *
 * Calculate the alignment offset to 32 bits. This is the alignment offset of
 * the event header.
 *
 * Important note:
 * The event header must be 32 bits. The total offset calculated here is:
 *
 *   alignment of the header struct on 32 bits (min arch size, header size)
 *   + sizeof(header struct)  (32 bits)
 *   + (opt) u16 (ext. event id)
 *   + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 *   + (opt) u32 (ext. event size)
 *   + (opt) u64 full TSC (aligned on min(64 bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type
 * it contains.
 */
static inline unsigned char ltt_get_header_size(
                struct ltt_channel_struct *channel,
                size_t offset,
                size_t data_size,
                size_t *before_hdr_pad,
                unsigned int rflags)
{
        size_t orig_offset = offset;
        size_t padding;

        BUILD_BUG_ON(sizeof(struct ltt_event_header) != sizeof(u32));

        padding = ltt_align(offset, sizeof(struct ltt_event_header));
        offset += padding;
        offset += sizeof(struct ltt_event_header);

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                offset += sizeof(u16) + sizeof(u16);
                if (data_size >= 0xFFFFU)
                        offset += sizeof(u32);
                offset += ltt_align(offset, sizeof(u64));
                offset += sizeof(u64);
                break;
        case LTT_RFLAG_ID_SIZE:
                offset += sizeof(u16) + sizeof(u16);
                if (data_size >= 0xFFFFU)
                        offset += sizeof(u32);
                break;
        case LTT_RFLAG_ID:
                offset += sizeof(u16);
                break;
        }

        *before_hdr_pad = padding;
        return offset - orig_offset;
}
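
/*
 * Worked example (illustration only, not part of the original header): for
 * offset = 13, rflags = LTT_RFLAG_ID_SIZE and data_size < 0xFFFF, the offset
 * is first padded to the next 32-bit boundary (3 bytes of padding, offset 16),
 * then advanced by the 4-byte ltt_event_header (offset 20), then by the u16
 * event id and u16 event size (offset 24).  *before_hdr_pad is 3 and the
 * returned header size is 24 - 13 = 11.
 */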

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @trace : trace to write to.
 * @channel : pointer to the channel structure.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * returns : offset where the event data must be written.
 */
static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
                struct ltt_channel_struct *channel,
                struct rchan_buf *buf, long buf_offset,
                u16 eID, size_t event_size,
                u64 tsc, unsigned int rflags)
{
        struct ltt_event_header header;
        size_t small_size;

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                header.id_time = 29 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID_SIZE:
                header.id_time = 30 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID:
                header.id_time = 31 << LTT_TSC_BITS;
                break;
        default:
                header.id_time = eID << LTT_TSC_BITS;
                break;
        }
        header.id_time |= (u32)tsc & LTT_TSC_MASK;
        ltt_relay_write(buf, buf_offset, &header, sizeof(header));
        buf_offset += sizeof(header);

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                small_size = min_t(size_t, event_size, 0xFFFFU);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)small_size }, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == 0xFFFFU) {
                        ltt_relay_write(buf, buf_offset,
                                (u32[]){ (u32)event_size }, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                buf_offset += ltt_align(buf_offset, sizeof(u64));
                ltt_relay_write(buf, buf_offset,
                        (u64[]){ (u64)tsc }, sizeof(u64));
                buf_offset += sizeof(u64);
                break;
        case LTT_RFLAG_ID_SIZE:
                small_size = min_t(size_t, event_size, 0xFFFFU);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)small_size }, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == 0xFFFFU) {
                        ltt_relay_write(buf, buf_offset,
                                (u32[]){ (u32)event_size }, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                break;
        case LTT_RFLAG_ID:
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                break;
        default:
                break;
        }

        return buf_offset;
}

/* Lockless LTTng */

/* Buffer offset macros */

/*
 * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
 * the offset, which leaves only the buffer number.
 */
#define BUFFER_TRUNC(offset, chan) \
        ((offset) & (~((chan)->alloc_size-1)))
#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
#define SUBBUF_ALIGN(offset, chan) \
        (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_TRUNC(offset, chan) \
        ((offset) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_INDEX(offset, chan) \
        (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
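
/*
 * Worked example (illustration only, not part of the original header): with
 * subbuf_size = 0x10000 (64 KiB, subbuf_size_order = 16) and two sub-buffers
 * per buffer (alloc_size = 0x20000), an offset of 0x3a034 decomposes as:
 *
 *   BUFFER_TRUNC  = 0x20000   (start of the buffer containing the offset)
 *   BUFFER_OFFSET = 0x1a034   (offset within that buffer)
 *   SUBBUF_INDEX  = 1         (second sub-buffer of the buffer)
 *   SUBBUF_OFFSET = 0xa034    (offset within that sub-buffer)
 *   SUBBUF_TRUNC  = 0x30000   (start of the current sub-buffer)
 *   SUBBUF_ALIGN  = 0x40000   (start of the next sub-buffer)
 */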

/*
 * ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @data_size : size of the variable-length data to log.
 * @slot_size : pointer to the total size of the slot (out)
 * @buf_offset : pointer to the reserved offset (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @rflags : reservation flags (header specificity)
 * @cpu : cpu id
 *
 * Return : -ENOSPC if not enough space, else 0.
 */
static inline int ltt_reserve_slot(
                struct ltt_trace_struct *trace,
                struct ltt_channel_struct *channel,
                void **transport_data,
                size_t data_size,
                size_t *slot_size,
                long *buf_offset,
                u64 *tsc,
                unsigned int *rflags,
                int largest_align,
                int cpu)
{
        return trace->ops->reserve_slot(trace, channel, transport_data,
                        data_size, slot_size, buf_offset, tsc, rflags,
                        largest_align, cpu);
}


/*
 * ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @channel : the channel to commit to.
 * @transport_data : specific transport data.
 * @buf_offset : offset of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 */
static inline void ltt_commit_slot(
                struct ltt_channel_struct *channel,
                void **transport_data,
                long buf_offset,
                size_t slot_size)
{
        struct ltt_trace_struct *trace = channel->trace;

        trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
}
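
/*
 * Hedged sketch (illustration only, not part of the original header): the
 * typical write path a probe might follow, combining the helpers above.  The
 * function name, the payload handling and the choice of sizeof(long) as the
 * largest payload alignment are assumptions for the example; per the
 * documentation above, the commit is issued on the offset of the beginning
 * of the reserved slot.
 */
#if 0   /* example only */
static void example_write_event(struct ltt_trace_struct *trace,
                struct ltt_channel_struct *channel,
                struct rchan_buf *buf, u16 eID,
                const void *payload, size_t payload_size, int cpu)
{
        void *transport_data;
        size_t slot_size;
        long buf_offset, slot_start;
        u64 tsc;
        unsigned int rflags = 0;

        if (ltt_reserve_slot(trace, channel, &transport_data, payload_size,
                        &slot_size, &buf_offset, &tsc, &rflags,
                        sizeof(long), cpu) < 0)
                return;         /* -ENOSPC: the event is dropped */
        slot_start = buf_offset;

        /* Write the event header, then the payload right after it. */
        buf_offset = ltt_write_event_header(trace, channel, buf, buf_offset,
                        eID, payload_size, tsc, rflags);
        ltt_relay_write(buf, buf_offset, payload, payload_size);

        ltt_commit_slot(channel, &transport_data, slot_start, slot_size);
}
#endif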

/*
 * Control channels :
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel :
 * cpu
 */
//ust// #define LTT_RELAY_ROOT          "ltt"
//ust// #define LTT_RELAY_LOCKED_ROOT   "ltt-locked"

#define LTT_METADATA_CHANNEL            "metadata_state"
#define LTT_UST_CHANNEL                 "ust"

#define LTT_FLIGHT_PREFIX               "flight-"

/* Tracer properties */
#define LTT_DEFAULT_SUBBUF_SIZE_LOW     65536
#define LTT_DEFAULT_N_SUBBUFS_LOW       2
#define LTT_DEFAULT_SUBBUF_SIZE_MED     262144
#define LTT_DEFAULT_N_SUBBUFS_MED       2
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH    1048576
#define LTT_DEFAULT_N_SUBBUFS_HIGH      2
#define LTT_TRACER_MAGIC_NUMBER         0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR        2
#define LTT_TRACER_VERSION_MINOR        3

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL            4096

/* Register and unregister function pointers */

enum ltt_module_function {
        LTT_FUNCTION_RUN_FILTER,
        LTT_FUNCTION_FILTER_CONTROL,
        LTT_FUNCTION_STATEDUMP
};

//ust// extern int ltt_module_register(enum ltt_module_function name, void *function,
//ust//                 struct module *owner);
//ust// extern void ltt_module_unregister(enum ltt_module_function name);

void ltt_transport_register(struct ltt_transport *transport);
void ltt_transport_unregister(struct ltt_transport *transport);

/* Exported control function */

//ust// enum ltt_control_msg {
//ust//         LTT_CONTROL_START,
//ust//         LTT_CONTROL_STOP,
//ust//         LTT_CONTROL_CREATE_TRACE,
//ust//         LTT_CONTROL_DESTROY_TRACE
//ust// };

union ltt_control_args {
        struct {
                enum trace_mode mode;
                unsigned int subbuf_size_low;
                unsigned int n_subbufs_low;
                unsigned int subbuf_size_med;
                unsigned int n_subbufs_med;
                unsigned int subbuf_size_high;
                unsigned int n_subbufs_high;
        } new_trace;
};

int _ltt_trace_setup(const char *trace_name);
int ltt_trace_setup(const char *trace_name);
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
int ltt_trace_set_type(const char *trace_name, const char *trace_type);
int ltt_trace_set_channel_subbufsize(const char *trace_name,
                const char *channel_name, unsigned int size);
int ltt_trace_set_channel_subbufcount(const char *trace_name,
                const char *channel_name, unsigned int cnt);
int ltt_trace_set_channel_enable(const char *trace_name,
                const char *channel_name, unsigned int enable);
int ltt_trace_set_channel_overwrite(const char *trace_name,
                const char *channel_name, unsigned int overwrite);
int ltt_trace_alloc(const char *trace_name);
int ltt_trace_destroy(const char *trace_name);
int ltt_trace_start(const char *trace_name);
int ltt_trace_stop(const char *trace_name);
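
/*
 * Hedged sketch (illustration only, not part of the original header): the
 * order in which the control calls above are typically issued to bring a
 * trace up and tear it down.  The trace name "auto" and the transport name
 * passed to ltt_trace_set_type() are placeholders, not values mandated by
 * this header.
 */
#if 0   /* example only */
static int example_trace_session(void)
{
        const char *name = "auto";
        int ret;

        ret = ltt_trace_setup(name);
        if (ret)
                return ret;
        ret = ltt_trace_set_type(name, "relay");  /* transport name: placeholder */
        if (ret)
                return ret;
        ltt_trace_set_channel_subbufsize(name, LTT_UST_CHANNEL,
                        LTT_DEFAULT_SUBBUF_SIZE_MED);
        ltt_trace_set_channel_subbufcount(name, LTT_UST_CHANNEL,
                        LTT_DEFAULT_N_SUBBUFS_MED);
        ltt_trace_set_channel_enable(name, LTT_UST_CHANNEL, 1);

        ret = ltt_trace_alloc(name);    /* allocate the channel buffers */
        if (ret)
                return ret;
        ret = ltt_trace_start(name);    /* events are recorded from here on */
        if (ret)
                return ret;

        /* ... tracing ... */

        ltt_trace_stop(name);
        ltt_trace_destroy(name);
        return 0;
}
#endif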

//ust// extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust//                 const char *trace_type, union ltt_control_args args);

enum ltt_filter_control_msg {
        LTT_FILTER_DEFAULT_ACCEPT,
        LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
                const char *trace_name);

extern struct dentry *get_filter_root(void);

void ltt_write_trace_header(struct ltt_trace_struct *trace,
                struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);

void ltt_core_register(int (*function)(u8, void *));

void ltt_core_unregister(void);

void ltt_release_trace(struct kref *kref);
void ltt_release_transport(struct kref *kref);

extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
extern int ltt_marker_connect(const char *channel, const char *mname,
                const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
                const char *pname);
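
/*
 * Hedged sketch (illustration only, not part of the original header):
 * connecting a marker to a probe in the "ust" channel and disconnecting it
 * afterwards.  The marker name "example_event" and the probe name "default"
 * are assumptions made for the example.
 */
#if 0   /* example only */
static int example_connect_marker(void)
{
        int ret;

        ret = ltt_marker_connect(LTT_UST_CHANNEL, "example_event", "default");
        if (ret)
                return ret;
        /* ... trace ... */
        return ltt_marker_disconnect(LTT_UST_CHANNEL, "example_event",
                        "default");
}
#endif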
extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);

void ltt_lock_traces(void);
void ltt_unlock_traces(void);

//ust// extern void ltt_dump_softirq_vec(void *call_data);
//ust//
//ust// #ifdef CONFIG_HAVE_LTT_DUMP_TABLES
//ust// extern void ltt_dump_sys_call_table(void *call_data);
//ust// extern void ltt_dump_idt_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_sys_call_table(void *call_data)
//ust// {
//ust// }
//ust//
//ust// static inline void ltt_dump_idt_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// #ifdef CONFIG_LTT_KPROBES
//ust// extern void ltt_dump_kprobes_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_kprobes_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// /* Relay IOCTL */
//ust//
//ust// /* Get the next sub buffer that can be read. */
//ust// #define RELAY_GET_SUBBUF        _IOR(0xF5, 0x00, __u32)
//ust// /* Release the oldest reserved (by "get") sub buffer. */
//ust// #define RELAY_PUT_SUBBUF        _IOW(0xF5, 0x01, __u32)
//ust// /* returns the number of sub buffers in the per cpu channel. */
//ust// #define RELAY_GET_N_SUBBUFS     _IOR(0xF5, 0x02, __u32)
//ust// /* returns the size of the sub buffers. */
//ust// #define RELAY_GET_SUBBUF_SIZE   _IOR(0xF5, 0x03, __u32)

//ust// #endif /* CONFIG_LTT */

#endif /* _LTT_TRACER_H */