/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

//ust// #include <stdarg.h>
//ust// #include <linux/types.h>
//ust// #include <linux/limits.h>
//ust// #include <linux/list.h>
//ust// #include <linux/cache.h>
//ust// #include <linux/kernel.h>
//ust// #include <linux/timex.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/ltt-core.h>
//ust// #include <linux/marker.h>
//ust// #include <linux/trace-clock.h>
//ust// #include <asm/atomic.h>
//ust// #include <asm/local.h>
#include <sys/types.h>
#include <stdarg.h>
#include "relay.h"
#include "list.h"
#include "kernelcompat.h"
#include "channels.h"
#include "tracercore.h"

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE			32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL	1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE			LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT		LTT_ARCH_VARIANT_NONE
#endif

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS	10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

/* Serialization callback '%k' */
typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
			struct ltt_serialize_closure *closure,
			void *serialize_private, int *largest_align,
			const char *fmt, va_list *args);

struct ltt_serialize_closure {
	ltt_serialize_cb *callbacks;
	long cb_args[LTT_NR_CALLBACKS];
	unsigned int cb_idx;
};

size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
			struct ltt_serialize_closure *closure,
			void *serialize_private,
			int *largest_align, const char *fmt, va_list *args);

//ust// struct ltt_available_probe {
//ust// 	const char *name;		/* probe name */
//ust// 	const char *format;
//ust// 	marker_probe_func *probe_func;
//ust// 	ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
//ust// 	struct list_head node;		/* registered probes list */
//ust// };

struct ltt_probe_private_data {
	struct ltt_trace_struct *trace;	/*
					 * Target trace, for metadata
					 * or statedump.
					 */
	ltt_serialize_cb serializer;	/*
					 * Serialization function override.
					 */
	void *serialize_private;	/*
					 * Private data for serialization
					 * functions.
					 */
};

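/*
 * Illustrative sketch (not part of this API): a probe could override the
 * default serialization by installing its own ltt_serialize_cb in the
 * `serializer' field above. The stub below only matches the expected
 * signature and writes nothing; a real override would serialize its private
 * data into the buffer and return the updated offset.
 *
 *	static size_t my_null_serializer(struct rchan_buf *buf,
 *			size_t buf_offset,
 *			struct ltt_serialize_closure *closure,
 *			void *serialize_private, int *largest_align,
 *			const char *fmt, va_list *args)
 *	{
 *		return buf_offset;	(nothing written)
 *	}
 *
 *	struct ltt_probe_private_data pdata = {
 *		.trace = NULL,		(hypothetical: target trace)
 *		.serializer = my_null_serializer,
 *		.serialize_private = NULL,
 *	};
 */
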
enum ltt_channels {
	LTT_CHANNEL_METADATA,
	LTT_CHANNEL_UST,
};

struct ltt_active_marker {
	struct list_head node;		/* active markers list */
	const char *channel;
	const char *name;
	const char *format;
	struct ltt_available_probe *probe;
};

struct marker; //ust//

extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
		void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
		void *call_data, const char *fmt, ...);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
	MARKER_ID_SET_MARKER_ID = 0,	/* Static IDs available (range 0-7) */
	MARKER_ID_SET_MARKER_FORMAT,
	MARKER_ID_COMPACT,		/* Compact IDs (range: 8-127) */
	MARKER_ID_DYNAMIC,		/* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS		2
static inline enum marker_id marker_id_type(uint16_t id)
{
	if (id < MARKER_CORE_IDS)
		return (enum marker_id)id;
	else
		return MARKER_ID_DYNAMIC;
}

//ust// #ifdef CONFIG_LTT

struct user_dbg_data {
	unsigned long avail_size;
	unsigned long write;
	unsigned long read;
};

struct ltt_trace_ops {
	/* First 32 bytes cache-hot cacheline */
	int (*reserve_slot) (struct ltt_trace_struct *trace,
				struct ltt_channel_struct *channel,
				void **transport_data, size_t data_size,
				size_t *slot_size, long *buf_offset, u64 *tsc,
				unsigned int *rflags,
				int largest_align,
				int cpu);
	void (*commit_slot) (struct ltt_channel_struct *channel,
				void **transport_data, long buf_offset,
				size_t slot_size);
	void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
	int (*user_blocking) (struct ltt_trace_struct *trace,
				unsigned int index, size_t data_size,
				struct user_dbg_data *dbg);
	/* End of first 32 bytes cacheline */
	int (*create_dirs) (struct ltt_trace_struct *new_trace);
	void (*remove_dirs) (struct ltt_trace_struct *new_trace);
	int (*create_channel) (const char *trace_name,
				struct ltt_trace_struct *trace,
				struct dentry *dir, const char *channel_name,
				struct ltt_channel_struct *ltt_chan,
				unsigned int subbuf_size,
				unsigned int n_subbufs, int overwrite);
	void (*finish_channel) (struct ltt_channel_struct *channel);
	void (*remove_channel) (struct ltt_channel_struct *channel);
	void (*user_errors) (struct ltt_trace_struct *trace,
				unsigned int index, size_t data_size,
				struct user_dbg_data *dbg, int cpu);
//ust// #ifdef CONFIG_HOTPLUG_CPU
//ust// 	int (*handle_cpuhp) (struct notifier_block *nb,
//ust// 				unsigned long action, void *hcpu,
//ust// 				struct ltt_trace_struct *trace);
//ust// #endif
} ____cacheline_aligned;

struct ltt_transport {
	char *name;
	struct module *owner;
	struct list_head node;
	struct ltt_trace_ops ops;
};

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE	(1U<<0)
#define CHANNEL_FLAG_OVERWRITE	(1U<<1)

/* Per-trace information - each trace/flight recorder is represented by one */
struct ltt_trace_struct {
	/* First 32 bytes cache-hot cacheline */
	struct list_head list;
	struct ltt_trace_ops *ops;
	int active;
	/* Second 32 bytes cache-hot cacheline */
	struct ltt_channel_struct *channels;
	unsigned int nr_channels;
	u32 freq_scale;
	u64 start_freq;
	u64 start_tsc;
	unsigned long long start_monotonic;
	struct timeval start_time;
	struct ltt_channel_setting *settings;
	struct {
		struct dentry *trace_root;
	} dentry;
//ust//	struct rchan_callbacks callbacks;
	struct kref kref;	/* Each channel has a kref of the trace struct */
	struct ltt_transport *transport;
	struct kref ltt_transport_kref;
//ust//	wait_queue_head_t kref_wq;	/* Place for ltt_trace_destroy to sleep */
	char trace_name[NAME_MAX];
} ____cacheline_aligned;

/* Hardcoded event headers
 *
 * event header for a trace with active heartbeat : 27 bits timestamps
 *
 * headers are 32-bits aligned. In order to ensure such alignment, a dynamic
 * per-trace alignment value must be used.
 *
 * Remember that the C compiler aligns each member on a boundary equal to its
 * own size.
 *
 * As relay subbuffers are aligned on pages, we are sure that they are 4 and 8
 * bytes aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note using C structure bitfields for cross-endianness and portability
 * concerns.
 */

#define LTT_RESERVED_EVENTS	3
#define LTT_EVENT_BITS		5
#define LTT_FREE_EVENTS		((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS		27
#define LTT_TSC_MASK		((1 << LTT_TSC_BITS) - 1)

struct ltt_event_header {
	u32 id_time;		/* 5 bits event id (MSB); 27 bits time (LSB) */
};

/* Reservation flags */
#define	LTT_RFLAG_ID			(1 << 0)
#define	LTT_RFLAG_ID_SIZE		(1 << 1)
#define	LTT_RFLAG_ID_SIZE_TSC		(1 << 2)

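/*
 * Illustrative example of the id_time packing above (a sketch, not part of
 * the API): the event id occupies the top LTT_EVENT_BITS bits and the
 * timestamp the low LTT_TSC_BITS bits. Of the 32 available ids, the 3
 * reserved ones (31, 30, 29) are used by ltt_write_event_header() below as
 * escape codes announcing the LTT_RFLAG_ID, LTT_RFLAG_ID_SIZE and
 * LTT_RFLAG_ID_SIZE_TSC extended headers. `eID' and `tsc' are hypothetical
 * caller-provided values:
 *
 *	u32 id_time = ((u32)eID << LTT_TSC_BITS) | ((u32)tsc & LTT_TSC_MASK);
 *	u16 id      = id_time >> LTT_TSC_BITS;		(top 5 bits)
 *	u32 tsc27   = id_time & LTT_TSC_MASK;		(low 27 bits)
 */
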
/*
 * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
 * specifically with CPU frequency scaling someday, so using an interpolation
 * between the start and end of buffer values is not flexible enough. Using an
 * immediate frequency value permits calculating directly the times for parts
 * of a buffer that would be before a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
	uint64_t cycle_count_begin;	/* Cycle count at subbuffer start */
	uint64_t cycle_count_end;	/* Cycle count at subbuffer end */
	uint32_t magic_number;		/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t arch_size;		/* Architecture pointer size */
	uint8_t alignment;		/* LTT data alignment */
	uint64_t start_time_sec;	/* NTP-corrected start time */
	uint64_t start_time_usec;
	uint64_t start_freq;		/*
					 * Frequency at trace start,
					 * used all along the trace.
					 */
	uint32_t freq_scale;		/* Frequency scaling (divisor) */
	uint32_t lost_size;		/* Size unused at end of subbuffer */
	uint32_t buf_size;		/* Size of this subbuffer */
	uint32_t events_lost;		/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint32_t subbuf_corrupt;	/*
					 * Corrupted (lost) subbuffers since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint8_t header_end[0];		/* End of header */
};

/**
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static inline size_t ltt_subbuffer_header_size(void)
{
	return offsetof(struct ltt_subbuffer_header, header_end);
}

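/*
 * For reference, a sketch assuming natural field alignment with no padding
 * (which is what the layout above is designed to guarantee): the fields of
 * struct ltt_subbuffer_header add up to 68 bytes, so on common ABIs
 * ltt_subbuffer_header_size() returns 68 and the first event of a subbuffer
 * is written 68 bytes past its first byte.
 *
 *	size_t hdr = ltt_subbuffer_header_size();	(typically 68)
 */
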
/*
 * ltt_get_header_size
 *
 * Calculate alignment offset to 32-bits. This is the alignment offset of the
 * event header.
 *
 * Important note :
 * The event header must be 32-bits. The total offset calculated here :
 *
 * Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct) (32-bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static inline unsigned char ltt_get_header_size(
		struct ltt_channel_struct *channel,
		size_t offset,
		size_t data_size,
		size_t *before_hdr_pad,
		unsigned int rflags)
{
	size_t orig_offset = offset;
	size_t padding;

//ust//	BUILD_BUG_ON(sizeof(struct ltt_event_header) != sizeof(u32));

	padding = ltt_align(offset, sizeof(struct ltt_event_header));
	offset += padding;
	offset += sizeof(struct ltt_event_header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		offset += ltt_align(offset, sizeof(u64));
		offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		break;
	case LTT_RFLAG_ID:
		offset += sizeof(u16);
		break;
	}

	*before_hdr_pad = padding;
	return offset - orig_offset;
}

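/*
 * Worked example (illustrative, assuming ltt_align() returns the number of
 * padding bytes needed to reach the requested alignment): an event with
 * data_size < 0xFFFF reserved with LTT_RFLAG_ID_SIZE_TSC at offset 0 gives
 *
 *	before_hdr_pad = 0	(offset already 32-bit aligned)
 *	+ 4	sizeof(struct ltt_event_header)
 *	+ 2	u16 extended event id
 *	+ 2	u16 event size
 *	+ 0	offset 8 is already 64-bit aligned
 *	+ 8	u64 full TSC
 *	= 16 bytes returned by ltt_get_header_size()
 */
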
/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @trace : trace to write to.
 * @channel : pointer to the channel structure.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * returns : offset where the event data must be written.
 */
static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *channel,
		struct rchan_buf *buf, long buf_offset,
		u16 eID, size_t event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	size_t small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	default:
		header.id_time = eID << LTT_TSC_BITS;
		break;
	}
	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ltt_relay_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ltt_relay_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ltt_relay_write(buf, buf_offset,
			(u64[]){ (u64)tsc }, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ltt_relay_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	default:
		break;
	}

	return buf_offset;
}

/* Lockless LTTng */

/* Buffer offset macros */

/*
 * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
 * the offset, which leaves only the buffer number.
 */
#define BUFFER_TRUNC(offset, chan) \
	((offset) & (~((chan)->alloc_size-1)))
#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
#define SUBBUF_ALIGN(offset, chan) \
	(((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_TRUNC(offset, chan) \
	((offset) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_INDEX(offset, chan) \
	(BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)

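/*
 * Worked example (illustrative, with hypothetical channel geometry): for a
 * channel with subbuf_size = 0x10000 (subbuf_size_order = 16) and 4
 * sub-buffers (alloc_size = 0x40000), an offset of 0x28004 decomposes as:
 *
 *	BUFFER_OFFSET(0x28004, chan) = 0x28004	(offset within the buffer)
 *	SUBBUF_INDEX(0x28004, chan)  = 2	(third sub-buffer)
 *	SUBBUF_OFFSET(0x28004, chan) = 0x8004	(offset within that sub-buffer)
 *	SUBBUF_TRUNC(0x28004, chan)  = 0x20000	(start of that sub-buffer)
 *	SUBBUF_ALIGN(0x28004, chan)  = 0x30000	(start of the next sub-buffer)
 *	BUFFER_TRUNC(0x28004, chan)  = 0x0	(start of the buffer)
 */
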
/*
 * ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved offset (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @rflags : reservation flags (header specificity)
 * @cpu : cpu id
 *
 * Return : -ENOSPC if not enough space, else 0.
 */
static inline int ltt_reserve_slot(
		struct ltt_trace_struct *trace,
		struct ltt_channel_struct *channel,
		void **transport_data,
		size_t data_size,
		size_t *slot_size,
		long *buf_offset,
		u64 *tsc,
		unsigned int *rflags,
		int largest_align,
		int cpu)
{
	return trace->ops->reserve_slot(trace, channel, transport_data,
			data_size, slot_size, buf_offset, tsc, rflags,
			largest_align, cpu);
}

/*
 * ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @buf_offset : offset of beginning of reserved slot
 * @slot_size : size of the reserved slot.
 */
static inline void ltt_commit_slot(
		struct ltt_channel_struct *channel,
		void **transport_data,
		long buf_offset,
		size_t slot_size)
{
	struct ltt_trace_struct *trace = channel->trace;

	trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
}

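/*
 * Putting it together - a minimal sketch (not taken from the call sites in
 * this tree) of how an event of `data_size' bytes might be logged through the
 * helpers above. `trace', `channel', `buf', `eID', `rflags', `largest_align',
 * `cpu' and the payload write are assumed to be provided by the caller:
 *
 *	void *transport_data;
 *	size_t slot_size;
 *	long buf_offset, slot_begin;
 *	u64 tsc;
 *
 *	if (ltt_reserve_slot(trace, channel, &transport_data, data_size,
 *			&slot_size, &buf_offset, &tsc, &rflags,
 *			largest_align, cpu) < 0)
 *		return;			(-ENOSPC: no room in the buffer)
 *	slot_begin = buf_offset;	(beginning of the reserved slot)
 *	buf_offset = ltt_write_event_header(trace, channel, buf, buf_offset,
 *			eID, data_size, tsc, rflags);
 *	(... serialize `data_size' bytes of payload at buf_offset ...)
 *	ltt_commit_slot(channel, &transport_data, slot_begin, slot_size);
 */
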
/*
 * Control channels :
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel :
 * cpu
 */
//ust// #define LTT_RELAY_ROOT		"ltt"
//ust// #define LTT_RELAY_LOCKED_ROOT	"ltt-locked"

#define LTT_METADATA_CHANNEL		"metadata_state"
#define LTT_UST_CHANNEL			"ust"

#define LTT_FLIGHT_PREFIX	"flight-"

/* Tracer properties */
#define LTT_DEFAULT_SUBBUF_SIZE_LOW	65536
#define LTT_DEFAULT_N_SUBBUFS_LOW	2
#define LTT_DEFAULT_SUBBUF_SIZE_MED	262144
#define LTT_DEFAULT_N_SUBBUFS_MED	2
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	1048576
#define LTT_DEFAULT_N_SUBBUFS_HIGH	2
#define LTT_TRACER_MAGIC_NUMBER		0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR	2
#define LTT_TRACER_VERSION_MINOR	3

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL		4096

/* Register and unregister function pointers */

enum ltt_module_function {
	LTT_FUNCTION_RUN_FILTER,
	LTT_FUNCTION_FILTER_CONTROL,
	LTT_FUNCTION_STATEDUMP
};

//ust// extern int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 		struct module *owner);
//ust// extern void ltt_module_unregister(enum ltt_module_function name);

void ltt_transport_register(struct ltt_transport *transport);
void ltt_transport_unregister(struct ltt_transport *transport);

/* Exported control function */

//ust// enum ltt_control_msg {
//ust// 	LTT_CONTROL_START,
//ust// 	LTT_CONTROL_STOP,
//ust// 	LTT_CONTROL_CREATE_TRACE,
//ust// 	LTT_CONTROL_DESTROY_TRACE
//ust// };

union ltt_control_args {
	struct {
		enum trace_mode mode;
		unsigned int subbuf_size_low;
		unsigned int n_subbufs_low;
		unsigned int subbuf_size_med;
		unsigned int n_subbufs_med;
		unsigned int subbuf_size_high;
		unsigned int n_subbufs_high;
	} new_trace;
};

int _ltt_trace_setup(const char *trace_name);
int ltt_trace_setup(const char *trace_name);
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
int ltt_trace_set_type(const char *trace_name, const char *trace_type);
int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size);
int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt);
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable);
int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite);
int ltt_trace_alloc(const char *trace_name);
int ltt_trace_destroy(const char *trace_name);
int ltt_trace_start(const char *trace_name);
int ltt_trace_stop(const char *trace_name);

//ust// extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust// 		const char *trace_type, union ltt_control_args args);

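/*
 * A plausible control sequence using the setup functions above (illustrative
 * only; the exact required ordering and return conventions are defined by the
 * implementation, not by this header, and the trace name and transport type
 * strings are hypothetical): set the trace up, configure a channel, allocate
 * buffers, then start, stop and destroy it.
 *
 *	if (ltt_trace_setup("trace1") == 0) {
 *		ltt_trace_set_type("trace1", "relay");
 *		ltt_trace_set_channel_subbufsize("trace1", LTT_UST_CHANNEL,
 *				LTT_DEFAULT_SUBBUF_SIZE_MED);
 *		ltt_trace_set_channel_subbufcount("trace1", LTT_UST_CHANNEL,
 *				LTT_DEFAULT_N_SUBBUFS_MED);
 *		ltt_trace_alloc("trace1");
 *		ltt_trace_start("trace1");
 *		(... tracing ...)
 *		ltt_trace_stop("trace1");
 *		ltt_trace_destroy("trace1");
 *	}
 */
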
enum ltt_filter_control_msg {
	LTT_FILTER_DEFAULT_ACCEPT,
	LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
		const char *trace_name);

extern struct dentry *get_filter_root(void);

void ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);

void ltt_core_register(int (*function)(u8, void *));

void ltt_core_unregister(void);

void ltt_release_trace(struct kref *kref);
void ltt_release_transport(struct kref *kref);

extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
extern int ltt_marker_connect(const char *channel, const char *mname,
		const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
		const char *pname);
extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);

void ltt_lock_traces(void);
void ltt_unlock_traces(void);

//ust// extern void ltt_dump_softirq_vec(void *call_data);
//ust//
//ust// #ifdef CONFIG_HAVE_LTT_DUMP_TABLES
//ust// extern void ltt_dump_sys_call_table(void *call_data);
//ust// extern void ltt_dump_idt_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_sys_call_table(void *call_data)
//ust// {
//ust// }
//ust//
//ust// static inline void ltt_dump_idt_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// #ifdef CONFIG_LTT_KPROBES
//ust// extern void ltt_dump_kprobes_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_kprobes_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// /* Relay IOCTL */
//ust//
//ust// /* Get the next sub buffer that can be read. */
//ust// #define RELAY_GET_SUBBUF	_IOR(0xF5, 0x00, __u32)
//ust// /* Release the oldest reserved (by "get") sub buffer. */
//ust// #define RELAY_PUT_SUBBUF	_IOW(0xF5, 0x01, __u32)
//ust// /* returns the number of sub buffers in the per cpu channel. */
//ust// #define RELAY_GET_N_SUBBUFS	_IOR(0xF5, 0x02, __u32)
//ust// /* returns the size of the sub buffers. */
//ust// #define RELAY_GET_SUBBUF_SIZE	_IOR(0xF5, 0x03, __u32)

//ust// #endif /* CONFIG_LTT */

#endif /* _LTT_TRACER_H */