/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H
//ust// #include <stdarg.h>
//ust// #include <linux/types.h>
//ust// #include <linux/limits.h>
//ust// #include <linux/list.h>
//ust// #include <linux/cache.h>
//ust// #include <linux/kernel.h>
//ust// #include <linux/timex.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/ltt-core.h>
//ust// #include <linux/marker.h>
//ust// #include <linux/trace-clock.h>
//ust// #include <asm/atomic.h>
//ust// #include <asm/local.h>
#include <sys/types.h>

#include "kernelcompat.h"
#include "tracercore.h"
/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE			32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL	1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE			LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT		LTT_ARCH_VARIANT_NONE
#endif
struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS	10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

/* Serialization callback '%k' */
typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
			struct ltt_serialize_closure *closure,
			void *serialize_private, int *largest_align,
			const char *fmt, va_list *args);
struct ltt_serialize_closure {
	ltt_serialize_cb *callbacks;
	long cb_args[LTT_NR_CALLBACKS];
};
size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
			struct ltt_serialize_closure *closure,
			void *serialize_private,
			int *largest_align, const char *fmt, va_list *args);
//ust// struct ltt_available_probe {
//ust// 	const char *name;		/* probe name */
//ust// 	const char *format;
//ust// 	marker_probe_func *probe_func;
//ust// 	ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
//ust// 	struct list_head node;		/* registered probes list */
//ust// };
struct ltt_probe_private_data {
	struct ltt_trace_struct *trace;	/* Target trace, for metadata */
	ltt_serialize_cb serializer;	/* Serialization function override */
	void *serialize_private;	/* Private data for serialization */
};
struct ltt_active_marker {
	struct list_head node;		/* active markers list */
	struct ltt_available_probe *probe;
};

struct marker; //ust//
extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
			void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
			void *call_data, const char *fmt, ...);
/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
	MARKER_ID_SET_MARKER_ID = 0,	/* Static IDs available (range 0-7) */
	MARKER_ID_SET_MARKER_FORMAT,
	MARKER_ID_COMPACT,		/* Compact IDs (range: 8-127) */
	MARKER_ID_DYNAMIC,		/* Dynamic IDs (range: 128-65535) */
};

/* Static IDs 0-1 reserved for internal use. */
#define MARKER_CORE_IDS		2
static inline enum marker_id marker_id_type(uint16_t id)
{
	if (id < MARKER_CORE_IDS)
		return (enum marker_id)id;
	else
		return MARKER_ID_DYNAMIC;
}
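
/*
 * Illustrative examples (not part of the original header): how 16-bit event
 * IDs map to marker ID types with the helper above. IDs below MARKER_CORE_IDS
 * map directly to their enum value, everything else is reported as dynamic.
 *
 *	marker_id_type(0)     == MARKER_ID_SET_MARKER_ID
 *	marker_id_type(1)     == MARKER_ID_SET_MARKER_FORMAT
 *	marker_id_type(42)    == MARKER_ID_DYNAMIC
 *	marker_id_type(60000) == MARKER_ID_DYNAMIC
 */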
//ust// #ifdef CONFIG_LTT

struct user_dbg_data {
	unsigned long avail_size;
};
struct ltt_trace_ops {
	/* First 32 bytes cache-hot cacheline */
	int (*reserve_slot) (struct ltt_trace_struct *trace,
				struct ltt_channel_struct *channel,
				void **transport_data, size_t data_size,
				size_t *slot_size, long *buf_offset, u64 *tsc,
				unsigned int *rflags);
	void (*commit_slot) (struct ltt_channel_struct *channel,
				void **transport_data, long buf_offset,
				size_t slot_size);
	void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
	int (*user_blocking) (struct ltt_trace_struct *trace,
				unsigned int index, size_t data_size,
				struct user_dbg_data *dbg);
	/* End of first 32 bytes cacheline */
	int (*create_dirs) (struct ltt_trace_struct *new_trace);
	void (*remove_dirs) (struct ltt_trace_struct *new_trace);
	int (*create_channel) (const char *trace_name,
				struct ltt_trace_struct *trace,
				struct dentry *dir, const char *channel_name,
				struct ltt_channel_struct *ltt_chan,
				unsigned int subbuf_size,
				unsigned int n_subbufs, int overwrite);
	void (*finish_channel) (struct ltt_channel_struct *channel);
	void (*remove_channel) (struct ltt_channel_struct *channel);
	void (*user_errors) (struct ltt_trace_struct *trace,
				unsigned int index, size_t data_size,
				struct user_dbg_data *dbg, int cpu);
//ust// #ifdef CONFIG_HOTPLUG_CPU
//ust// 	int (*handle_cpuhp) (struct notifier_block *nb,
//ust// 				unsigned long action, void *hcpu,
//ust// 				struct ltt_trace_struct *trace);
//ust// #endif
} ____cacheline_aligned;
struct ltt_transport {
	struct module *owner;
	struct list_head node;
	struct ltt_trace_ops ops;
};
enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE	(1U<<0)
#define CHANNEL_FLAG_OVERWRITE	(1U<<1)
/* Per-trace information - each trace/flight recorder represented by one */
struct ltt_trace_struct {
	/* First 32 bytes cache-hot cacheline */
	struct list_head list;
	struct ltt_trace_ops *ops;

	/* Second 32 bytes cache-hot cacheline */
	struct ltt_channel_struct *channels;
	unsigned int nr_channels;

	unsigned long long start_monotonic;
	struct timeval start_time;
	struct ltt_channel_setting *settings;

	struct dentry *trace_root;
//ust//	struct rchan_callbacks callbacks;
	struct kref kref;		/* Each channel has a kref of the trace struct */
	struct ltt_transport *transport;
	struct kref ltt_transport_kref;
//ust//	wait_queue_head_t kref_wq;	/* Place for ltt_trace_destroy to sleep */
	char trace_name[NAME_MAX];
} ____cacheline_aligned;
/*
 * Hardcoded event headers
 *
 * Event header for a trace with active heartbeat : 27-bit timestamps.
 *
 * Headers are 32-bit aligned. In order to ensure such alignment, a dynamic
 * per-trace alignment value must be used.
 *
 * Remember that the C compiler aligns each member on a boundary equal to
 * its own size.
 *
 * As relay subbuffers are page-aligned, we are sure that they are 4- and
 * 8-byte aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note: C structure bitfields are avoided here for cross-endianness and
 * portability concerns.
 */
#define LTT_RESERVED_EVENTS	3
#define LTT_EVENT_BITS		5
#define LTT_FREE_EVENTS		((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS		27
#define LTT_TSC_MASK		((1 << LTT_TSC_BITS) - 1)
struct ltt_event_header {
	u32 id_time;		/* 5 bits event id (MSB); 27 bits time (LSB) */
};
/* Reservation flags */
#define LTT_RFLAG_ID		(1 << 0)
#define LTT_RFLAG_ID_SIZE	(1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC	(1 << 2)
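
/*
 * Illustrative sketch (not part of the original header): how the compact
 * 32-bit header above is built. The event ID occupies the top LTT_EVENT_BITS
 * bits and the truncated TSC the low LTT_TSC_BITS bits; the LTT_RFLAG_*
 * reservation flags request extended fields (16-bit ID, 16/32-bit size,
 * full 64-bit TSC) written after this word.
 *
 *	uint32_t pack_id_time(uint16_t id, uint64_t tsc)
 *	{
 *		return ((uint32_t)id << LTT_TSC_BITS) |
 *			((uint32_t)tsc & LTT_TSC_MASK);
 *	}
 *
 * For example, id 3 at tsc 0x12345678 gives
 * (3 << 27) | (0x12345678 & 0x07FFFFFF) = 0x1A345678.
 */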
/*
 * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
 * specifically with CPU frequency scaling someday, so using an interpolation
 * between the start and end of buffer values is not flexible enough. Using an
 * immediate frequency value permits us to calculate directly the times for
 * parts of a buffer that would be before a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
	uint64_t cycle_count_begin;	/* Cycle count at subbuffer start */
	uint64_t cycle_count_end;	/* Cycle count at subbuffer end */
	uint32_t magic_number;		/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t arch_size;		/* Architecture pointer size */
	uint8_t alignment;		/* LTT data alignment */
	uint64_t start_time_sec;	/* NTP-corrected start time */
	uint64_t start_time_usec;
	uint64_t start_freq;		/*
					 * Frequency at trace start,
					 * used all along the trace.
					 */
	uint32_t freq_scale;		/* Frequency scaling (divisor) */
	uint32_t lost_size;		/* Size unused at the end of subbuffer */
	uint32_t buf_size;		/* Size of this subbuffer */
	uint32_t events_lost;		/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 */
	uint32_t subbuf_corrupt;	/*
					 * Corrupted (lost) subbuffers since
					 * the beginning of the trace.
					 */
	uint8_t header_end[0];		/* End of header */
};
/*
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures.
 */
static inline size_t ltt_subbuffer_header_size(void)
{
	return offsetof(struct ltt_subbuffer_header, header_end);
}
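
/*
 * Illustrative sketch (not part of the original header): why offsetof() of a
 * zero-length trailing member is used instead of sizeof(). sizeof() would
 * include any tail padding the compiler adds after the last field, while
 * offsetof(..., header_end) stops exactly where event data may begin.
 *
 *	struct example {
 *		uint64_t a;
 *		uint32_t b;
 *		uint8_t end[0];
 *	};
 *
 * On most ABIs, sizeof(struct example) == 16 (4 bytes of tail padding),
 * whereas offsetof(struct example, end) == 12.
 */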
/*
 * ltt_get_header_size
 *
 * Calculate alignment offset to 32-bits. This is the alignment offset of the
 * event header.
 *
 * The event header must be 32-bits. The total offset calculated here :
 *
 *   Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct)  (32-bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static inline unsigned char ltt_get_header_size(
		struct ltt_channel_struct *channel,
		size_t offset,
		size_t data_size,
		size_t *before_hdr_pad,
		unsigned int rflags)
{
	size_t orig_offset = offset;
	size_t padding;

	//ust// BUILD_BUG_ON(sizeof(struct ltt_event_header) != sizeof(u32));

	padding = ltt_align(offset, sizeof(struct ltt_event_header));
	offset += padding;
	offset += sizeof(struct ltt_event_header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		offset += ltt_align(offset, sizeof(u64));
		offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		break;
	case LTT_RFLAG_ID:
		offset += sizeof(u16);
		break;
	}

	*before_hdr_pad = padding;
	return offset - orig_offset;
}
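
/*
 * Illustrative worked example (not part of the original header), assuming the
 * reserve offset is already 32-bit aligned and the payload is smaller than
 * 0xFFFF bytes:
 *
 *	rflags == 0			-> 4 bytes (compact u32 header only)
 *	rflags == LTT_RFLAG_ID		-> 4 + 2		= 6 bytes
 *	rflags == LTT_RFLAG_ID_SIZE	-> 4 + 2 + 2		= 8 bytes
 *	rflags == LTT_RFLAG_ID_SIZE_TSC	-> 4 + 2 + 2 + 0 + 8	= 16 bytes
 *
 * In the TSC case the offset happens to land 64-bit aligned here; otherwise
 * up to 4 padding bytes are inserted before the full 64-bit TSC.
 */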
/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @trace : trace to write to.
 * @channel : pointer to the channel structure.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID.
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * returns : offset where the event data must be written.
 */
static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *channel,
		struct rchan_buf *buf, long buf_offset,
		u16 eID, size_t event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	size_t small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	default:
		header.id_time = eID << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ltt_relay_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ltt_relay_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ltt_relay_write(buf, buf_offset,
			(u64[]){ (u64)tsc }, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ltt_relay_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ltt_relay_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}
/* Buffer offset macros */

/*
 * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
 * the offset, which leaves only the buffer number.
 */
#define BUFFER_TRUNC(offset, chan) \
	((offset) & (~((chan)->alloc_size-1)))
#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
#define SUBBUF_ALIGN(offset, chan) \
	(((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_TRUNC(offset, chan) \
	((offset) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_INDEX(offset, chan) \
	(BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
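
/*
 * Illustrative worked example (not part of the original header), assuming a
 * hypothetical channel with subbuf_size = 0x1000 (subbuf_size_order = 12),
 * 4 sub-buffers, hence alloc_size = 0x4000, and offset = 0x6a30:
 *
 *	BUFFER_TRUNC(0x6a30, chan)  == 0x4000	(start of current buffer)
 *	BUFFER_OFFSET(0x6a30, chan) == 0x2a30	(offset within the buffer)
 *	SUBBUF_INDEX(0x6a30, chan)  == 2	(third sub-buffer)
 *	SUBBUF_OFFSET(0x6a30, chan) == 0xa30	(offset within the sub-buffer)
 *	SUBBUF_TRUNC(0x6a30, chan)  == 0x6000	(start of current sub-buffer)
 *	SUBBUF_ALIGN(0x6a30, chan)  == 0x7000	(start of the next sub-buffer)
 */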
/*
 * ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * @trace : the trace structure to log to.
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @buf_offset : pointer to the reserved offset (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @rflags : reservation flags (header specificity)
 *
 * Return : -ENOSPC if not enough space, else 0.
 */
static inline int ltt_reserve_slot(
		struct ltt_trace_struct *trace,
		struct ltt_channel_struct *channel,
		void **transport_data,
		size_t data_size,
		size_t *slot_size,
		long *buf_offset,
		u64 *tsc,
		unsigned int *rflags)
{
	return trace->ops->reserve_slot(trace, channel, transport_data,
			data_size, slot_size, buf_offset, tsc, rflags);
}
/*
 * ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * @channel : the channel to commit into.
 * @transport_data : specific transport data.
 * @buf_offset : offset of the beginning of the reserved slot
 * @slot_size : size of the reserved slot.
 */
static inline void ltt_commit_slot(
		struct ltt_channel_struct *channel,
		void **transport_data,
		long buf_offset,
		size_t slot_size)
{
	struct ltt_trace_struct *trace = channel->trace;

	trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
}
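
/*
 * Illustrative usage sketch (not part of the original header). A probe would
 * typically reserve a slot, write the event header and payload, then commit.
 * The identifiers trace, chan, buf, eID and data_size are placeholders
 * assumed to be in scope; error handling is reduced to dropping the event.
 *
 *	size_t slot_size;
 *	long reserve_offset, buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	void *transport_data;
 *
 *	if (ltt_reserve_slot(trace, chan, &transport_data, data_size,
 *			     &slot_size, &reserve_offset, &tsc, &rflags))
 *		return;		(-ENOSPC: buffer full, event is dropped)
 *
 *	buf_offset = ltt_write_event_header(trace, chan, buf, reserve_offset,
 *					    eID, data_size, tsc, rflags);
 *	... serialize the payload at buf_offset ...
 *	ltt_commit_slot(chan, &transport_data, reserve_offset, slot_size);
 */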
//ust// #define LTT_RELAY_ROOT		"ltt"
//ust// #define LTT_RELAY_LOCKED_ROOT	"ltt-locked"

#define LTT_METADATA_CHANNEL		"metadata_state"
#define LTT_UST_CHANNEL			"ust"

#define LTT_FLIGHT_PREFIX		"flight-"

/* Tracer properties */
#define LTT_DEFAULT_SUBBUF_SIZE_LOW	65536
#define LTT_DEFAULT_N_SUBBUFS_LOW	2
#define LTT_DEFAULT_SUBBUF_SIZE_MED	262144
#define LTT_DEFAULT_N_SUBBUFS_MED	2
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	1048576
#define LTT_DEFAULT_N_SUBBUFS_HIGH	2
#define LTT_TRACER_MAGIC_NUMBER		0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR	2
#define LTT_TRACER_VERSION_MINOR	3

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL		4096
/* Register and unregister function pointers */

enum ltt_module_function {
	LTT_FUNCTION_RUN_FILTER,
	LTT_FUNCTION_FILTER_CONTROL,
	LTT_FUNCTION_STATEDUMP
};

//ust// extern int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 			struct module *owner);
//ust// extern void ltt_module_unregister(enum ltt_module_function name);
void ltt_transport_register(struct ltt_transport *transport);
void ltt_transport_unregister(struct ltt_transport *transport);

/* Exported control function */
//ust// enum ltt_control_msg {
//ust// 	LTT_CONTROL_START,
//ust// 	LTT_CONTROL_STOP,
//ust// 	LTT_CONTROL_CREATE_TRACE,
//ust// 	LTT_CONTROL_DESTROY_TRACE
//ust// };
union ltt_control_args {
	struct {
		enum trace_mode mode;
		unsigned int subbuf_size_low;
		unsigned int n_subbufs_low;
		unsigned int subbuf_size_med;
		unsigned int n_subbufs_med;
		unsigned int subbuf_size_high;
		unsigned int n_subbufs_high;
	} new_trace;
};
int _ltt_trace_setup(const char *trace_name);
int ltt_trace_setup(const char *trace_name);
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
int ltt_trace_set_type(const char *trace_name, const char *trace_type);
int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size);
int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt);
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable);
int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite);
int ltt_trace_alloc(const char *trace_name);
int ltt_trace_destroy(const char *trace_name);
int ltt_trace_start(const char *trace_name);
int ltt_trace_stop(const char *trace_name);
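
/*
 * Illustrative control sequence (not part of the original header): how the
 * prototypes above are typically combined to set up, run and tear down a
 * trace. The trace name "auto" and the transport type string are
 * placeholders; error handling is omitted for brevity.
 *
 *	ltt_trace_setup("auto");
 *	ltt_trace_set_type("auto", transport_name);
 *	ltt_trace_set_channel_subbufsize("auto", LTT_UST_CHANNEL,
 *					 LTT_DEFAULT_SUBBUF_SIZE_MED);
 *	ltt_trace_set_channel_subbufcount("auto", LTT_UST_CHANNEL,
 *					  LTT_DEFAULT_N_SUBBUFS_MED);
 *	ltt_trace_set_channel_enable("auto", LTT_UST_CHANNEL, 1);
 *	ltt_trace_alloc("auto");
 *	ltt_trace_start("auto");
 *	...
 *	ltt_trace_stop("auto");
 *	ltt_trace_destroy("auto");
 */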
//ust// extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust// 			const char *trace_type, union ltt_control_args args);

enum ltt_filter_control_msg {
	LTT_FILTER_DEFAULT_ACCEPT,
	LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
		const char *trace_name);
extern struct dentry *get_filter_root(void);

void ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);

void ltt_core_register(int (*function)(u8, void *));

void ltt_core_unregister(void);

void ltt_release_trace(struct kref *kref);
void ltt_release_transport(struct kref *kref);

extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
extern int ltt_marker_connect(const char *channel, const char *mname,
		const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
		const char *pname);
extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);

void ltt_lock_traces(void);
void ltt_unlock_traces(void);
//ust// extern void ltt_dump_softirq_vec(void *call_data);

//ust// #ifdef CONFIG_HAVE_LTT_DUMP_TABLES
//ust// extern void ltt_dump_sys_call_table(void *call_data);
//ust// extern void ltt_dump_idt_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_sys_call_table(void *call_data)
//ust// {
//ust// }
//ust// static inline void ltt_dump_idt_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// #ifdef CONFIG_LTT_KPROBES
//ust// extern void ltt_dump_kprobes_table(void *call_data);
//ust// #else
//ust// static inline void ltt_dump_kprobes_table(void *call_data)
//ust// {
//ust// }
//ust// #endif

//ust// /* Relay IOCTL */

//ust// /* Get the next sub buffer that can be read. */
//ust// #define RELAY_GET_SUBBUF		_IOR(0xF5, 0x00, __u32)
//ust// /* Release the oldest reserved (by "get") sub buffer. */
//ust// #define RELAY_PUT_SUBBUF		_IOW(0xF5, 0x01, __u32)
//ust// /* Returns the number of sub buffers in the per cpu channel. */
//ust// #define RELAY_GET_N_SUBBUFS		_IOR(0xF5, 0x02, __u32)
//ust// /* Returns the size of the sub buffers. */
//ust// #define RELAY_GET_SUBBUF_SIZE	_IOR(0xF5, 0x03, __u32)

//ust// #endif /* CONFIG_LTT */
#endif /* _LTT_TRACER_H */