/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Align data on its natural alignment */
#define RING_BUFFER_ALIGN
#endif

#include <linux/ringbuffer/config.h>

#include <stdarg.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/wait.h>
#include <linux/trace-clock.h>
#include <asm/atomic.h>
#include <asm/local.h>

#include "ltt-tracer-core.h"

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE		32L

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS	10

struct ltt_serialize_closure;

/* Serialization callback */
typedef size_t (*ltt_serialize_cb)(struct lib_ring_buffer *buf,
				   size_t buf_offset,
				   struct ltt_serialize_closure *closure,
				   void *serialize_private,
				   unsigned int stack_pos_ctx,
				   int *largest_align,
				   const char *fmt, va_list *args);

struct ltt_serialize_closure {
	ltt_serialize_cb *callbacks;
	long cb_args[LTT_NR_CALLBACKS];
	unsigned int cb_idx;
};

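/*
 * Illustration only (hence the #if 0 guard): one plausible way a probe could
 * fill a serialization closure before handing it to the serializer. Field
 * usage is inferred from the structure definition above; the exact call
 * protocol lives in the serializer implementation, and the example_* names
 * are hypothetical, not part of the tracer API.
 */
#if 0
static ltt_serialize_cb example_callbacks[LTT_NR_CALLBACKS];

static void example_init_closure(struct ltt_serialize_closure *closure)
{
	closure->callbacks = example_callbacks;	/* callback table for this event */
	closure->cb_idx = 0;			/* start at the first callback */
	closure->cb_args[0] = 0;		/* per-callback private argument */
}
#endif //0
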
size_t ltt_serialize_data(struct lib_ring_buffer *buf, size_t buf_offset,
			  struct ltt_serialize_closure *closure,
			  void *serialize_private, unsigned int stack_pos_ctx,
			  int *largest_align, const char *fmt, va_list *args);

enum ltt_channels {
	LTT_CHANNEL_METADATA,
	LTT_CHANNEL_FD_STATE,
	LTT_CHANNEL_GLOBAL_STATE,
	LTT_CHANNEL_IRQ_STATE,
	LTT_CHANNEL_MODULE_STATE,
	LTT_CHANNEL_NETIF_STATE,
	LTT_CHANNEL_SOFTIRQ_STATE,
	LTT_CHANNEL_SWAP_STATE,
	LTT_CHANNEL_SYSCALL_STATE,
	LTT_CHANNEL_TASK_STATE,
	LTT_CHANNEL_VM_STATE,
	LTT_CHANNEL_FS,
	LTT_CHANNEL_INPUT,
	LTT_CHANNEL_IPC,
	LTT_CHANNEL_KERNEL,
	LTT_CHANNEL_MM,
	LTT_CHANNEL_RCU,
	LTT_CHANNEL_DEFAULT,
};

#if 0
size_t ltt_serialize_printf(struct lib_ring_buffer *buf, unsigned long buf_offset,
			    size_t *msg_size, char *output, size_t outlen,
			    const char *fmt);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
	MARKER_ID_SET_MARKER_ID = 0,	/* Static IDs available (range 0-7) */
	MARKER_ID_SET_MARKER_FORMAT,
	MARKER_ID_COMPACT,		/* Compact IDs (range: 8-127) */
	MARKER_ID_DYNAMIC,		/* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS		2
static __inline__ enum marker_id marker_id_type(uint16_t id)
{
	if (id < MARKER_CORE_IDS)
		return (enum marker_id)id;
	else
		return MARKER_ID_DYNAMIC;
}

struct user_dbg_data {
	unsigned long avail_size;
	unsigned long write;
	unsigned long read;
};

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE	(1U<<0)
#define CHANNEL_FLAG_OVERWRITE	(1U<<1)
#endif //0

#if 0
/* Per-trace information - each trace/flight recorder represented by one */
struct ltt_trace {
	/* First 32 bytes cache-hot cacheline */
	struct list_head list;
	struct ltt_chan **channels;
	unsigned int nr_channels;
	int active;
	/* Second 32 bytes cache-hot cacheline */
	struct ltt_trace_ops *ops;
	u32 freq_scale;
	u64 start_freq;
	u64 start_tsc;
	unsigned long long start_monotonic;
	struct timeval start_time;
	struct ltt_channel_setting *settings;
	struct {
		struct dentry *trace_root;
		struct dentry *ascii_root;
	} dentry;
	struct kref kref;		/* Each channel has a kref of the trace struct */
	struct ltt_transport *transport;
	struct kref ltt_transport_kref;
	wait_queue_head_t kref_wq;	/* Place for ltt_trace_destroy to sleep */
	char trace_name[NAME_MAX];
} ____cacheline_aligned;
#endif //0

/*
 * Hardcoded event headers
 *
 * Event header for a trace with active heartbeat: 27-bit timestamps.
 *
 * Headers are 32-bit aligned. In order to ensure such alignment, a dynamic
 * per-trace alignment value must be used.
 *
 * Remember that the C compiler aligns each member on a boundary equal to its
 * own size.
 *
 * As relay subbuffers are aligned on pages, we are sure that they are 4- and
 * 8-byte aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note: C structure bitfields are not used here, due to cross-endianness and
 * portability concerns.
 */

#define LTT_RESERVED_EVENTS	3
#define LTT_EVENT_BITS		5
#define LTT_FREE_EVENTS		((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS		27
#define LTT_TSC_MASK		((1 << LTT_TSC_BITS) - 1)

struct event_header {
	u32 id_time;		/* 5 bits event id (MSB); 27 bits time (LSB) */
};

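/*
 * Worked example (illustration only, hence the #if 0 guard): how the compact
 * header packs a 5-bit event ID and a 27-bit truncated timestamp into the
 * single id_time word, mirroring ltt_write_event_header() below. The
 * example_* names are hypothetical and not part of the tracer API.
 */
#if 0
static inline u32 example_pack_id_time(u16 eID, u64 tsc)
{
	return ((u32)eID << LTT_TSC_BITS) | ((u32)tsc & LTT_TSC_MASK);
}

static inline void example_unpack_id_time(u32 id_time, u16 *eID, u32 *tsc27)
{
	*eID = id_time >> LTT_TSC_BITS;		/* top 5 bits: event ID */
	*tsc27 = id_time & LTT_TSC_MASK;	/* low 27 bits: timestamp */
}
#endif //0
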
/* Reservation flags */
#define LTT_RFLAG_ID		(1 << 0)
#define LTT_RFLAG_ID_SIZE	(1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC	(1 << 2)

#define LTT_MAX_SMALL_SIZE	0xFFFFU

/*
 * We use the cpu_khz/HZ variables from asm/timex.h here: we might have to
 * deal specifically with CPU frequency scaling someday, so interpolating
 * between the start-of-buffer and end-of-buffer values is not flexible
 * enough. Using an immediate frequency value permits calculating the times
 * directly for the parts of a buffer that precede a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. The packed attribute is not
 * used because gcc generates poor code on at least powerpc and mips. Don't
 * ever let gcc add padding between the structure elements.
 */
struct subbuffer_header {
	uint64_t cycle_count_begin;	/* Cycle count at subbuffer start */
	uint64_t cycle_count_end;	/* Cycle count at subbuffer end */
	uint32_t magic_number;		/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t arch_size;		/* Architecture pointer size */
	uint8_t alignment;		/* LTT data alignment */
	uint64_t start_time_sec;	/* NTP-corrected start time */
	uint64_t start_time_usec;
	uint64_t start_freq;		/*
					 * Frequency at trace start,
					 * used all along the trace.
					 */
	uint32_t freq_scale;		/* Frequency scaling (divisor) */
	uint32_t data_size;		/* Size of data in subbuffer */
	uint32_t sb_size;		/* Subbuffer size (includes padding) */
	uint32_t events_lost;		/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint32_t subbuf_corrupt;	/*
					 * Corrupted (lost) subbuffers since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint8_t header_end[0];		/* End of header */
};

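/*
 * Illustration only (hence the #if 0 guard): a reader-side sanity check of a
 * subbuffer header, using the magic number to detect a byte-swapped trace.
 * LTT_TRACER_MAGIC_NUMBER is defined with the tracer properties further down
 * in this header; the example_* name is hypothetical and not part of the
 * tracer API.
 */
#if 0
static inline int example_check_subbuffer_header(const struct subbuffer_header *hdr)
{
	if (hdr->magic_number == LTT_TRACER_MAGIC_NUMBER)
		return 0;	/* same endianness as the reader */
	if (hdr->magic_number == swab32(LTT_TRACER_MAGIC_NUMBER))
		return 1;	/* byte-swapped trace */
	return -1;		/* not an LTT subbuffer header */
}
#endif //0
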
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @data_size: size of the payload
 * @pre_header_padding: padding to add before the header (output)
 * @rflags: reservation flags
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * Important note:
 * The event header must be 32-bits. The total offset calculated here:
 *
 * Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct) (32-bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size)
 *   (if event_size == LTT_MAX_SMALL_SIZE, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type
 * it contains.
 */
1c8284eb 260static __inline__
7514523f
MD
261unsigned char record_header_size(const struct lib_ring_buffer_config *config,
262 struct channel *chan, size_t offset,
263 size_t data_size, size_t *pre_header_padding,
264 unsigned int rflags,
265 struct lib_ring_buffer_ctx *ctx)
1c8284eb
MD
266{
267 size_t orig_offset = offset;
268 size_t padding;
269
7514523f 270 BUILD_BUG_ON(sizeof(struct event_header) != sizeof(u32));
1c8284eb 271
7514523f
MD
272 padding = lib_ring_buffer_align(config, offset,
273 sizeof(struct event_header));
1c8284eb 274 offset += padding;
7514523f 275 offset += sizeof(struct event_header);
1c8284eb
MD
276
277 if (unlikely(rflags)) {
278 switch (rflags) {
279 case LTT_RFLAG_ID_SIZE_TSC:
280 offset += sizeof(u16) + sizeof(u16);
281 if (data_size >= LTT_MAX_SMALL_SIZE)
282 offset += sizeof(u32);
283 offset += ltt_align(offset, sizeof(u64));
284 offset += sizeof(u64);
285 break;
286 case LTT_RFLAG_ID_SIZE:
287 offset += sizeof(u16) + sizeof(u16);
288 if (data_size >= LTT_MAX_SMALL_SIZE)
289 offset += sizeof(u32);
290 break;
291 case LTT_RFLAG_ID:
292 offset += sizeof(u16);
293 break;
294 }
295 }
296
7514523f 297 *pre_header_padding = padding;
1c8284eb
MD
298 return offset - orig_offset;
299}
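
/*
 * Worked example (illustration only, hence the #if 0 guard): starting from a
 * 32-bit aligned offset, with a payload smaller than LTT_MAX_SMALL_SIZE and
 * the LTT_RFLAG_ID_SIZE_TSC flag, the header above costs
 * 4 (base header) + 2 (ext. event ID) + 2 (event size) + 0 (already 64-bit
 * aligned at offset 8) + 8 (full TSC) = 16 bytes. The example_* name is
 * hypothetical and not part of the tracer API.
 */
#if 0
static void example_header_size_check(const struct lib_ring_buffer_config *config,
				      struct channel *chan)
{
	size_t padding;

	WARN_ON_ONCE(record_header_size(config, chan, 0, 128, &padding,
					LTT_RFLAG_ID_SIZE_TSC, NULL) != 16);
}
#endif //0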

#include <linux/ringbuffer/api.h>

extern
size_t ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer_ctx *ctx,
				   u16 eID, u32 event_size);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @eID: event ID
 * @event_size: size of the event, excluding the event header.
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    u16 eID, u32 event_size)
{
	struct event_header header;

	if (unlikely(ctx->rflags))
		goto slow_path;

	header.id_time = eID << LTT_TSC_BITS;
	header.id_time |= (u32)ctx->tsc & LTT_TSC_MASK;
	lib_ring_buffer_write(config, ctx, &header, sizeof(header));
	return;		/* Fast path done: don't fall through to the slow path. */

slow_path:
	ltt_write_event_header_slow(config, ctx, eID, event_size);
}

#if 0
/*
 * ltt_read_event_header
 * buf_offset must be aligned on 32 bits
 */
static __inline__
size_t ltt_read_event_header(struct ltt_chanbuf_alloc *bufa, long buf_offset,
			     u64 *tsc, u32 *event_size, u16 *eID,
			     unsigned int *rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	ltt_relay_read(bufa, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	*event_size = INT_MAX;
	*eID = header.id_time >> LTT_TSC_BITS;
	*tsc = header.id_time & LTT_TSC_MASK;

	switch (*eID) {
	case 29:
		*rflags = LTT_RFLAG_ID_SIZE_TSC;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_read(bufa, buf_offset, &small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ltt_relay_read(bufa, buf_offset, event_size,
				       sizeof(u32));
			buf_offset += sizeof(u32);
		} else
			*event_size = small_size;
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ltt_relay_read(bufa, buf_offset, tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case 30:
		*rflags = LTT_RFLAG_ID_SIZE;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_read(bufa, buf_offset, &small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ltt_relay_read(bufa, buf_offset, event_size,
				       sizeof(u32));
			buf_offset += sizeof(u32);
		} else
			*event_size = small_size;
		break;
	case 31:
		*rflags = LTT_RFLAG_ID;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	default:
		*rflags = 0;
		break;
	}

	return buf_offset;
}
#endif //0

/*
 * Control channels:
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel:
 * cpu
 */
#define LTT_RELAY_ROOT			"ltt"

#define LTT_METADATA_CHANNEL		"metadata_state"
#define LTT_FD_STATE_CHANNEL		"fd_state"
#define LTT_GLOBAL_STATE_CHANNEL	"global_state"
#define LTT_IRQ_STATE_CHANNEL		"irq_state"
#define LTT_MODULE_STATE_CHANNEL	"module_state"
#define LTT_NETIF_STATE_CHANNEL		"netif_state"
#define LTT_SOFTIRQ_STATE_CHANNEL	"softirq_state"
#define LTT_SWAP_STATE_CHANNEL		"swap_state"
#define LTT_SYSCALL_STATE_CHANNEL	"syscall_state"
#define LTT_TASK_STATE_CHANNEL		"task_state"
#define LTT_VM_STATE_CHANNEL		"vm_state"
#define LTT_FS_CHANNEL			"fs"
#define LTT_INPUT_CHANNEL		"input"
#define LTT_IPC_CHANNEL			"ipc"
#define LTT_KERNEL_CHANNEL		"kernel"
#define LTT_MM_CHANNEL			"mm"
#define LTT_RCU_CHANNEL			"rcu"

#define LTT_FLIGHT_PREFIX		"flight-"

#define LTT_ASCII			"ascii"

/* Tracer properties */
#define LTT_DEFAULT_SUBBUF_SIZE_LOW	65536
#define LTT_DEFAULT_N_SUBBUFS_LOW	2
#define LTT_DEFAULT_SUBBUF_SIZE_MED	262144
#define LTT_DEFAULT_N_SUBBUFS_MED	2
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	1048576
#define LTT_DEFAULT_N_SUBBUFS_HIGH	2
#define LTT_TRACER_MAGIC_NUMBER		0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR	2
#define LTT_TRACER_VERSION_MINOR	6

/**
 * ltt_write_trace_header - Write trace header
 * @priv: Private data (struct ltt_session)
 * @header: Memory address where the information must be written to
 */
static __inline__
void ltt_write_trace_header(void *priv,
			    struct subbuffer_header *header)
{
	struct ltt_session *session = priv;

	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = lib_ring_buffer_get_alignment();
	header->start_time_sec = session->start_time.tv_sec;
	header->start_time_usec = session->start_time.tv_usec;
	header->start_freq = session->start_freq;
	header->freq_scale = session->freq_scale;
}

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of
 * a nearly full buffer. User space won't use this last amount of space when
 * in blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL		4096

/* Register and unregister function pointers */

enum ltt_module_function {
	LTT_FUNCTION_RUN_FILTER,
	LTT_FUNCTION_FILTER_CONTROL,
	LTT_FUNCTION_STATEDUMP
};

extern int ltt_module_register(enum ltt_module_function name, void *function,
			       struct module *owner);
extern void ltt_module_unregister(enum ltt_module_function name);

/* Exported control function */

enum ltt_control_msg {
	LTT_CONTROL_START,
	LTT_CONTROL_STOP,
	LTT_CONTROL_CREATE_TRACE,
	LTT_CONTROL_DESTROY_TRACE
};

union ltt_control_args {
	struct {
		enum trace_mode mode;
		unsigned int subbuf_size_low;
		unsigned int n_subbufs_low;
		unsigned int subbuf_size_med;
		unsigned int n_subbufs_med;
		unsigned int subbuf_size_high;
		unsigned int n_subbufs_high;
	} new_trace;
};

void ltt_core_register(int (*function)(u8, void *));

void ltt_core_unregister(void);

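/*
 * Illustration only (hence the #if 0 guard): registering a control callback
 * with ltt_core_register(), following the prototype above. The u8 argument
 * presumably carries one of the enum ltt_control_msg values; that and the
 * example_* names are assumptions, not part of the documented API.
 */
#if 0
static int example_control_callback(u8 msg, void *data)
{
	return 0;	/* handle LTT_CONTROL_* requests here */
}

static int __init example_control_init(void)
{
	ltt_core_register(example_control_callback);
	return 0;
}
#endif //0
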
extern int ltt_marker_connect(const char *channel, const char *mname,
			      const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
				 const char *pname);
extern void ltt_dump_marker_state(struct ltt_trace *trace);

extern
void ltt_statedump_register_kprobes_dump(void (*callback)(void *call_data));
extern
void ltt_statedump_unregister_kprobes_dump(void (*callback)(void *call_data));

extern void ltt_dump_softirq_vec(void *call_data);

#ifdef CONFIG_HAVE_LTT_DUMP_TABLES
extern void ltt_dump_sys_call_table(void *call_data);
extern void ltt_dump_idt_table(void *call_data);
#else
static inline void ltt_dump_sys_call_table(void *call_data)
{
}

static inline void ltt_dump_idt_table(void *call_data)
{
}
#endif

#endif /* _LTT_TRACER_H */