Add comment on manual registration of "ust" channel
[ust.git] / libust / tracer.h
/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 *
 * Ported to userspace by Pierre-Marc Fournier.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

#include <sys/types.h>
#include <stdarg.h>
//#include "list.h"
#include <ust/kernelcompat.h>
#include "buffers.h"
#include "channels.h"
#include "tracercore.h"
#include <ust/marker.h>
#include <ust/probe.h>

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE 32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL 1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
#endif

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS 10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

struct ltt_serialize_closure {
	ltt_serialize_cb *callbacks;
	long cb_args[LTT_NR_CALLBACKS];
	unsigned int cb_idx;
};

extern size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
		struct ltt_serialize_closure *closure,
		void *serialize_private,
		int *largest_align, const char *fmt, va_list *args);

struct ltt_probe_private_data {
	struct ltt_trace_struct *trace;	/*
					 * Target trace, for metadata
					 * or statedump.
					 */
	ltt_serialize_cb serializer;	/*
					 * Serialization function override.
					 */
	void *serialize_private;	/*
					 * Private data for serialization
					 * functions.
					 */
};

enum ltt_channels {
	LTT_CHANNEL_METADATA,
	LTT_CHANNEL_UST,
};
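
/*
 * Note (following this commit's subject line): channels named in this enum
 * are not registered automatically; the "ust" channel is registered manually
 * at tracer initialization.  A minimal sketch, assuming the
 * ltt_channels_register()/ltt_channels_get_index_from_name() API declared in
 * channels.h:
 *
 *	ltt_channels_register(LTT_UST_CHANNEL);
 *	index = ltt_channels_get_index_from_name(LTT_UST_CHANNEL);
 */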

struct ltt_active_marker {
	struct list_head node;	/* active markers list */
	const char *channel;
	const char *name;
	const char *format;
	struct ltt_available_probe *probe;
};

struct marker; //ust//
extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
	struct registers *regs, void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
	struct registers *regs, void *call_data, const char *fmt, ...);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
	MARKER_ID_SET_MARKER_ID = 0,	/* Static IDs available (range 0-7) */
	MARKER_ID_SET_MARKER_FORMAT,
	MARKER_ID_COMPACT,	/* Compact IDs (range: 8-127) */
	MARKER_ID_DYNAMIC,	/* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS 2
static __inline__ enum marker_id marker_id_type(uint16_t id)
{
	if (id < MARKER_CORE_IDS)
		return (enum marker_id)id;
	else
		return MARKER_ID_DYNAMIC;
}
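
/*
 * Illustration (not part of the original header): with MARKER_CORE_IDS == 2,
 * marker_id_type(0) == MARKER_ID_SET_MARKER_ID, marker_id_type(1) ==
 * MARKER_ID_SET_MARKER_FORMAT, and any other id (e.g. 42) maps to
 * MARKER_ID_DYNAMIC.
 */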

struct user_dbg_data {
	unsigned long avail_size;
	unsigned long write;
	unsigned long read;
};

struct ltt_trace_ops {
	/* First 32 bytes cache-hot cacheline */
	int (*reserve_slot) (struct ltt_trace_struct *trace,
			struct ust_channel *channel,
			void **transport_data, size_t data_size,
			size_t *slot_size, long *buf_offset, u64 *tsc,
			unsigned int *rflags,
			int largest_align);
//ust//	void (*commit_slot) (struct ltt_channel_struct *channel,
//ust//			void **transport_data, long buf_offset,
//ust//			size_t slot_size);
	void (*wakeup_channel) (struct ust_channel *channel);
	int (*user_blocking) (struct ltt_trace_struct *trace,
			unsigned int index, size_t data_size,
			struct user_dbg_data *dbg);
	/* End of first 32 bytes cacheline */
	int (*create_dirs) (struct ltt_trace_struct *new_trace);
	void (*remove_dirs) (struct ltt_trace_struct *new_trace);
	int (*create_channel) (const char *trace_name,
			struct ltt_trace_struct *trace,
			const char *channel_name,
			struct ust_channel *channel,
			unsigned int subbuf_size,
			unsigned int n_subbufs, int overwrite);
	void (*finish_channel) (struct ust_channel *channel);
	void (*remove_channel) (struct ust_channel *channel);
	void (*user_errors) (struct ltt_trace_struct *trace,
			unsigned int index, size_t data_size,
			struct user_dbg_data *dbg);
} ____cacheline_aligned;

struct ltt_transport {
	char *name;
	struct module *owner;
	struct list_head node;
	struct ltt_trace_ops ops;
};
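
/*
 * Sketch (not in the original header; names are hypothetical except
 * ltt_transport_register(), declared further down): a transport describes
 * itself with ltt_trace_ops and registers at load time, roughly:
 *
 *	static struct ltt_transport my_transport = {
 *		.name = "ustrelay",
 *		.ops  = {
 *			.create_channel = my_create_channel,
 *			.reserve_slot   = my_reserve_slot,
 *			// ...remaining callbacks...
 *		},
 *	};
 *	ltt_transport_register(&my_transport);
 */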

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE	(1U<<0)
#define CHANNEL_FLAG_OVERWRITE	(1U<<1)

/* Per-trace information - each trace/flight recorder represented by one */
struct ltt_trace_struct {
	/* First 32 bytes cache-hot cacheline */
	struct list_head list;
	struct ltt_trace_ops *ops;
	int active;
	/* Second 32 bytes cache-hot cacheline */
	struct ust_channel *channels;
	unsigned int nr_channels;
	u32 freq_scale;
	u64 start_freq;
	u64 start_tsc;
	unsigned long long start_monotonic;
	struct timeval start_time;
	struct ltt_channel_setting *settings;
	struct {
		struct dentry *trace_root;
	} dentry;
	struct kref kref;	/* Each channel has a kref of the trace struct */
	struct ltt_transport *transport;
	struct kref ltt_transport_kref;
	char trace_name[NAME_MAX];
} ____cacheline_aligned;

/* Hardcoded event headers
 *
 * Event header for a trace with active heartbeat : 27-bit timestamps.
 *
 * Headers are 32-bit aligned. In order to ensure such alignment, a dynamic
 * per-trace alignment value must be applied.
 *
 * Remember that the C compiler aligns each member on a boundary
 * equivalent to its own size.
 *
 * As relay subbuffers are aligned on pages, we are sure that they are 4- and
 * 8-byte aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note: C structure bitfields are not used here, for cross-endianness and
 * portability reasons.
 */

#define LTT_RESERVED_EVENTS	3
#define LTT_EVENT_BITS		5
#define LTT_FREE_EVENTS		((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS		27
#define LTT_TSC_MASK		((1 << LTT_TSC_BITS) - 1)

struct ltt_event_header {
	u32 id_time;	/* 5 bits event id (MSB); 27 bits time (LSB) */
};
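
/*
 * Illustration only (these helpers are not part of the original header):
 * how id_time packs the event id and the timestamp, mirroring the
 * LTT_EVENT_BITS / LTT_TSC_BITS layout used by ltt_write_event_header()
 * below.
 */
static __inline__ u16 ltt_event_id_from_id_time(u32 id_time)
{
	return id_time >> LTT_TSC_BITS;	/* top 5 bits: event id */
}

static __inline__ u32 ltt_event_tsc_from_id_time(u32 id_time)
{
	return id_time & LTT_TSC_MASK;	/* low 27 bits: timestamp */
}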

/* Reservation flags */
#define LTT_RFLAG_ID		(1 << 0)
#define LTT_RFLAG_ID_SIZE	(1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC	(1 << 2)

/*
 * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
 * specifically with CPU frequency scaling someday, so using an interpolation
 * between the start and end of buffer values is not flexible enough. Using an
 * immediate frequency value lets us directly calculate the times for parts
 * of a buffer that would be before a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
	uint64_t cycle_count_begin;	/* Cycle count at subbuffer start */
	uint64_t cycle_count_end;	/* Cycle count at subbuffer end */
	uint32_t magic_number;		/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t arch_size;		/* Architecture pointer size */
	uint8_t alignment;		/* LTT data alignment */
	uint64_t start_time_sec;	/* NTP-corrected start time */
	uint64_t start_time_usec;
	uint64_t start_freq;		/*
					 * Frequency at trace start,
					 * used all along the trace.
					 */
	uint32_t freq_scale;		/* Frequency scaling (divisor) */
	uint32_t lost_size;		/* Size unused at end of subbuffer */
	uint32_t buf_size;		/* Size of this subbuffer */
	uint32_t events_lost;		/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint32_t subbuf_corrupt;	/*
					 * Corrupted (lost) subbuffers since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint8_t header_end[0];		/* End of header */
};

/**
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static __inline__ size_t ltt_subbuffer_header_size(void)
{
	return offsetof(struct ltt_subbuffer_header, header_end);
}

/*
 * ust_get_header_size
 *
 * Calculate alignment offset to 32-bits. This is the alignment offset of the
 * event header.
 *
 * Important note :
 * The event header must be 32-bits. The total offset calculated here :
 *
 * Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct) (32-bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__ unsigned char ust_get_header_size(
		struct ust_channel *channel,
		size_t offset,
		size_t data_size,
		size_t *before_hdr_pad,
		unsigned int rflags)
{
	size_t orig_offset = offset;
	size_t padding;

	padding = ltt_align(offset, sizeof(struct ltt_event_header));
	offset += padding;
	offset += sizeof(struct ltt_event_header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		offset += ltt_align(offset, sizeof(u64));
		offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		offset += sizeof(u16) + sizeof(u16);
		if (data_size >= 0xFFFFU)
			offset += sizeof(u32);
		break;
	case LTT_RFLAG_ID:
		offset += sizeof(u16);
		break;
	}

	*before_hdr_pad = padding;
	return offset - orig_offset;
}
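
/*
 * Worked example (illustration, not part of the original header): for an
 * event written at a 32-bit aligned offset with rflags == LTT_RFLAG_ID_SIZE
 * and data_size < 0xFFFF, ust_get_header_size() returns
 * 4 (event header) + 2 (u16 event id) + 2 (u16 size) = 8 bytes, with
 * *before_hdr_pad == 0.
 */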

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @trace : trace to write to.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * returns : offset where the event data must be written.
 */
static __inline__ size_t ltt_write_event_header(struct ltt_trace_struct *trace,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, size_t event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	size_t small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	default:
		header.id_time = eID << LTT_TSC_BITS;
		break;
	}
	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ust_buffers_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ust_buffers_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			(u64[]){ (u64)tsc }, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = min_t(size_t, event_size, 0xFFFFU);
		ust_buffers_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			(u16[]){ (u16)small_size }, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == 0xFFFFU) {
			ust_buffers_write(buf, buf_offset,
				(u32[]){ (u32)event_size }, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			(u16[]){ (u16)eID }, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	default:
		break;
	}

	return buf_offset;
}

/* Lockless LTTng */

/*
 * ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @buf_offset : pointer to reserve offset (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @rflags : reservation flags (header specificity)
 * @largest_align : largest alignment within the payload to log
 *
 * Return : -ENOSPC if not enough space, else 0.
 */
static __inline__ int ltt_reserve_slot(
		struct ltt_trace_struct *trace,
		struct ust_channel *channel,
		void **transport_data,
		size_t data_size,
		size_t *slot_size,
		long *buf_offset,
		u64 *tsc,
		unsigned int *rflags,
		int largest_align)
{
	return trace->ops->reserve_slot(trace, channel, transport_data,
			data_size, slot_size, buf_offset, tsc, rflags,
			largest_align);
}
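
/*
 * Typical call sequence (a sketch, not part of the original header; variable
 * names are hypothetical and error handling is simplified): a probe roughly
 * does
 *
 *	ret = ltt_reserve_slot(trace, channel, &transport_data, data_size,
 *			&slot_size, &buf_offset, &tsc, &rflags, largest_align);
 *	if (!ret) {
 *		buf_offset = ltt_write_event_header(trace, buf, buf_offset,
 *				eID, data_size, tsc, rflags);
 *		// ...serialize the payload at buf_offset, then commit the
 *		// slot (the commit callback is disabled in this version,
 *		// see below)...
 *	}
 */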

///*
// * ltt_commit_slot
// *
// * Atomic unordered slot commit. Increments the commit count in the
// * specified sub-buffer, and delivers it if necessary.
// *
// * Parameters:
// *
// * @channel : the channel to reserve space into.
// * @transport_data : specific transport data.
// * @buf_offset : offset of beginning of reserved slot
// * @slot_size : size of the reserved slot.
// */
//static inline void ltt_commit_slot(
//	struct ltt_channel_struct *channel,
//	void **transport_data,
//	long buf_offset,
//	size_t slot_size)
//{
//	struct ltt_trace_struct *trace = channel->trace;
//
//	trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
//}

/*
 * Control channels :
 *	control/metadata
 *	control/interrupts
 *	control/...
 *
 * cpu channel :
 *	cpu
 */

#define LTT_METADATA_CHANNEL	"metadata_state"
#define LTT_UST_CHANNEL		"ust"

#define LTT_FLIGHT_PREFIX	"flight-"

/* Tracer properties */
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW	134217728
#define LTT_DEFAULT_SUBBUF_SIZE_LOW	65536
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW	4096
#define LTT_DEFAULT_N_SUBBUFS_LOW	2
//#define LTT_DEFAULT_SUBBUF_SIZE_MED	134217728
#define LTT_DEFAULT_SUBBUF_SIZE_MED	262144
//#define LTT_DEFAULT_SUBBUF_SIZE_MED	4096
#define LTT_DEFAULT_N_SUBBUFS_MED	2
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	134217728
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	1048576
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	4096
#define LTT_DEFAULT_N_SUBBUFS_HIGH	2
#define LTT_TRACER_MAGIC_NUMBER		0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR	2
#define LTT_TRACER_VERSION_MINOR	3

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL	4096

/* Register and unregister function pointers */

enum ltt_module_function {
	LTT_FUNCTION_RUN_FILTER,
	LTT_FUNCTION_FILTER_CONTROL,
	LTT_FUNCTION_STATEDUMP
};

extern void ltt_transport_register(struct ltt_transport *transport);
extern void ltt_transport_unregister(struct ltt_transport *transport);

/* Exported control function */

union ltt_control_args {
	struct {
		enum trace_mode mode;
		unsigned int subbuf_size_low;
		unsigned int n_subbufs_low;
		unsigned int subbuf_size_med;
		unsigned int n_subbufs_med;
		unsigned int subbuf_size_high;
		unsigned int n_subbufs_high;
	} new_trace;
};

extern int _ltt_trace_setup(const char *trace_name);
extern int ltt_trace_setup(const char *trace_name);
extern struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
extern int ltt_trace_set_type(const char *trace_name, const char *trace_type);
extern int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size);
extern int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt);
extern int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable);
extern int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite);
extern int ltt_trace_alloc(const char *trace_name);
extern int ltt_trace_destroy(const char *trace_name);
extern int ltt_trace_start(const char *trace_name);
extern int ltt_trace_stop(const char *trace_name);

enum ltt_filter_control_msg {
	LTT_FILTER_DEFAULT_ACCEPT,
	LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
		const char *trace_name);

extern struct dentry *get_filter_root(void);

extern void ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ust_channel *ltt_chan);

extern void ltt_core_register(int (*function)(u8, void *));

extern void ltt_core_unregister(void);

extern void ltt_release_trace(struct kref *kref);
extern void ltt_release_transport(struct kref *kref);

extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);

extern void ltt_lock_traces(void);
extern void ltt_unlock_traces(void);

extern struct ltt_trace_struct *_ltt_trace_find(const char *trace_name);

#endif /* _LTT_TRACER_H */