/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 */
8 #ifndef _LTT_USERTRACE_FAST_H
9 #define _LTT_USERTRACE_FAST_H
18 #include <semaphore.h>
21 #include <ltt/ltt-facility-id-user_generic.h>
/* Compile-time tunables; each may be overridden before including this
 * header. */

/* Number of sub-buffers per per-process buffer. */
#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

/* Size in bytes of one per-process sub-buffer (1 MiB by default). */
#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

/* Total per-process buffer size: all sub-buffers together. */
#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

/* Directory under which per-process trace buffers are created. */
#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT
/* Buffer offset macros */

/* All of these assume alloc_size and subbuf_size are powers of two, so
 * masking with (size - 1) is equivalent to a modulo. Arguments are fully
 * parenthesized to avoid operator-precedence surprises at expansion. */

/* Offset within the whole buffer. */
#define BUFFER_OFFSET(offset, buf) ((offset) & ((buf)->alloc_size-1))
/* Offset within the current sub-buffer. */
#define SUBBUF_OFFSET(offset, buf) ((offset) & ((buf)->subbuf_size-1))
/* Round offset up to the start of the next sub-buffer. */
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + (buf)->subbuf_size) & (~((buf)->subbuf_size-1)))
/* Round offset down to the start of its sub-buffer. */
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~((buf)->subbuf_size-1)))
/* Index of the sub-buffer containing offset. */
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET((offset), (buf))/(buf)->subbuf_size)
/* Magic number written into every trace header (see
 * ltt_write_trace_header below) so readers can identify LTT traces. */
#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
/* Trace format version written into the trace header. */
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

/* Compare-and-swap on an atomic_t counter.
 * NOTE(review): relies on a cmpxchg() primitive and an atomic_t type
 * declared elsewhere (not visible in this chunk) — confirm their origin
 * in the ltt usertrace compatibility headers. */
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg
/* Trace header, filled in by ltt_write_trace_header() below and written
 * at the start of each sub-buffer (inside struct ltt_block_start_header).
 * Packed: the layout is consumed by external trace-reading tools.
 *
 * NOTE(review): the fields arch_type, arch_size, freq_scale, start_freq
 * and start_tsc were restored here because ltt_write_trace_header()
 * assigns them; confirm their exact order and types against the
 * original LTTng sources. */
struct ltt_trace_header {
	uint32_t	magic_number;		/* Identifies the trace format */
	uint32_t	arch_type;		/* Architecture type (written as 0, FIXME) */
	uint32_t	arch_variant;		/* Architecture variant (written as 0, FIXME) */
	uint32_t	float_word_order;	/* Only useful for user space traces */
	uint8_t		arch_size;		/* Written as sizeof(void*) */
	//uint32_t	system_type;
	uint8_t		major_version;		/* Trace format major version */
	uint8_t		minor_version;		/* Trace format minor version */
	uint8_t		flight_recorder;	/* Flight recorder (overwrite) mode flag */
	uint8_t		has_heartbeat;		/* Heartbeat events present flag */
	uint8_t		has_alignment;		/* Event header alignment */
	uint32_t	freq_scale;		/* Written as 0 (FIXME) */
	uint64_t	start_freq;		/* Written as 0 (FIXME) */
	uint64_t	start_tsc;		/* Written as 0 (FIXME) */
	uint64_t	start_monotonic;	/* Written as 0 (FIXME) */
	uint64_t	start_time_sec;		/* Written as 0 (FIXME) */
	uint64_t	start_time_usec;	/* Written as 0 (FIXME) */
} __attribute((packed));
83 struct ltt_block_start_header
{
86 uint64_t freq
; /* khz */
90 uint64_t freq
; /* khz */
92 uint32_t lost_size
; /* Size unused at the end of the buffer */
93 uint32_t buf_size
; /* The size of this sub-buffer */
94 struct ltt_trace_header trace
;
95 } __attribute((packed
));
103 atomic_t reserve_count
[LTT_N_SUBBUFS
];
104 atomic_t commit_count
[LTT_N_SUBBUFS
];
106 atomic_t events_lost
;
107 atomic_t corrupted_subbuffers
;
108 sem_t writer_sem
; /* semaphore on which the writer waits */
109 unsigned int alloc_size
;
110 unsigned int subbuf_size
;
113 struct ltt_trace_info
{
119 struct ltt_buf process
;
120 char process_buf
[LTT_BUF_SIZE_PROCESS
] __attribute__ ((aligned (8)));
/* Event header used when no heartbeat is present: a full 64-bit
 * timestamp is stored with every event. Packed: written directly into
 * the trace.
 *
 * NOTE(review): "timestamp" and "event_size" were restored because
 * ltt_write_event_header() below assigns nohb->timestamp (from a
 * uint64_t tsc) and nohb->event_size (cast to uint16_t). */
struct ltt_event_header_nohb {
	uint64_t	timestamp;	/* Full timestamp (cycle count) */
	unsigned char	facility_id;	/* Facility the event belongs to */
	unsigned char	event_id;	/* Event number within the facility */
	uint16_t	event_size;	/* Payload size, excluding this header */
} __attribute((packed));
/* Per-thread pointer to the calling thread's trace bookkeeping. */
extern __thread struct ltt_trace_info *thread_trace_info;

/* Initialize tracing for the calling thread. */
void ltt_thread_init(void);

/* Sub-buffer switch notification; called from ltt_deliver_callback()
 * below when a sub-buffer is complete. Marked no_instrument_function so
 * it is not itself instrumented (presumably to avoid recursive tracing
 * under -finstrument-functions — confirm). */
void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);
/* Get the offset of the channel in the ltt_trace_struct */
/* NOTE(review): this is offsetof() implemented via a NULL-pointer
 * dereference; also the pointer-to-unsigned-int cast truncates on
 * 64-bit — offsets here are small, but
 * offsetof(struct ltt_trace_info, channel.chan) would be the
 * standard-conforming form. */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * ...) [rest of comment lost in this copy] */
/* NOTE(review): damaged in this copy — the parameter list is truncated
 * after fID (an eID parameter is documented above) and the function
 * braces are missing. All channels currently map to "process". */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
	return GET_CHANNEL_INDEX(process);
/* ltt_get_channel_from_index
 *
 * Get a pointer to the channel buffer located at byte offset "index"
 * inside the per-thread trace info (the offset produced by
 * GET_CHANNEL_INDEX above).
 *
 * @trace : per-thread trace info.
 * @index : byte offset of the channel within *trace.
 *
 * NOTE(review): the function braces were lost in this copy and were
 * restored. Arithmetic on void* is a GCC extension (treated as char*). */
static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void *)trace + index);
}
172 * ltt_get_header_size
174 * Calculate alignment offset for arch size void*. This is the
175 * alignment offset of the event header.
178 * The event header must be a size multiple of the void* size. This is necessary
179 * to be able to calculate statically the alignment offset of the variable
180 * length data fields that follows. The total offset calculated here :
182 * Alignment of header struct on arch size
183 * + sizeof(header struct)
184 * + padding added to end of struct to align on arch size.
186 static inline unsigned char __attribute__((no_instrument_function
))
187 ltt_get_header_size(struct ltt_trace_info
*trace
,
189 size_t *before_hdr_pad
,
190 size_t *after_hdr_pad
,
193 unsigned int padding
;
196 header
= sizeof(struct ltt_event_header_nohb
);
198 /* Padding before the header. Calculated dynamically */
199 *before_hdr_pad
= ltt_align((unsigned long)address
, header
);
200 padding
= *before_hdr_pad
;
202 /* Padding after header, considering header aligned on ltt_align.
203 * Calculated statically if header size if known. */
204 *after_hdr_pad
= ltt_align(header
, sizeof(void*));
205 padding
+= *after_hdr_pad
;
207 *header_size
= header
;
209 return header
+padding
;
213 /* ltt_write_event_header
215 * Writes the event header to the pointer.
217 * @channel : pointer to the channel structure
218 * @ptr : buffer pointer
221 * @event_size : size of the event, excluding the event header.
222 * @offset : offset of the beginning of the header, for alignment.
223 * Calculated by ltt_get_event_header_size.
224 * @tsc : time stamp counter.
226 static inline void __attribute__((no_instrument_function
))
227 ltt_write_event_header(
228 struct ltt_trace_info
*trace
, struct ltt_buf
*buf
,
229 void *ptr
, ltt_facility_t fID
, uint32_t eID
, size_t event_size
,
230 size_t offset
, uint64_t tsc
)
232 struct ltt_event_header_nohb
*nohb
;
234 event_size
= min(event_size
, 0xFFFFU
);
235 nohb
= (struct ltt_event_header_nohb
*)(ptr
+offset
);
236 nohb
->timestamp
= (uint64_t)tsc
;
237 nohb
->facility_id
= fID
;
238 nohb
->event_id
= eID
;
239 nohb
->event_size
= (uint16_t)event_size
;
/* NOTE(review): truncated declaration — the remainder of this inline
 * function (name, parameters and body) was lost in this copy. Given the
 * uint64_t return type and the *tsc = ltt_get_timestamp() call in
 * ltt_reserve_slot() below, this is presumably ltt_get_timestamp();
 * restore it from the original LTTng sources. */
static inline uint64_t __attribute__((no_instrument_function))
250 static inline unsigned int __attribute__((no_instrument_function
))
251 ltt_subbuf_header_len(struct ltt_buf
*buf
)
253 return sizeof(struct ltt_block_start_header
);
258 static inline void __attribute__((no_instrument_function
))
259 ltt_write_trace_header(struct ltt_trace_header
*header
)
261 header
->magic_number
= LTT_TRACER_MAGIC_NUMBER
;
262 header
->major_version
= LTT_TRACER_VERSION_MAJOR
;
263 header
->minor_version
= LTT_TRACER_VERSION_MINOR
;
264 header
->float_word_order
= 0; //FIXME
265 header
->arch_type
= 0; //FIXME LTT_ARCH_TYPE;
266 header
->arch_size
= sizeof(void*);
267 header
->arch_variant
= 0; //FIXME LTT_ARCH_VARIANT;
268 header
->flight_recorder
= 0;
269 header
->has_heartbeat
= 0;
272 header
->has_alignment
= sizeof(void*);
274 header
->has_alignment
= 0;
278 header
->freq_scale
= 0;
279 header
->start_freq
= 0;
280 header
->start_tsc
= 0;
281 header
->start_monotonic
= 0;
282 header
->start_time_sec
= 0;
283 header
->start_time_usec
= 0;
287 static inline void __attribute__((no_instrument_function
))
288 ltt_buffer_begin_callback(struct ltt_buf
*buf
,
289 uint64_t tsc
, unsigned int subbuf_idx
)
291 struct ltt_block_start_header
*header
=
292 (struct ltt_block_start_header
*)
293 (buf
->start
+ (subbuf_idx
*buf
->subbuf_size
));
295 header
->begin
.cycle_count
= tsc
;
296 header
->begin
.freq
= 0; //ltt_frequency();
298 header
->lost_size
= 0xFFFFFFFF; // for debugging...
300 header
->buf_size
= buf
->subbuf_size
;
302 ltt_write_trace_header(&header
->trace
);
308 static inline void __attribute__((no_instrument_function
))
309 ltt_buffer_end_callback(struct ltt_buf
*buf
,
310 uint64_t tsc
, unsigned int offset
, unsigned int subbuf_idx
)
312 struct ltt_block_start_header
*header
=
313 (struct ltt_block_start_header
*)
314 (buf
->start
+ (subbuf_idx
*buf
->subbuf_size
));
315 /* offset is assumed to never be 0 here : never deliver a completely
316 * empty subbuffer. */
317 /* The lost size is between 0 and subbuf_size-1 */
318 header
->lost_size
= SUBBUF_OFFSET((buf
->subbuf_size
- offset
),
320 header
->end
.cycle_count
= tsc
;
321 header
->end
.freq
= 0; //ltt_frequency();
/* ltt_deliver_callback
 *
 * Called when a sub-buffer is fully committed: notify the consumer side
 * via ltt_usertrace_fast_buffer_switch().
 *
 * @buf : channel buffer (unused here).
 * @subbuf_idx : index of the delivered sub-buffer (unused here).
 * @subbuf : pointer to the delivered sub-buffer (callers pass NULL).
 *
 * NOTE(review): the trailing parameters and the function braces were
 * lost in this copy; they were restored from the three-argument call
 * sites in ltt_reserve_slot() / ltt_commit_slot(). */
static inline void __attribute__((no_instrument_function))
	ltt_deliver_callback(struct ltt_buf *buf,
		unsigned int subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}
/* ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header.
 * @after_hdr_pad : dynamic padding after the event header.
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
/* NOTE(review): this function is badly damaged in this copy:
 *  - every "&ltt_buf" appears HTML-mangled as "<t_buf" (&lt; escaping);
 *  - the do { ... } while retry skeleton, several if/else lines, the
 *    declarations of "size" and "ret", the slot_size/tsc/header_size
 *    parameters and most braces are missing.
 * Restore the full body from the original LTTng sources; the comments
 * added below only annotate what is still visible. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
	/* Offsets into the ring buffer for this reservation attempt. */
	int offset_begin, offset_end, offset_old;
	/* Flags: does this reservation begin/end a sub-buffer switch? */
	int begin_switch, end_switch_current, end_switch_old;
	/* reserve - commit difference of the next sub-buffer: non-zero
	 * means that sub-buffer still has uncommitted (corrupted) data. */
	int reserve_commit_diff = 0;
	/* Consumer position, for pushing the reader forward. */
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	/* Signal mask save area: signals are blocked around sem_wait. */
	sigset_t oldset, set;
	int signals_disabled = 0;

	/* Retry loop body: snapshot the write offset, compute the slot,
	 * then publish it with a cmpxchg (re-runs on contention). */
	offset_old = atomic_read(<t_buf->offset);
	offset_begin = offset_old;
	end_switch_current = 0;

	*tsc = ltt_get_timestamp();
	/* Error in getting the timestamp, event lost */
	atomic_inc(<t_buf->events_lost);

	/* Starting exactly on a sub-buffer boundary: switch sub-buffers. */
	if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
		begin_switch = 1; /* For offset_begin */

	/* Slot size = header (with its padding) plus the payload. */
	size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
			before_hdr_pad, after_hdr_pad, header_size)

	/* Event does not fit in the current sub-buffer: switch. */
	if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
		end_switch_old = 1; /* For offset_old */
		begin_switch = 1; /* For offset_begin */

	/* Sub-buffer switch: jump to the next sub-buffer boundary and skip
	 * the space reserved for its block-start header. */
	offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
	offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
	/* Test new buffer integrity */
	reserve_commit_diff =
		atomic_read(<t_buf->reserve_count[SUBBUF_INDEX(offset_begin,
		- atomic_read(<t_buf->commit_count[SUBBUF_INDEX(offset_begin,

	if(reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
		//  - SUBBUF_TRUNC(atomic_read(<t_buf->consumed), ltt_buf))
		//  >= ltt_buf->alloc_size) {

		/* sem_wait is not signal safe. Disable signals around it.
		 * Signals are kept disabled to make sure we win the cmpxchg. */
		/* Disable signals */
		if(!signals_disabled) {
			ret = sigfillset(&set);
			if(ret) perror("LTT Error in sigfillset\n");

			ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
			if(ret) perror("LTT Error in pthread_sigmask\n");
			signals_disabled = 1;

		/* Wait until the consumer has freed the next sub-buffer. */
		sem_wait(<t_buf->writer_sem);

	/* go on with the write */

	// /* next buffer not corrupted, we are either in overwrite mode or
	//  * the buffer is not full. It's safe to write in this new subbuffer.*/

	/* Next subbuffer corrupted. Force pushing reader even in normal
	 * mode. It's safe to write in this new subbuffer. */
	/* No sem_post is required because we fall through without doing a
	 * ... [rest of comment lost in this copy] */

	/* Recompute the slot size at the (possibly post-switch) position. */
	size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
			before_hdr_pad, after_hdr_pad, header_size) + data_size;
	if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
		/* Event too big for subbuffers, report error, don't complete
		 * the sub-buffer switch. */
		atomic_inc(<t_buf->events_lost);
		/* Re-enable signals before bailing out. */
		if(reserve_commit_diff == 0) {
			ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
			if(ret) perror("LTT Error in pthread_sigmask\n");

	/* We just made a successful buffer switch and the event fits in the
	 * new subbuffer. Let's write. */

	/* Event fits in the current buffer and we are not on a switch boundary.
	 * It's safe to write */

	offset_end = offset_begin + size;

	if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
		/* The offset_end will fall at the very beginning of the next
		 * subbuffer. */
		end_switch_current = 1; /* For offset_begin */

	/* Publish the new write offset; retry from the top on contention. */
	} while(atomic_cmpxchg(<t_buf->offset, offset_old, offset_end)

	/* Push the reader if necessary */
	consumed_old = atomic_read(<t_buf->consumed);
	/* If buffer is in overwrite mode, push the reader consumed count if
	   the write position has reached it and we are not at the first
	   iteration (don't push the reader farther than the writer).
	   This operation can be done concurrently by many writers in the
	   same buffer, the writer being at the farthest write position sub-buffer
	   index in the buffer being the one which will win this loop. */
	/* If the buffer is not in overwrite mode, pushing the reader only
	   happen if a sub-buffer is corrupted */
	if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
			- SUBBUF_TRUNC(consumed_old, ltt_buf))
			>= ltt_buf->alloc_size)
		consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		/* NOTE(review): an "else" line is missing here in this copy. */
		consumed_new = consumed_old;
	} while(atomic_cmpxchg(<t_buf->consumed, consumed_old, consumed_new)

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   reequilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite mode :
		   we never want to write over a non completely committed sub-buffer :
		   possible causes : the buffer size is too low compared to the unordered
		   data input, or there is a writer who died between the reserve and the
		   ... [rest of comment lost in this copy] */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
					<t_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(<t_buf->corrupted_subbuffers);

	/* Switch-out of the old sub-buffer (end_switch_old path). */
	/* Concurrency safe because we are the last and only thread to alter this
	   sub-buffer. As long as it is not delivered and read, no other thread can
	   alter the offset, alter the reserve_count or call the
	   client_buffer_end_callback on this sub-buffer.
	   The only remaining threads could be the ones with pending commits. They
	   will have to do the deliver themself.
	   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
	   with commit and reserve counts. We keep a corrupted sub-buffers count
	   and push the readers across these sub-buffers.
	   Not concurrency safe if a writer is stalled in a subbuffer and
	   another writer switches in, finding out it's corrupted. The result will
	   be than the old (uncommited) subbuffer will be declared corrupted, and
	   that the new subbuffer will be declared corrupted too because of the
	   commit count adjustment.
	   Note : offset_old should never be 0 here.*/
	ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
			SUBBUF_INDEX((offset_old-1), ltt_buf));
	/* Setting this reserve_count will allow the sub-buffer to be delivered by
	   the last committer. */
	atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
			<t_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
	/* Deliver the old sub-buffer now if all commits are already in. */
	== atomic_read(<t_buf->commit_count[SUBBUF_INDEX((offset_old-1),
	ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),

	/* Enable signals : this is what guaranteed that same reserve which did the
	 * sem_wait does in fact win the cmpxchg for the offset. We only call
	 * these system calls on buffer boundaries because of their performance
	 * ... [rest of comment lost in this copy] */
	if(signals_disabled) {
		ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
		if(ret) perror("LTT Error in pthread_sigmask\n");

	/* Switch-in of the new sub-buffer (begin_switch path). */
	/* This code can be executed unordered : writers may already have written
	   to the sub-buffer before this code gets executed, caution. */
	/* The commit makes sure that this code is executed before the deliver
	   of this sub-buffer */
	ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
	commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			<t_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
	/* Check if the written buffer has to be delivered */
	== atomic_read(<t_buf->reserve_count[SUBBUF_INDEX(offset_begin,
	ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themself.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be than the old (uncommited) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				<t_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		/* Deliver the current sub-buffer if all commits are already in. */
		== atomic_read(<t_buf->commit_count[SUBBUF_INDEX((offset_end-1),
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 */
/* NOTE(review): damaged in this copy — the "reserved" parameter, the
 * function braces, the commit_count declaration and the enclosing if()
 * of the delivery check are missing, and "&ltt_buf" appears
 * HTML-mangled as "<t_buf" (&lt; escaping). Restore from the original
 * LTTng sources. */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		unsigned int slot_size)
	/* Offset of the slot within the buffer, from the reserved address. */
	unsigned int offset_begin = reserved - ltt_buf->start;

	/* Account the committed bytes to this slot's sub-buffer. */
	commit_count = atomic_add_return(slot_size,
			<t_buf->commit_count[SUBBUF_INDEX(offset_begin,

	/* Check if all commits have been done */
	/* Deliver the sub-buffer once commits catch up with reserves. */
	atomic_read(<t_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
	ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
644 } /* end of extern "C" */
647 #endif //LTT_TRACE_FAST
649 #endif //_LTT_USERTRACE_FAST_H