lttv.git: usertrace-fast/ltt/ltt-usertrace-fast.h
/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE

#include <errno.h>
#include <asm/atomic.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <linux/futex.h>
#include <asm/timex.h>

#include <ltt/ltt-facility-id-user_generic.h>
#include <ltt/ltt-generic.h>

#ifndef futex
static inline __attribute__((no_instrument_function))
	_syscall6(long, futex, unsigned long, uaddr, int, op, int, val,
		unsigned long, timeout, unsigned long, uaddr2, int, val2)
#endif //futex


#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_CPU
#define LTT_SUBBUF_SIZE_CPU 1048576
#endif //LTT_SUBBUF_SIZE_CPU

#define LTT_BUF_SIZE_CPU (LTT_SUBBUF_SIZE_CPU * LTT_N_SUBBUFS)

#ifndef LTT_SUBBUF_SIZE_FACILITIES
#define LTT_SUBBUF_SIZE_FACILITIES 4096
#endif //LTT_SUBBUF_SIZE_FACILITIES

#define LTT_BUF_SIZE_FACILITIES (LTT_SUBBUF_SIZE_FACILITIES * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)
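
/* These macros assume that both alloc_size and subbuf_size are powers of two,
 * so the modulo arithmetic reduces to masking with (size - 1). Worked example
 * (illustrative, using the default LTT_SUBBUF_SIZE_CPU = 1048576 and
 * LTT_N_SUBBUFS = 2, hence alloc_size = 2097152) : for offset = 1048580,
 * BUFFER_OFFSET = 1048580, SUBBUF_OFFSET = 4, SUBBUF_INDEX = 1,
 * SUBBUF_TRUNC = 1048576 (start of the current sub-buffer) and
 * SUBBUF_ALIGN = 2097152 (start of the next sub-buffer). */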

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

typedef unsigned int ltt_facility_t;

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order;	/* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment;	/* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* kHz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* kHz */
	} end;
	uint32_t lost_size;	/* Size unused at the end of the buffer */
	uint32_t buf_size;	/* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	atomic_t full;	/* futex on which the writer waits : 1 : full */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf facilities;
		struct ltt_buf cpu;
		char facilities_buf[LTT_BUF_SIZE_FACILITIES] __attribute__ ((aligned (8)));
		char cpu_buf[LTT_BUF_SIZE_CPU] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));
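
/* Note : packed, this event header occupies 12 bytes :
 * 8 (timestamp) + 1 (facility_id) + 1 (event_id) + 2 (event_size). */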

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the ltt_trace_info structure */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(cpu);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
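
/* Illustrative usage sketch (an assumption about the caller, not part of the
 * original header) : a probe resolves its destination buffer by composing the
 * two helpers above. As currently written, every facility/event pair resolves
 * to the cpu channel.
 *
 *	unsigned int index = ltt_get_index_from_facility(fID, eID);
 *	struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 */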


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header must be a multiple of the void* size. This is necessary
 * to be able to calculate statically the alignment offset of the variable
 * length data fields that follow. The total offset calculated here :
 *
 *   Alignment of header struct on arch size
 *   + sizeof(header struct)
 *   + padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
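
/* Resulting slot layout (a sketch of what the computation above implies, not
 * normative) : the reserved slot is
 *
 *	| before_hdr_pad | struct ltt_event_header_nohb | after_hdr_pad | data |
 *
 * and the returned value (header + padding) is the offset of the data
 * relative to the beginning of the slot. */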


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @channel : pointer to the channel structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}


static inline uint64_t __attribute__((no_instrument_function))
	ltt_get_timestamp(void)
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
	ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
	ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
	ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);
}


static inline void __attribute__((no_instrument_function))
	ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset), buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
	ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header.
 * @after_hdr_pad : dynamic padding after the event header.
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		unsigned int *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
				+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				if((SUBBUF_TRUNC(offset_begin, ltt_buf)
						- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
						>= ltt_buf->alloc_size) {
					/* We block until the reader unblocks us */
					atomic_set(&ltt_buf->full, 1);
					/* We block until the reader tells us to wake up.
					 * Signals will simply cause this loop to restart. */
					do {
						ret = futex((unsigned long)&ltt_buf->full, FUTEX_WAIT, 1, 0, 0, 0);
					} while(ret != 0 && ret != EWOULDBLOCK);
					/* go on with the write */
				} else {
					/* next buffer not corrupted, we are either in overwrite mode or
					 * the buffer is not full. It's safe to write in this new subbuffer.*/
				}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If buffer is in overwrite mode, push the reader consumed count if
		 * the write position has reached it and we are not at the first
		 * iteration (don't push the reader farther than the writer).
		 * This operation can be done concurrently by many writers in the
		 * same buffer ; the writer at the farthest write position sub-buffer
		 * index in the buffer is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		 * happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		 * re-equilibrate reserve and commit. Atomic increment of the commit
		 * count permits other writers to play around with this variable
		 * before us. We keep track of corrupted_subbuffers even in overwrite
		 * mode : we never want to write over a not completely committed
		 * sub-buffer. Possible causes : the buffer size is too low compared
		 * to the unordered data input, or there is a writer who died between
		 * the reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			 * corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}

	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		 * sub-buffer. As long as it is not delivered and read, no other thread
		 * can alter the offset, alter the reserve_count or call the
		 * client_buffer_end_callback on this sub-buffer.
		 * The only remaining threads could be the ones with pending commits ;
		 * they will have to do the deliver themselves.
		 * Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		 * with commit and reserve counts. We keep a corrupted sub-buffers count
		 * and push the readers across these sub-buffers.
		 * Not concurrency safe if a writer is stalled in a subbuffer and
		 * another writer switches in, finding out it's corrupted. The result
		 * will be that the old (uncommitted) subbuffer will be declared
		 * corrupted, and that the new subbuffer will be declared corrupted too
		 * because of the commit count adjustment.
		 * Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
			SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		 * by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
				NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have
		 * written to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		 * of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		 * sub-buffer. As long as it is not delivered and read, no other thread
		 * can alter the offset, alter the reserve_count or call the
		 * client_buffer_end_callback on this sub-buffer.
		 * The only remaining threads could be the ones with pending commits ;
		 * they will have to do the deliver themselves.
		 * Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		 * with commit and reserve counts. We keep a corrupted sub-buffers count
		 * and push the readers across these sub-buffers.
		 * Not concurrency safe if a writer is stalled in a subbuffer and
		 * another writer switches in, finding out it's corrupted. The result
		 * will be that the old (uncommitted) subbuffer will be declared
		 * corrupted, and that the new subbuffer will be declared corrupted too
		 * because of the commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
			SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		 * by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
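
/* Illustrative usage sketch (an assumption about how callers are expected to
 * combine the primitives above, not part of the original header ; fID, eID,
 * payload and payload_size are placeholder names, and memcpy comes from
 * <string.h>) :
 *
 *	struct ltt_trace_info *trace = thread_trace_info;
 *	struct ltt_buf *buf;
 *	uint64_t tsc;
 *	size_t before_hdr_pad, after_hdr_pad, header_size;
 *	unsigned int slot_size;
 *	void *slot;
 *
 *	buf = ltt_get_channel_from_index(trace,
 *			ltt_get_index_from_facility(fID, eID));
 *	slot = ltt_reserve_slot(trace, buf, payload_size, &slot_size, &tsc,
 *			&before_hdr_pad, &after_hdr_pad, &header_size);
 *	if(slot != NULL) {
 *		ltt_write_event_header(trace, buf, slot, fID, eID, payload_size,
 *				before_hdr_pad, tsc);
 *		memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
 *				payload, payload_size);
 *		ltt_commit_slot(buf, slot, slot_size);
 *	}
 *
 * On failure ltt_reserve_slot returns NULL and has already accounted for the
 * event in events_lost, so the caller simply drops the event. */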

#endif //LTT_TRACE


#endif //_LTT_USERTRACE_FAST_H