/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <asm/atomic.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <asm/timex.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>
#include <ltt/ltt-generic.h>

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT

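/* Build-time configuration sketch (illustrative only, not part of this
 * header): the defaults above are meant to be overridable before this header
 * is included, either from the compiler command line or from the including
 * source file. The values shown here are hypothetical.
 *
 *   #define LTT_TRACE
 *   #define LTT_TRACE_FAST
 *   #define LTT_N_SUBBUFS 4
 *   #define LTT_SUBBUF_SIZE_PROCESS (256*1024)
 *   #include <ltt/ltt-usertrace-fast.h>
 *
 * The sub-buffer size is used as a power-of-two mask below, so an override
 * should keep LTT_SUBBUF_SIZE_PROCESS a power of two.
 */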

/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

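/* A worked example of the offset arithmetic above, assuming the default
 * LTT_SUBBUF_SIZE_PROCESS (0x100000) and LTT_N_SUBBUFS (2), so that
 * alloc_size = 0x200000. The offset value is purely illustrative.
 *
 *   offset = 0x234567
 *   BUFFER_OFFSET(offset, buf) = 0x234567 & 0x1FFFFF = 0x034567
 *   SUBBUF_OFFSET(offset, buf) = 0x234567 & 0x0FFFFF = 0x034567
 *   SUBBUF_INDEX(offset, buf)  = 0x034567 / 0x100000 = 0
 *   SUBBUF_TRUNC(offset, buf)  = 0x200000  (start of the current sub-buffer)
 *   SUBBUF_ALIGN(offset, buf)  = 0x300000  (start of the next sub-buffer)
 *
 * Offsets are kept free-running; they are only masked down to a position
 * inside the buffer (BUFFER_OFFSET) when memory is actually addressed.
 */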

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

typedef unsigned int ltt_facility_t;

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* kHz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* kHz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));



struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get the channel index from the facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
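
/* Lookup sketch (illustrative; "facility_id" and "event_id" are hypothetical
 * variables of the caller): resolve the per-process channel buffer of the
 * current thread for an event.
 *
 *   struct ltt_trace_info *info = thread_trace_info;
 *   unsigned int index = ltt_get_index_from_facility(facility_id, event_id);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(info, index);
 *
 * Since every facility/event currently maps to the single "process" channel,
 * the index is simply the byte offset of channel.process inside
 * struct ltt_trace_info.
 */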


/*
 * ltt_get_header_size
 *
 * Calculate the alignment offset for the architecture's void* size. This is
 * the alignment offset of the event header.
 *
 * Important note:
 * The event header size must be a multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here
 * is:
 *
 *   alignment of the header struct on the architecture size
 *   + sizeof(header struct)
 *   + padding added to the end of the struct to align it on the architecture
 *     size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after the header, considering the header aligned by ltt_align.
	 * Calculated statically if the header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
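
/* Relationship between the outputs (a sketch, not an additional API;
 * "trace" and "slot" are hypothetical): the return value is the offset of
 * the event payload from the beginning of the reserved slot.
 *
 *   size_t before_hdr_pad, after_hdr_pad, header_size;
 *   unsigned char payload_offset;
 *
 *   payload_offset = ltt_get_header_size(trace, slot, &before_hdr_pad,
 *                                        &after_hdr_pad, &header_size);
 *   // payload_offset == before_hdr_pad + header_size + after_hdr_pad
 *   // slot layout: [before_hdr_pad][event header][after_hdr_pad][payload]
 */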


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @trace : the trace structure to log to.
 * @buf : pointer to the channel buffer structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}



static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp(void)
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}



static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);

}



static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here: never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
			buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to the total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header (out)
 * @after_hdr_pad : dynamic padding after the event header (out)
 * @header_size : size of the event header (out)
 *
 * Return : NULL if there is not enough space, otherwise the pointer to the
 * beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		unsigned int *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//		- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//		>= ltt_buf->alloc_size) {
				/* sem_wait is not signal safe. Disable signals around it. */
				{
					sigset_t oldset, set;

					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					sem_wait(&ltt_buf->writer_sem);

					/* Enable signals */
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}

				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode or
				//	 * the buffer is not full. It's safe to write in this new subbuffer.*/
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				sem_post(&ltt_buf->writer_sem);
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch
			 * boundary. It's safe to write. */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count
		   if the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer). This
		   operation can be done concurrently by many writers in the same
		   buffer; the writer at the farthest write position sub-buffer index
		   in the buffer is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed: we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode: we never want to write over a sub-buffer that is not
		   completely committed. Possible causes: the buffer size is too low
		   compared to the unordered data input, or a writer died between the
		   reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count: a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the delivery themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted
		   too because of the commit count adjustment.
		   Note: offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be
		   delivered by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered: writers may already have
		   written to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the
		   delivery of this sub-buffer. */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the delivery themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted
		   too because of the commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be
		   delivered by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
					ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
			ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
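
/* End-to-end usage sketch (illustrative only): this is roughly how the
 * generated per-facility tracing functions are expected to combine the
 * reserve/commit primitives above. The payload variables and the payload
 * placement shown here are assumptions, not part of this header.
 *
 *   struct ltt_trace_info *trace = thread_trace_info;
 *   struct ltt_buf *buf = ltt_get_channel_from_index(trace,
 *           ltt_get_index_from_facility(facility_id, event_id));
 *   unsigned int slot_size;
 *   size_t before_hdr_pad, after_hdr_pad, header_size;
 *   uint64_t tsc;
 *   void *slot;
 *
 *   slot = ltt_reserve_slot(trace, buf, payload_size, &slot_size, &tsc,
 *           &before_hdr_pad, &after_hdr_pad, &header_size);
 *   if(slot == NULL)
 *           return;   // event dropped, accounted in events_lost
 *   ltt_write_event_header(trace, buf, slot, facility_id, event_id,
 *           payload_size, before_hdr_pad, tsc);
 *   memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
 *           payload, payload_size);
 *   ltt_commit_slot(buf, slot, slot_size);
 */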


#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H