/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

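/* These macros assume alloc_size and subbuf_size are powers of two, so the
 * bitwise AND acts as a cheap modulo. Worked example (sizes are illustrative
 * assumptions, not the defaults above) : with subbuf_size = 4096 and
 * alloc_size = 8192, an offset of 5000 gives :
 *   BUFFER_OFFSET(5000, buf) = 5000 & 8191 = 5000
 *   SUBBUF_OFFSET(5000, buf) = 5000 & 4095 = 904
 *   SUBBUF_INDEX(5000, buf)  = 5000 / 4096 = 1
 *   SUBBUF_ALIGN(5000, buf)  = (5000 + 4096) & ~4095 = 8192
 *   SUBBUF_TRUNC(5000, buf)  = 5000 & ~4095 = 4096
 */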

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 8

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

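/* Note : cmpxchg itself is expected to be provided elsewhere (typically by the
 * architecture-specific ltt-usertrace headers). As a rough sketch only, on
 * GCC >= 4.1 an equivalent could be built from the atomic builtin :
 *
 *   #define cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
 *
 * This is an illustrative assumption, not the definition this header relies on.
 */
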
struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint8_t tsc_lsb_truncate;
	uint8_t tscbits;
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size;  /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

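/* Lockless scheme, as used by ltt_reserve_slot/ltt_commit_slot below : writers
 * atomically advance "offset" to reserve space, then commit what they wrote.
 * A sub-buffer is complete and may be delivered once its commit_count catches
 * up with its reserve_count; a persistent gap between the two marks the
 * sub-buffer as corrupted. */
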
struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the ltt_trace_info structure */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get the channel index from the facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call.
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}

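/* Illustrative pairing of the two helpers above (sketch : "trace" is assumed
 * to be a valid, initialized ltt_trace_info) :
 *
 *   unsigned int index = ltt_get_index_from_facility(fID, eID);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 *
 * GET_CHANNEL_INDEX is the classic offsetof() trick : the byte offset of the
 * channel within ltt_trace_info, added back onto the trace pointer here. */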

/*
 * ltt_get_header_size
 *
 * Calculate the alignment offset for the arch-sized void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header must be a size multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here :
 *
 *   Alignment of header struct on arch size
 *   + sizeof(header struct)
 *   + padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically. */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after the header, considering the header aligned by
	 * ltt_align. Calculated statically if the header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}

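/* Worked example (illustrative, and assuming ltt_align(x, size) returns the
 * number of padding bytes needed to bring x up to a multiple of size) : on a
 * 64-bit target, sizeof(struct ltt_event_header_nohb) is 12 when packed, so
 * *after_hdr_pad = ltt_align(12, 8) = 4; the slot layout is then
 * before_hdr_pad + 12-byte header + 4 bytes of tail padding, i.e. the header
 * occupies a pointer-aligned 16-byte region before the payload. */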

/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @channel : pointer to the channel structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *   Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	/* Clamp the recorded size to what fits in the 16-bit event_size field. */
	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}


static inline uint64_t __attribute__((no_instrument_function))
	ltt_get_timestamp(void)
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
	ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
	ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;
	header->tsc_lsb_truncate = 0;
	header->tscbits = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}

static inline void __attribute__((no_instrument_function))
	ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);
}

static inline void __attribute__((no_instrument_function))
	ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1. */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset), buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}

static inline void __attribute__((no_instrument_function))
	ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}

/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header.
 * @after_hdr_pad : dynamic padding after the event header.
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;
	sigset_t oldset, set;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
				+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);

5ffa9d14 |
415 | if(reserve_commit_diff == 0) { |
416 | /* Next buffer not corrupted. */ |
b402c055 |
417 | //if((SUBBUF_TRUNC(offset_begin, ltt_buf) |
418 | // - SUBBUF_TRUNC(atomic_read(<t_buf->consumed), ltt_buf)) |
419 | // >= ltt_buf->alloc_size) { |
be5cc22c |
420 | { |
9b0645fd |
421 | /* sem_wait is not signal safe. Disable signals around it. |
422 | * Signals are kept disabled to make sure we win the cmpxchg. */ |
9b0645fd |
423 | /* Disable signals */ |
5199345f |
424 | ret = sigfillset(&set); |
425 | if(ret) perror("LTT Error in sigfillset\n"); |
426 | |
427 | ret = pthread_sigmask(SIG_BLOCK, &set, &oldset); |
428 | if(ret) perror("LTT Error in pthread_sigmask\n"); |
429 | |
430 | /* We detect if a signal came between |
431 | * the offset read and signal disabling: |
432 | * if it is the case, then we restart |
433 | * the loop after reenabling signals. It |
434 | * means that it's a signal that has |
435 | * won the buffer switch.*/ |
436 | if(offset_old != atomic_read(<t_buf->offset)) { |
437 | ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL); |
1e6e693a |
438 | if(ret) perror("LTT Error in pthread_sigmask\n"); |
5199345f |
439 | continue; |
1e6e693a |
440 | } |
5199345f |
441 | /* If the offset is still the same, then |
442 | * we can safely proceed to do the |
443 | * buffer switch without being |
444 | * interrupted by a signal. */ |
be5cc22c |
445 | sem_wait(<t_buf->writer_sem); |
446 | |
be5cc22c |
447 | } |
				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode or
				//	 * the buffer is not full. It's safe to write in this new subbuffer.*/
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				/* No sem_post is required because we fall through without doing a
				 * sem_wait. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				if(reserve_commit_diff == 0) {
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask");
				}
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write. */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer, the writer at the farthest write position sub-buffer
		   index in the buffer being the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a non completely committed
		   sub-buffer. Possible causes : the buffer size is too low compared
		   to the unordered data input, or there is a writer who died between
		   the reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread
		   can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted too
		   because of the commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
			SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
				NULL);
		}
	}

	if(begin_switch) {
		/* Enable signals : this is what guarantees that the reserve which did
		 * the sem_wait does in fact win the cmpxchg for the offset. We only
		 * call these system calls on buffer boundaries because of their
		 * performance cost. */
		if(reserve_commit_diff == 0) {
			ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
			if(ret) perror("LTT Error in pthread_sigmask");
		}
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have
		   written to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer. */
		ltt_buffer_begin_callback(ltt_buf, *tsc,
			SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf),
				NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread
		   can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted too
		   because of the commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
			SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf),
				NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
			ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
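
/* Typical write sequence (illustrative sketch; the "my_"-prefixed names are
 * assumptions, not part of this API). Reserve a slot, fill in the event
 * header and the payload, then commit :
 *
 *   size_t slot_size, before_hdr_pad, after_hdr_pad, header_size;
 *   uint64_t tsc;
 *   void *slot = ltt_reserve_slot(trace, buf, my_data_size, &slot_size,
 *       &tsc, &before_hdr_pad, &after_hdr_pad, &header_size);
 *   if(slot != NULL) {
 *       ltt_write_event_header(trace, buf, slot, my_fID, my_eID,
 *           my_data_size, before_hdr_pad, tsc);
 *       memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
 *           my_payload, my_data_size);
 *       ltt_commit_slot(buf, slot, slot_size);
 *   }
 */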

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H