/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <asm/atomic.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <asm/timex.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

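/* Worked example (illustrative, using the default sizes above; note that these
 * masks require subbuf_size and alloc_size to be powers of two) : with
 * subbuf_size = 1048576 and alloc_size = 2097152, an offset of 1048580 gives
 * BUFFER_OFFSET = 1048580, SUBBUF_OFFSET = 4, SUBBUF_INDEX = 1,
 * SUBBUF_TRUNC = 1048576 (start of the current sub-buffer) and
 * SUBBUF_ALIGN = 2097152 (start of the next sub-buffer). */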

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
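
/* Together, the two helpers above implement an offsetof()-style lookup :
 * GET_CHANNEL_INDEX() computes the byte offset of a channel inside
 * struct ltt_trace_info, and ltt_get_channel_from_index() adds it back to a
 * trace pointer. Illustrative sketch (assuming "trace" points to a valid
 * struct ltt_trace_info) :
 *
 *	unsigned int index = ltt_get_index_from_facility(fID, eID);
 *	struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 *
 * With the single "process" channel currently defined, buf always ends up
 * pointing to &trace->channel.process. */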


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header size must be a multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here :
 *
 *   Alignment of header struct on arch size
 *   + sizeof(header struct)
 *   + padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
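
/* Worked example (illustrative; assumes ltt_align() returns the number of
 * padding bytes needed to reach the requested alignment, as provided by the
 * ltt usertrace headers) : struct ltt_event_header_nohb is packed, so
 * header = 12 bytes. On a 64-bit arch, after_hdr_pad = ltt_align(12, 8) = 4,
 * so the header plus trailing padding spans 16 bytes, a multiple of
 * sizeof(void*). before_hdr_pad depends only on the write address inside the
 * buffer, and the return value is before_hdr_pad + 12 + after_hdr_pad. */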


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @trace : trace information structure
 * @buf : pointer to the channel buffer structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}
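
/* Resulting layout of one reserved slot (illustrative; the names follow
 * ltt_get_header_size above and ltt_reserve_slot below) :
 *
 *	[before_hdr_pad][struct ltt_event_header_nohb][after_hdr_pad][event data]
 *
 * ptr is the start of the reserved slot and offset is the before_hdr_pad
 * computed by ltt_get_header_size, so the header lands aligned and the
 * variable length data that follows can be aligned statically. */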


static inline uint64_t __attribute__((no_instrument_function))
	ltt_get_timestamp(void)
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
	ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
	ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
	ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);
}


static inline void __attribute__((no_instrument_function))
	ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
			buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
	ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header (out)
 * @after_hdr_pad : dynamic padding after the event header (out)
 * @header_size : size of the event header (out)
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		unsigned int *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//		- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//		>= ltt_buf->alloc_size) {
				/* sem_wait is not signal safe. Disable signals around it. */
				{
					sigset_t oldset, set;

					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					sem_wait(&ltt_buf->writer_sem);

					/* Enable signals */
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}

				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode
				//	 * or the buffer is not full. It's safe to write in this new
				//	 * subbuffer. */
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				sem_post(&ltt_buf->writer_sem);
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in
				 * the new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch
			 * boundary. It's safe to write. */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);


	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer, the writer at the farthest write position sub-buffer
		   index in the buffer being the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted */
		if((SUBBUF_TRUNC(offset_end, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   reequilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a not completely committed
		   sub-buffer. Possible causes : the buffer size is too low compared
		   to the unordered data input, or there is a writer who died between
		   the reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
					&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted
		   too because of the commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
					&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have
		   written to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted
		   too because of the commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
					&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
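
/* Typical write path, tying ltt_reserve_slot and ltt_commit_slot together.
 * This is only an illustrative sketch : the real callers are the generated
 * per-facility trace functions, and "trace", "buf", "fID", "eID" and
 * "payload" below are placeholders for this example.
 *
 *	uint64_t tsc;
 *	unsigned int slot_size;
 *	size_t before_hdr_pad, after_hdr_pad, header_size;
 *	void *slot;
 *
 *	slot = ltt_reserve_slot(trace, buf, sizeof(payload), &slot_size, &tsc,
 *			&before_hdr_pad, &after_hdr_pad, &header_size);
 *	if(slot != NULL) {
 *		ltt_write_event_header(trace, buf, slot, fID, eID,
 *				sizeof(payload), before_hdr_pad, tsc);
 *		memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
 *				&payload, sizeof(payload));
 *		ltt_commit_slot(buf, slot, slot_size);
 *	}
 */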


#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H