+	do {
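+		/* Lockless retry: take a snapshot of the buffer offset,
+		 * compute the switch from it, and publish the new offset with
+		 * the cmpxchg at the bottom of the loop; if a concurrent
+		 * writer moved the offset in the meantime, the cmpxchg fails
+		 * and we restart. */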
+		offset_old = atomic_read(&ltt_buf->offset);
+		offset_begin = offset_old;
+		end_switch_old = 0;
+		tsc = ltt_get_timestamp();
+		if (tsc == 0) {
+			/* Error getting the timestamp: should not happen. It
+			 * would mean we were called from an NMI during a write
+			 * seqlock on xtime. */
+			return;
+		}
+
+		if (SUBBUF_OFFSET(offset_begin, ltt_buf) != 0) {
+			offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
+			end_switch_old = 1;
+		} else {
+			/* We do not have to switch: the buffer is empty. */
+			return;
+		}
+		if (mode == FORCE_ACTIVE)
+			offset_begin += ltt_subbuf_header_len(ltt_buf);
+		/* Always begin_switch in FORCE_ACTIVE mode */
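+		/* end_switch_old == 1 means the old sub-buffer holds data and
+		 * needs its end-of-sub-buffer accounting done through
+		 * ltt_buffer_end_callback() once we win the cmpxchg below. */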
+
+		/* Test new sub-buffer integrity */
+		reserve_commit_diff = atomic_read(
+			&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])
+			- atomic_read(
+			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
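+		/* A non-zero difference means some reserved slots in that
+		 * sub-buffer were never committed: either a writer died
+		 * between the reserve and the commit, or the buffer is too
+		 * small for the unordered data input (see the repair code
+		 * further down). */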
+		if (reserve_commit_diff == 0) {
+			/* Next sub-buffer not corrupted. */
+			if (mode == FORCE_ACTIVE
+			    && (offset_begin - atomic_read(&ltt_buf->consumed))
+			    >= ltt_buf->alloc_size) {
+				/* We do not overwrite non-consumed buffers
+				 * and we are full: ignore the switch while
+				 * tracing is active. */
+				return;
+			}
+		} else {
+			/* Next sub-buffer corrupted: force pushing the
+			 * reader, even in normal mode. */
+		}
+
+		offset_end = offset_begin;
+	} while (atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
+		!= offset_old);
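+	/* The cmpxchg succeeded: the new offset is published and this thread
+	 * owns the switch; concurrent writers now reserve space past
+	 * offset_end. */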
+
+	if (mode == FORCE_ACTIVE) {
+		/* Push the reader if necessary */
+		do {
+			consumed_old = atomic_read(&ltt_buf->consumed);
+			/* If the buffer is in overwrite mode, push the reader
+			 * consumed count if the write position has reached it
+			 * and we are not at the first iteration (do not push
+			 * the reader farther than the writer). This operation
+			 * can be done concurrently by many writers in the
+			 * same buffer; the writer at the farthest write
+			 * position sub-buffer index is the one which wins
+			 * this loop. */
+			/* If the buffer is not in overwrite mode, pushing the
+			 * reader only happens if a sub-buffer is corrupted. */
+			if ((SUBBUF_TRUNC(offset_end, ltt_buf)
+			    - SUBBUF_TRUNC(consumed_old, ltt_buf))
+			    >= ltt_buf->alloc_size)
+				consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
+			else {
+				consumed_new = consumed_old;
+				break;
+			}
+		} while (atomic_cmpxchg(&ltt_buf->consumed, consumed_old,
+			consumed_new) != consumed_old);
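+		/* When we break out early, consumed_new == consumed_old, so
+		 * the winner check below sees that no push happened. */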
+
+		if (consumed_old != consumed_new) {
+			/* Reader pushed: we are the winner of the push, so we
+			 * can re-equilibrate reserve and commit. The atomic
+			 * increment of the commit count permits other writers
+			 * to play around with this variable before us. We
+			 * keep track of corrupted sub-buffers even in
+			 * overwrite mode: we never want to write over a
+			 * sub-buffer that is not completely committed.
+			 * Possible causes: the buffer size is too low compared
+			 * to the unordered data input, or a writer died
+			 * between the reserve and the commit. */
+			if (reserve_commit_diff) {
+				/* We have to alter the sub-buffer commit
+				 * count: a sub-buffer is corrupted. */
+				atomic_add(reserve_commit_diff,
+					&ltt_buf->commit_count[
+					SUBBUF_INDEX(offset_begin, ltt_buf)]);
+				atomic_inc(&ltt_buf->corrupted_subbuffers);
+			}
+		}
+	}
+
+	/* Always switch */
+
+	if (end_switch_old) {
+		/* Old sub-buffer */
+		/* Concurrency safe because we are the last and only thread to
+		 * alter this sub-buffer. As long as it is not delivered and
+		 * read, no other thread can alter the offset, alter the
+		 * reserve_count or call the client_buffer_end_callback on
+		 * this sub-buffer. The only remaining threads could be the
+		 * ones with pending commits: they will have to do the
+		 * delivery themselves.
+		 * Not concurrency safe in overwrite mode: we detect corrupted
+		 * sub-buffers with the commit and reserve counts, keep a
+		 * corrupted sub-buffer count, and push the readers across
+		 * these sub-buffers.
+		 * Not concurrency safe if a writer is stalled in a sub-buffer
+		 * and another writer switches in, finding it corrupted: the
+		 * result is that the old (uncommitted) sub-buffer is declared
+		 * corrupted, and the new sub-buffer is declared corrupted
+		 * too, because of the commit count adjustment.
+		 * offset_old should never be 0. */
+		ltt_buffer_end_callback(ltt_buf, tsc, offset_old,
+			SUBBUF_INDEX(offset_old, ltt_buf));
+		/* Setting this reserve_count will allow the sub-buffer to be
+		 * delivered by the last committer. */
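+		/* SUBBUF_OFFSET(offset_old - 1, ltt_buf) + 1 is the number of
+		 * bytes used in the old sub-buffer: adding it makes
+		 * reserve_count equal commit_count exactly when every pending
+		 * writer has committed, which triggers the delivery below. */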
+		reserve_count = atomic_add_return(
+			SUBBUF_OFFSET(offset_old - 1, ltt_buf) + 1,
+			&ltt_buf->reserve_count[SUBBUF_INDEX(offset_old, ltt_buf)]);
+		if (reserve_count == atomic_read(
+			&ltt_buf->commit_count[SUBBUF_INDEX(offset_old, ltt_buf)])) {
+			ltt_deliver_callback(ltt_buf,
+				SUBBUF_INDEX(offset_old, ltt_buf), NULL);
+		}
+	}
+
+	if (mode == FORCE_ACTIVE) {
+		/* New sub-buffer */
+		/* This code can be executed unordered: writers may already
+		 * have written to the sub-buffer before this code gets
+		 * executed; caution. */
+		/* The commit makes sure that this code is executed before the
+		 * delivery of this sub-buffer. */
+		ltt_buffer_begin_callback(ltt_buf, tsc,
+			SUBBUF_INDEX(offset_begin, ltt_buf));
+		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
+			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
+		/* Check if the written sub-buffer has to be delivered */
+		if (commit_count == atomic_read(
+			&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
+			ltt_deliver_callback(ltt_buf,
+				SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
+		}
+	}
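
For reference, here is a minimal sketch of the sub-buffer arithmetic the
code above relies on, reconstructed from how the macros are used. It is
an assumption, not the patch's actual definitions: the field names
subbuf_size and n_subbufs and the power-of-two layout are guesses.

	/* Sketch only: assumes subbuf_size and n_subbufs are powers of two,
	 * so masking can replace modulo on the free-running offset counter. */
	#define SUBBUF_OFFSET(offset, buf)	/* offset within its sub-buffer */ \
		((offset) & ((buf)->subbuf_size - 1))
	#define SUBBUF_ALIGN(offset, buf)	/* start of the next sub-buffer */ \
		(((offset) + (buf)->subbuf_size) & ~((buf)->subbuf_size - 1))
	#define SUBBUF_TRUNC(offset, buf)	/* start of the current sub-buffer */ \
		((offset) & ~((buf)->subbuf_size - 1))
	#define SUBBUF_INDEX(offset, buf)	/* sub-buffer slot in the buffer */ \
		(((offset) / (buf)->subbuf_size) & ((buf)->n_subbufs - 1))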