#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * libringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <stdint.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
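
/*
 * Worked example (hypothetical geometry, for illustration only): with
 * subbuf_size = 4096 (subbuf_size_order = 12) and num_subbuf = 4, so
 * buf_size = 16384 (buf_size_order = 14), an offset of 9300 yields:
 *
 *	buf_offset(9300, chan)    = 9300   (offset within the buffer)
 *	subbuf_index(9300, chan)  = 2      (third sub-buffer)
 *	subbuf_offset(9300, chan) = 1108   (offset within that sub-buffer)
 *	subbuf_align(9300, chan)  = 12288  (start of the next sub-buffer)
 *
 * The *_trunc helpers keep the high-order bits instead, so buffer and
 * sub-buffer positions derived from the free-running write offset can be
 * compared by subtraction even after the offset wraps around.
 */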

/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
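
/*
 * Usage sketch (illustrative, not part of this header's API): the reserve
 * fast path is expected to pair these helpers roughly as follows, requesting
 * a full 64-bit timestamp in the record header whenever the tsc_bits window
 * overflows (see lib_ring_buffer_try_reserve() in ring_buffer_frontend.c for
 * the actual call sites):
 *
 *	ctx->tsc = config->cb.ring_buffer_clock_read(chan);
 *	if (last_tsc_overflow(config, buf, ctx->tsc))
 *		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 *	...
 *	save_last_tsc(config, buf, ctx->tsc);
 */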

extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
			      - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  unsigned long commit_count,
					  unsigned long idx,
					  struct lttng_ust_shm_handle *handle)
{
	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
		v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
}

static inline
int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */

	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}
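
/*
 * Worked example for the "fully committed" test above (hypothetical sizes:
 * subbuf_size = 4096, num_subbuf = 4, num_subbuf_order = 2): every complete
 * pass over the buffer adds subbuf_size to a sub-buffer's cc_sb. With
 * consumed_old = 20480 (second pass, sub-buffer index 1), a fully committed
 * sub-buffer has cc_sb = 8192, and indeed
 * (8192 - 4096) == buf_trunc(20480, chan) >> 2 == 4096, so the test passes;
 * any smaller cc_sb means the sub-buffer is still being filled.
 */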

static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}
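
/*
 * Hypothetical usage sketch: a probe wanting to detect that it nests over an
 * in-flight (reserved but not yet committed) slot on the current CPU could
 * bail out early with:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan, handle))
 *		return;		(nested over an uncommitted reservation)
 *
 * As the CHAN_WARN_ON() checks above enforce, this is only meaningful for
 * per-cpu buffer allocation with per-cpu synchronization.
 */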

static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;
	uint64_t tsc;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeeded at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. Deals with concurrent
		 * updates of the "cc" value without adding an add_return atomic
		 * operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important to perform record overruns count in
		 *   flight recorder mode locklessly.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
		 * This guarantees that old_commit_count + 1 != commit_count.
		 */
		if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
					 old_commit_count, old_commit_count + 1)
			   == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			tsc = config->cb.ring_buffer_clock_read(chan);
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend,
							  idx, handle),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx, handle),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									    buf,
									    idx,
									    handle),
					      handle);

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan), handle);

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also order wrt concurrent readers.
			 */
			cmm_smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
			      commit_count);
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							     commit_count, idx, handle);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && (uatomic_read(&buf->active_readers)
				|| uatomic_read(&buf->active_shadow_readers))
			    && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
				int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);

				if (wakeup_fd >= 0) {
					sigset_t sigpipe_set, pending_set, old_set;
					int ret, sigpipe_was_pending = 0;

					/*
					 * Wake-up the other end by
					 * writing a null byte in the
					 * pipe (non-blocking).
					 * Important note: Because
					 * writing into the pipe is
					 * non-blocking (and therefore
					 * we allow dropping wakeup
					 * data, as long as there is
					 * wakeup data present in the
					 * pipe buffer to wake up the
					 * consumer), the consumer
					 * should perform the following
					 * sequence for waiting:
					 * 1) empty the pipe (reads).
					 * 2) check if there is data in
					 *    the buffer.
					 * 3) wait on the pipe (poll).
					 *
					 * Discard the SIGPIPE from write(), not
					 * disturbing any SIGPIPE that might be
					 * already pending. If a bogus SIGPIPE
					 * is sent to the entire process
					 * concurrently by a malicious user, it
					 * may be simply discarded.
					 */
					ret = sigemptyset(&pending_set);
					assert(!ret);
					/*
					 * sigpending returns the mask
					 * of signals that are _both_
					 * blocked for the thread _and_
					 * pending for either the thread
					 * or the entire process.
					 */
					ret = sigpending(&pending_set);
					assert(!ret);
					sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
					/*
					 * If sigpipe was pending, it
					 * means it was already blocked,
					 * so no need to block it.
					 */
					if (!sigpipe_was_pending) {
						ret = sigemptyset(&sigpipe_set);
						assert(!ret);
						ret = sigaddset(&sigpipe_set, SIGPIPE);
						assert(!ret);
						ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
						assert(!ret);
					}
					do {
						ret = write(wakeup_fd, "", 1);
					} while (ret == -1L && errno == EINTR);
					if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
						struct timespec timeout = { 0, 0 };
						do {
							ret = sigtimedwait(&sigpipe_set, NULL,
									&timeout);
						} while (ret == -1L && errno == EINTR);
					}
					if (!sigpipe_was_pending) {
						ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
						assert(!ret);
					}
				}
			}
		}
	}
}
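
/*
 * Consumer-side counterpart of the wakeup pipe protocol described above
 * (illustrative sketch only; the real consumer lives outside this header).
 * Because writers may drop wakeup bytes when the pipe is already full, the
 * waiter must re-check for buffer data after draining the pipe and before
 * going back to sleep:
 *
 *	for (;;) {
 *		char tmp[64];
 *		struct pollfd fds = { .fd = wakeup_fd, .events = POLLIN };
 *
 *		while (read(wakeup_fd, tmp, sizeof(tmp)) > 0)
 *			;			(1) empty the pipe
 *		if (buffer_has_data())		(2) re-check for data
 *			break;
 *		(void) poll(&fds, 1, -1);	(3) wait on the pipe
 *	}
 *
 * buffer_has_data() stands in for whatever readiness check the consumer
 * performs (e.g. lib_ring_buffer_poll_deliver() through its own handle).
 */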

/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  size_t slot_size,
					  struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	offset = buf_offset + slot_size;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
					   commit_seq_old, commit_count);
}

extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
extern __thread unsigned int lib_ring_buffer_nesting;

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */