/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan,
				   int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
	__attribute__((visibility("hidden")));

void channel_backend_unregister_notifiers(struct channel_backend *chanb)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
	__attribute__((visibility("hidden")));

int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
	__attribute__((visibility("hidden")));

void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

void channel_backend_reset(struct channel_backend *chanb)
	__attribute__((visibility("hidden")));

int lib_ring_buffer_backend_init(void)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_exit(void)
	__attribute__((visibility("hidden")));

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy)
	__attribute__((visibility("hidden")));

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

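/*
 * Worked example (editor's illustration, assuming CAA_BITS_PER_LONG == 64):
 * HALF_ULONG_BITS == 32, so SB_ID_OFFSET_SHIFT == 33 and
 * SB_ID_NOREF_SHIFT == 32. A subbuffer ID then decomposes as:
 *
 *	bits 63..33	offset counter (31 bits)
 *	bit  32		noref flag
 *	bits 31..0	subbuffer index
 *
 * On a 32-bit system (CAA_BITS_PER_LONG == 32): bits 31..17 hold the offset
 * (15 bits, hence the 2^15 figure above), bit 16 is noref, bits 15..0 are
 * the index.
 */
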
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}

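/*
 * Usage sketch (editor's illustration, hypothetical values, overwrite mode):
 *
 *	unsigned long id = subbuffer_id(config, 5, 1, 3);
 *
 * yields an ID whose offset bits hold 5, whose noref flag is set, and whose
 * index is 3, so subbuffer_id_get_index(config, id) == 3 and
 * subbuffer_id_is_noref(config, id) is nonzero (helpers defined below).
 */
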
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access necessary.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 * 2^16 on 32-bit architectures
 * 2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

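/*
 * Example (editor's illustration): on a 32-bit system HALF_ULONG_BITS == 16,
 * so an overwrite-mode channel with num_subbuf == 1UL << 16 (65536) is
 * accepted, while num_subbuf == 1UL << 17 is refused with -EPERM because the
 * index would no longer fit in the low half-word of the subbuffer ID.
 */
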
static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend;
	struct channel_backend *chanb = &ctx_private->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
	size_t sbidx;
	size_t offset = ctx_private->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx_private->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
	lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return ctx->priv->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;	/* Success */
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_lib_ring_buffer_channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
	__attribute__((always_inline));
static inline
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for length statically unknown.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)

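/*
 * Usage sketch (editor's illustration, hypothetical dest/src/event_len): a
 * compile-time-constant length resolves to the architecture memcpy(), while
 * a runtime length dispatches through lttng_inline_memcpy():
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *	lib_ring_buffer_do_copy(config, dest, src, event_len);
 */
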
/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

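/*
 * Worked examples (editor's addition): lttng_ust_fls(8) == 4, so
 * get_count_order(8) == 3 since 8 is a power of two (1 << 3 == 8). For a
 * non-power-of-two count the order is rounded up: get_count_order(12) == 4,
 * i.e. 1 << 4 == 16 >= 12.
 */
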
#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */