/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle,
			 const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);
extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);
/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced subbuffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
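
/*
 * Worked example (illustrative only, assuming a 64-bit system where
 * CAA_BITS_PER_LONG is 64): HALF_ULONG_BITS is 32, SB_ID_OFFSET_SHIFT is 33
 * and SB_ID_NOREF_SHIFT is 32, so an overwrite-mode subbuffer ID decomposes
 * as:
 *
 *   bits [63..33]  offset count (31 bits, wraps at 2^31)
 *   bit  [32]      noref flag
 *   bits [31..0]   subbuffer index
 *
 * With that layout, subbuffer_id(config, 5, 1, 3) below yields
 * (5UL << 33) | (1UL << 32) | 3 in overwrite mode, and simply 3 in
 * producer-consumer mode.
 */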
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
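
/*
 * Usage sketch (illustrative, mirrors what update_read_sb_index() does further
 * below): the consumer samples the produced count, then checks that the
 * writer-side subbuffer it is about to exchange still carries the expected
 * offset bits:
 *
 *	if (!subbuffer_id_compare_offset(config, old_id, consumed_count))
 *		...abort and retry the read-side exchange...
 */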
static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}
static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}
/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access necessary.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}
static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}
/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}
/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM otherwise.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
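
/*
 * Worked example (illustrative): on a 32-bit system HALF_ULONG_BITS is 16, so
 * an overwrite-mode channel configured with 65537 subbuffers is rejected here
 * with -EPERM, while 65536 (2^16) or fewer subbuffers pass the check.
 * Producer-consumer mode always returns 0.
 */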
static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}
/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}
/*
 * The ring buffer can count events recorded and overwritten per buffer, but
 * this is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
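
/*
 * Note (illustrative): the per-subbuffer event counting above only takes
 * effect if LTTNG_RING_BUFFER_COUNT_EVENTS is defined at build time, e.g. by
 * adding -DLTTNG_RING_BUFFER_COUNT_EVENTS to the preprocessor flags; it is
 * not defined anywhere by default, so the empty stub is normally compiled in.
 */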
/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}
static inline
unsigned long subbuffer_get_records_count(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}
/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}
static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}
static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}
/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}
/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}
/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}
/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on the inline memcpy above when the length is not known at
 * compile time. The function call to memcpy is just way too expensive for a
 * fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
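
/*
 * Usage sketch (illustrative only; dest_ptr, src_ptr and struct my_event are
 * hypothetical names, not part of this header): a write-side fast path would
 * typically pass a compile-time-constant length, e.g.
 *
 *	lib_ring_buffer_do_copy(config, dest_ptr, src_ptr,
 *				sizeof(struct my_event));
 *
 * With a constant sizeof(), the __builtin_constant_p() branch selects
 * memcpy(), which the compiler can expand to a fixed-size move; a runtime
 * length falls back to lttng_inline_memcpy() and avoids a libc call for the
 * common small sizes.
 */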
/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}
/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
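
/*
 * Worked example (illustrative): lttng_ust_fls(24) returns 5, since the most
 * significant set bit of 24 (binary 11000) is bit 4 counting from 0. So
 * get_count_order(24) starts from 5 - 1 = 4, then bumps it to 5 because 24 is
 * not a power of two; 2^5 = 32 is the smallest power of two >= 24. For an
 * exact power of two such as 32, get_count_order(32) returns 5 directly.
 */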
#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */