#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"	/* shmp()/shmp_index() accessors used below */
/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   struct channel_backend *chan, int cpu,
                                   struct lttng_ust_shm_handle *handle,
                                   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
                         const char *name,
                         const struct lttng_ust_lib_ring_buffer_config *config,
                         size_t subbuf_size,
                         size_t num_subbuf, struct lttng_ust_shm_handle *handle,
                         const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
                          struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);
extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   size_t offset, const void *src, size_t len,
                                   ssize_t pagecpy);
/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */
#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
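/*
 * Example layout, assuming a 64-bit architecture (CAA_BITS_PER_LONG == 64,
 * so HALF_ULONG_BITS == 32); the id word then decomposes as:
 *
 *   bits 33..63  "offset" (produced subbuffer count, SB_ID_OFFSET_SHIFT == 33)
 *   bit  32      noref flag (SB_ID_NOREF_SHIFT == 32)
 *   bits  0..31  subbuffer index (SB_ID_INDEX_MASK == 0xffffffff)
 */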
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
                           unsigned long offset, unsigned long noref,
                           unsigned long index)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (offset << SB_ID_OFFSET_SHIFT)
                       | (noref << SB_ID_NOREF_SHIFT)
                       | index;
        else
                return index;
}
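/*
 * A worked example of the encoding, with arbitrarily chosen values: in
 * overwrite mode on a 64-bit architecture, subbuffer_id(config, 2, 1, 5)
 * yields (2UL << 33) | (1UL << 32) | 5 == 0x500000005, i.e. offset 2,
 * noref flag set, index 5.
 */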
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                unsigned long id, unsigned long offset)
{
        return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
                                     unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return id & SB_ID_INDEX_MASK;
        else
                return id;
}
static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                                    unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return !!(id & SB_ID_NOREF_MASK);
        else
                return 1;
}
/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access necessary.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                            unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id |= SB_ID_NOREF_MASK;
}
static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                   unsigned long *id, unsigned long offset)
{
        unsigned long tmp;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                tmp = *id;
                tmp &= ~SB_ID_OFFSET_MASK;
                tmp |= offset << SB_ID_OFFSET_SHIFT;
                tmp |= SB_ID_NOREF_MASK;
                /* Volatile store, read concurrently by readers. */
                CMM_ACCESS_ONCE(*id) = tmp;
        }
}
/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                              unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id &= ~SB_ID_NOREF_MASK;
}
/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 * 2^16 on 32-bit architectures
 * 2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
                             unsigned long num_subbuf)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
        else
                return 0;
}
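/*
 * Example: on a 32-bit architecture (HALF_ULONG_BITS == 16), an
 * overwrite-mode channel may use up to 65536 subbuffers; asking for
 * 131072 cannot fit in the 16-bit index field and returns -EPERM.
 */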
static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
        struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        struct lttng_ust_shm_handle *handle = ctx->handle;
        size_t sbidx;
        size_t offset = ctx->buf_offset;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        unsigned long sb_bindex, id;
        struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

        offset &= chanb->buf_size - 1;
        sbidx = offset >> chanb->subbuf_size_order;
        wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
        if (caa_unlikely(!wsb))
                return -1;
        id = wsb->id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (caa_unlikely(!rpages))
                return -1;
        /* A subbuffer being written to must never carry the noref flag. */
        CHAN_WARN_ON(ctx->chan,
                     config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        _backend_pages = shmp(handle, rpages->shmp);
        if (caa_unlikely(!_backend_pages))
                return -1;
        *backend_pages = _backend_pages;
        return 0;
}
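/*
 * Example of the index arithmetic above, assuming a 4 MiB buffer made of
 * four 1 MiB subbuffers (subbuf_size_order == 20): ctx->buf_offset is
 * first wrapped with "offset &= 4 MiB - 1", then "offset >> 20" selects
 * the subbuffer, so offset 0x250000 lands in subbuffer index 2.
 */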
/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
        lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
        if (caa_unlikely(ctx->ctx_len
                        < sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
                return NULL;
        return ctx->backend_pages;
}
/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
                            const struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            struct lttng_ust_lib_ring_buffer_backend *bufb,
                            unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        if (caa_unlikely(!backend_pages)) {
                if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
                        return;
        }
        v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
                            const struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            struct lttng_ust_lib_ring_buffer_backend *bufb,
                            unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
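/*
 * Event counting is compiled in only when LTTNG_RING_BUFFER_COUNT_EVENTS
 * is defined at build time (for example via CPPFLAGS, assuming an
 * autotools-style build); otherwise the empty stub above is used, so
 * callers need no conditional code.
 */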
/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
                              struct lttng_ust_lib_ring_buffer_backend *bufb,
                              struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct channel *chan;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return;
        CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
        /* Non-atomic decrement protected by exclusive subbuffer access */
        _v_dec(config, &backend_pages->records_unread);
        v_inc(config, &bufb->records_read);
}
static inline
unsigned long subbuffer_get_records_count(
                                const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                unsigned long idx,
                                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return v_read(config, &backend_pages->records_commit);
}
/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
                                const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                unsigned long idx,
                                struct lttng_ust_shm_handle *handle)
{
        unsigned long overruns, sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        overruns = v_read(config, &backend_pages->records_unread);
        v_set(config, &backend_pages->records_unread,
              v_read(config, &backend_pages->records_commit));
        v_set(config, &backend_pages->records_commit, 0);

        return overruns;
}
static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
                             struct lttng_ust_lib_ring_buffer_backend *bufb,
                             unsigned long idx,
                             unsigned long data_size,
                             struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return;
        backend_pages->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
                                const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return 0;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
                                const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                unsigned long idx,
                                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}
static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_counts *counts;

        counts = shmp_index(handle, bufb->buf_cnt, idx);
        if (!counts)
                return;
        counts->seq_cnt++;
}
/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct lttng_ust_lib_ring_buffer_backend *bufb,
                                 unsigned long idx,
                                 struct lttng_ust_shm_handle *handle)
{
        unsigned long id, new_id;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        /*
         * Performing a volatile access to read the sb_pages, because we want to
         * read a coherent version of the pointer and the associated noref flag.
         */
        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        id = CMM_ACCESS_ONCE(wsb->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (caa_likely(!subbuffer_id_is_noref(config, id))) {
                        /*
                         * Store after load dependency ordering the writes to
                         * the subbuffer after load and test of the noref flag
                         * matches the memory barrier implied by the cmpxchg()
                         * in update_read_sb_index().
                         */
                        return; /* Already writing to this buffer */
                }
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
                new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
                if (caa_likely(new_id == id))
                        break;
                id = new_id;
        }
}
/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                      struct lttng_ust_lib_ring_buffer_backend *bufb,
                                      unsigned long idx, unsigned long offset,
                                      struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct channel *chan;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        /*
         * Because ring_buffer_set_noref() is only called by a single thread
         * (the one which updated the cc_sb value), there are no concurrent
         * updates to take care of: other writers have not updated cc_sb, so
         * they cannot set the noref flag, and concurrent readers cannot modify
         * the pointer because the noref flag is not set yet.
         * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
         * to the subbuffer before this set noref operation.
         * subbuffer_set_noref() uses a volatile store to deal with concurrent
         * readers of the noref flag.
         */
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
        /*
         * Memory barrier that ensures counter stores are ordered before set
         * noref and offset.
         */
        cmm_smp_mb();
        subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}
/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                         struct lttng_ust_lib_ring_buffer_backend *bufb,
                         struct channel_backend *chanb,
                         unsigned long consumed_idx,
                         unsigned long consumed_count,
                         struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        unsigned long old_id, new_id;

        wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
        if (caa_unlikely(!wsb))
                return -EPERM;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                struct channel *chan;

                /*
                 * Exchange the target writer subbuffer with our own unused
                 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
                old_id = wsb->id;
                if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
                 * Make sure the offset count we are expecting matches the one
                 * indicated by the writer.
                 */
                if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
                                                              consumed_count)))
                        return -EAGAIN;
                chan = shmp(handle, bufb->chan);
                if (caa_unlikely(!chan))
                        return -EPERM;
                CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
                subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
                                              consumed_count);
                new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
                if (caa_unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
                /* No page exchange, use the writer page directly */
                bufb->buf_rsb.id = wsb->id;
        }
        return 0;
}
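/*
 * Sketch of the overwrite-mode exchange above (the names S_r and S_w are
 * illustrative only): the reader owns a spare subbuffer S_r, and the
 * writer table maps consumed_idx to S_w with the noref flag set and an
 * offset matching consumed_count. The cmpxchg() atomically publishes S_r
 * into the writer table and hands the reader exclusive ownership of S_w;
 * a concurrent writer update makes the exchange fail and the function
 * returns -EAGAIN so the caller can retry.
 */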
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
                unsigned long len)
{
        switch (len) {
        case 1:
                *(uint8_t *) dest = *(const uint8_t *) src;
                break;
        case 2:
                *(uint16_t *) dest = *(const uint16_t *) src;
                break;
        case 4:
                *(uint32_t *) dest = *(const uint32_t *) src;
                break;
        case 8:
                *(uint64_t *) dest = *(const uint64_t *) src;
                break;
        default:
                inline_memcpy(dest, src, len);
        }
}
/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths not known at compile time.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
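/*
 * Usage sketch (hypothetical caller): a fixed-size copy such as
 * lib_ring_buffer_do_copy(config, dest, &hdr, sizeof(hdr)) lets the
 * compiler expand memcpy() inline, whereas a runtime-sized payload
 * length falls through to lttng_inline_memcpy() and its small-size
 * switch above.
 */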
/*
 * write len bytes to dest with c
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
        unsigned long i;

        for (i = 0; i < len; i++)
                dest[i] = c;
}
/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xFFFF0000U)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xFF000000U)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xF0000000U)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xC0000000U)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000U)) {
                /* No need to bit shift on last operation */
                r -= 1;
        }
        return r;
}
static inline int get_count_order(unsigned int count)
{
        int order;

        order = lttng_ust_fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}
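/*
 * Examples: get_count_order(8) == 3 (exact power of two), while
 * get_count_order(10) == 4, since 10 has more than one bit set and is
 * rounded up to the next power of two (16).
 */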

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */