#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */
int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);
/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced subbuffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */
#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
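
/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit architecture, CAA_BITS_PER_LONG == 64, so HALF_ULONG_BITS == 32,
 * SB_ID_OFFSET_SHIFT == 33 and SB_ID_NOREF_SHIFT == 32. The id word is
 * therefore laid out as:
 *
 *   bits 33..63  offset (counts produced subbuffers, modulo 2^31)
 *   bit  32      noref flag
 *   bits 0..31   subbuffer index
 */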
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
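
/*
 * Sketch of how the helpers below decompose an id built by subbuffer_id()
 * (hypothetical values, 64-bit, overwrite mode): subbuffer_id(config, 5, 1, 3)
 * yields (5UL << 33) | (1UL << 32) | 3; subbuffer_id_get_index() recovers 3,
 * subbuffer_id_is_noref() reports 1, and subbuffer_id_compare_offset()
 * matches offset 5.
 */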
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}
static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}
/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access necessary.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}
static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}
/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}
/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 * 2^16 on 32-bit architectures
 * 2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
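
/*
 * Example (illustrative): on a 32-bit architecture, HALF_ULONG_BITS == 16,
 * so an overwrite-mode channel with num_subbuf == (1UL << 16) passes, while
 * one more subbuffer makes subbuffer_id_check_index() return -EPERM.
 */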
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
	v_inc(config, &bufb->records_read);
}
static inline
unsigned long subbuffer_get_records_count(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See ring_buffer_check_deliver() for details.
 * ring_buffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
	v_set(config, &shmp(handle, pages->shmp)->records_unread,
	      v_read(config, &shmp(handle, pages->shmp)->records_commit));
	v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);

	return overruns;
}
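
/*
 * Delivery-time usage sketch (assumed caller, see ring_buffer_check_deliver()):
 *
 *	count = subbuffer_get_records_count(config, bufb, idx, handle);
 *	overruns = subbuffer_count_records_overrun(config, bufb, idx, handle);
 *
 * The order matters: the overrun accounting resets records_commit, so the
 * records count must be sampled first.
 */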
static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	shmp(handle, pages->shmp)->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}
/*
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
		if (caa_likely(new_id == id))
			break;	/* Success */
		id = new_id;
	}
}
/*
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}
/*
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(shmp(handle, bufb->chan),
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
					 bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
	}
	return 0;
}
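
/*
 * Consumer-side usage sketch (hypothetical caller): the reader samples the
 * consumed position, derives consumed_idx/consumed_count, then attempts the
 * exchange:
 *
 *	if (update_read_sb_index(config, bufb, chanb, consumed_idx,
 *				 consumed_count, handle))
 *		goto retry;	(-EAGAIN: writer still owns or has recycled it)
 *
 * On success, bufb->buf_rsb names a subbuffer the reader owns exclusively.
 */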
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif
/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but fall back to an inline memcpy when the length is not statically
 * known. The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)
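
/*
 * Usage sketch (illustrative): because __builtin_constant_p(len) is resolved
 * at compile time, a fixed-size copy such as
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *
 * selects the plain memcpy() branch, which the compiler can expand inline for
 * a constant size, while a runtime-sized copy falls back to inline_memcpy().
 */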
/* arch-agnostic implementation */

static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
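
/*
 * Worked example (illustrative): fls(16) == 5, so get_count_order(16) == 4
 * (16 is a power of two); fls(17) == 5 and 17 & 16 != 0, so
 * get_count_order(17) == 5, i.e. 17 rounds up to order 5 (32).
 */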
#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */