#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * linux/ringbuffer/backend_internal.h
 *
 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/ringbuffer/frontend_types.h"
#include <linux/string.h>
/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size,
			 size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);
/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */
#define HALF_ULONG_BITS		(BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
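
/*
 * Worked example of the resulting ID layout (illustrative only), assuming
 * BITS_PER_LONG == 64:
 *
 *	HALF_ULONG_BITS    = 32
 *	SB_ID_OFFSET_SHIFT = 33
 *	SB_ID_OFFSET_MASK  = 0xfffffffe00000000	(bits 63..33: offset count)
 *	SB_ID_NOREF_MASK   = 0x0000000100000000	(bit 32: noref flag)
 *	SB_ID_INDEX_MASK   = 0x00000000ffffffff	(bits 31..0: subbuffer index)
 */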
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
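
/*
 * Example (illustrative values, not part of the API): on a 64-bit
 * overwrite-mode channel, subbuffer_id(config, 5, 1, 2) evaluates to
 * (5UL << 33) | (1UL << 32) | 2 == 0x0000000b00000002, i.e. offset count 5,
 * noref flag set, subbuffer index 2.
 */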
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}
static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}
/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access necessary.
 */
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}
static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		ACCESS_ONCE(*id) = tmp;
	}
}
/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}
/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 * 2^16 on 32-bit architectures
 * 2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_backend *bufb)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(bufb->chan,
		     !v_read(config, &bufb->array[sb_bindex]->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &bufb->array[sb_bindex]->records_unread);
	v_inc(config, &bufb->records_read);
}
static inline
unsigned long subbuffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	return v_read(config, &bufb->array[sb_bindex]->records_commit);
}
/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See ring_buffer_check_deliver() for details.
 * ring_buffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	overruns = v_read(config, &pages->records_unread);
	v_set(config, &pages->records_unread,
	      v_read(config, &pages->records_commit));
	v_set(config, &pages->records_commit, 0);

	return overruns;
}
static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
			     struct lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	pages->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}
/*
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store-after-load ordering of the writes to the
			 * subbuffer, after the load and test of the noref
			 * flag, matches the memory barrier implied by the
			 * cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
		if (likely(new_id == id))
			break;	/* Success */
		id = new_id;
	}
}
/*
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	CHAN_WARN_ON(bufb->chan,
		     subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	smp_mb();
	subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
}
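
/*
 * Illustrative writer-side delivery sequence (a sketch; the real caller is
 * the frontend's subbuffer delivery path, see ring_buffer_check_deliver(),
 * and "data_size" and "offset_count" are placeholder values here):
 *
 *	subbuffer_set_data_size(config, bufb, idx, data_size);
 *	lib_ring_buffer_set_noref_offset(config, bufb, idx, offset_count);
 */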
/*
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = bufb->buf_wsb[consumed_idx].id;
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(bufb->chan,
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
				 bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
	}
	return 0;
}
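
/*
 * Illustrative consumer-side usage (a sketch, not the actual frontend code;
 * "consumed_idx" and "consumed_count" are placeholder values): a failed
 * exchange means the writer still references the subbuffer, or has already
 * reused it, so the caller backs off and retries.
 *
 *	if (update_read_sb_index(config, &buf->backend, &chan->backend,
 *				 consumed_idx, consumed_count))
 *		return -EAGAIN;	// Subbuffer not available for reading yet.
 */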
/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for length statically unknown.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)
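
/*
 * Usage sketch (illustrative; "hdr", "payload" and "len" are placeholder
 * names): a constant sizeof() lets the compiler expand the constant-size
 * memcpy() inline, while the variable-length payload goes through
 * inline_memcpy().
 *
 *	lib_ring_buffer_do_copy(config, dest, &hdr, sizeof(hdr));
 *	lib_ring_buffer_do_copy(config, dest + sizeof(hdr), payload, len);
 */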
#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */