#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * linux/ringbuffer/backend_internal.h
 *
 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/ringbuffer/frontend_types.h"
#include <linux/string.h>
#include <linux/uaccess.h>

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size,
			 size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);
extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
				    size_t offset, int c, size_t len,
				    ssize_t pagecpy);
extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
					    size_t offset, const void *src,
					    size_t len, ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of the top half-word belongs to noref. Used only for overwrite
 * mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

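/*
 * Illustrative layout (editor's sketch derived from the macros above, not
 * part of the original source): on a 32-bit system (BITS_PER_LONG == 32),
 * the ID word is split as
 *
 *	bits 31..17	offset	(15 bits, SB_ID_OFFSET_SHIFT == 17)
 *	bit  16		noref	(SB_ID_NOREF_SHIFT == 16)
 *	bits 15..0	index	(16 bits, SB_ID_INDEX_MASK == 0xffff)
 *
 * On 64-bit, the same split yields 31 offset bits, 1 noref bit and 32
 * index bits.
 */
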
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}

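/*
 * Worked example (hypothetical values, for illustration only): with the
 * 32-bit layout sketched above, subbuffer_id(config, 2, 1, 3) returns
 * (2 << 17) | (1 << 16) | 3 == 0x50003 in overwrite mode, and plain 3 in
 * producer-consumer mode.
 */
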
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

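/*
 * For instance (illustrative only): on a 32-bit kernel, 65536 subbuffers
 * (1UL << 16) is accepted, while 65537 exceeds the index range and fails
 * with -EPERM in overwrite mode.
 */
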
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	v_inc(config, &bufb->array[sb_bindex]->records_commit);
}

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_backend *bufb)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(bufb->chan,
		     !v_read(config, &bufb->array[sb_bindex]->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &bufb->array[sb_bindex]->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	return v_read(config, &bufb->array[sb_bindex]->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See ring_buffer_check_deliver() for details.
 * ring_buffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	overruns = v_read(config, &pages->records_unread);
	v_set(config, &pages->records_unread,
	      v_read(config, &pages->records_commit));
	v_set(config, &pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
			     struct lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the subbuffer id, because we
	 * want to read a coherent version of the index and the associated
	 * noref flag.
	 */
	id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency that orders the
			 * writes to the subbuffer after the load and test of
			 * the noref flag matches the memory barrier implied
			 * by the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this subbuffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
		if (likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because lib_ring_buffer_set_noref_offset() is only called by a
	 * single thread (the one which updated the cc_sb value), there are no
	 * concurrent updates to take care of: other writers have not updated
	 * cc_sb, so they cannot set the noref flag, and concurrent readers
	 * cannot modify the id because the noref flag is not set yet.
	 * The smp_wmb() in lib_ring_buffer_commit() takes care of ordering
	 * writes to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	CHAN_WARN_ON(bufb->chan,
		     subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	smp_mb();
	subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
}

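/*
 * Editor's summary of the noref handoff, pieced together from the comments
 * above (not in the original source): the delivering writer sets noref and
 * the offset in lib_ring_buffer_set_noref_offset(); the consumer may then
 * exchange the subbuffer in update_read_sb_index() below; a writer wanting
 * to write into that subbuffer again must first clear noref with a cmpxchg
 * in lib_ring_buffer_clear_noref().
 */
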
/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use ACCESS_ONCE() here to read the
		 * old id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = bufb->buf_wsb[consumed_idx].id;
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(bufb->chan,
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
				 bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
	}
	return 0;
}

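/*
 * Consumer-side usage sketch (caller names are hypothetical, simplified):
 *
 *	if (update_read_sb_index(config, &buf->backend, &chan->backend,
 *				 consumed_idx, consumed_count))
 *		goto retry;	(writer still owns the subbuffer)
 *
 * On success, bufb->buf_rsb.id designates the subbuffer just taken from the
 * writer (overwrite mode), or aliases the writer subbuffer
 * (producer-consumer mode).
 */
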
/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths not known statically.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)

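/*
 * Usage sketch (struct name is hypothetical): copying a fixed-size header
 * gives __builtin_constant_p() a compile-time constant length, selecting
 * the optimized memcpy() path:
 *
 *	lib_ring_buffer_do_copy(config, dest, &header,
 *				sizeof(struct event_header));
 */
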
/*
 * We use __copy_from_user to copy userspace data since we already
 * did the access_ok for the whole range.
 */
static inline
unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
						const void __user *src,
						unsigned long len)
{
	return __copy_from_user(dest, src, len);
}

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c,
			       unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */