1 #ifndef _LIB_RING_BUFFER_BACKEND_H
2 #define _LIB_RING_BUFFER_BACKEND_H
/*
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/* Internal helpers */
#include <wrapper/ringbuffer/backend_internal.h>
#include <wrapper/ringbuffer/frontend_internal.h>
43 /* Ring buffer backend API */
45 /* Ring buffer backend access (read/write) */
47 extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend
*bufb
,
48 size_t offset
, void *dest
, size_t len
);
50 extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend
*bufb
,
51 size_t offset
, void __user
*dest
,
54 extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend
*bufb
,
55 size_t offset
, void *dest
, size_t len
);
57 extern unsigned long *
58 lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend
*bufb
, size_t offset
,
62 * Return the address where a given offset is located.
63 * Should be used to get the current subbuffer header pointer. Given we know
64 * it's never on a page boundary, it's safe to write directly to this address,
65 * as long as the write is never bigger than a page size.
68 lib_ring_buffer_offset_address(struct lib_ring_buffer_backend
*bufb
,
71 lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend
*bufb
,
75 * lib_ring_buffer_write - write data to a buffer backend
76 * @config : ring buffer instance configuration
77 * @ctx: ring buffer context. (input arguments only)
78 * @src : source pointer to copy from
79 * @len : length of data to copy
81 * This function copies "len" bytes of data from a source pointer to a buffer
82 * backend, at the current context offset. This is more or less a buffer
83 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
84 * if copy is crossing a page boundary.
87 void lib_ring_buffer_write(const struct lib_ring_buffer_config
*config
,
88 struct lib_ring_buffer_ctx
*ctx
,
89 const void *src
, size_t len
)
91 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
92 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
93 size_t sbidx
, index
, pagecpy
;
94 size_t offset
= ctx
->buf_offset
;
95 struct lib_ring_buffer_backend_pages
*rpages
;
96 unsigned long sb_bindex
, id
;
100 offset
&= chanb
->buf_size
- 1;
101 sbidx
= offset
>> chanb
->subbuf_size_order
;
102 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
103 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
104 id
= bufb
->buf_wsb
[sbidx
].id
;
105 sb_bindex
= subbuffer_id_get_index(config
, id
);
106 rpages
= bufb
->array
[sb_bindex
];
107 CHAN_WARN_ON(ctx
->chan
,
108 config
->mode
== RING_BUFFER_OVERWRITE
109 && subbuffer_id_is_noref(config
, id
));
110 if (likely(pagecpy
== len
))
111 lib_ring_buffer_do_copy(config
,
112 rpages
->p
[index
].virt
113 + (offset
& ~PAGE_MASK
),
116 _lib_ring_buffer_write(bufb
, offset
, src
, len
, 0);
117 ctx
->buf_offset
+= len
;
121 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
122 * @config : ring buffer instance configuration
123 * @bufb : ring buffer backend
124 * @offset : offset within the buffer
125 * @c : the byte to copy
126 * @len : number of bytes to copy
128 * This function writes "len" bytes of "c" to a buffer backend, at a specific
129 * offset. This is more or less a buffer backend-specific memset() operation.
130 * Calls the slow path (_ring_buffer_memset) if write is crossing a page
134 void lib_ring_buffer_memset(const struct lib_ring_buffer_config
*config
,
135 struct lib_ring_buffer_ctx
*ctx
, int c
, size_t len
)
138 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
139 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
140 size_t sbidx
, index
, pagecpy
;
141 size_t offset
= ctx
->buf_offset
;
142 struct lib_ring_buffer_backend_pages
*rpages
;
143 unsigned long sb_bindex
, id
;
147 offset
&= chanb
->buf_size
- 1;
148 sbidx
= offset
>> chanb
->subbuf_size_order
;
149 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
150 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
151 id
= bufb
->buf_wsb
[sbidx
].id
;
152 sb_bindex
= subbuffer_id_get_index(config
, id
);
153 rpages
= bufb
->array
[sb_bindex
];
154 CHAN_WARN_ON(ctx
->chan
,
155 config
->mode
== RING_BUFFER_OVERWRITE
156 && subbuffer_id_is_noref(config
, id
));
157 if (likely(pagecpy
== len
))
158 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
159 + (offset
& ~PAGE_MASK
),
162 _lib_ring_buffer_memset(bufb
, offset
, c
, len
, 0);
163 ctx
->buf_offset
+= len
;
167 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
168 * terminating character is found in @src. Returns the number of bytes
169 * copied. Does *not* terminate @dest with NULL terminating character.
172 size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config
*config
,
173 char *dest
, const char *src
, size_t len
)
177 for (count
= 0; count
< len
; count
++) {
181 * Only read source character once, in case it is
182 * modified concurrently.
184 c
= ACCESS_ONCE(src
[count
]);
187 lib_ring_buffer_do_copy(config
, &dest
[count
], &c
, 1);
193 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
194 * terminating character is found in @src, or when a fault occurs.
195 * Returns the number of bytes copied. Does *not* terminate @dest with
196 * NULL terminating character.
198 * This function deals with userspace pointers, it should never be called
199 * directly without having the src pointer checked with access_ok()
203 size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
204 char *dest
, const char __user
*src
, size_t len
)
208 for (count
= 0; count
< len
; count
++) {
212 ret
= __copy_from_user_inatomic(&c
, src
+ count
, 1);
215 lib_ring_buffer_do_copy(config
, &dest
[count
], &c
, 1);
221 * lib_ring_buffer_strcpy - write string data to a buffer backend
222 * @config : ring buffer instance configuration
223 * @ctx: ring buffer context. (input arguments only)
224 * @src : source pointer to copy from
225 * @len : length of data to copy
226 * @pad : character to use for padding
228 * This function copies @len - 1 bytes of string data from a source
229 * pointer to a buffer backend, followed by a terminating '\0'
230 * character, at the current context offset. This is more or less a
231 * buffer backend-specific strncpy() operation. If a terminating '\0'
232 * character is found in @src before @len - 1 characters are copied, pad
233 * the buffer with @pad characters (e.g. '#'). Calls the slow path
234 * (_ring_buffer_strcpy) if copy is crossing a page boundary.
237 void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config
*config
,
238 struct lib_ring_buffer_ctx
*ctx
,
239 const char *src
, size_t len
, int pad
)
241 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
242 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
243 size_t sbidx
, index
, pagecpy
;
244 size_t offset
= ctx
->buf_offset
;
245 struct lib_ring_buffer_backend_pages
*rpages
;
246 unsigned long sb_bindex
, id
;
250 offset
&= chanb
->buf_size
- 1;
251 sbidx
= offset
>> chanb
->subbuf_size_order
;
252 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
253 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
254 id
= bufb
->buf_wsb
[sbidx
].id
;
255 sb_bindex
= subbuffer_id_get_index(config
, id
);
256 rpages
= bufb
->array
[sb_bindex
];
257 CHAN_WARN_ON(ctx
->chan
,
258 config
->mode
== RING_BUFFER_OVERWRITE
259 && subbuffer_id_is_noref(config
, id
));
260 if (likely(pagecpy
== len
)) {
263 count
= lib_ring_buffer_do_strcpy(config
,
264 rpages
->p
[index
].virt
265 + (offset
& ~PAGE_MASK
),
269 if (unlikely(count
< len
- 1)) {
270 size_t pad_len
= len
- 1 - count
;
272 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
273 + (offset
& ~PAGE_MASK
),
278 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
279 + (offset
& ~PAGE_MASK
),
282 _lib_ring_buffer_strcpy(bufb
, offset
, src
, len
, 0, pad
);
284 ctx
->buf_offset
+= len
;
288 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
289 * @config : ring buffer instance configuration
290 * @ctx: ring buffer context. (input arguments only)
291 * @src : userspace source pointer to copy from
292 * @len : length of data to copy
294 * This function copies "len" bytes of data from a userspace pointer to a
295 * buffer backend, at the current context offset. This is more or less a buffer
296 * backend-specific memcpy() operation. Calls the slow path
297 * (_ring_buffer_write_from_user_inatomic) if copy is crossing a page boundary.
298 * Disable the page fault handler to ensure we never try to take the mmap_sem.
301 void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
302 struct lib_ring_buffer_ctx
*ctx
,
303 const void __user
*src
, size_t len
)
305 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
306 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
307 size_t sbidx
, index
, pagecpy
;
308 size_t offset
= ctx
->buf_offset
;
309 struct lib_ring_buffer_backend_pages
*rpages
;
310 unsigned long sb_bindex
, id
;
312 mm_segment_t old_fs
= get_fs();
316 offset
&= chanb
->buf_size
- 1;
317 sbidx
= offset
>> chanb
->subbuf_size_order
;
318 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
319 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
320 id
= bufb
->buf_wsb
[sbidx
].id
;
321 sb_bindex
= subbuffer_id_get_index(config
, id
);
322 rpages
= bufb
->array
[sb_bindex
];
323 CHAN_WARN_ON(ctx
->chan
,
324 config
->mode
== RING_BUFFER_OVERWRITE
325 && subbuffer_id_is_noref(config
, id
));
329 if (unlikely(!access_ok(VERIFY_READ
, src
, len
)))
332 if (likely(pagecpy
== len
)) {
333 ret
= lib_ring_buffer_do_copy_from_user_inatomic(
334 rpages
->p
[index
].virt
+ (offset
& ~PAGE_MASK
),
336 if (unlikely(ret
> 0)) {
341 _lib_ring_buffer_copy_from_user_inatomic(bufb
, offset
, src
, len
, 0);
345 ctx
->buf_offset
+= len
;
353 * In the error path we call the slow path version to avoid
354 * the pollution of static inline code.
356 _lib_ring_buffer_memset(bufb
, offset
, 0, len
, 0);
360 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
361 * @config : ring buffer instance configuration
362 * @ctx: ring buffer context (input arguments only)
363 * @src : userspace source pointer to copy from
364 * @len : length of data to copy
365 * @pad : character to use for padding
367 * This function copies @len - 1 bytes of string data from a userspace
368 * source pointer to a buffer backend, followed by a terminating '\0'
369 * character, at the current context offset. This is more or less a
370 * buffer backend-specific strncpy() operation. If a terminating '\0'
371 * character is found in @src before @len - 1 characters are copied, pad
372 * the buffer with @pad characters (e.g. '#'). Calls the slow path
373 * (_ring_buffer_strcpy_from_user_inatomic) if copy is crossing a page
374 * boundary. Disable the page fault handler to ensure we never try to
378 void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
379 struct lib_ring_buffer_ctx
*ctx
,
380 const void __user
*src
, size_t len
, int pad
)
382 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
383 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
384 size_t sbidx
, index
, pagecpy
;
385 size_t offset
= ctx
->buf_offset
;
386 struct lib_ring_buffer_backend_pages
*rpages
;
387 unsigned long sb_bindex
, id
;
388 mm_segment_t old_fs
= get_fs();
392 offset
&= chanb
->buf_size
- 1;
393 sbidx
= offset
>> chanb
->subbuf_size_order
;
394 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
395 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
396 id
= bufb
->buf_wsb
[sbidx
].id
;
397 sb_bindex
= subbuffer_id_get_index(config
, id
);
398 rpages
= bufb
->array
[sb_bindex
];
399 CHAN_WARN_ON(ctx
->chan
,
400 config
->mode
== RING_BUFFER_OVERWRITE
401 && subbuffer_id_is_noref(config
, id
));
405 if (unlikely(!access_ok(VERIFY_READ
, src
, len
)))
408 if (likely(pagecpy
== len
)) {
411 count
= lib_ring_buffer_do_strcpy_from_user_inatomic(config
,
412 rpages
->p
[index
].virt
413 + (offset
& ~PAGE_MASK
),
417 if (unlikely(count
< len
- 1)) {
418 size_t pad_len
= len
- 1 - count
;
420 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
421 + (offset
& ~PAGE_MASK
),
426 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
427 + (offset
& ~PAGE_MASK
),
430 _lib_ring_buffer_strcpy_from_user_inatomic(bufb
, offset
, src
,
435 ctx
->buf_offset
+= len
;
443 * In the error path we call the slow path version to avoid
444 * the pollution of static inline code.
446 _lib_ring_buffer_memset(bufb
, offset
, pad
, len
- 1, 0);
448 _lib_ring_buffer_memset(bufb
, offset
, '\0', 1, 0);
452 * This accessor counts the number of unread records in a buffer.
453 * It only provides a consistent value if no reads not writes are performed
457 unsigned long lib_ring_buffer_get_records_unread(
458 const struct lib_ring_buffer_config
*config
,
459 struct lib_ring_buffer
*buf
)
461 struct lib_ring_buffer_backend
*bufb
= &buf
->backend
;
462 struct lib_ring_buffer_backend_pages
*pages
;
463 unsigned long records_unread
= 0, sb_bindex
, id
;
466 for (i
= 0; i
< bufb
->chan
->backend
.num_subbuf
; i
++) {
467 id
= bufb
->buf_wsb
[i
].id
;
468 sb_bindex
= subbuffer_id_get_index(config
, id
);
469 pages
= bufb
->array
[sb_bindex
];
470 records_unread
+= v_read(config
, &pages
->records_unread
);
472 if (config
->mode
== RING_BUFFER_OVERWRITE
) {
473 id
= bufb
->buf_rsb
.id
;
474 sb_bindex
= subbuffer_id_get_index(config
, id
);
475 pages
= bufb
->array
[sb_bindex
];
476 records_unread
+= v_read(config
, &pages
->records_unread
);
478 return records_unread
;
482 * We use __copy_from_user_inatomic to copy userspace data after
483 * checking with access_ok() and disabling page faults.
485 * Return 0 if OK, nonzero on error.
488 unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest
,
489 const void __user
*src
,
495 if (!access_ok(VERIFY_READ
, src
, len
))
500 ret
= __copy_from_user_inatomic(dest
, src
, len
);
506 #endif /* _LIB_RING_BUFFER_BACKEND_H */