/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */
#ifndef _LIB_RING_BUFFER_BACKEND_H
#define _LIB_RING_BUFFER_BACKEND_H

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>

#include <wrapper/uaccess.h>

/* Internal helpers */
#include <wrapper/ringbuffer/backend_internal.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */
extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len);

extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
					  size_t offset, void __user *dest,
					  size_t len);

extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len);
extern unsigned long *
lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
			     void ***virt);
/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
			       size_t offset);
extern void *
lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
				    size_t offset);
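
/*
 * Usage sketch (illustrative; "struct packet_header" and its fields are
 * hypothetical client types): update the current subbuffer header in
 * place. Safe because the header never sits on a page boundary and is
 * smaller than a page:
 *
 *	struct packet_header *header =
 *		lib_ring_buffer_offset_address(bufb, subbuf_offset);
 *	header->content_size = data_size;
 */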
/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_write) if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
			   struct lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len))
		lib_ring_buffer_do_copy(config,
					backend_pages->p[index].virt
					    + (offset & ~PAGE_MASK),
					src, len);
	else
		_lib_ring_buffer_write(bufb, offset, src, len, 0);
	ctx->buf_offset += len;
}
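
/*
 * Usage sketch (illustrative; assumes the frontend already reserved
 * sizeof(payload) bytes and initialized ctx, which provides buf_offset):
 *
 *	u32 payload = 42;
 *
 *	lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *
 * On return, ctx.buf_offset has advanced by sizeof(payload).
 */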
/**
 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @c : the byte to write
 * @len : number of bytes to write
 *
 * This function writes "len" bytes of "c" to a buffer backend, at the
 * current context offset. This is more or less a buffer backend-specific
 * memset() operation. Calls the slow path (_lib_ring_buffer_memset) if the
 * write crosses a page boundary.
 */
static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len))
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
	ctx->buf_offset += len;
}
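
/*
 * Usage sketch (illustrative): zero-fill an 8-byte reserved slot, e.g.
 * as alignment padding between event fields:
 *
 *	lib_ring_buffer_memset(config, &ctx, 0, 8);
 */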
/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a
 * terminating '\0' character is found in @src. Returns the number of
 * bytes copied. Does *not* terminate @dest with a '\0' character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
		char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read the source character once, in case it is
		 * modified concurrently.
		 */
		c = LTTNG_READ_ONCE(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
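
/*
 * Contract example (illustrative): with src = "ab" and len = 8,
 *
 *	count = lib_ring_buffer_do_strcpy(config, dest, "ab", 8);
 *
 * copies 2 bytes and returns 2. @dest is not '\0'-terminated; the caller
 * (e.g. lib_ring_buffer_strcpy) is responsible for padding and for the
 * final '\0'.
 */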
/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a
 * terminating '\0' character is found in @src, or when a fault occurs.
 * Returns the number of bytes copied. Does *not* terminate @dest with a
 * '\0' character.
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		char *dest, const char __user *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		int ret;
		char c;

		ret = __copy_from_user_inatomic(&c, src + count, 1);
		if (ret || !c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
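
/*
 * Contract example (illustrative; "user_str" is a hypothetical
 * access_ok()-checked user pointer): a fault while reading src + count
 * behaves like a '\0' and stops the copy early:
 *
 *	count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
 *			dest, user_str, 8);
 */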
/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy) if the copy crosses a page boundary.
 */
static inline
void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy(config,
					backend_pages->p[index].virt
					    + (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(backend_pages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
				'\0', 1);
	} else {
		_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
	}
	ctx->buf_offset += len;
}
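
/*
 * Usage sketch (illustrative): record a string field in a 16-byte
 * reserved slot, '#'-padded when the source string is shorter:
 *
 *	lib_ring_buffer_strcpy(config, &ctx, "hello", 16, '#');
 *
 * writes the 5 bytes of "hello", ten '#' pad characters, then '\0'.
 */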
/**
 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : userspace source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a userspace pointer to a
 * buffer backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_copy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to take
 * the mmap_sem.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer_ctx *ctx,
				    const void __user *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;
	unsigned long ret;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		ret = lib_ring_buffer_do_copy_from_user_inatomic(
			backend_pages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
		if (unlikely(ret > 0)) {
			/* Copy failed. */
			goto fill_buffer;
		}
	} else {
		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}
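
/*
 * Usage sketch (illustrative; "uptr" is a hypothetical __user pointer
 * captured by a probe): copy 64 bytes from userspace. On fault, the
 * reserved slot is zero-filled instead, so the record stays well-formed:
 *
 *	lib_ring_buffer_copy_from_user_inatomic(config, &ctx, uptr, 64);
 */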
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : userspace source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a userspace
 * source pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to
 * take the mmap_sem.
 */
static inline
void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		const void __user *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					backend_pages->p[index].virt
					    + (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(backend_pages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
				'\0', 1);
	} else {
		_lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
					len, 0, pad);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
	offset += len - 1;
	_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
}
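
/*
 * Usage sketch (illustrative; "ustr" is a hypothetical __user string
 * pointer): same contract as lib_ring_buffer_strcpy, reading from
 * userspace. If "ustr" is unreadable, the slot is filled with len - 1
 * pad characters followed by '\0':
 *
 *	lib_ring_buffer_strcpy_from_user_inatomic(config, &ctx, ustr, 16, '#');
 */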
/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads nor writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	struct lib_ring_buffer_backend *bufb = &buf->backend;
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long records_unread = 0, sb_bindex, id;
	unsigned int i;

	for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
		id = bufb->buf_wsb[i].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	return records_unread;
}
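
/*
 * Usage sketch (illustrative; typically called from a read-side or
 * teardown path while the buffer is quiescent):
 *
 *	unsigned long unread =
 *		lib_ring_buffer_get_records_unread(config, buf);
 */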
/*
 * We use __copy_from_user_inatomic to copy userspace data after
 * checking with access_ok() and disabling page faults.
 *
 * Return 0 if OK, nonzero on error.
 */
static inline
unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
						const void __user *src,
						unsigned long len)
{
	unsigned long ret;
	mm_segment_t old_fs;

	if (!lttng_access_ok(VERIFY_READ, src, len))
		return 1;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	set_fs(old_fs);
	return ret;
}
#endif /* _LIB_RING_BUFFER_BACKEND_H */