#ifndef _LIB_RING_BUFFER_BACKEND_H
#define _LIB_RING_BUFFER_BACKEND_H

/*
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <wrapper/uaccess.h>

/* Internal helpers */
#include <wrapper/ringbuffer/backend_internal.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
                                   size_t offset, void *dest, size_t len);

extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
                                          size_t offset, void __user *dest,
                                          size_t len);

extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
                                     size_t offset, void *dest, size_t len);

extern unsigned long *
lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
                             void ***virt);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Since we know
 * it is never on a page boundary, it is safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
                               size_t offset);
extern void *
lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
                                    size_t offset);

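/*
 * Illustrative sketch (not part of the API): patching a field of the
 * current subbuffer header in place. "struct packet_header", "buf",
 * "chan", "subbuf_index" and "data_size" are hypothetical caller-side
 * names:
 *
 *    struct packet_header *header;
 *
 *    header = lib_ring_buffer_offset_address(&buf->backend,
 *                    subbuf_index * chan->backend.subbuf_size);
 *    header->content_size = data_size;
 *
 * This is safe because the header never spans a page boundary and is
 * smaller than a page.
 */
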
/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
 * if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
                           struct lib_ring_buffer_ctx *ctx,
                           const void *src, size_t len)
{
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;

        if (unlikely(!len))
                return;
        backend_pages =
                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
        if (likely(pagecpy == len))
                lib_ring_buffer_do_copy(config,
                                        backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, len);
        else
                _lib_ring_buffer_write(bufb, offset, src, len, 0);
        ctx->buf_offset += len;
}

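/*
 * Usage sketch (illustrative only): after reserving space, event fields
 * are appended back to back; "payload" and "payload_len" are hypothetical:
 *
 *    u32 id = 42;
 *
 *    lib_ring_buffer_write(config, ctx, &id, sizeof(id));
 *    lib_ring_buffer_write(config, ctx, payload, payload_len);
 *
 * Each call advances ctx->buf_offset, so successive writes are contiguous.
 */
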
/**
 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @c: the byte to write
 * @len: number of bytes to write
 *
 * This function writes "len" bytes of "c" to a buffer backend, at the current
 * context offset. This is more or less a buffer backend-specific memset()
 * operation. Calls the slow path (_ring_buffer_memset) if the write crosses a
 * page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;

        if (unlikely(!len))
                return;
        backend_pages =
                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
        if (likely(pagecpy == len))
                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                          + (offset & ~PAGE_MASK),
                                          c, len);
        else
                _lib_ring_buffer_memset(bufb, offset, c, len, 0);
        ctx->buf_offset += len;
}

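/*
 * Usage sketch (illustrative only): zero-filling the padding needed to
 * align the next field on 8 bytes; the gap computation here is the
 * caller's own, not part of this API:
 *
 *    size_t gap = (-ctx->buf_offset) & 7;
 *
 *    if (gap)
 *            lib_ring_buffer_memset(config, ctx, 0, gap);
 */
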
/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with NULL terminating character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
                                 char *dest, const char *src, size_t len)
{
        size_t count;

        for (count = 0; count < len; count++) {
                char c;

                /*
                 * Only read source character once, in case it is
                 * modified concurrently.
                 */
                c = READ_ONCE(src[count]);
                if (!c)
                        break;
                lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
        }
        return count;
}

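/*
 * Behavior sketch (hypothetical buffers): copying the literal "hi" with
 * an 8-byte destination writes exactly two bytes and reports the count:
 *
 *    char out[8];
 *    size_t n;
 *
 *    n = lib_ring_buffer_do_strcpy(config, out, "hi", sizeof(out));
 *
 * Here n == 2; out[2..7] are left untouched and no '\0' is written.
 */
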
/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src, or when a fault occurs.
 * Returns the number of bytes copied. Does *not* terminate @dest with
 * NULL terminating character.
 *
 * This function deals with userspace pointers; it should never be called
 * directly without first checking the src pointer with access_ok().
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
                char *dest, const char __user *src, size_t len)
{
        size_t count;

        for (count = 0; count < len; count++) {
                int ret;
                char c;

                ret = __copy_from_user_inatomic(&c, src + count, 1);
                if (ret || !c)
                        break;
                lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
        }
        return count;
}

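/*
 * Call-pattern sketch (illustrative only), mirroring the checks the fast
 * paths below perform; "kdest", "usrc" and "n" are hypothetical:
 *
 *    size_t copied;
 *
 *    if (lttng_access_ok(VERIFY_READ, usrc, n)) {
 *            pagefault_disable();
 *            copied = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
 *                            kdest, usrc, n);
 *            pagefault_enable();
 *    }
 */
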
/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 * @pad: character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_ring_buffer_strcpy) if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx,
                            const char *src, size_t len, int pad)
{
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;

        if (unlikely(!len))
                return;
        backend_pages =
                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
        if (likely(pagecpy == len)) {
                size_t count;

                count = lib_ring_buffer_do_strcpy(config,
                                        backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, len - 1);
                offset += count;
                /* Padding */
                if (unlikely(count < len - 1)) {
                        size_t pad_len = len - 1 - count;

                        lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                        offset += pad_len;
                }
                /* Ending '\0' */
                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                '\0', 1);
        } else {
                _lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
        }
        ctx->buf_offset += len;
}

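/*
 * Usage sketch (illustrative only): recording a fixed-width, '#'-padded
 * process name into a hypothetical 16-byte event field:
 *
 *    lib_ring_buffer_strcpy(config, ctx, current->comm, 16, '#');
 *
 * The field always consumes 16 bytes: up to 15 name bytes, then '#'
 * padding if the name is shorter, then a terminating '\0'.
 */
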
/**
 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: userspace source pointer to copy from
 * @len: length of data to copy
 *
 * This function copies "len" bytes of data from a userspace pointer to a
 * buffer backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_ring_buffer_write_from_user_inatomic) if the copy crosses a page boundary.
 * Disables the page fault handler to ensure we never try to take the mmap_sem.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
                                    struct lib_ring_buffer_ctx *ctx,
                                    const void __user *src, size_t len)
{
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
        unsigned long ret;
        mm_segment_t old_fs = get_fs();

        if (unlikely(!len))
                return;
        backend_pages =
                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

        set_fs(KERNEL_DS);
        pagefault_disable();
        if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
                goto fill_buffer;

        if (likely(pagecpy == len)) {
                ret = lib_ring_buffer_do_copy_from_user_inatomic(
                        backend_pages->p[index].virt + (offset & ~PAGE_MASK),
                        src, len);
                if (unlikely(ret > 0)) {
                        /* Copy failed. */
                        goto fill_buffer;
                }
        } else {
                _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
        }
        pagefault_enable();
        set_fs(old_fs);
        ctx->buf_offset += len;

        return;

fill_buffer:
        pagefault_enable();
        set_fs(old_fs);
        /*
         * In the error path we call the slow path version to avoid
         * the pollution of static inline code.
         */
        _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}

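/*
 * Usage sketch (illustrative only): capturing n bytes from a hypothetical
 * user pointer "ubuf" while filling an event:
 *
 *    lib_ring_buffer_copy_from_user_inatomic(config, ctx, ubuf, n);
 *
 * On a fault the reserved space is zero-filled instead of aborting the
 * event, so readers always see a fully written record.
 */
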
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: userspace source pointer to copy from
 * @len: length of data to copy
 * @pad: character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a userspace
 * source pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_ring_buffer_strcpy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to
 * take the mmap_sem.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
                struct lib_ring_buffer_ctx *ctx,
                const void __user *src, size_t len, int pad)
{
        struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
        struct channel_backend *chanb = &ctx->chan->backend;
        size_t index, pagecpy;
        size_t offset = ctx->buf_offset;
        struct lib_ring_buffer_backend_pages *backend_pages;
        mm_segment_t old_fs = get_fs();

        if (unlikely(!len))
                return;
        backend_pages =
                lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

        set_fs(KERNEL_DS);
        pagefault_disable();
        if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
                goto fill_buffer;

        if (likely(pagecpy == len)) {
                size_t count;

                count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
                                        backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, len - 1);
                offset += count;
                /* Padding */
                if (unlikely(count < len - 1)) {
                        size_t pad_len = len - 1 - count;

                        lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                        offset += pad_len;
                }
                /* Ending '\0' */
                lib_ring_buffer_do_memset(backend_pages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                '\0', 1);
        } else {
                _lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
                                        len, 0, pad);
        }
        pagefault_enable();
        set_fs(old_fs);
        ctx->buf_offset += len;

        return;

fill_buffer:
        pagefault_enable();
        set_fs(old_fs);
        /*
         * In the error path we call the slow path version to avoid
         * the pollution of static inline code.
         */
        _lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
        _lib_ring_buffer_memset(bufb, offset + len - 1, '\0', 1, 0);
}

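/*
 * Usage sketch (illustrative only): recording a user-supplied string into
 * a hypothetical 32-byte field, '#'-padded:
 *
 *    lib_ring_buffer_strcpy_from_user_inatomic(config, ctx, ustr, 32, '#');
 *
 * If reading "ustr" faults, the field is filled with 31 pad bytes followed
 * by '\0', keeping the record layout intact.
 */
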
/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads nor writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
                                const struct lib_ring_buffer_config *config,
                                struct lib_ring_buffer *buf)
{
        struct lib_ring_buffer_backend *bufb = &buf->backend;
        struct lib_ring_buffer_backend_pages *pages;
        unsigned long records_unread = 0, sb_bindex, id;
        unsigned int i;

        for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
                id = bufb->buf_wsb[i].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                pages = bufb->array[sb_bindex];
                records_unread += v_read(config, &pages->records_unread);
        }
        if (config->mode == RING_BUFFER_OVERWRITE) {
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
                pages = bufb->array[sb_bindex];
                records_unread += v_read(config, &pages->records_unread);
        }
        return records_unread;
}

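/*
 * Usage sketch (illustrative only): once tracing is stopped, a consumer
 * could use this to decide whether another read pass is needed;
 * "consume_one_subbuffer" is a hypothetical helper:
 *
 *    while (lib_ring_buffer_get_records_unread(config, buf))
 *            consume_one_subbuffer(buf);
 */
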
/*
 * We use __copy_from_user_inatomic to copy userspace data after
 * checking with access_ok() and disabling page faults.
 *
 * Return 0 if OK, nonzero on error.
 */
static inline
unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
                                                const const void __user *src,
                                                unsigned long len)
{
        unsigned long ret;
        mm_segment_t old_fs;

        if (!lttng_access_ok(VERIFY_READ, src, len))
                return 1;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, src, len);
        pagefault_enable();
        set_fs(old_fs);
        return ret;
}

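/*
 * Usage sketch (illustrative only): snapshotting a small userspace object
 * from a probe; "struct foo" and "uaddr" are hypothetical:
 *
 *    struct foo tmp;
 *
 *    if (lib_ring_buffer_copy_from_user_check_nofault(&tmp, uaddr,
 *                    sizeof(tmp)))
 *            memset(&tmp, 0, sizeof(tmp));
 */
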
#endif /* _LIB_RING_BUFFER_BACKEND_H */