#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * lib/ringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <wrapper/compiler.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <linux/string.h>
#include <linux/uaccess.h>
/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size,
			 size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   size_t pagecpy);
extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
				    size_t offset, int c, size_t len,
				    size_t pagecpy);
extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
				    size_t offset, const char *src, size_t len,
				    size_t pagecpy, int pad);
extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
						     size_t offset, const void *src,
						     size_t len, size_t pagecpy);
extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
						       size_t offset, const char __user *src, size_t len,
						       size_t pagecpy, int pad);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
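
/*
 * Worked layout example (illustrative, not part of the original source):
 * on a 32-bit system, BITS_PER_LONG = 32, so HALF_ULONG_BITS = 16,
 * SB_ID_OFFSET_SHIFT = 17 and SB_ID_NOREF_SHIFT = 16, which lays out the
 * ID word as:
 *
 *   bits 17..31	offset (15 bits counting produced buffers)
 *   bit  16		noref flag
 *   bits  0..15	subbuffer index (hence the 2^16 subbuffer limit)
 */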

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
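
/*
 * Example with hypothetical values (overwrite mode, 32-bit): calling
 * subbuffer_id(config, 2, 1, 3) yields (2 << 17) | (1 << 16) | 3 = 0x50003.
 * The accessors below recover index 3 and noref 1 from it, and
 * subbuffer_id_compare_offset(config, 0x50003, 2) returns 1.
 */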

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on a subbuffer ID it has exclusive access to. No
 * volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		WRITE_ONCE(*id, tmp);
	}
}

/* No volatile access needed, since the ID is already used locally. */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
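
/*
 * For example, on an assumed 32-bit build (HALF_ULONG_BITS = 16), a request
 * for up to 65536 subbuffers succeeds, while 65537 returns -EPERM because
 * the index would no longer fit in the low half-word of the subbuffer ID.
 */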

static inline
void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
				       struct lib_ring_buffer_ctx *ctx,
				       struct lib_ring_buffer_backend_pages **backend_pages)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, offset = ctx->buf_offset;
	unsigned long sb_bindex, id;
	struct lib_ring_buffer_backend_pages *rpages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*backend_pages = rpages;
}
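
/*
 * Index math, for illustration (hypothetical sizes): with a 1 MiB buffer
 * split into 4 subbuffers of 256 KiB (subbuf_size_order = 18), a
 * buf_offset of 0x48000 masks to 0x48000 and gives
 * sbidx = 0x48000 >> 18 = 1, i.e. the write lands in the second subbuffer.
 */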

/* Get backend pages from cache. */
static inline
struct lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
					   struct lib_ring_buffer_ctx *ctx)
{
	return ctx->backend_pages;
}
/*
 * The ring buffer can count events recorded and overwritten per buffer, but
 * this feature is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
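
/*
 * To collect these counters, LTTNG_RING_BUFFER_COUNT_EVENTS must be defined
 * at build time (e.g. through the build system's CFLAGS, an assumed
 * mechanism); otherwise the empty stub above lets the compiler elide the
 * accounting entirely.
 */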

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_backend *bufb)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(bufb->chan,
		     !v_read(config, &bufb->array[sb_bindex]->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &bufb->array[sb_bindex]->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	return v_read(config, &bufb->array[sb_bindex]->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	overruns = v_read(config, &pages->records_unread);
	v_set(config, &pages->records_unread,
	      v_read(config, &pages->records_commit));
	v_set(config, &pages->records_commit, 0);

	return overruns;
}
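
/*
 * Example rotation (hypothetical counts): if records_commit = 10 and
 * records_unread = 4 at delivery, this returns 4 overruns, carries the 10
 * committed records over into records_unread, and zeroes records_commit
 * for the next pass over this subbuffer.
 */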

static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
			     struct lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	bufb->buf_cnt[idx].seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer ID, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	id = READ_ONCE(bufb->buf_wsb[idx].id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load ordering of writes to the
			 * subbuffer (after the load and test of the noref
			 * flag) matches the memory barrier implied by the
			 * cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
		if (likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because this function is only called by a single thread (the one
	 * which updated the cc_sb value), there are no concurrent updates to
	 * take care of: other writers have not updated cc_sb, so they cannot
	 * set the noref flag, and concurrent readers cannot modify the
	 * pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	CHAN_WARN_ON(bufb->chan,
		     subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	smp_mb();
	subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use READ_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = bufb->buf_wsb[consumed_idx].id;
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(bufb->chan,
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
				 bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
	}
	return 0;
}
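
/*
 * The exchange in a nutshell (overwrite mode): the reader trades its
 * private spare subbuffer (buf_rsb, flagged noref) for the writer-side
 * entry at consumed_idx via cmpxchg(). The swap only succeeds if the
 * writer has not re-entered that subbuffer (noref still set) and the
 * offset count still matches; otherwise -EAGAIN asks the caller to retry.
 */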

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
			 unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}
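
/*
 * Design note: the switch favors the common case, where event fields are
 * 1, 2, 4 or 8 bytes wide, so a single load/store pair typically replaces
 * the generic copy loop on the hot path.
 */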

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths that are not statically
 * known. The function call to memcpy is just way too expensive for a fast
 * path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
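
/*
 * Usage sketch (hypothetical call sites): a constant-sized copy such as
 * lib_ring_buffer_do_copy(config, dest, &header, sizeof(header)) compiles
 * down to a memcpy() the compiler can expand inline, while a runtime-sized
 * payload copy goes through lttng_inline_memcpy().
 */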

/*
 * We use __copy_from_user_inatomic to copy userspace data since we already
 * did the access_ok for the whole range.
 *
 * Return 0 if OK, or the number of bytes not copied on error.
 */
static inline
unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
						const void __user *src,
						unsigned long len)
{
	return __copy_from_user_inatomic(dest, src, len);
}

/*
 * Write 'len' bytes of value 'c' to 'dest'.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c,
			       unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */