Fix: pad strings that are modified concurrently with tracing
[lttng-ust.git] / libringbuffer / backend_internal.h
#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <errno.h>		/* EAGAIN, EPERM, returned as negative values below */
#include <string.h>		/* memcpy(), used by lib_ring_buffer_do_copy() */
#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>	/* uatomic_cmpxchg() */

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half of the word, except its lowest bit, belongs to "offset", which is
 * used to count the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
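
/*
 * Layout illustration, derived from the definitions above (assuming
 * CAA_BITS_PER_LONG == 64, so SB_ID_OFFSET_SHIFT == 33 and
 * SB_ID_NOREF_SHIFT == 32):
 *
 *	bit 63 ........... bit 33 | bit 32 | bit 31 ............ bit 0
 *	<------- offset --------> | noref  | <-------- index -------->
 *
 * On 32-bit, offset spans bits 17..31, noref is bit 16, index bits 0..15.
 */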

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
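
/*
 * Usage sketch (illustrative, assumes an overwrite-mode config): an id built
 * with subbuffer_id() decomposes back through the accessors below, e.g.
 *
 *	unsigned long id = subbuffer_id(config, 2, 1, 3);
 *
 *	assert(subbuffer_id_get_index(config, id) == 3);
 *	assert(subbuffer_id_is_noref(config, id));
 *	assert(subbuffer_id_compare_offset(config, id, 2));
 */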

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because this function resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
	v_set(config, &shmp(handle, pages->shmp)->records_unread,
	      v_read(config, &shmp(handle, pages->shmp)->records_commit));
	v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);

	return overruns;
}
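
/*
 * Delivery-time ordering sketch (illustrative): per the contract above, read
 * the commit count before folding it into the unread count, e.g.
 *
 *	count = subbuffer_get_records_count(config, &buf->backend, idx, handle);
 *	overruns = subbuffer_count_records_overrun(config, &buf->backend,
 *						   idx, handle);
 */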

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	shmp(handle, pages->shmp)->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the id, because we want a
	 * coherent view of the subbuffer index and the associated noref flag.
	 */
	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency ordering the writes
			 * to the subbuffer after the load and test of the
			 * noref flag matches the memory barrier implied by
			 * the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because lib_ring_buffer_set_noref_offset() is only called by a
	 * single thread (the one which updated the cc_sb value), there are no
	 * concurrent updates to take care of: other writers have not updated
	 * cc_sb, so they cannot set the noref flag, and concurrent readers
	 * cannot modify the pointer because the noref flag is not set yet.
	 * The write memory barrier in the commit path takes care of ordering
	 * writes to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(shmp(handle, bufb->chan),
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
					 bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
	}
	return 0;
}
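
/*
 * Consumer-side usage sketch (illustrative only; variable names are
 * hypothetical and the retry policy is up to the caller): -EAGAIN means the
 * exchange raced with the writer, so the consumer tries again later with a
 * freshly sampled consumed position:
 *
 *	if (update_read_sb_index(config, &buf->backend, &chan->backend,
 *				 consumed_idx, consumed_count, handle))
 *		return -EAGAIN;
 */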

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for length statically unknown.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)
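
/*
 * Usage sketch (illustrative): a compile-time constant length lets the
 * compiler expand the copy inline, while a runtime length falls back on
 * inline_memcpy rather than a memcpy() function call:
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *	lib_ring_buffer_do_copy(config, dest, src, runtime_len);
 */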

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* Architecture-agnostic fls (find last set bit) implementation. */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
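
/*
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, or 0 if no bit is set. Examples: lttng_ust_fls(0) == 0,
 * lttng_ust_fls(1) == 1, lttng_ust_fls(16) == 5,
 * lttng_ust_fls(0x80000000U) == 32.
 */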

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
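
/*
 * get_count_order() returns ceil(log2(count)), used to round subbuffer counts
 * and sizes up to a power of two, e.g. get_count_order(4) == 2 and
 * get_count_order(5) == 3. Note that count == 0 yields -1.
 */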

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */