Fix: use unaligned pointer accesses for lttng_inline_memcpy
[lttng-ust.git] / src / common / ringbuffer / backend_internal.h
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ust-ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_ring_buffer_backend *bufb,
		struct channel_backend *chan,
		int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj)
	__attribute__((visibility("hidden")));

void channel_backend_unregister_notifiers(struct channel_backend *chanb)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_free(struct lttng_ust_ring_buffer_backend *bufb)
	__attribute__((visibility("hidden")));

int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds)
	__attribute__((visibility("hidden")));

void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_reset(struct lttng_ust_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

void channel_backend_reset(struct channel_backend *chanb)
	__attribute__((visibility("hidden")));

int lib_ring_buffer_backend_init(void)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_backend_exit(void)
	__attribute__((visibility("hidden")));

extern void _lib_ring_buffer_write(struct lttng_ust_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy)
	__attribute__((visibility("hidden")));

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * keep count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for
 * 64-bit systems) concurrently with a single execution of get_subbuf
 * (between offset sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
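
/*
 * Illustrative layout of the resulting word on a 64-bit architecture
 * (CAA_BITS_PER_LONG == 64, hence HALF_ULONG_BITS == 32,
 * SB_ID_OFFSET_SHIFT == 33 and SB_ID_NOREF_SHIFT == 32):
 *
 *   bits 63..33: offset (31 bits, produced subbuffer count)
 *   bit  32:     noref flag
 *   bits 31..0:  index (subbuffer index within the buffer)
 */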

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
			| (noref << SB_ID_NOREF_SHIFT)
			| index;
	else
		return index;
}
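
/*
 * Worked example (illustrative, overwrite mode on a 64-bit architecture):
 * subbuffer_id(config, 5, 1, 3) == (5UL << 33) | (1UL << 32) | 3
 *                               == 0xb00000003.
 * subbuffer_id_get_index() below recovers 3 from that id, and
 * subbuffer_id_is_noref() reports 1.
 */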

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
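
/*
 * For instance (illustrative), on a 32-bit target in overwrite mode,
 * num_subbuf == 1UL << 17 exceeds the 2^16 index capacity and yields -EPERM,
 * whereas num_subbuf == 1UL << 16 still fits and yields 0.
 */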

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_ring_buffer_config *config,
		const struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_ring_buffer_backend *bufb = &ctx_private->buf->backend;
	struct channel_backend *chanb = &ctx_private->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
	size_t sbidx;
	size_t offset = ctx_private->buf_offset;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx_private->chan,
		config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}
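
/*
 * Note (illustrative): the bitwise AND with (buf_size - 1) above assumes
 * buf_size is a power of two, which the channel backend constrains at
 * creation time; the shift by subbuf_size_order then maps the masked offset
 * to its subbuffer index. E.g. with a 64 kB buffer split into four 16 kB
 * subbuffers, offset 0x4100 masks to 0x4100 and shifts down to sbidx == 1.
 */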

/* Get backend pages from cache. */
static inline
struct lttng_ust_ring_buffer_backend_pages *
	lib_ring_buffer_get_backend_pages_from_ctx(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		const struct lttng_ust_ring_buffer_ctx *ctx)
{
	return ctx->priv->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config,
		const struct lttng_ust_ring_buffer_ctx *ctx)
{
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		const struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)))
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_ring_buffer_channel *chan;
	struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
		v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_backend *bufb,
		struct channel_backend *chanb __attribute__((unused)),
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_ring_buffer_channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
				consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
				consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
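
/*
 * Summary of the overwrite-mode exchange above (illustrative): the consumer
 * owns one spare subbuffer (buf_rsb). get_subbuf() samples consumed_count,
 * then the cmpxchg() swaps the spare into the writer-side table slot and
 * hands the slot's previous subbuffer to the reader. The writer keeps
 * writing into the swapped-in spare while the reader consumes the
 * swapped-out subbuffer without risk of it being overwritten.
 */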

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

#define LOAD_UNALIGNED_INT(type, p) \
	({ \
		struct packed_struct { type __v; } __attribute__((packed)); \
		(((const struct packed_struct *) (p))->__v); \
	})

#define STORE_UNALIGNED_INT(type, p, v) \
	do { \
		struct packed_struct { type __v; } __attribute__((packed)); \
		((struct packed_struct *) (p))->__v = (v); \
	} while (0)
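
/*
 * The packed struct above tells the compiler the pointer may be unaligned,
 * so it emits whatever access sequence is safe for the target (a plain
 * unaligned load/store on x86, byte-wise or dedicated unaligned instructions
 * on strict-alignment architectures) instead of an aligned access that would
 * be undefined behaviour. Illustrative use:
 *
 *   uint16_t v = LOAD_UNALIGNED_INT(uint16_t, buf + 1);
 *   // behaves like: uint16_t v; memcpy(&v, buf + 1, sizeof(v));
 */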

/*
 * Copy from src into dest, assuming unaligned src and dest.
 */
static inline
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
	__attribute__((always_inline));
static inline
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		STORE_UNALIGNED_INT(uint16_t, dest, LOAD_UNALIGNED_INT(uint16_t, src));
		break;
	case 4:
		STORE_UNALIGNED_INT(uint32_t, dest, LOAD_UNALIGNED_INT(uint32_t, src));
		break;
	case 8:
		STORE_UNALIGNED_INT(uint64_t, dest, LOAD_UNALIGNED_INT(uint64_t, src));
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths not statically known.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)	\
do {							\
	size_t __len = (len);				\
	if (__builtin_constant_p(len))			\
		memcpy(dest, src, __len);		\
	else						\
		lttng_inline_memcpy(dest, src, __len);	\
} while (0)
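
/*
 * For instance (illustrative): copying an 8-byte event header with
 * lib_ring_buffer_do_copy(config, p, &header, sizeof(header)) takes the
 * constant-size memcpy() path the compiler can fully inline, while a
 * variable-length payload copy goes through lttng_inline_memcpy(), which
 * itself falls back to inline_memcpy() for sizes other than 1, 2, 4 and 8.
 */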

/*
 * write len bytes to dest with c
 */
static inline
void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
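
/*
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, e.g. (illustrative): lttng_ust_fls(0) == 0, lttng_ust_fls(1) == 1,
 * lttng_ust_fls(8) == 4, lttng_ust_fls(0x80000000U) == 32.
 */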

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
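
/*
 * get_count_order() computes ceil(log2(count)), i.e. the order of the
 * enclosing power of two, e.g. (illustrative): get_count_order(8) == 3,
 * get_count_order(9) == 4. This is how subbuffer counts and sizes can be
 * normalized to powers of two.
 */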

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */