/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ust-ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_ring_buffer_backend *bufb,
                struct channel_backend *chan,
                int cpu,
                struct lttng_ust_shm_handle *handle,
                struct shm_object *shmobj)
        __attribute__((visibility("hidden")));

void channel_backend_unregister_notifiers(struct channel_backend *chanb)
        __attribute__((visibility("hidden")));

void lib_ring_buffer_backend_free(struct lttng_ust_ring_buffer_backend *bufb)
        __attribute__((visibility("hidden")));

int channel_backend_init(struct channel_backend *chanb,
                const char *name,
                const struct lttng_ust_ring_buffer_config *config,
                size_t subbuf_size,
                size_t num_subbuf, struct lttng_ust_shm_handle *handle,
                const int *stream_fds)
        __attribute__((visibility("hidden")));

void channel_backend_free(struct channel_backend *chanb,
                struct lttng_ust_shm_handle *handle)
        __attribute__((visibility("hidden")));

void lib_ring_buffer_backend_reset(struct lttng_ust_ring_buffer_backend *bufb,
                struct lttng_ust_shm_handle *handle)
        __attribute__((visibility("hidden")));

void channel_backend_reset(struct channel_backend *chanb)
        __attribute__((visibility("hidden")));

int lib_ring_buffer_backend_init(void)
        __attribute__((visibility("hidden")));

void lib_ring_buffer_backend_exit(void)
        __attribute__((visibility("hidden")));

extern void _lib_ring_buffer_write(struct lttng_ust_ring_buffer_backend *bufb,
                size_t offset, const void *src, size_t len,
                ssize_t pagecpy)
        __attribute__((visibility("hidden")));
/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS         (CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT      (HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT      (1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK       (~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT       (SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT       (1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK        SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT       0
#define SB_ID_INDEX_COUNT       (1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK        (SB_ID_NOREF_COUNT - 1)
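
/*
 * Illustrative decomposition (a sketch derived from the macros above, not
 * additional API): on a 64-bit system (CAA_BITS_PER_LONG == 64),
 * HALF_ULONG_BITS == 32, so an overwrite-mode subbuffer ID lays out as:
 *
 *   bits 63..33  "offset" count of produced buffers (31 bits)
 *   bit  32      noref flag
 *   bits 31..0   subbuffer index (32 bits)
 *
 * For example, subbuffer_id(config, 5, 1, 2) below yields
 * (5UL << SB_ID_OFFSET_SHIFT) | SB_ID_NOREF_MASK | 2UL: offset count 5,
 * noref set, subbuffer index 2.
 */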

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_ring_buffer_config *config,
                unsigned long offset, unsigned long noref,
                unsigned long index)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (offset << SB_ID_OFFSET_SHIFT)
                       | (noref << SB_ID_NOREF_SHIFT)
                       | index;
        else
                return index;
}

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(
                const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
                unsigned long id, unsigned long offset)
{
        return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_ring_buffer_config *config,
                unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return id & SB_ID_INDEX_MASK;
        else
                return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_ring_buffer_config *config,
                unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return !!(id & SB_ID_NOREF_MASK);
        else
                return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_ring_buffer_config *config,
                unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
                unsigned long *id, unsigned long offset)
{
        unsigned long tmp;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                tmp = *id;
                tmp &= ~SB_ID_OFFSET_MASK;
                tmp |= offset << SB_ID_OFFSET_SHIFT;
                tmp |= SB_ID_NOREF_MASK;
                /* Volatile store, read concurrently by readers. */
                CMM_ACCESS_ONCE(*id) = tmp;
        }
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_ring_buffer_config *config,
                unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_ring_buffer_config *config,
                unsigned long num_subbuf)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
        else
                return 0;
}
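
/*
 * Illustrative example (a sketch, hypothetical values): on a 32-bit system,
 * subbuffer_id_check_index(config, 1UL << 16) returns 0, since 65536
 * subbuffers still fit in the 16-bit index field, whereas 1UL << 17 exceeds
 * the cap and returns -EPERM. In producer-consumer mode the whole word holds
 * the index, so any count is accepted.
 */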

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_ctx *ctx,
                struct lttng_ust_ring_buffer_backend_pages **backend_pages)
{
        struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
        struct lttng_ust_ring_buffer_backend *bufb = &ctx_private->buf->backend;
        struct channel_backend *chanb = &ctx_private->chan->backend;
        struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
        size_t sbidx;
        size_t offset = ctx_private->buf_offset;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
        unsigned long sb_bindex, id;
        struct lttng_ust_ring_buffer_backend_pages *_backend_pages;

        offset &= chanb->buf_size - 1;
        sbidx = offset >> chanb->subbuf_size_order;
        wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
        if (caa_unlikely(!wsb))
                return -1;
        id = wsb->id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (caa_unlikely(!rpages))
                return -1;
        CHAN_WARN_ON(ctx_private->chan,
                config->mode == RING_BUFFER_OVERWRITE
                && subbuffer_id_is_noref(config, id));
        _backend_pages = shmp(handle, rpages->shmp);
        if (caa_unlikely(!_backend_pages))
                return -1;
        *backend_pages = _backend_pages;
        return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(
                const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
                struct lttng_ust_ring_buffer_ctx *ctx)
{
        return ctx->priv->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config,
                const struct lttng_ust_ring_buffer_ctx *ctx,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        if (caa_unlikely(!backend_pages)) {
                if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
                        return;
        }
        v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
                const struct lttng_ust_ring_buffer_ctx *ctx __attribute__((unused)),
                struct lttng_ust_ring_buffer_backend *bufb __attribute__((unused)),
                unsigned long idx __attribute__((unused)),
                struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
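
/*
 * Build-time knob (illustrative note, assuming the usual autotools build):
 * per-subbuffer event counting is compiled in only when
 * LTTNG_RING_BUFFER_COUNT_EVENTS is defined, e.g.:
 *
 *   ./configure CPPFLAGS="-DLTTNG_RING_BUFFER_COUNT_EVENTS"
 *
 * Otherwise subbuffer_count_record() above compiles to an empty inline and
 * the records_commit increment disappears from the fast path.
 */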

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_ring_buffer_channel *chan;
        struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return;
        CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
        /* Non-atomic decrement protected by exclusive subbuffer access */
        _v_dec(config, &backend_pages->records_unread);
        v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
                const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because this function resets the
 * records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
                const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long overruns, sb_bindex;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        overruns = v_read(config, &backend_pages->records_unread);
        v_set(config, &backend_pages->records_unread,
              v_read(config, &backend_pages->records_commit));
        v_set(config, &backend_pages->records_commit, 0);

        return overruns;
}
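
/*
 * Illustrative call order at delivery (a sketch of the contract stated above,
 * with hypothetical locals):
 *
 *   count = subbuffer_get_records_count(config, bufb, idx, handle);
 *   overruns = subbuffer_count_records_overrun(config, bufb, idx, handle);
 *
 * The second call returns the previous records_unread value as the overrun
 * count, copies records_commit into records_unread, then zeroes
 * records_commit; reading the commit count afterwards would return 0.
 */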

static inline
void subbuffer_set_data_size(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx,
                unsigned long data_size,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return;
        backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
                const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return 0;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
                const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(
                const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_ring_buffer_backend_counts *counts;

        counts = shmp_index(handle, bufb->buf_cnt, idx);
        if (!counts)
                return;
        counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long id, new_id;
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        /*
         * Performing a volatile access to read the sb_pages, because we want to
         * read a coherent version of the pointer and the associated noref flag.
         */
        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        id = CMM_ACCESS_ONCE(wsb->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (caa_likely(!subbuffer_id_is_noref(config, id))) {
                        /*
                         * Store after load dependency ordering the writes to
                         * the subbuffer after load and test of the noref flag
                         * matches the memory barrier implied by the cmpxchg()
                         * in update_read_sb_index().
                         */
                        return; /* Already writing to this buffer */
                }
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
                new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
                if (caa_likely(new_id == id))
                        break;
                id = new_id;
        }
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                unsigned long idx, unsigned long offset,
                struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_ring_buffer_channel *chan;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        /*
         * Because ring_buffer_set_noref() is only called by a single thread
         * (the one which updated the cc_sb value), there are no concurrent
         * updates to take care of: other writers have not updated cc_sb, so
         * they cannot set the noref flag, and concurrent readers cannot modify
         * the pointer because the noref flag is not set yet.
         * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
         * to the subbuffer before this set noref operation.
         * subbuffer_set_noref() uses a volatile store to deal with concurrent
         * readers of the noref flag.
         */
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
        /*
         * Memory barrier that ensures counter stores are ordered before set
         * noref and offset.
         */
        cmm_smp_mb();
        subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_ust_ring_buffer_backend *bufb,
                struct channel_backend *chanb __attribute__((unused)),
                unsigned long consumed_idx,
                unsigned long consumed_count,
                struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
        unsigned long old_id, new_id;

        wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
        if (caa_unlikely(!wsb))
                return -EPERM;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                struct lttng_ust_ring_buffer_channel *chan;

                /*
                 * Exchange the target writer subbuffer with our own unused
                 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
                old_id = wsb->id;
                if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
                 * Make sure the offset count we are expecting matches the one
                 * indicated by the writer.
                 */
                if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
                                consumed_count)))
                        return -EAGAIN;
                chan = shmp(handle, bufb->chan);
                if (caa_unlikely(!chan))
                        return -EPERM;
                CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
                subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
                                consumed_count);
                new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
                if (caa_unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
                /* No page exchange, use the writer page directly */
                bufb->buf_rsb.id = wsb->id;
        }
        return 0;
}
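
/*
 * Illustrative walk-through of the overwrite-mode exchange above (a sketch,
 * with hypothetical values): suppose subbuffer index 3 was delivered with
 * offset count 5, so its buf_wsb slot holds { offset = 5, noref = 1,
 * index = 3 }. The reader, owning a spare subbuffer with index 7, calls
 * update_read_sb_index(..., consumed_idx = 3, consumed_count = 5, ...). It
 * tags buf_rsb.id as { offset = 5, noref = 1, index = 7 } and cmpxchg()es it
 * into the buf_wsb slot. On success, the reader owns index 3 exclusively and
 * the writer will reuse index 7 when it next wraps onto that slot. If the
 * writer has meanwhile cleared noref or delivered the slot with a different
 * offset count, the checks or the cmpxchg() fail and -EAGAIN tells the
 * caller to retry.
 */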

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)     memcpy(dest, src, n)
#endif

static inline
void lttng_inline_memcpy(void *dest, const void *src,
                unsigned long len)
        __attribute__((always_inline));
static inline
void lttng_inline_memcpy(void *dest, const void *src,
                unsigned long len)
{
        switch (len) {
        case 1:
                *(uint8_t *) dest = *(const uint8_t *) src;
                break;
        case 2:
                *(uint16_t *) dest = *(const uint16_t *) src;
                break;
        case 4:
                *(uint32_t *) dest = *(const uint32_t *) src;
                break;
        case 8:
                *(uint64_t *) dest = *(const uint64_t *) src;
                break;
        default:
                inline_memcpy(dest, src, len);
        }
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy when the length is not statically
 * known. The function call to memcpy is just way too expensive for a fast
 * path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)         \
do {                                                            \
        size_t __len = (len);                                   \
        if (__builtin_constant_p(len))                          \
                memcpy(dest, src, __len);                       \
        else                                                    \
                lttng_inline_memcpy(dest, src, __len);          \
} while (0)
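
/*
 * Illustrative example (a sketch, not additional API): for a fixed-size
 * field the length is a compile-time constant, so the copy compiles down to
 * the architecture memcpy (typically a single load/store pair):
 *
 *   uint64_t v = 42;
 *   lib_ring_buffer_do_copy(config, dest, &v, sizeof(v));
 *
 * With a runtime length (e.g. a string payload), __builtin_constant_p()
 * evaluates to 0 and lttng_inline_memcpy() handles the common 1/2/4/8-byte
 * sizes with single stores before falling back to inline_memcpy().
 */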

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
{
        unsigned long i;

        for (i = 0; i < len; i++)
                dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xFFFF0000U)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xFF000000U)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xF0000000U)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xC0000000U)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000U)) {
                /* No need to bit shift on last operation */
                r -= 1;
        }
        return r;
}
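
/*
 * Illustrative trace (a sketch): lttng_ust_fls(0x00000090) returns 8, the
 * 1-based position of the most significant set bit. Starting from r = 32,
 * the top 16 bits are clear (shift left by 16, r = 16), the next 8 bits are
 * clear (shift left by 8, r = 8), and bit 31 is then set, so no further
 * adjustment applies. lttng_ust_fls(0) returns 0 by convention.
 */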

static inline int get_count_order(unsigned int count)
{
        int order;

        order = lttng_ust_fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}
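
/*
 * Illustrative examples (a sketch): get_count_order() returns the ceiling of
 * log2(count), i.e. the order of the enclosing power of two:
 *
 *   get_count_order(8) == 3   (8 is already a power of two)
 *   get_count_order(9) == 4   (9 & 8 != 0, so the order is rounded up)
 *
 * Callers are expected to pass a non-zero count (count == 0 yields -1).
 */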

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */