#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   struct channel_backend *chan, int cpu,
                                   struct lttng_ust_shm_handle *handle,
                                   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
                         const char *name,
                         const struct lttng_ust_lib_ring_buffer_config *config,
                         size_t subbuf_size,
                         size_t num_subbuf, struct lttng_ust_shm_handle *handle,
                         const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
                          struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
                                   size_t offset, const void *src, size_t len,
                                   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

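/*
 * Illustrative layout (worked example, not part of the API): on a 64-bit
 * system, CAA_BITS_PER_LONG == 64, so HALF_ULONG_BITS == 32 and the subbuffer
 * ID word splits as follows in overwrite mode:
 *
 *   bits 33..63  offset  (SB_ID_OFFSET_MASK == 0xfffffffe00000000)
 *   bit  32      noref   (SB_ID_NOREF_MASK  == 0x0000000100000000)
 *   bits 0..31   index   (SB_ID_INDEX_MASK  == 0x00000000ffffffff)
 *
 * On 32-bit the same split gives the offset in bits 17..31, noref in bit 16,
 * and the index in bits 0..15.
 */
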
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
                           unsigned long offset, unsigned long noref,
                           unsigned long index)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (offset << SB_ID_OFFSET_SHIFT)
                       | (noref << SB_ID_NOREF_SHIFT)
                       | index;
        else
                return index;
}

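/*
 * Example (illustrative only): in overwrite mode on a 64-bit system,
 * subbuffer_id(config, 5, 1, 3) yields (5UL << 33) | (1UL << 32) | 3,
 * i.e. offset 5, the noref flag set, and index 3 packed into one word.
 * In producer-consumer mode the same call simply returns 3.
 */
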
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                unsigned long id, unsigned long offset)
{
        return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
                                     unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return id & SB_ID_INDEX_MASK;
        else
                return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                                    unsigned long id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return !!(id & SB_ID_NOREF_MASK);
        else
                return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                            unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                   unsigned long *id, unsigned long offset)
{
        unsigned long tmp;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                tmp = *id;
                tmp &= ~SB_ID_OFFSET_MASK;
                tmp |= offset << SB_ID_OFFSET_SHIFT;
                tmp |= SB_ID_NOREF_MASK;
                /* Volatile store, read concurrently by readers. */
                CMM_ACCESS_ONCE(*id) = tmp;
        }
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                              unsigned long *id)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                *id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
                             unsigned long num_subbuf)
{
        if (config->mode == RING_BUFFER_OVERWRITE)
                return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
        else
                return 0;
}

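/*
 * For instance (illustrative): on a 32-bit system, num_subbuf == 65536 (2^16)
 * is accepted, while 65537 makes subbuffer_id_check_index() return -EPERM,
 * because the subbuffer index would no longer fit in SB_ID_INDEX_MASK.
 */
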
/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
                            const struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            struct lttng_ust_lib_ring_buffer_backend *bufb,
                            unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
        if (caa_unlikely(!backend_pages)) {
                if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
                        return;
        }
        v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
                            const struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            struct lttng_ust_lib_ring_buffer_backend *bufb,
                            unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

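/*
 * Note: defining LTTNG_RING_BUFFER_COUNT_EVENTS at build time (e.g. by adding
 * -DLTTNG_RING_BUFFER_COUNT_EVENTS to the preprocessor flags) selects the
 * counting implementation above; otherwise the empty stub is compiled in and
 * per-record counting is skipped on the fast path.
 */
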
/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
                              struct lttng_ust_lib_ring_buffer_backend *bufb,
                              struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct channel *chan;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return;
        CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
        /* Non-atomic decrement protected by exclusive subbuffer access */
        _v_dec(config, &backend_pages->records_unread);
        v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
                const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because this function resets the
 * records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
                const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long overruns, sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        overruns = v_read(config, &backend_pages->records_unread);
        v_set(config, &backend_pages->records_unread,
              v_read(config, &backend_pages->records_commit));
        v_set(config, &backend_pages->records_commit, 0);

        return overruns;
}

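/*
 * Typical delivery-time ordering (sketch only, assuming exclusive writer
 * access to the subbuffer):
 *
 *	count = subbuffer_get_records_count(config, bufb, idx, handle);
 *	overruns = subbuffer_count_records_overrun(config, bufb, idx, handle);
 *
 * Reversing the two calls would read a records_commit count that has already
 * been reset to 0.
 */
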
static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
                             struct lttng_ust_lib_ring_buffer_backend *bufb,
                             unsigned long idx,
                             unsigned long data_size,
                             struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return;
        backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
                const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_backend *bufb,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
        if (!pages_shmp)
                return 0;
        backend_pages = shmp(handle, pages_shmp->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
                const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_backend *bufb,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
{
        unsigned long sb_bindex;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
        struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return 0;
        sb_bindex = subbuffer_id_get_index(config, wsb->id);
        rpages = shmp_index(handle, bufb->array, sb_bindex);
        if (!rpages)
                return 0;
        backend_pages = shmp(handle, rpages->shmp);
        if (!backend_pages)
                return 0;
        return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
                                struct lttng_ust_lib_ring_buffer_backend *bufb,
                                unsigned long idx, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_counts *counts;

        counts = shmp_index(handle, bufb->buf_cnt, idx);
        if (!counts)
                return;
        counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct lttng_ust_lib_ring_buffer_backend *bufb,
                                 unsigned long idx,
                                 struct lttng_ust_shm_handle *handle)
{
        unsigned long id, new_id;
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        /*
         * Performing a volatile access to read the sb_pages, because we want
         * to read a coherent version of the pointer and the associated noref
         * flag.
         */
        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        id = CMM_ACCESS_ONCE(wsb->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (caa_likely(!subbuffer_id_is_noref(config, id))) {
                        /*
                         * Store after load dependency ordering the writes to
                         * the subbuffer after load and test of the noref flag
                         * matches the memory barrier implied by the cmpxchg()
                         * in update_read_sb_index().
                         */
                        return; /* Already writing to this buffer */
                }
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
                new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
                if (caa_likely(new_id == id))
                        break;
                id = new_id;
        }
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
                                      struct lttng_ust_lib_ring_buffer_backend *bufb,
                                      unsigned long idx, unsigned long offset,
                                      struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        struct channel *chan;

        if (config->mode != RING_BUFFER_OVERWRITE)
                return;

        wsb = shmp_index(handle, bufb->buf_wsb, idx);
        if (!wsb)
                return;
        /*
         * Because ring_buffer_set_noref() is only called by a single thread
         * (the one which updated the cc_sb value), there are no concurrent
         * updates to take care of: other writers have not updated cc_sb, so
         * they cannot set the noref flag, and concurrent readers cannot modify
         * the pointer because the noref flag is not set yet.
         * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
         * to the subbuffer before this set noref operation.
         * subbuffer_set_noref() uses a volatile store to deal with concurrent
         * readers of the noref flag.
         */
        chan = shmp(handle, bufb->chan);
        if (!chan)
                return;
        CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
        /*
         * Memory barrier that ensures counter stores are ordered before set
         * noref and offset.
         */
        cmm_smp_mb();
        subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                         struct lttng_ust_lib_ring_buffer_backend *bufb,
                         struct channel_backend *chanb,
                         unsigned long consumed_idx,
                         unsigned long consumed_count,
                         struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
        unsigned long old_id, new_id;

        wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
        if (caa_unlikely(!wsb))
                return -EPERM;

        if (config->mode == RING_BUFFER_OVERWRITE) {
                struct channel *chan;

                /*
                 * Exchange the target writer subbuffer with our own unused
                 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
                old_id = wsb->id;
                if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
                 * Make sure the offset count we are expecting matches the one
                 * indicated by the writer.
                 */
                if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
                                                              consumed_count)))
                        return -EAGAIN;
                chan = shmp(handle, bufb->chan);
                if (caa_unlikely(!chan))
                        return -EPERM;
                CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
                subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
                                              consumed_count);
                new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
                if (caa_unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
                /* No page exchange, use the writer page directly */
                bufb->buf_rsb.id = wsb->id;
        }
        return 0;
}

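/*
 * Read-side usage sketch (illustrative; variable names are assumptions, the
 * real caller lives in the frontend's get_subbuf path): the consumer samples
 * the consumed count, then tries to swap its spare subbuffer in, retrying on
 * -EAGAIN because a writer may still own the subbuffer or may have moved the
 * offset forward between the sample and the exchange.
 *
 *	if (update_read_sb_index(config, &buf->backend, &chan->backend,
 *				 consumed_idx, consumed_count, handle))
 *		goto retry_or_bail;
 */
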
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths not known statically.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)

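/*
 * Usage sketch (illustrative): __builtin_constant_p() is evaluated at compile
 * time, so a call such as
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *
 * takes the constant-size memcpy() branch (typically inlined to a few stores
 * by the compiler), whereas a runtime-computed length falls back to
 * inline_memcpy().
 */
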
/*
 * Write len bytes to dest with the value c.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
        unsigned long i;

        for (i = 0; i < len; i++)
                dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xFFFF0000U)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xFF000000U)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xF0000000U)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xC0000000U)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000U)) {
                /* No need to bit shift on last operation */
                r -= 1;
        }
        return r;
}

static inline int get_count_order(unsigned int count)
{
        int order;

        order = lttng_ust_fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}

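/*
 * Worked examples (illustrative): lttng_ust_fls() returns the 1-based
 * position of the most significant set bit, so lttng_ust_fls(0) == 0,
 * lttng_ust_fls(1) == 1 and lttng_ust_fls(0x80000000U) == 32.
 * get_count_order() returns the ceiling of log2: get_count_order(8) == 3 and
 * get_count_order(6) == 3, since 2^3 is the smallest power of two >= 6.
 */
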
#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */