/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */
#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <stdint.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */
/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
			    struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
			 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
			    struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
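
/*
 * Worked example (hypothetical geometry, for illustration only): with
 * buf_size = 4 MiB (buf_size_order = 22) holding four 1 MiB subbuffers
 * (subbuf_size_order = 20), an offset of 0x523000 decomposes as:
 *
 *   buf_trunc(0x523000, chan)     = 0x400000  (start of current buffer)
 *   buf_trunc_val(0x523000, chan) = 1         (buffer wrap counter)
 *   buf_offset(0x523000, chan)    = 0x123000  (offset within the buffer)
 *   subbuf_trunc(0x523000, chan)  = 0x500000  (start of current subbuffer)
 *   subbuf_index(0x523000, chan)  = 1         (second subbuffer of four)
 *   subbuf_offset(0x523000, chan) = 0x023000  (offset within the subbuffer)
 *   subbuf_align(0x523000, chan)  = 0x600000  (next subbuffer boundary)
 *
 * All sizes are powers of two, so these are pure mask/shift operations.
 */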
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */
#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
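
/*
 * Worked example (hypothetical value; tsc_bits comes from the client
 * configuration): with config->tsc_bits = 27, record headers carry a
 * 27-bit truncated timestamp. On 64-bit, an overflow is flagged when
 * the delta since last_tsc no longer fits in 27 bits:
 *
 *   tsc - last_tsc = 0x0000002  ->  delta >> 27 == 0, compact header is ok
 *   tsc - last_tsc = 0x8000001  ->  delta >> 27 != 0, write full 64-bit tsc
 *
 * The 32-bit variant compares the values shifted right by tsc_bits
 * instead, avoiding 64-bit arithmetic on the fast path.
 */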
__attribute__((visibility("hidden")))
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx);
__attribute__((visibility("hidden")))
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);
__attribute__((visibility("hidden")))
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
					struct lttng_ust_lib_ring_buffer *buf,
					struct lttng_ust_lib_ring_buffer_channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					struct lttng_ust_shm_handle *handle,
					uint64_t tsc);
/* Buffer write helpers */
static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct lttng_ust_lib_ring_buffer_channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				 >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
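
/*
 * Worked example (hypothetical geometry): buf_size = 4 MiB holding four
 * 1 MiB subbuffers, consumed_old = 0x100000, write offset reaching
 * 0x500000. subbuf_trunc(0x500000) - subbuf_trunc(0x100000) = 0x400000,
 * which is >= buf_size, so the writer is about to overwrite the
 * subbuffer the reader still points to: the consumed count is pushed to
 * subbuf_align(0x100000, chan) = 0x200000. Concurrent writers may race
 * on the cmpxchg; the one at the farthest write position wins the loop.
 */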
/*
 * Move consumed position to the beginning of subbuffer in which the
 * write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position, which
 * can make concurrent trace producers or consumers observe consumed
 * position further than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
				   < 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct lttng_ust_lib_ring_buffer_channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
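
/*
 * Usage sketch (illustrative): a probe running in a signal handler can
 * call this on its per-cpu buffer; a return value of 0 means the
 * handler interrupted another writer between reserve and commit on the
 * same buffer, i.e. the execution context is nested.
 */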
/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct lttng_ust_lib_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, handle, tsc);
}
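
/*
 * Note (illustrative): this inline check is only a fast-path filter.
 * Subtracting chan->backend.subbuf_size yields the commit count as of
 * the previous fill of this subbuffer; when it lines up with the write
 * offset position, the subbuffer may be fully committed, and
 * lib_ring_buffer_check_deliver_slow() re-validates before delivering.
 */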
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct lttng_ust_lib_ring_buffer_channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
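
/*
 * Worked example (hypothetical values): with subbuf_size = 1 MiB, a
 * commit bringing commit_count to 0x300000 while buf_offset (the
 * reserve offset) is also 0x300000 satisfies
 * subbuf_offset(buf_offset - commit_count, chan) == 0, so commit_seq
 * advances to 0x300000 and a crash-dump reader can trust all data up to
 * that sequence number. Out-of-order commits leave commit_seq untouched
 * thanks to the monotonicity check on commit_seq_old.
 */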
__attribute__((visibility("hidden")))
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);
__attribute__((visibility("hidden")))
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);
/* Keep track of trap nesting inside ring buffer code */
__attribute__((visibility("hidden")))
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */