/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <stdint.h>		/* for uint64_t used below */

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>

#include <lttng/ringbuffer-context.h>

#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
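
/*
 * Worked example (illustrative values, not a configuration requirement):
 * with buf_size = 4096 (buf_size_order = 12), subbuf_size = 1024
 * (subbuf_size_order = 10) and a free-running write offset of 0x1a33:
 *
 *   buf_trunc(0x1a33, chan)     = 0x1000  (buffer-aligned part)
 *   buf_trunc_val(0x1a33, chan) = 1       (buffer number: 0x1000 >> 12)
 *   buf_offset(0x1a33, chan)    = 0x0a33  (offset within the buffer)
 *   subbuf_offset(0x1a33, chan) = 0x0233  (offset within the subbuffer)
 *   subbuf_trunc(0x1a33, chan)  = 0x1800  (subbuffer-aligned part)
 *   subbuf_align(0x1a33, chan)  = 0x1c00  (start of the next subbuffer)
 *   subbuf_index(0x1a33, chan)  = 2       (0x0a33 >> 10)
 *
 * All sizes are powers of two, so each helper is a single mask or shift.
 */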
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
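
/*
 * Example (tsc_bits value assumed for illustration): with tsc_bits = 27,
 * record headers only need the low 27 timestamp bits as long as
 * successive timestamps stay within the same window as the saved
 * last_tsc. On 32-bit, last_tsc_overflow() reports an overflow when any
 * bit above bit 26 differs from the saved value; on 64-bit, when the
 * delta since last_tsc reaches 2^27. Either way, the writer then emits
 * the full 64-bit timestamp in the record header.
 */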
__attribute__((visibility("hidden")))
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		void *client_ctx);

__attribute__((visibility("hidden")))
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

__attribute__((visibility("hidden")))
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct lttng_ust_lib_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc);
/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct lttng_ust_lib_ring_buffer_channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
			      - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
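
/*
 * Illustrative scenario (sizes assumed): with buf_size = 4096 and
 * subbuf_size = 1024, a writer whose reserve offset is a full buffer
 * (>= 4096 bytes) ahead of the subbuffer holding the consumed position
 * pushes the reader: consumed is advanced to the next subbuffer boundary
 * via subbuf_align(), discarding the oldest subbuffer. The cmpxchg loop
 * lets concurrent writers race safely; the farthest writer wins.
 */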
/*
 * Move the consumed position to the beginning of the subbuffer in which
 * the write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position, which
 * can make concurrent trace producers or consumers observe the consumed
 * position further ahead than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
				- subbuf_trunc(consumed_old, chan))
				< 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * determine whether an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct lttng_ust_lib_ring_buffer_channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}
/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct lttng_ust_lib_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done. */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
			commit_count, idx, handle, tsc);
}
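
/*
 * Illustrative note (size assumed): with subbuf_size = 1024, each fully
 * committed pass over a subbuffer adds exactly 1024 to its hot commit
 * counter, so old_commit_count above matches the write position only for
 * the commit that completes the subbuffer. That single last committer
 * takes the slow path and delivers the subbuffer; every other commit
 * returns without calling lib_ring_buffer_check_deliver_slow().
 */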
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct lttng_ust_lib_ring_buffer_channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
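
/*
 * Worked example (values assumed): with subbuf_size = 1024, suppose
 * buf_offset = 5 * 1024 + 0x233 after a commit, while the cumulative
 * commit count for that subbuffer is congruent to 0x233 modulo 1024.
 * Then subbuf_offset(buf_offset - commit_count, chan) == 0: the commit
 * count has caught up with the reserve offset, so commit_seq is bumped
 * to commit_count. The signed (long) comparison keeps commit_seq
 * monotonic when several writers race through this path.
 */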
__attribute__((visibility("hidden")))
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);

__attribute__((visibility("hidden")))
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
__attribute__((visibility("hidden")))
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */