/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

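/*
 * Worked example (illustrative values, not part of the original header):
 * assume buf_size = 16384 (buf_size_order = 14) and subbuf_size = 4096
 * (subbuf_size_order = 12), both powers of two as the mask arithmetic
 * above requires. For offset = 0x5123:
 *
 *	buf_trunc(0x5123, chan)     == 0x4000 (buffer-aligned offset)
 *	buf_trunc_val(0x5123, chan) == 1      (buffer number counter)
 *	buf_offset(0x5123, chan)    == 0x1123 (offset within the buffer)
 *	subbuf_offset(0x5123, chan) == 0x123  (offset within the subbuffer)
 *	subbuf_trunc(0x5123, chan)  == 0x5000 (subbuffer-aligned offset)
 *	subbuf_align(0x5123, chan)  == 0x6000 (start of the next subbuffer)
 *	subbuf_index(0x5123, chan)  == 1      (second of 4 subbuffers)
 */
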
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */
#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif

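/*
 * Illustrative example (hypothetical tsc_bits value, not from the
 * original header): with config->tsc_bits = 16, record headers carry only
 * the low 16 bits of the timestamp. If last_tsc was saved at 0x12340000,
 * a new tsc of 0x1234ffff does not overflow ((tsc - last_tsc) >> 16 == 0),
 * so the compact timestamp suffices; a new tsc of 0x12350000 does
 * overflow, so the full 64-bit timestamp must be written in the record
 * header. A typical reserve fast path uses the pair like this:
 *
 *	if (caa_unlikely(last_tsc_overflow(config, buf, tsc)))
 *		(flag the record so the full 64-bit timestamp is written)
 *	save_last_tsc(config, buf, tsc);
 */
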
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx);

extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

extern
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
					struct lttng_ust_lib_ring_buffer *buf,
					struct channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					struct lttng_ust_shm_handle *handle,
					uint64_t tsc);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by
		 * many writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index is the one which wins this
		 * loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
			      - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

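/*
 * Illustrative example (hypothetical values): with buf_size = 16384 and
 * subbuf_size = 4096, a writer about to reserve at offset = 0x9000 while
 * consumed_old = 0x5000 observes
 * subbuf_trunc(0x9000, chan) - subbuf_trunc(0x5000, chan) == 0x4000
 * >= buf_size, so it pushes the reader to subbuf_align(0x5000, chan)
 * == 0x6000, retrying the cmpxchg until the consumed count is no longer a
 * full buffer behind the write position.
 */
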
/*
 * Move the consumed position to the beginning of the subbuffer containing
 * the write offset. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take the
 * commit counters into account when moving the consumed position:
 * concurrent trace producers or consumers could observe a consumed
 * position beyond the write offset, which breaks the ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
				- subbuf_trunc(consumed_old, chan))
				< 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

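/*
 * Usage sketch (hypothetical caller, for illustration only): a teardown
 * or re-initialization path that has already quiesced all producers could
 * call lib_ring_buffer_clear_reader(buf, handle) to discard unread data
 * by snapping the consumed position to the subbuffer holding the write
 * offset. Per the comment above, never call this while the buffer is
 * actively written into.
 */
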
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing whether an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}

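/*
 * Usage sketch (hypothetical caller, for illustration only): a per-cpu
 * client probing whether it nests over another execution context on the
 * same buffer could do:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan, handle))
 *		(an outer reservation is still uncommitted: nested context)
 *
 * The do/while loop above re-reads the offset around the commit count
 * read, so the (offset, commit_count) pair is consistent even if an
 * interrupt performs a reserve/commit between the two reads.
 */
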
/*
 * Receive the end-of-subbuffer TSC as a parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with the event records in the buffer. Therefore,
 * the end timestamp of a subbuffer is <= the begin timestamp of the
 * following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, handle, tsc);
}

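/*
 * Illustrative reading (not from the original header): when the subbuffer
 * containing offset has just been fully committed for the n-th buffer
 * pass, commit_count == (n + 1) * subbuf_size, hence old_commit_count
 * == n * subbuf_size, which equals
 * buf_trunc(offset, chan) >> chan->backend.num_subbuf_order for that
 * pass. Only then does the caa_unlikely() check fire and hand the
 * subbuffer to lib_ring_buffer_check_deliver_slow() for delivery.
 */
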
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function updates the subbuffer's commit_seq counter each time the
 * commit count catches up to the reserve offset (modulo subbuffer size).
 * It is useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}

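/*
 * Illustrative example (hypothetical values): with subbuf_size = 4096, a
 * writer whose reserve offset reached buf_offset = 0x2000 and whose commit
 * brought the commit counter to commit_count = 0x2000 sees
 * subbuf_offset(0x2000 - 0x2000, chan) == 0: the commit count has caught
 * up with the reserve offset, so commit_seq advances to 0x2000. If other
 * writers still have uncommitted space in between, the subtraction is
 * non-zero modulo subbuf_size and commit_seq is left untouched, keeping
 * it trustworthy for crash-dump readers.
 */
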
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */