/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free
 * algorithms.
 * See frontend.h for channel allocation and read-side API.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
#define _LTTNG_RING_BUFFER_FRONTEND_API_H

#include <stddef.h>

#include <urcu/compiler.h>

#include "frontend.h"
/**
 * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
 *
 * The ring buffer nesting count is a safety net to ensure tracer
 * client code will never trigger an endless recursion.
 * Returns a nesting level >= 0 on success, -EPERM on failure (nesting
 * count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
static inline
int lib_ring_buffer_nesting_inc(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	int nesting;

	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
	cmm_barrier();
	if (caa_unlikely(nesting >= LIB_RING_BUFFER_MAX_NESTING)) {
		WARN_ON_ONCE(1);
		URCU_TLS(lib_ring_buffer_nesting)--;
		return -EPERM;
	}
	return nesting - 1;
}
static inline
int lib_ring_buffer_nesting_count(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	return URCU_TLS(lib_ring_buffer_nesting);
}
static inline
void lib_ring_buffer_nesting_dec(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	cmm_barrier();
	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
}
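/*
 * Example (illustrative sketch, not part of this API): a hypothetical
 * probe guards the code that may recurse (e.g. through a trap taken
 * inside the probe itself) between nesting_inc and nesting_dec, dropping
 * the event when the nesting limit is reached. "write_one_event" is a
 * placeholder for client code.
 *
 *	static
 *	void probe_sketch(const struct lttng_ust_lib_ring_buffer_config *config)
 *	{
 *		if (lib_ring_buffer_nesting_inc(config) < 0)
 *			return;			// too deep: drop the event
 *		write_one_event();		// may itself trigger a probe
 *		lib_ring_buffer_nesting_dec(config);
 *	}
 */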
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * Returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				void *client_ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx_private->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx_private->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit seq.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx_private->tsc))
		ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx_private->slot_size = record_header_size(config, chan, *o_begin,
						    before_hdr_pad, ctx, client_ctx);
	ctx_private->slot_size +=
		lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
				ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx_private->slot_size;

	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}
/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return :
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    void *client_ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_shm_handle *handle = chan->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		ctx_private->reserve_cpu = lttng_ust_get_cpu();
		buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
	} else {
		buf = shmp(handle, chan->backend.buf[0].shmp);
	}
	if (caa_unlikely(!buf))
		return -EIO;
	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx_private->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
						     &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(v_cmpxchg(config, &buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, buf, ctx_private->tsc);

	/*
	 * Push the reader if necessary
	 */
	lib_ring_buffer_reserve_push_reader(buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &buf->backend,
				subbuf_index(o_end - 1, chan), handle);

	ctx_private->pre_offset = o_begin;
	ctx_private->buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
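/*
 * Example (illustrative sketch, not part of this header): the expected
 * fast-path sequence on the write side is reserve, write, commit.
 * "config", "ctx" and "payload" are assumed to be set up by the client;
 * NULL stands in for a client-private context. lib_ring_buffer_write()
 * is the backend copy helper; lib_ring_buffer_commit() is defined below.
 *
 *	int ret;
 *
 *	ret = lib_ring_buffer_reserve(config, ctx, NULL);
 *	if (ret)
 *		return ret;	// -EAGAIN, -ENOSPC, -ENOBUFS or -EIO
 *	lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, ctx);
 */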
/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer.
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant : can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * requires to be executed locally for per-CPU buffers, this function must be
 * called from the CPU which owns the buffer for an ACTIVE flush, with
 * preemption disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
		struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}
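/*
 * Example (illustrative sketch): flushing the current sub-buffer to the
 * consumer, e.g. on tracing teardown. SWITCH_FLUSH also pushes out a
 * partially filled sub-buffer; per the note above, an ACTIVE switch on a
 * RING_BUFFER_SYNC_PER_CPU buffer must run on the owning CPU.
 *
 *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
 */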
/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_shm_handle *handle = chan->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
	unsigned long offset_end = ctx_private->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = shmp_index(handle,
						buf->commit_hot, endidx);

	if (caa_unlikely(!cc_hot))
		return;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	v_add(config, ctx_private->slot_size, &cc_hot->cc);

	/*
	 * commit count read can race with concurrent OOO commit count updates.
	 * This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause :
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, handle, ctx_private->tsc);

	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
			offset_end, commit_count, handle, cc_hot);
}
/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
	unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
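/*
 * Example (illustrative sketch): a client that decides after reservation
 * that a record should be dropped (e.g. a hypothetical post-reserve
 * filter, "event_passes_filter" below) tries to discard it; when the
 * discard loses the race against a concurrent writer, the record must be
 * committed instead.
 *
 *	if (!event_passes_filter(ctx)) {	// hypothetical helper
 *		if (lib_ring_buffer_try_discard_reserve(config, ctx))
 *			lib_ring_buffer_commit(config, ctx);
 *	} else {
 *		lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
 *		lib_ring_buffer_commit(config, ctx);
 *	}
 */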
static inline
void channel_record_disable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}
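/*
 * Example (illustrative sketch): blacking out a whole channel around a
 * critical section. The disable state is a counter, so paired
 * disable/enable calls from multiple threads nest; reservations
 * attempted in between fail with -EAGAIN.
 *
 *	channel_record_disable(config, chan);
 *	// ... lib_ring_buffer_reserve() now returns -EAGAIN ...
 *	channel_record_enable(config, chan);
 */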
#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */