/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ringbuffer/frontend_api.h
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
#define _LIB_RING_BUFFER_FRONTEND_API_H

#include <ringbuffer/frontend.h>
#include <wrapper/percpu-defs.h>
#include <linux/errno.h>
#include <linux/prefetch.h>

/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Disables preemption (acts as a RCU read-side critical section) and keeps a
 * ring buffer nesting count as a supplementary safety net to ensure tracer
 * client code will never trigger an endless recursion. Returns the processor
 * ID on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
	barrier();

	if (unlikely(nesting > RING_BUFFER_MAX_NESTING)) {
		WARN_ON_ONCE(1);
		per_cpu(lib_ring_buffer_nesting, cpu)--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	} else
		return cpu;
}

/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_put_cpu(const struct lttng_kernel_ring_buffer_config *config)
{
	barrier();
	(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
	rcu_read_unlock_sched_notrace();
}
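
/*
 * Usage sketch (illustrative only, not part of this header): tracer client
 * code brackets each reserve/commit attempt with get_cpu/put_cpu. The
 * "my_config" identifier is a hypothetical placeholder for the client's
 * ring buffer configuration.
 *
 *	int cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(my_config);
 *	if (cpu < 0)
 *		return;		recursion guard hit: drop the event
 *	... reserve, write payload, commit ...
 *	lib_ring_buffer_put_cpu(my_config);
 */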

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * Returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_kernel_ring_buffer_config *config,
				struct lttng_kernel_ring_buffer_ctx *ctx,
				void *client_ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->priv.tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->priv.tsc))
		ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->priv.slot_size = record_header_size(config, chan, *o_begin,
						 before_hdr_pad, ctx, client_ctx);
	ctx->priv.slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->priv.slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->priv.slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->priv.slot_size;

	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return:
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer_ctx *ctx,
			    void *client_ctx)
{
	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
	struct lttng_kernel_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (unlikely(atomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
	else
		buf = chan->backend.buf;
	if (unlikely(atomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx->priv.buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
						 &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (unlikely(v_cmpxchg(config, &ctx->priv.buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->priv.buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->priv.buf->backend,
				    subbuf_index(o_end - 1, chan));

	ctx->priv.pre_offset = o_begin;
	ctx->priv.buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
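
/*
 * Illustrative sketch (not part of the API): handling the return codes above.
 * The error-accounting step and the "put_cpu" label are hypothetical; only
 * lib_ring_buffer_reserve() is from this header.
 *
 *	ret = lib_ring_buffer_reserve(config, ctx, client_ctx);
 *	if (ret) {
 *		... account event as lost (-EAGAIN, -ENOSPC, -ENOBUFS, -EIO) ...
 *		goto put_cpu;
 *	}
 *	... write record header and payload, then commit ...
 */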

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer.
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH).
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * requires to be executed locally for per-CPU buffers, this function must be
 * called from the CPU which owns the buffer for an ACTIVE flush, with
 * preemption disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
{
	lib_ring_buffer_switch_slow(buf, mode);
}
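
/*
 * Illustrative sketch: forcing out the current packet of a per-CPU buffer.
 * Per the note above, an ACTIVE switch with RING_BUFFER_SYNC_PER_CPU is
 * assumed to run on the CPU owning "buf", with preemption disabled:
 *
 *	preempt_disable();
 *	lib_ring_buffer_switch(config, buf, SWITCH_ACTIVE);
 *	preempt_enable();
 */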

/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_kernel_ring_buffer_config *config,
			    const struct lttng_kernel_ring_buffer_ctx *ctx)
{
	struct lttng_kernel_ring_buffer_channel *chan = ctx->priv.chan;
	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
	unsigned long offset_end = ctx->priv.buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
		/*
		 * Must write slot data before incrementing commit count. This
		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
		 * by get_subbuf().
		 */
		barrier();
	} else
		smp_wmb();

	v_add(config, ctx->priv.slot_size, &cc_hot->cc);

	/*
	 * commit count read can race with concurrent OOO commit count updates.
	 * This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, ctx->priv.tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
					     offset_end, commit_count, cc_hot);
}
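
/*
 * Illustrative sketch of the reserve/write/commit pairing. The backend write
 * helper shown (lib_ring_buffer_write(), declared in ringbuffer/backend.h)
 * and the "payload" variable are assumptions for the example.
 *
 *	if (lib_ring_buffer_reserve(config, ctx, client_ctx))
 *		return;		event lost
 *	lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, ctx);
 */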

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_kernel_ring_buffer_config *config,
					const struct lttng_kernel_ring_buffer_ctx *ctx)
{
	struct lttng_kernel_ring_buffer *buf = ctx->priv.buf;
	unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
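
/*
 * Illustrative sketch of the discard-or-commit pattern: when a reserved
 * record turns out to be unneeded (e.g. rejected by a filter), try to
 * discard it; if the discard fails, the record must still be committed,
 * as documented above.
 *
 *	if (lib_ring_buffer_try_discard_reserve(config, ctx))
 *		lib_ring_buffer_commit(config, ctx);
 */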

static inline
void channel_record_disable(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer_channel *chan)
{
	atomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lttng_kernel_ring_buffer_config *config,
			   struct lttng_kernel_ring_buffer_channel *chan)
{
	atomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lttng_kernel_ring_buffer_config *config,
				    struct lttng_kernel_ring_buffer *buf)
{
	atomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lttng_kernel_ring_buffer_config *config,
				   struct lttng_kernel_ring_buffer *buf)
{
	atomic_dec(&buf->record_disabled);
}
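
/*
 * Illustrative sketch: temporarily pausing record writes on a whole channel.
 * While disabled, writers get -EAGAIN from lib_ring_buffer_reserve(); readers
 * are unaffected.
 *
 *	channel_record_disable(config, chan);
 *	... quiescent work ...
 *	channel_record_enable(config, chan);
 */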

#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */