/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H

#include <ringbuffer/config.h>
#include <ringbuffer/backend_types.h>
#include <ringbuffer/frontend_types.h>
#include <lttng/prio_heap.h>	/* For per-CPU read-side iterator */

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
			struct lttng_kernel_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
			    struct lttng_kernel_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
			 struct lttng_kernel_ring_buffer_channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
			    struct lttng_kernel_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
			   struct lttng_kernel_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
			   struct lttng_kernel_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
			   struct lttng_kernel_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

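/*
 * Worked example for the offset helpers above (hypothetical values, for
 * illustration only): assume subbuf_size = 4096 (subbuf_size_order = 12)
 * and buf_size = 4 * 4096 = 16384. For the free-running offset 13000:
 *
 *	buf_offset(13000, chan)    = 13000 & (16384 - 1)          = 13000
 *	subbuf_offset(13000, chan) = 13000 & (4096 - 1)           = 712
 *	subbuf_trunc(13000, chan)  = 13000 & ~(4096 - 1)          = 12288
 *	subbuf_index(13000, chan)  = 13000 >> 12                  = 3
 *	subbuf_align(13000, chan)  = (13000 + 4096) & ~(4096 - 1) = 16384
 *
 * The masks extract the position within the buffer or subbuffer, while
 * the truncations keep the high-order bits identifying the (sub)buffer.
 */
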
/*
 * Last timestamp comparison functions. Check if the current timestamp
 * overflows timestamp_bits bits from the last timestamp read. When
 * overflows are detected, the full 64-bit timestamp counter should be
 * written in the record header. Reads and writes last_timestamp
 * atomically.
 */

#if (BITS_PER_LONG == 32)
static inline
void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
			 struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
	if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_timestamp,
	      (unsigned long)(timestamp >> config->timestamp_bits));
}

static inline
int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
	unsigned long timestamp_shifted;

	if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
		return 0;

	timestamp_shifted = (unsigned long)(timestamp >> config->timestamp_bits);
	if (unlikely(timestamp_shifted
		     - (unsigned long)v_read(config, &buf->last_timestamp)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_timestamp(const struct lttng_kernel_ring_buffer_config *config,
			 struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
	if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
		return;

	v_set(config, &buf->last_timestamp, (unsigned long)timestamp);
}

static inline
int last_timestamp_overflow(const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer *buf, u64 timestamp)
{
	if (config->timestamp_bits == 0 || config->timestamp_bits == 64)
		return 0;

	if (unlikely((timestamp - v_read(config, &buf->last_timestamp))
		     >> config->timestamp_bits))
		return 1;
	else
		return 0;
}
#endif

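/*
 * Usage sketch (illustrative only; the real call sites live in the
 * reserve fast path of ring_buffer_frontend.c). Given a timestamp just
 * read from the trace clock, the writer decides whether the record
 * header needs the full 64-bit value before saving the new timestamp:
 *
 *	if (last_timestamp_overflow(config, buf, timestamp))
 *		... write the full 64-bit timestamp in the record header ...
 *	save_last_timestamp(config, buf, timestamp);
 */
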
extern
int lib_ring_buffer_reserve_slow(struct lttng_kernel_ring_buffer_ctx *ctx,
				 void *client_ctx);

extern
void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf,
				 enum switch_mode mode);

extern
void lib_ring_buffer_check_deliver_slow(const struct lttng_kernel_ring_buffer_config *config,
					struct lttng_kernel_ring_buffer *buf,
					struct lttng_kernel_ring_buffer_channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					const struct lttng_kernel_ring_buffer_ctx *ctx);

extern void lib_ring_buffer_switch_remote(struct lttng_kernel_ring_buffer *buf);
extern void lib_ring_buffer_switch_remote_empty(struct lttng_kernel_ring_buffer *buf);
extern void lib_ring_buffer_clear(struct lttng_kernel_ring_buffer *buf);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_kernel_ring_buffer *buf,
					 struct lttng_kernel_ring_buffer_channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader
		 * consumed count when the write position has caught up
		 * with it, but not on the first iteration (never push
		 * the reader farther than the writer). Many writers may
		 * do this concurrently on the same buffer; the writer
		 * at the farthest write position sub-buffer index wins
		 * the loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

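/*
 * Example (hypothetical numbers): with buf_size = 4 subbuffers of 4096
 * bytes, a writer reserving at offset 20480 while consumed_old is 4096
 * sees subbuf_trunc(20480) - subbuf_trunc(4096) = 16384 >= buf_size, so
 * it pushes the reader to subbuf_align(4096, chan) = 8192, freeing the
 * oldest subbuffer for overwrite. When several writers race on the
 * cmpxchg, the one at the farthest write position wins the loop.
 */
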
/*
 * Move the consumed position to the beginning of the subbuffer that
 * contains the write offset. Should only be used on ring buffers that
 * are not actively being written into, because clear_reader does not
 * take the commit counters into account when moving the consumed
 * position, which can make concurrent trace producers or consumers
 * observe a consumed position beyond the write offset, breaking the
 * ring buffer algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_kernel_ring_buffer *buf,
				  struct lttng_kernel_ring_buffer_channel *chan)
{
	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
	unsigned long offset, consumed_old, consumed_new;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = atomic_long_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
				   < 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

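/*
 * Intended use (sketch, under the quiescence constraint stated above):
 * only call on a buffer with no active writers, e.g. when clearing a
 * ring buffer, paired with an empty remote switch along the lines of:
 *
 *	lib_ring_buffer_switch_remote_empty(buf);
 *	lib_ring_buffer_clear_reader(buf, chan);
 */
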
static inline
int lib_ring_buffer_pending_data(const struct lttng_kernel_ring_buffer_config *config,
				 struct lttng_kernel_ring_buffer *buf,
				 struct lttng_kernel_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_kernel_ring_buffer_config *config,
					    struct lttng_kernel_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}

/*
 * Check if all space reservations in a buffer have been committed. This
 * helps to know whether an execution context is nested (for per-CPU
 * buffers only). This is a very specific ftrace use-case, so we keep
 * this as an "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_kernel_ring_buffer_config *config,
				      struct lttng_kernel_ring_buffer *buf,
				      struct lttng_kernel_ring_buffer_channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

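/*
 * Usage sketch (per-CPU buffers): tracing code can probe whether it
 * interrupted an in-flight reservation on the same CPU:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan))
 *		... nested over an uncommitted reservation ...
 *
 * A nonzero return means every reserved slot has been committed, i.e.
 * the current execution context is not nested inside a writer.
 */
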
/*
 * Receive the end-of-subbuffer timestamp as a parameter. It has been
 * read in the space reservation loop of either reserve or switch, which
 * ensures it progresses monotonically with the event records in the
 * buffer. Therefore, the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffer.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_kernel_ring_buffer_config *config,
				   struct lttng_kernel_ring_buffer *buf,
				   struct lttng_kernel_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   const struct lttng_kernel_ring_buffer_ctx *ctx)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done. */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, ctx);
}

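/*
 * Worked example (hypothetical numbers): with 4 subbuffers of 4096
 * bytes (num_subbuf_order = 2), take an offset inside subbuffer 0 on
 * the second pass through the buffer, say 16500: buf_trunc(16500, chan)
 * = 16384 and 16384 >> 2 = 4096. The slow path is taken only when the
 * commit counter reaches 8192 = 2 * subbuf_size, i.e. when subbuffer 0
 * has been fully committed for the second time; every other commit
 * returns immediately after the unlikely() test.
 */
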
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_kernel_ring_buffer_config *config,
					  struct lttng_kernel_ring_buffer *buf,
					  struct lttng_kernel_ring_buffer_channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because the offset is never zero
	 * here (subbuffer header and record headers have non-zero
	 * length).
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}

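/*
 * Sketch (hypothetical numbers): with subbuf_size = 4096, a commit that
 * brings commit_count to 12288 while buf_offset is also 12288 yields
 * subbuf_offset(buf_offset - commit_count, chan) == 0, so commit_seq is
 * advanced to 12288, recording that everything up to that offset can be
 * recovered by a crash-dump consumer. If a concurrent writer already
 * pushed commit_seq further, the signed comparison keeps the larger
 * value.
 */
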
extern int lib_ring_buffer_create(struct lttng_kernel_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lttng_kernel_ring_buffer *buf);

/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);

#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */