#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * linux/ringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

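/*
 * Worked example (editorial note, hypothetical sizes): for a channel with
 * buf_size = 64 kB (buf_size_order = 16), subbuf_size = 16 kB
 * (subbuf_size_order = 14) and a free-running offset = 0x15234:
 *
 *	buf_trunc(0x15234, chan)	= 0x10000  (buffer start)
 *	buf_trunc_val(0x15234, chan)	= 1        (buffer counter)
 *	buf_offset(0x15234, chan)	= 0x05234  (offset within buffer)
 *	subbuf_trunc(0x15234, chan)	= 0x14000  (subbuffer start)
 *	subbuf_offset(0x15234, chan)	= 0x01234  (offset within subbuffer)
 *	subbuf_align(0x15234, chan)	= 0x18000  (next subbuffer start)
 *	subbuf_index(0x15234, chan)	= 1        (second subbuffer in buffer)
 *
 * All sizes are powers of two, so each helper reduces to a single mask or
 * shift operation.
 */
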
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif

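/*
 * Example (editorial note, illustrative values): with tsc_bits = 27, record
 * headers carry only the low 27 bits of the timestamp. On 64-bit, the full
 * last_tsc is kept, and an overflow is flagged whenever the delta reaches
 * 2^27: last_tsc = 0x07ffffff with tsc = 0x08000000 gives a delta of 1
 * (compressed timestamp suffices), while tsc = 0x10000000 gives a delta
 * >= 2^27 (full 64-bit timestamp written). On 32-bit, only tsc >> tsc_bits
 * is kept, so the same pair 0x07ffffff/0x08000000 *does* flag an overflow:
 * crossing a 2^27 boundary is detected rather than the exact distance, a
 * conservative approximation that errs toward writing the full timestamp.
 */
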
extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
				 enum switch_mode mode);

extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);

extern
void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

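/*
 * Worked example (editorial note, hypothetical values): overwrite mode,
 * buf_size = 0x10000, subbuf_size = 0x4000, consumed_old = 0x4100, and a
 * writer reserving at offset = 0x14200. Then subbuf_trunc(offset, chan)
 * - subbuf_trunc(consumed_old, chan) = 0x14000 - 0x4000 = 0x10000
 * >= buf_size, so the reader is pushed to subbuf_align(0x4100, chan) =
 * 0x8000, the next subbuffer boundary. The cmpxchg loop only retries if
 * another writer (or the reader) moved "consumed" underneath us.
 */
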
static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  unsigned long commit_count,
					  unsigned long idx)
{
	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
		v_set(config, &buf->commit_hot[idx].seq, commit_count);
}

static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has already
	 * been fully committed.
	 */
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}

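/*
 * Reading the full-commit test above (editorial note): cc_sb is
 * free-running, advancing by exactly subbuf_size each time its slot is
 * filled, and buf_trunc(consumed_old, chan) >> num_subbuf_order rescales
 * the consumed position into that same per-slot space. A zero difference
 * (modulo commit_count_mask) therefore means the writer has fully
 * committed the consumed subbuffer for the current pass; anything else
 * means that subbuffer is still being filled.
 */
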
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * determine whether an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

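/*
 * Usage sketch (editorial note; hypothetical caller, not part of this API):
 * from interrupt context on the same CPU,
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan))
 *		handle_nested_context();
 *
 * a zero return means the interrupted context still holds reserved but
 * uncommitted space, i.e. the interrupt is nested over in-flight per-cpu
 * tracing code (handle_nested_context() is hypothetical).
 */
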
/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   u64 tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeeded at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. Deals with concurrent
		 * updates of the "cc" value without adding an add_return atomic
		 * operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important for counting record overruns locklessly
		 *   in flight recorder mode.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
		 * This guarantees that old_commit_count + 1 != commit_count.
		 */

		/*
		 * Order prior updates to reserve count prior to the
		 * commit_cold cc_sb update.
		 */
		smp_wmb();
		if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
				     old_commit_count, old_commit_count + 1)
			   == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend, idx),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									    buf,
									    idx));

			/*
			 * Increment the packet counter while we have exclusive
			 * access.
			 */
			subbuffer_inc_packet_count(config, &buf->backend, idx);

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan));

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also wrt concurrent readers.
			 */
			smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &buf->commit_cold[idx].cc_sb,
			      commit_count);
			/*
			 * Order later updates to reserve count after
			 * the commit_cold cc_sb update.
			 */
			smp_wmb();
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							     commit_count, idx);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && atomic_long_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
				wake_up_interruptible(&buf->read_wait);
				wake_up_interruptible(&chan->read_wait);
			}
		}
	}
}

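/*
 * Delivery timeline sketch (editorial note, illustrative numbers): take
 * subbuf_size = 0x4000 and a finished subbuffer whose final commit_count
 * is 0x8000, hence old_commit_count = 0x4000.
 *
 *	1) cmpxchg(cc_sb: 0x4000 -> 0x4001) succeeds for exactly one
 *	   writer, which gains exclusive access; to every other writer the
 *	   subbuffer still looks not fully committed, so wrap-around
 *	   writers must drop records rather than corrupt it.
 *	2) counters, the noref flag and the offset are published, then
 *	   cc_sb is set to 0x8000, releasing the subbuffer to readers or
 *	   to overwrite.
 *
 * Since subbuf_size >= 2, the claim value 0x4001 can never equal the
 * release value 0x8000, so the intermediate state is unambiguous.
 */
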
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count catches up with the reserve offset (modulo subbuffer size). It
 * is useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
					   commit_seq_old, commit_count);
}

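/*
 * Example of the monotonic update above (editorial note, hypothetical
 * values): a committer with commit_count = 0x4000 reads commit_seq_old =
 * 0x3000; (long)(0x3000 - 0x4000) < 0, so it attempts
 * cmpxchg(seq: 0x3000 -> 0x4000). If a concurrent committer already
 * stored 0x5000, the cmpxchg returns 0x5000 and the loop exits because
 * (long)(0x5000 - 0x4000) >= 0: seq only ever moves forward, keeping the
 * largest commit count observed.
 */
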
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);

/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);

#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */