#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * linux/ringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
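
/*
 * Worked example (illustrative only, values are hypothetical): consider a
 * channel whose backend is configured with buf_size = 16384 bytes
 * (buf_size_order = 14) split into four 4096-byte subbuffers
 * (subbuf_size_order = 12, num_subbuf_order = 2). The helpers above
 * decompose an offset of 0x5a10 as follows:
 *
 *	buf_trunc(0x5a10, chan)     == 0x5a10 & ~0x3fff           == 0x4000
 *	buf_trunc_val(0x5a10, chan) == 0x4000 >> 14               == 1
 *	buf_offset(0x5a10, chan)    == 0x5a10 & 0x3fff            == 0x1a10
 *	subbuf_offset(0x5a10, chan) == 0x5a10 & 0xfff             == 0xa10
 *	subbuf_trunc(0x5a10, chan)  == 0x5a10 & ~0xfff            == 0x5000
 *	subbuf_align(0x5a10, chan)  == (0x5a10 + 0x1000) & ~0xfff == 0x6000
 *	subbuf_index(0x5a10, chan)  == 0x1a10 >> 12               == 1
 */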
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
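
/*
 * Worked example (illustrative only, the tsc_bits value is hypothetical):
 * with config->tsc_bits == 27, record headers normally carry only the low
 * 27 timestamp bits. On 64-bit, last_tsc_overflow() reports an overflow as
 * soon as the delta since the last saved timestamp no longer fits in 27
 * bits:
 *
 *	tsc - last_tsc == 0x64       -> delta >> 27 == 0: compact timestamp
 *	tsc - last_tsc == 0x8000000  -> delta >> 27 == 1: write full 64-bit TSC
 *
 * The 32-bit variant is more conservative: it compares the truncated
 * (tsc >> tsc_bits) values, so any change in the high-order timestamp bits
 * forces a full 64-bit timestamp in the record header.
 */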
extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
				 enum switch_mode mode);

extern
void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
					struct lib_ring_buffer *buf,
					struct channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					u64 tsc);

extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
extern
void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index is the one which wins this
		 * loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
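
/*
 * Worked example (illustrative only, values are hypothetical): with
 * buf_size == 16384 and subbuf_size == 4096, a writer about to reserve at
 * offset 0x5200 while the reader has consumed up to 0x1100 computes
 * subbuf_trunc(0x5200) - subbuf_trunc(0x1100) == 0x5000 - 0x1000 ==
 * 0x4000 >= buf_size, so the reader is pushed forward to
 * subbuf_align(0x1100) == 0x2000, freeing the subbuffer the writer is
 * about to overwrite.
 */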
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * to know whether an execution context is nested (for per-CPU buffers only).
 * This is a very specific ftrace use-case, so we keep this as an "internal"
 * API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
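
/*
 * Usage sketch (hypothetical caller, not part of this header): a per-CPU
 * probe could use this helper to detect that it nests over another writer
 * on the same buffer, e.g.:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan))
 *		...	// a reservation is still uncommitted: nested context
 */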
/*
 * Receive the end-of-subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   u64 tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done. */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, tsc);
}
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording; must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
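
/*
 * Worked example (illustrative only, values are hypothetical): with
 * subbuf_size == 4096, a commit ending at buf_offset == 0x2100 with a
 * cumulative commit_count of 0x5100 yields
 * subbuf_offset(0x2100 - 0x5100, chan) == 0, i.e. the commit count has
 * caught up with the reserve offset modulo the subbuffer size, so
 * commit_seq is advanced to 0x5100 if it was lagging behind.
 */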
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);

/* Keep track of trap nesting inside ring buffer code. */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);

#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */