#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * linux/ringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

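/*
 * Worked example of the offset arithmetic above, using hypothetical sizes
 * (not taken from any particular configuration): buf_size = 32768
 * (buf_size_order = 15), subbuf_size = 4096 (subbuf_size_order = 12),
 * hence 8 subbuffers. For offset = 37000:
 *   buf_trunc()     = 37000 & ~32767 = 32768   (second pass over the buffer)
 *   buf_trunc_val() = 32768 >> 15    = 1
 *   buf_offset()    = 37000 & 32767  = 4232
 *   subbuf_offset() = 37000 & 4095   = 136
 *   subbuf_trunc()  = 37000 & ~4095  = 36864
 *   subbuf_align()  = 41096 & ~4095  = 40960   (start of the next subbuffer)
 *   subbuf_index()  = 4232 >> 12     = 1       (second subbuffer of the buffer)
 */
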
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
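
/*
 * Illustration of the overflow check, assuming tsc_bits = 27 (an example
 * value, not mandated here): compact record headers only carry the low
 * 27 bits of the timestamp. On 64-bit, last_tsc holds the previous full
 * timestamp, so (tsc - last_tsc) >> 27 is non-zero whenever the delta no
 * longer fits in 27 bits and a full 64-bit timestamp must be emitted.
 * On 32-bit, last_tsc holds tsc >> 27 instead, so the check fires
 * whenever those shifted high-order bits differ.
 */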

extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
				 enum switch_mode mode);

extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
extern
void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by
		 * many writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index in the buffer is the one
		 * which will win this loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
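
/*
 * Example of the push condition above, with hypothetical sizes
 * (buf_size = 32768, subbuf_size = 4096): if the reader is still at
 * consumed = 0 while a writer reserves at offset = 32768, the write
 * position is a full buffer ahead (32768 - 0 >= buf_size), so the
 * reader is pushed to subbuf_align(0) = 4096, freeing the oldest
 * subbuffer for overwrite.
 */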

static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  unsigned long commit_count,
					  unsigned long idx)
{
	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
		v_set(config, &buf->commit_hot[idx].seq, commit_count);
}

static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory ordering is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}
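
/*
 * Numeric sketch of the "fully committed" check, with hypothetical
 * geometry (subbuf_size = 4096, 8 subbuffers, num_subbuf_order = 3):
 * cc_sb of a subbuffer grows by subbuf_size each time that subbuffer is
 * completely filled. With consumed_old = 4096 (second subbuffer, first
 * pass), buf_trunc(consumed_old) >> num_subbuf_order = 0, so the
 * subbuffer is deliverable once cc_sb reaches 4096. One buffer lap
 * later (consumed_old = 36864), the same test expects cc_sb = 8192.
 */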

static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing whether an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}
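
/*
 * Hypothetical usage sketch (not an API defined here): a per-CPU probe
 * wanting to detect whether it interrupted another reservation on the
 * same buffer could do, with interrupts/preemption already disabled:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan)) {
 *		// Nested over an uncommitted reservation.
 *	}
 */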

/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   u64 tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeeded at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. Deals with concurrent
		 * updates of the "cc" value without adding an add_return
		 * atomic operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important for counting record overruns in flight
		 *   recorder mode locklessly.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
		 * This guarantees that old_commit_count + 1 != commit_count.
		 */

		/*
		 * Order prior updates to reserve count prior to the
		 * commit_cold cc_sb update.
		 */
		smp_wmb();
		if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
				     old_commit_count, old_commit_count + 1)
			   == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend, idx),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									    buf,
									    idx));

			/*
			 * Increment the packet counter while we have exclusive
			 * access.
			 */
			subbuffer_inc_packet_count(config, &buf->backend, idx);

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan));

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also orders wrt concurrent readers.
			 */
			smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &buf->commit_cold[idx].cc_sb,
			      commit_count);
			/*
			 * Order later updates to reserve count after
			 * the commit_cold cc_sb update.
			 */
			smp_wmb();
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							     commit_count, idx);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && atomic_long_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
				wake_up_interruptible(&buf->read_wait);
				wake_up_interruptible(&chan->read_wait);
			}

		}
	}
}
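
/*
 * Timeline of the two-step delivery above, with a hypothetical
 * subbuf_size = 4096 and a first-time fill of subbuffer idx:
 *   commit_count = 4096, so old_commit_count = 0.
 *   1) cc_sb: 0 -> 1    (cmpxchg; exclusive access, concurrent writers
 *                        wrapping around see a non-filled subbuffer and
 *                        must drop records)
 *   2) cc_sb: 1 -> 4096 (v_set; subbuffer released for reading or
 *                        overwrite)
 * Because subbuf_size >= 2, step 1 can never collide with the final
 * value written in step 2.
 */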

/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
					   commit_seq_old, commit_count);
}
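
/*
 * Hypothetical example, with subbuf_size = 4096 and taking buf_offset as
 * the offset right after the committed record: a record ending 136 bytes
 * into the subbuffer, committed during that subbuffer's second fill,
 * gives commit_count = 4096 + 136 = 4232 once every earlier reservation
 * in that subbuffer has been committed. Then
 * subbuf_offset(buf_offset - commit_count) == 0 and commit_seq is pushed
 * up to 4232. If an interrupted, not-yet-committed reservation precedes
 * this record, commit_count is smaller, the difference is non-zero
 * modulo subbuf_size, and commit_seq is left unchanged.
 */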

extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);

/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);

#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */