#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * linux/ringbuffer/frontend_internal.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Author:
 *      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>

#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
        return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
        return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
        return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
        return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
        return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
        return (offset + chan->backend.subbuf_size)
               & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
        return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
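
/*
 * Worked example for the macros above, assuming a hypothetical geometry of
 * subbuf_size = 4096 (subbuf_size_order = 12), num_subbuf = 4
 * (num_subbuf_order = 2), hence buf_size = 16384 (buf_size_order = 14),
 * with offset = 21000:
 *
 *   buf_trunc(21000)     = 16384   (second pass over the buffer)
 *   buf_trunc_val(21000) = 1       (buffer wrap counter)
 *   buf_offset(21000)    = 4616    (position within the buffer)
 *   subbuf_index(21000)  = 1       (second sub-buffer)
 *   subbuf_offset(21000) = 520     (position within that sub-buffer)
 *   subbuf_trunc(21000)  = 20480   (start of the current sub-buffer)
 *   subbuf_align(21000)  = 24576   (start of the next sub-buffer)
 */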

/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */
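
/*
 * Example (the tsc_bits value below is chosen purely for illustration): with
 * config->tsc_bits == 27, record headers normally carry only the low-order
 * 27 timestamp bits. If the clock jumps from 0x0000001 to 0x8000002 between
 * two records (a delta larger than 2^27), the compressed field can no longer
 * encode the difference unambiguously, so last_tsc_overflow() returns 1 and
 * the next record header carries the full 64-bit timestamp.
 */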

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
                   struct lib_ring_buffer *buf, u64 tsc)
{
        if (config->tsc_bits == 0 || config->tsc_bits == 64)
                return;

        /*
         * Ensure the compiler performs this update in a single instruction.
         */
        v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
                      struct lib_ring_buffer *buf, u64 tsc)
{
        unsigned long tsc_shifted;

        if (config->tsc_bits == 0 || config->tsc_bits == 64)
                return 0;

        tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
        if (unlikely(tsc_shifted
                     - (unsigned long)v_read(config, &buf->last_tsc)))
                return 1;
        else
                return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
                   struct lib_ring_buffer *buf, u64 tsc)
{
        if (config->tsc_bits == 0 || config->tsc_bits == 64)
                return;

        v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
                      struct lib_ring_buffer *buf, u64 tsc)
{
        if (config->tsc_bits == 0 || config->tsc_bits == 64)
                return 0;

        if (unlikely((tsc - v_read(config, &buf->last_tsc))
                     >> config->tsc_bits))
                return 1;
        else
                return 0;
}
#endif
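
/*
 * The two variants above differ slightly: on 32-bit, last_tsc holds the
 * timestamp pre-shifted by tsc_bits, so an overflow is flagged whenever a
 * 2^tsc_bits boundary is crossed, and the 64-bit timestamp never has to be
 * read or written atomically. On 64-bit, last_tsc holds the full timestamp
 * and an overflow is flagged once the delta since the last record reaches
 * 2^tsc_bits.
 */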

extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
                                 enum switch_mode mode,
                                 struct shm_handle *handle);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
                                         struct channel *chan,
                                         unsigned long offset)
{
        unsigned long consumed_old, consumed_new;

        do {
                consumed_old = uatomic_read(&buf->consumed);
                /*
                 * If buffer is in overwrite mode, push the reader consumed
                 * count if the write position has reached it and we are not
                 * at the first iteration (don't push the reader farther than
                 * the writer). This operation can be done concurrently by
                 * many writers in the same buffer; the writer at the farthest
                 * write position sub-buffer index in the buffer is the one
                 * that wins this loop.
                 */
                if (unlikely(subbuf_trunc(offset, chan)
                             - subbuf_trunc(consumed_old, chan)
                             >= chan->backend.buf_size))
                        consumed_new = subbuf_align(consumed_old, chan);
                else
                        return;
        } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
                                          consumed_new) != consumed_old));
}
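
/*
 * Worked example for the push above, assuming a hypothetical geometry of
 * subbuf_size = 4096 and num_subbuf = 4 (buf_size = 16384), in overwrite
 * mode: the reader is still parked at consumed = 0 (sub-buffer 0) when a
 * writer reserves at offset = 16384, i.e. the write position wraps around
 * and starts re-using sub-buffer 0. Then subbuf_trunc(16384) -
 * subbuf_trunc(0) == 16384 >= buf_size, so the reader is pushed to
 * subbuf_align(0) == 4096, the start of the next sub-buffer. If the cmpxchg
 * loses against a concurrent update of buf->consumed, the loop re-reads the
 * consumed count and re-evaluates the condition.
 */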

static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
                                          struct lib_ring_buffer *buf,
                                          unsigned long commit_count,
                                          unsigned long idx,
                                          struct shm_handle *handle)
{
        if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
                v_set(config, &shmp(handle, buf->commit_hot)[idx].seq, commit_count);
}

static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer *buf,
                                 struct channel *chan,
                                 struct shm_handle *handle)
{
        unsigned long consumed_old, consumed_idx, commit_count, write_offset;

        consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed_old, chan);
        commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
        /*
         * No memory barrier here, since we are only interested
         * in a statistically correct polling result. The next poll will
         * get the data if we are racing. The mb() that ensures correct
         * memory order is in get_subbuf.
         */
        write_offset = v_read(config, &buf->offset);

        /*
         * Check that the subbuffer we are trying to consume has already
         * been fully committed.
         */

        if (((commit_count - chan->backend.subbuf_size)
             & chan->commit_count_mask)
            - (buf_trunc(consumed_old, chan)
               >> chan->backend.num_subbuf_order)
            != 0)
                return 0;

        /*
         * Check that we are not about to read the same subbuffer in
         * which the writer head is.
         */
        if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
            == 0)
                return 0;

        return 1;
}
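
/*
 * Worked example of the "fully committed" test above, assuming a
 * hypothetical geometry of subbuf_size = 4096 and num_subbuf = 4: with
 * consumed_old = 20480, the reader targets sub-buffer index 1 on its second
 * pass over the buffer, and buf_trunc(20480) >> num_subbuf_order ==
 * 16384 >> 2 == 4096. Since cc_sb accumulates subbuf_size per completed
 * pass, the sub-buffer becomes deliverable once cc_sb == 8192, at which
 * point (8192 - 4096) & commit_count_mask == 4096 and the difference is
 * zero. A smaller cc_sb means the writer has not yet delivered that
 * sub-buffer for this pass, so the poll reports no data.
 */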

static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer *buf,
                                 struct channel *chan)
{
        return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
                                            struct lib_ring_buffer *buf,
                                            unsigned long idx,
                                            struct shm_handle *handle)
{
        return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This
 * helps determine whether an execution context is nested (for per-cpu
 * buffers only). This is a very specific ftrace use-case, so we keep this
 * as an "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
                                      struct lib_ring_buffer *buf,
                                      struct channel *chan,
                                      struct shm_handle *handle)
{
        unsigned long offset, idx, commit_count;

        CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
        CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

        /*
         * Read offset and commit count in a loop so they are both read
         * atomically wrt interrupts. We deal with interrupt concurrency by
         * restarting both reads if the offset has been pushed. Note that
         * given we only have to deal with interrupt concurrency here, an
         * interrupt modifying the commit count will also modify "offset",
         * so it is safe to only check for offset modifications.
         */
        do {
                offset = v_read(config, &buf->offset);
                idx = subbuf_index(offset, chan);
                commit_count = v_read(config, &shmp(handle, buf->commit_hot)[idx].cc);
        } while (offset != v_read(config, &buf->offset));

        return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
                - (commit_count & chan->commit_count_mask) == 0);
}

static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
                                   struct lib_ring_buffer *buf,
                                   struct channel *chan,
                                   unsigned long offset,
                                   unsigned long commit_count,
                                   unsigned long idx,
                                   struct shm_handle *handle)
{
        unsigned long old_commit_count = commit_count
                                         - chan->backend.subbuf_size;
        u64 tsc;

        /* Check if all commits have been done */
        if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
                     - (old_commit_count & chan->commit_count_mask) == 0)) {
                /*
                 * If we succeed at updating cc_sb below, we are the subbuffer
                 * writer delivering the subbuffer. This deals with concurrent
                 * updates of the "cc" value without adding an add_return
                 * atomic operation to the fast path.
                 *
                 * We are doing the delivery in two steps:
                 * - First, we cmpxchg() cc_sb to the new value
                 *   old_commit_count + 1. This ensures that we are the only
                 *   subbuffer user successfully filling the subbuffer, but we
                 *   do _not_ set the cc_sb value to "commit_count" yet.
                 *   Therefore, other writers that would wrap around the ring
                 *   buffer and try to start writing to our subbuffer would
                 *   have to drop records, because it would appear as
                 *   non-filled.
                 *   We therefore have exclusive access to the subbuffer
                 *   control structures. This mutual exclusion with other
                 *   writers is crucially important to count record overruns
                 *   in flight recorder mode locklessly.
                 * - When we are ready to release the subbuffer (either for
                 *   reading or for overrun by other writers), we simply set
                 *   the cc_sb value to "commit_count" and perform delivery.
                 *
                 * The subbuffer size is at least 2 bytes (minimum size:
                 * 1 page). This guarantees that old_commit_count + 1 !=
                 * commit_count.
                 */
                if (likely(v_cmpxchg(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
                                     old_commit_count, old_commit_count + 1)
                           == old_commit_count)) {
                        /*
                         * Start of exclusive subbuffer access. We are
                         * guaranteed to be the last writer in this subbuffer
                         * and any other writer trying to access this
                         * subbuffer in this state is required to drop
                         * records.
                         */
                        tsc = config->cb.ring_buffer_clock_read(chan);
                        v_add(config,
                              subbuffer_get_records_count(config,
                                                          &buf->backend,
                                                          idx, handle),
                              &buf->records_count);
                        v_add(config,
                              subbuffer_count_records_overrun(config,
                                                              &buf->backend,
                                                              idx, handle),
                              &buf->records_overrun);
                        config->cb.buffer_end(buf, tsc, idx,
                                              lib_ring_buffer_get_data_size(config,
                                                                            buf,
                                                                            idx,
                                                                            handle),
                                              handle);

                        /*
                         * Set noref flag and offset for this subbuffer id.
                         * Contains a memory barrier that ensures counter
                         * stores are ordered before set noref and offset.
                         */
                        lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
                                                         buf_trunc_val(offset, chan),
                                                         handle);

                        /*
                         * Order set_noref and record counter updates before
                         * the end of subbuffer exclusive access. Orders with
                         * respect to writers coming into the subbuffer after
                         * wrap around, and also orders wrt concurrent
                         * readers.
                         */
                        cmm_smp_mb();
                        /* End of exclusive subbuffer access */
                        v_set(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
                              commit_count);
                        lib_ring_buffer_vmcore_check_deliver(config, buf,
                                                             commit_count, idx,
                                                             handle);

                        /*
                         * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
                         */
                        if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
                            && uatomic_read(&buf->active_readers)
                            && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
                                int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);

                                if (wakeup_fd >= 0) {
                                        int ret;
                                        /*
                                         * Wake-up the other end by
                                         * writing a null byte in the
                                         * pipe (non-blocking).
                                         */
                                        do {
                                                ret = write(wakeup_fd, "", 1);
                                        } while (ret == -1L && errno == EINTR);
                                }
                        }
                }
        }
}
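
/*
 * Worked example of the two-step delivery above, assuming a hypothetical
 * geometry of subbuf_size = 4096 and num_subbuf = 4: a writer fills
 * sub-buffer index 1 on the buffer's second pass, so
 * buf_trunc(offset) >> num_subbuf_order == 4096. When the last commit brings
 * commit_hot[1].cc to commit_count == 8192, old_commit_count == 4096 matches
 * and delivery is attempted: cc_sb is cmpxchg()'d from 4096 to 4097 (a value
 * no fully committed sub-buffer can carry, which grants exclusive access),
 * the records and overrun counters are folded in, and cc_sb is finally set
 * to 8192, which is exactly the value lib_ring_buffer_poll_deliver() expects
 * for a readable sub-buffer.
 */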

/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count catches up with the reserve offset (modulo subbuffer size).
 * It is useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
                                          struct lib_ring_buffer *buf,
                                          struct channel *chan,
                                          unsigned long idx,
                                          unsigned long buf_offset,
                                          unsigned long commit_count,
                                          size_t slot_size,
                                          struct shm_handle *handle)
{
        unsigned long offset, commit_seq_old;

        if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
                return;

        offset = buf_offset + slot_size;

        /*
         * subbuf_offset includes commit_count_mask. We can simply
         * compare the offsets within the subbuffer without caring about
         * buffer full/empty mismatch because offset is never zero here
         * (subbuffer header and record headers have non-zero length).
         */
        if (unlikely(subbuf_offset(offset - commit_count, chan)))
                return;

        commit_seq_old = v_read(config, &shmp(handle, buf->commit_hot)[idx].seq);
        while ((long) (commit_seq_old - commit_count) < 0)
                commit_seq_old = v_cmpxchg(config, &shmp(handle, buf->commit_hot)[idx].seq,
                                           commit_seq_old, commit_count);
}
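
/*
 * Worked example, assuming a hypothetical subbuf_size of 4096: a record is
 * reserved at buf_offset = 512 on the buffer's first pass and committed with
 * slot_size = 64, so offset = 576. If every earlier reservation in that
 * sub-buffer has also been committed, commit_count == 576,
 * subbuf_offset(576 - 576) == 0, and commit_seq is pushed forward to 576.
 * If an earlier record is still uncommitted (say commit_count == 512 after
 * our commit), subbuf_offset(64) != 0 and the update is skipped; commit_seq
 * simply lags until a later commit observes a commit count equal to its own
 * end-of-record offset (modulo subbuffer size).
 */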

extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
                                  struct channel_backend *chanb, int cpu,
                                  struct shm_handle *handle,
                                  struct shm_object *shmobj);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf,
                                 struct shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
extern __thread unsigned int lib_ring_buffer_nesting;

#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */