/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free
 * algorithms.
 * See frontend.h for channel allocation and read-side API.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
#define _LTTNG_RING_BUFFER_FRONTEND_API_H

#include <stddef.h>

#include <urcu/compiler.h>

#include "frontend.h"

/**
 * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
 *
 * The ring buffer nesting count is a safety net ensuring that tracer
 * client code will never trigger an endless recursion.
 * Returns a nesting level >= 0 on success, -EPERM on failure (nesting
 * count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count section. This is
 * required to ensure that probe side-effects which can cause recursion
 * (e.g. unforeseen traps, divisions by 0, ...) are triggered within the
 * incremented nesting count section.
 */
static inline
int lib_ring_buffer_nesting_inc(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	int nesting;

	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
	cmm_barrier();
	if (caa_unlikely(nesting >= LIB_RING_BUFFER_MAX_NESTING)) {
		WARN_ON_ONCE(1);
		URCU_TLS(lib_ring_buffer_nesting)--;
		return -EPERM;
	}
	return nesting - 1;
}

static inline
int lib_ring_buffer_nesting_count(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	return URCU_TLS(lib_ring_buffer_nesting);
}

static inline
void lib_ring_buffer_nesting_dec(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
{
	cmm_barrier();
	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
}
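
/*
 * Usage sketch (illustrative only, not shipped code): a tracer probe
 * brackets its ring buffer accesses with the nesting count so that
 * re-entry (e.g. from a signal handler or an instrumented helper) bails
 * out instead of recursing endlessly:
 *
 *	if (lib_ring_buffer_nesting_inc(config) < 0)
 *		return;
 *	...reserve, write and commit the record...
 *	lib_ring_buffer_nesting_dec(config);
 */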

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * Returns 0 if the reservation succeeded, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		void *client_ctx,
		unsigned long *o_begin, unsigned long *o_end,
		unsigned long *o_old, size_t *before_hdr_pad)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx_private->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx_private->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx_private->tsc))
		ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx_private->slot_size = record_header_size(config, chan, *o_begin,
			before_hdr_pad, ctx, client_ctx);
	ctx_private->slot_size +=
		lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
				ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
			> chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx_private->slot_size;

	if (caa_unlikely(subbuf_offset(*o_end, chan) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}
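
/*
 * Worked example of the slot size computation above, with illustrative
 * numbers (not taken from a real client): assume record_header_size()
 * returns 6, largest_align is 8 and data_size is 13. For a reservation
 * starting at *o_begin == 20, the header ends at offset 26,
 * lttng_ust_lib_ring_buffer_align(26, 8) returns 6 bytes of padding to
 * align the payload at offset 32, so slot_size = 6 + 6 + 13 = 25 and
 * the record spans offsets [20, 45).
 */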

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must already be initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated timestamp is "tsc".
 *
 * Return:
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		void *client_ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_shm_handle *handle = chan->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		ctx_private->reserve_cpu = lttng_ust_get_cpu();
		buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
	} else {
		buf = shmp(handle, chan->backend.buf[0].shmp);
	}
	if (caa_unlikely(!buf))
		return -EIO;
	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx_private->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
			&o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(v_cmpxchg(config, &buf->offset, o_old, o_end)
			!= o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, buf, ctx_private->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &buf->backend,
			subbuf_index(o_end - 1, chan), handle);

	ctx_private->pre_offset = o_begin;
	ctx_private->buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
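
/*
 * Minimal sketch of a client fast path combining reserve with the
 * commit documented below (assumptions: "ctx" was initialized by the
 * client with its channel, data_size and largest_align;
 * lib_ring_buffer_write() is the payload copy helper from the backend;
 * "payload" is hypothetical):
 *
 *	if (lib_ring_buffer_nesting_inc(config) < 0)
 *		return -EPERM;
 *	ret = lib_ring_buffer_reserve(config, ctx, client_ctx);
 *	if (ret)
 *		goto end;
 *	lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, ctx);
 * end:
 *	lib_ring_buffer_nesting_dec(config);
 *	return ret;
 */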

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 * @handle: ring buffer shared memory handle
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * must be executed locally for per-CPU buffers, this function must be called
 * from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
		struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}

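/*
 * Illustrative call (an assumption about typical use, not shipped
 * code): force the current sub-buffer out so readers can consume a
 * partially filled packet, e.g. when tracing stops:
 *
 *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
 */
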
/* See lib_ring_buffer_reserve() above for the reservation counterpart. */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
	struct lttng_ust_shm_handle *handle = chan->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
	unsigned long offset_end = ctx_private->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = shmp_index(handle,
			buf->commit_hot, endidx);

	if (caa_unlikely(!cc_hot))
		return;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	v_add(config, ctx_private->slot_size, &cc_hot->cc);

	/*
	 * The commit count read can race with concurrent out-of-order commit
	 * count updates. This is only needed for lib_ring_buffer_check_deliver
	 * (for non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is only interested in whether the commit
	 *   count catches up to the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
			commit_count, endidx, handle, ctx_private->tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
			offset_end, commit_count, handle, cc_hot);
}

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
	struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
	unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
			!= end_offset))
		return -EPERM;
	else
		return 0;
}
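
/*
 * Illustrative pattern (an assumption about client use, not shipped
 * code): a client that decides after reservation that the event should
 * be dropped (e.g. a hypothetical post-reserve filter) discards the
 * record when possible, and otherwise commits it as required:
 *
 *	if (drop_event && !lib_ring_buffer_try_discard_reserve(config, ctx))
 *		return;
 *	lib_ring_buffer_commit(config, ctx);
 */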

static inline
void channel_record_disable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}
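
/*
 * Illustrative use (an assumption, not shipped code): the counter
 * semantics make disable/enable nestable, so independent call sites may
 * temporarily block record reservation on a whole channel:
 *
 *	channel_record_disable(config, chan);
 *	...reservation attempts on chan now fail with -EAGAIN...
 *	channel_record_enable(config, chan);
 */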

static inline
void lib_ring_buffer_record_disable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(
		const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}

#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */