/*
 * SPDX-License-Identifier: (LGPL-2.1-only OR GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
#include <stdint.h>
#include <pthread.h>

#include <lttng/ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
			    struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
			 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
			    struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
			   struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
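
/*
 * Worked example (hypothetical values): with buf_size = 4 MiB
 * (buf_size_order = 22) and subbuf_size = 1 MiB (subbuf_size_order = 20),
 * an offset of 0x523000 decomposes as:
 *
 *   buf_trunc(offset, chan)     = 0x400000  (start of the current buffer)
 *   buf_trunc_val(offset, chan) = 1         (buffer number counter)
 *   buf_offset(offset, chan)    = 0x123000  (offset within the buffer)
 *   subbuf_trunc(offset, chan)  = 0x500000  (start of the current subbuffer)
 *   subbuf_offset(offset, chan) = 0x023000  (offset within the subbuffer)
 *   subbuf_index(offset, chan)  = 1         (second subbuffer of the buffer)
 *   subbuf_align(offset, chan)  = 0x600000  (start of the next subbuffer)
 *
 * Offsets are free-running: only the masked low-order bits address
 * storage; the high-order bits count how many times the buffer wrapped.
 */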

/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
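
/*
 * Example (hypothetical values): with tsc_bits = 27, an event whose
 * timestamp differs from the previous one only in the low 27 bits
 * yields a zero shifted delta, so last_tsc_overflow() returns 0 and
 * the compact timestamp in the record header suffices. Once two
 * consecutive events are 2^27 or more clock cycles apart, the shifted
 * values differ, last_tsc_overflow() returns 1, and the full 64-bit
 * timestamp must be written in the record header.
 */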

__attribute__((visibility("hidden")))
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx);

__attribute__((visibility("hidden")))
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

__attribute__((visibility("hidden")))
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
					struct lttng_ust_lib_ring_buffer *buf,
					struct lttng_ust_lib_ring_buffer_channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					struct lttng_ust_shm_handle *handle,
					uint64_t tsc);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct lttng_ust_lib_ring_buffer_channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				 >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
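
/*
 * Example: in overwrite mode, once the reserve offset has advanced a
 * full buf_size past the consumed position, the cmpxchg loop above
 * pushes "consumed" to the start of the following subbuffer, so the
 * oldest subbuffer can be overwritten. A writer losing the cmpxchg
 * race re-evaluates the condition and either retries or returns,
 * which is how the writer at the farthest position wins the loop.
 */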

/*
 * Move the consumed position to the beginning of the subbuffer in which
 * the write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position: it can
 * make concurrent trace producers or consumers observe a consumed
 * position further than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
			     < 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
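
/*
 * Note: a non-zero subbuf_offset of the write position means the
 * current subbuffer holds data (at least a subbuffer header) that has
 * not yet been delivered; a write position sitting exactly on a
 * subbuffer boundary means no data is pending.
 */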

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * determine whether an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct lttng_ust_lib_ring_buffer_channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct lttng_ust_lib_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, handle, tsc);
}
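
/*
 * Sketch of the fast-path arithmetic above: each complete pass over a
 * subbuffer adds subbuf_size to its commit counter, so a subbuffer
 * that just became fully committed has old_commit_count equal to
 * buf_trunc(offset, chan) >> num_subbuf_order, and only then is the
 * (expensive) delivery slow path taken. For a partially committed
 * subbuffer the subtraction is non-zero and the check costs only a
 * couple of arithmetic operations.
 */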

/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count catches back up with the reserve offset (modulo subbuffer
 * size). It is useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct lttng_ust_lib_ring_buffer_channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
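
/*
 * Example (hypothetical values): with subbuf_size = 1 MiB, a commit
 * bringing commit_count to 0x323000 when the committed record ends at
 * buf_offset = 0x523000 gives subbuf_offset(0x200000) == 0: the commit
 * count has caught back up with the reserve offset (modulo subbuffer
 * size), so commit_seq may be published. The signed comparison on
 * commit_seq_old keeps commit_seq monotonically increasing when
 * concurrent writers race on this update.
 */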

__attribute__((visibility("hidden")))
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);

__attribute__((visibility("hidden")))
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
__attribute__((visibility("hidden")))
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */