/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ringbuffer/config.h
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include ringbuffer/api.h.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LIB_RING_BUFFER_CONFIG_H
#define _LIB_RING_BUFFER_CONFIG_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <lttng/align.h>
#include <lttng/tracer-core.h>

struct lttng_kernel_ring_buffer;
struct lttng_kernel_ring_buffer_channel;
struct lttng_kernel_ring_buffer_config;
struct lttng_kernel_ring_buffer_ctx;
struct lttng_kernel_ring_buffer_ctx_private;

/*
 * Ring buffer client callbacks. Only used by the slow path, never on the fast
 * path. For the fast path, record_header_size() and ring_buffer_clock_read()
 * should also be provided as static inline functions; these may simply return
 * 0 if not used by the client. An illustrative sketch of such inline versions
 * follows the structure below.
 */
struct lttng_kernel_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for the fast path */
	u64 (*ring_buffer_clock_read) (struct lttng_kernel_ring_buffer_channel *chan);
	size_t (*record_header_size) (const struct lttng_kernel_ring_buffer_config *config,
				      struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lttng_kernel_ring_buffer_ctx *ctx,
				      void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	void (*buffer_end) (struct lttng_kernel_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    const struct lttng_kernel_ring_buffer_ctx *ctx);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_kernel_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_kernel_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_kernel_ring_buffer_config *config,
			    struct lttng_kernel_ring_buffer_channel *chan, struct lttng_kernel_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
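
/*
 * Illustrative sketch (not part of this header): the static inline fast-path
 * counterparts mentioned above, for a hypothetical client which uses neither
 * a clock nor a per-record header. The "client_*" names are placeholders, not
 * symbols provided by this library.
 *
 *   static inline
 *   u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
 *   {
 *           return 0;
 *   }
 *
 *   static inline
 *   size_t client_record_header_size(const struct lttng_kernel_ring_buffer_config *config,
 *                   struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
 *                   size_t *pre_header_padding,
 *                   struct lttng_kernel_ring_buffer_ctx *ctx,
 *                   void *client_ctx)
 *   {
 *           *pre_header_padding = 0;
 *           return 0;
 *   }
 */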

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffers with global synchronization. Tracing can be performed with
 *   preemption enabled; records statistically stay in the local CPU's buffer.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion on the client side. Note that periodic sub-buffer
 *   switching should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the buffers and wake
 * up readers when data is ready. Mainly useful for tracers which do not want
 * to call into the wakeup code on the tracing path. Use in combination with
 * the "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a sub-buffer is
 * ready to read, which lowers the latency before the reader is woken up.
 * Mainly suitable for drivers. Going through an "irq_work" allows this type of
 * wakeup to be triggered even from NMI context: the wakeup is slightly delayed
 * until the next interrupt is handled.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lttng_kernel_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader through
						 * irq_work.
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 *   0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_kernel_ring_buffer_client_cb cb;
};
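
/*
 * Illustrative sketch (not part of this header): a "static const" client
 * configuration, declared as recommended above so the inline fast paths can
 * be optimized. The enumerator choices are only one plausible combination,
 * and the "client_*" callbacks are hypothetical placeholders (optional
 * callbacks left unset default to NULL).
 *
 *   static const struct lttng_kernel_ring_buffer_config client_config = {
 *           .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
 *           .cb.record_header_size = client_record_header_size,
 *           .cb.subbuffer_header_size = client_packet_header_size,
 *           .cb.buffer_begin = client_buffer_begin,
 *           .cb.buffer_end = client_buffer_end,
 *
 *           .tsc_bits = 27,
 *           .alloc = RING_BUFFER_ALLOC_PER_CPU,
 *           .sync = RING_BUFFER_SYNC_PER_CPU,
 *           .mode = RING_BUFFER_OVERWRITE,
 *           .output = RING_BUFFER_SPLICE,
 *           .backend = RING_BUFFER_PAGE,
 *           .oops = RING_BUFFER_OOPS_CONSISTENCY,
 *           .ipi = RING_BUFFER_IPI_BARRIER,
 *           .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
 *   };
 */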

/*
 * ring buffer private context
 *
 * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 *
 * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
 */

struct lttng_kernel_ring_buffer_ctx_private {
	/* input received by lib_ring_buffer_reserve(). */
	struct lttng_kernel_ring_buffer_channel *chan;	/* ring buffer channel */

	/* output from lib_ring_buffer_reserve() */
	int reserve_cpu;			/* processor id updated by the reserve */
	size_t slot_size;			/* size of the reserved slot */
	unsigned long buf_offset;		/* offset following the record header */
	unsigned long pre_offset;		/*
						 * Initial offset position _before_
						 * the record is written. Positioned
						 * prior to record header alignment
						 * padding.
						 */
	u64 tsc;				/* time-stamp counter value */
	unsigned int rflags;			/* reservation flags */

	struct lttng_kernel_ring_buffer *buf;	/*
						 * buffer corresponding to processor id
						 * for this channel
						 */
	struct lttng_kernel_ring_buffer_backend_pages *backend_pages;

	/*
	 * Records lost counts are only loaded into these fields before
	 * reserving the last bytes from the ring buffer.
	 */
	unsigned long records_lost_full;
	unsigned long records_lost_wrap;
	unsigned long records_lost_big;
};

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lttng_kernel_ring_buffer_ctx {
	/* Private ring buffer context, set by reserve callback. */
	struct lttng_kernel_ring_buffer_ctx_private priv;

	/* input received by lib_ring_buffer_reserve(), saved here. */
	void *client_priv;		/* Ring buffer client private data */

	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	struct lttng_kernel_probe_ctx *probe_ctx;	/* Probe context */
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @client_priv: client private data
 * @data_size: size of record data payload. It must be greater than 0.
 * @largest_align: largest alignment within data payload types
 * @probe_ctx: probe context for the event being traced
 */
static inline
void lib_ring_buffer_ctx_init(struct lttng_kernel_ring_buffer_ctx *ctx,
			      void *client_priv,
			      size_t data_size, int largest_align,
			      struct lttng_kernel_probe_ctx *probe_ctx)
{
	ctx->client_priv = client_priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->probe_ctx = probe_ctx;
}
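
/*
 * Illustrative usage sketch (not part of this header): initializing a context,
 * then reserving, writing and committing a record. The reserve/write/commit
 * primitives live in the ring buffer API header; their exact signatures, as
 * well as "client_config", "client_priv", "probe_ctx", "payload" and
 * lttng_alignof(), are assumptions made for the sake of the example.
 *
 *   struct lttng_kernel_ring_buffer_ctx ctx;
 *   int ret;
 *
 *   lib_ring_buffer_ctx_init(&ctx, client_priv, sizeof(payload),
 *                            lttng_alignof(payload), probe_ctx);
 *   ret = lib_ring_buffer_reserve(&client_config, &ctx, NULL);
 *   if (ret)
 *           return ret;
 *   lib_ring_buffer_write(&client_config, &ctx, &payload, sizeof(payload));
 *   lib_ring_buffer_commit(&client_config, &ctx);
 */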

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header only
 * needs to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)
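
/*
 * Illustrative sketch (not part of this header): a client-defined reservation
 * flag, allocated after RING_BUFFER_RFLAG_END as described above. The flag
 * name is a hypothetical placeholder.
 *
 *   #define CLIENT_RFLAG_EXTENDED	(RING_BUFFER_RFLAG_END << 0)
 */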

#ifndef LTTNG_TRACER_CORE_H
#error "lttng/tracer-core.h is needed for RING_BUFFER_ALIGN define"
#endif

/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. The alignment choice (RING_BUFFER_ALIGN, provided by
 * lttng/tracer-core.h) is duplicated here because it affects both the slow
 * and fast paths, but RING_BUFFER_ALIGN_ATTR is only available to the client
 * code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
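
/*
 * Illustrative sketch (not part of this header): since RING_BUFFER_ALIGN_ATTR
 * expands either to the default architecture alignment or to a packed
 * attribute, a client can apply it to the structures it lays out in the ring
 * buffer so their layout matches the alignment configuration. The structure
 * below is a hypothetical example.
 *
 *   struct client_packet_header {
 *           uint32_t magic;
 *           uint8_t uuid[16];
 *   } RING_BUFFER_ALIGN_ATTR;
 */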

/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment (in bytes) to apply to the context buffer offset.
 */
static inline
void lib_ring_buffer_align_ctx(struct lttng_kernel_ring_buffer_ctx *ctx,
			       size_t alignment)
{
	ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset,
						      alignment);
}
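
/*
 * Illustrative sketch (not part of this header): aligning the write position
 * before copying a 64-bit field. With RING_BUFFER_ALIGN defined, a current
 * buf_offset of 12 and an 8-byte alignment, 4 bytes of padding are added so
 * the field starts at offset 16; in the packed configuration,
 * lib_ring_buffer_align() returns 0 and no padding is added.
 * lib_ring_buffer_write(), "client_config" and lttng_alignof() are assumed
 * here for the sake of the example.
 *
 *   lib_ring_buffer_align_ctx(ctx, lttng_alignof(u64));
 *   lib_ring_buffer_write(&client_config, ctx, &value, sizeof(u64));
 */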

/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 * In particular, a global buffer with per-cpu synchronization must not use a
 * periodic sub-buffer switch timer (see the alloc/sync pairs above).
 */
static inline
int lib_ring_buffer_check_config(const struct lttng_kernel_ring_buffer_config *config,
				 unsigned int switch_timer_interval,
				 unsigned int read_timer_interval)
{
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
	    && config->sync == RING_BUFFER_SYNC_PER_CPU
	    && switch_timer_interval)
		return -EINVAL;
	return 0;
}

#include <ringbuffer/vatomic.h>

#endif /* _LIB_RING_BUFFER_CONFIG_H */