/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
11 #ifndef _LIB_RING_BUFFER_CONFIG_H
12 #define _LIB_RING_BUFFER_CONFIG_H
14 #include <linux/types.h>
15 #include <linux/percpu.h>
16 #include <lttng/align.h>
17 #include <lttng/tracer-core.h>
/*
 * Forward declarations: these types are defined by the ring buffer
 * implementation and its clients; only pointers to them appear in this
 * header, so full definitions are not needed here.
 */
struct channel;		/* referenced by the client callback signatures below */
struct lib_ring_buffer;
struct lib_ring_buffer_config;
struct lttng_kernel_ring_buffer_ctx;
struct lttng_kernel_ring_buffer_ctx_private;
26 * Ring buffer client callbacks. Only used by slow path, never on fast path.
27 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
28 * provided as inline functions too. These may simply return 0 if not used by
31 struct lib_ring_buffer_client_cb
{
32 /* Mandatory callbacks */
34 /* A static inline version is also required for fast path */
35 u64 (*ring_buffer_clock_read
) (struct channel
*chan
);
36 size_t (*record_header_size
) (const struct lib_ring_buffer_config
*config
,
37 struct channel
*chan
, size_t offset
,
38 size_t *pre_header_padding
,
39 struct lttng_kernel_ring_buffer_ctx
*ctx
,
42 /* Slow path only, at subbuffer switch */
43 size_t (*subbuffer_header_size
) (void);
44 void (*buffer_begin
) (struct lib_ring_buffer
*buf
, u64 tsc
,
45 unsigned int subbuf_idx
);
46 void (*buffer_end
) (struct lib_ring_buffer
*buf
, u64 tsc
,
47 unsigned int subbuf_idx
, unsigned long data_size
);
49 /* Optional callbacks (can be set to NULL) */
51 /* Called at buffer creation/finalize */
52 int (*buffer_create
) (struct lib_ring_buffer
*buf
, void *priv
,
53 int cpu
, const char *name
);
55 * Clients should guarantee that no new reader handle can be opened
58 void (*buffer_finalize
) (struct lib_ring_buffer
*buf
, void *priv
, int cpu
);
61 * Extract header length, payload length and timestamp from event
62 * record. Used by buffer iterators. Timestamp is only used by channel
65 void (*record_get
) (const struct lib_ring_buffer_config
*config
,
66 struct channel
*chan
, struct lib_ring_buffer
*buf
,
67 size_t offset
, size_t *header_len
,
68 size_t *payload_len
, u64
*timestamp
);
72 * Ring buffer instance configuration.
74 * Declare as "static const" within the client object to ensure the inline fast
75 * paths can be optimized.
79 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
80 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
81 * with preemption disabled (lib_ring_buffer_get_cpu() and
82 * lib_ring_buffer_put_cpu()).
84 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
85 * Per-cpu buffer with global synchronization. Tracing can be performed with
86 * preemption enabled, statistically stays on the local buffers.
88 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
89 * Should only be used for buffers belonging to a single thread or protected
90 * by mutual exclusion by the client. Note that periodical sub-buffer switch
91 * should be disabled in this kind of configuration.
93 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
94 * Global shared buffer with global synchronization.
98 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
99 * buffers and wake up readers if data is ready. Mainly useful for tracers which
100 * don't want to call into the wakeup code on the tracing path. Use in
101 * combination with "read_timer_interval" channel_create() argument.
103 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
104 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
105 * for drivers. Going through an "irq_work" allows triggering this type of wakeup
106 * even from NMI context: the wakeup will be slightly delayed until the next
107 * interrupts are handled.
109 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
110 * has the responsibility to perform wakeups.
112 struct lib_ring_buffer_config
{
114 RING_BUFFER_ALLOC_PER_CPU
,
115 RING_BUFFER_ALLOC_GLOBAL
,
118 RING_BUFFER_SYNC_PER_CPU
, /* Wait-free */
119 RING_BUFFER_SYNC_GLOBAL
, /* Lock-free */
122 RING_BUFFER_OVERWRITE
, /* Overwrite when buffer full */
123 RING_BUFFER_DISCARD
, /* Discard when buffer full */
128 RING_BUFFER_READ
, /* TODO */
129 RING_BUFFER_ITERATOR
,
134 RING_BUFFER_VMAP
, /* TODO */
135 RING_BUFFER_STATIC
, /* TODO */
138 RING_BUFFER_NO_OOPS_CONSISTENCY
,
139 RING_BUFFER_OOPS_CONSISTENCY
,
142 RING_BUFFER_IPI_BARRIER
,
143 RING_BUFFER_NO_IPI_BARRIER
,
146 RING_BUFFER_WAKEUP_BY_TIMER
, /* wake up performed by timer */
147 RING_BUFFER_WAKEUP_BY_WRITER
, /*
148 * writer wakes up reader through
153 * tsc_bits: timestamp bits saved at each record.
154 * 0 and 64 disable the timestamp compression scheme.
156 unsigned int tsc_bits
;
157 struct lib_ring_buffer_client_cb cb
;
161 * ring buffer private context
163 * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
164 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
165 * lib_ring_buffer_write().
167 * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
170 struct lttng_kernel_ring_buffer_ctx_private
{
171 /* input received by lib_ring_buffer_reserve(). */
172 struct channel
*chan
; /* ring buffer channel */
174 /* output from lib_ring_buffer_reserve() */
175 int reserve_cpu
; /* processor id updated by the reserve */
176 size_t slot_size
; /* size of the reserved slot */
177 unsigned long buf_offset
; /* offset following the record header */
178 unsigned long pre_offset
; /*
179 * Initial offset position _before_
180 * the record is written. Positioned
181 * prior to record header alignment
184 u64 tsc
; /* time-stamp counter value */
185 unsigned int rflags
; /* reservation flags */
187 struct lib_ring_buffer
*buf
; /*
188 * buffer corresponding to processor id
191 struct lib_ring_buffer_backend_pages
*backend_pages
;
195 * ring buffer context
197 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
198 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
199 * lib_ring_buffer_write().
201 struct lttng_kernel_ring_buffer_ctx
{
202 /* Private ring buffer context, set by reserve callback. */
203 struct lttng_kernel_ring_buffer_ctx_private priv
;
205 /* input received by lib_ring_buffer_reserve(), saved here. */
206 void *client_priv
; /* Ring buffer client private data */
208 size_t data_size
; /* size of payload */
209 int largest_align
; /*
210 * alignment of the largest element
213 struct lttng_kernel_probe_ctx
*probe_ctx
; /* Probe context */
217 * lib_ring_buffer_ctx_init - initialize ring buffer context
218 * @ctx: ring buffer context to initialize
219 * @client_priv: client private data
220 * @data_size: size of record data payload. It must be greater than 0.
221 * @largest_align: largest alignment within data payload types
224 void lib_ring_buffer_ctx_init(struct lttng_kernel_ring_buffer_ctx
*ctx
,
226 size_t data_size
, int largest_align
,
227 struct lttng_kernel_probe_ctx
*probe_ctx
)
229 ctx
->client_priv
= client_priv
;
230 ctx
->data_size
= data_size
;
231 ctx
->largest_align
= largest_align
;
232 ctx
->probe_ctx
= probe_ctx
;
/*
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bit of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
249 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
250 #define RING_BUFFER_RFLAG_END (1U << 1)
252 #ifndef LTTNG_TRACER_CORE_H
253 #error "lttng/tracer-core.h is needed for RING_BUFFER_ALIGN define"
/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 *
 * Records are packed (no natural alignment), so no padding is ever needed.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
293 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
294 * @ctx: ring buffer context.
297 void lib_ring_buffer_align_ctx(struct lttng_kernel_ring_buffer_ctx
*ctx
,
300 ctx
->priv
.buf_offset
+= lib_ring_buffer_align(ctx
->priv
.buf_offset
,
305 * lib_ring_buffer_check_config() returns 0 on success.
306 * Used internally to check for valid configurations at channel creation.
309 int lib_ring_buffer_check_config(const struct lib_ring_buffer_config
*config
,
310 unsigned int switch_timer_interval
,
311 unsigned int read_timer_interval
)
313 if (config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
314 && config
->sync
== RING_BUFFER_SYNC_PER_CPU
315 && switch_timer_interval
)
320 #include <ringbuffer/vatomic.h>
322 #endif /* _LIB_RING_BUFFER_CONFIG_H */