/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2021 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 */
#ifndef _LTTNG_RING_BUFFER_CONFIG_H
#define _LTTNG_RING_BUFFER_CONFIG_H

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <urcu/arch.h>

#include <lttng/ust-align.h>
#include <lttng/ust-compiler.h>
#include <lttng/ust-tracer.h>
/* Forward declarations: all of these are opaque to this header's clients. */
struct lttng_ust_lib_ring_buffer;
struct lttng_ust_lib_ring_buffer_channel;
struct lttng_ust_lib_ring_buffer_config;
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_shm_handle;
/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	uint64_t (*ring_buffer_clock_read)(struct lttng_ust_lib_ring_buffer_channel *chan);
	/*
	 * NOTE(review): `size_t offset` and `void *client_ctx` parameters
	 * reconstructed from dropped lines — confirm against upstream header.
	 */
	size_t (*record_header_size)(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_channel *chan,
			size_t offset,
			size_t *pre_header_padding,
			struct lttng_ust_lib_ring_buffer_ctx *ctx,
			void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size)(void);
	void (*buffer_begin)(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			unsigned int subbuf_idx,
			struct lttng_ust_shm_handle *handle);
	void (*buffer_end)(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			unsigned int subbuf_idx, unsigned long data_size,
			struct lttng_ust_shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create)(struct lttng_ust_lib_ring_buffer *buf, void *priv,
			int cpu, const char *name,
			struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize)(struct lttng_ust_lib_ring_buffer *buf,
			void *priv,
			struct lttng_ust_shm_handle *handle);
	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get)(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_channel *chan,
			struct lttng_ust_lib_ring_buffer *buf,
			size_t offset, size_t *header_len,
			size_t *payload_len, uint64_t *timestamp,
			struct lttng_ust_shm_handle *handle);
	/*
	 * Offset and size of content size field in client.
	 */
	void (*content_size_field)(const struct lttng_ust_lib_ring_buffer_config *config,
			size_t *offset, size_t *length);
	void (*packet_size_field)(const struct lttng_ust_lib_ring_buffer_config *config,
			size_t *offset, size_t *length);
};
/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
 *   preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodical sub-buffer switch
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
/* Reserved space at the tail of struct lttng_ust_lib_ring_buffer_config. */
#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING	20

/* Buffer allocation scheme: one buffer per cpu, or one global buffer. */
enum lttng_ust_lib_ring_buffer_alloc_types {
	RING_BUFFER_ALLOC_PER_CPU,
	RING_BUFFER_ALLOC_GLOBAL,
};
/* Writer synchronization scheme (see the alloc/sync pairs comment above). */
enum lttng_ust_lib_ring_buffer_sync_types {
	RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
	RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
};
/* Behavior when a sub-buffer fills up. */
enum lttng_ust_lib_ring_buffer_mode_types {
	RING_BUFFER_OVERWRITE = 0,	/* Overwrite when buffer full */
	RING_BUFFER_DISCARD = 1,	/* Discard when buffer full */
};
/*
 * Consumer output mechanism.
 * NOTE(review): SPLICE/MMAP/NONE members reconstructed from dropped lines —
 * confirm member order against upstream header before relying on values.
 */
enum lttng_ust_lib_ring_buffer_output_types {
	RING_BUFFER_SPLICE,
	RING_BUFFER_MMAP,
	RING_BUFFER_READ,		/* TODO */
	RING_BUFFER_ITERATOR,
	RING_BUFFER_NONE,
};
/*
 * Backing memory type for the buffers.
 * NOTE(review): PAGE member reconstructed from a dropped line — confirm
 * against upstream header.
 */
enum lttng_ust_lib_ring_buffer_backend_types {
	RING_BUFFER_PAGE,
	RING_BUFFER_VMAP,		/* TODO */
	RING_BUFFER_STATIC,		/* TODO */
};
/* Whether buffer state is kept readable after a crash (oops consistency). */
enum lttng_ust_lib_ring_buffer_oops_types {
	RING_BUFFER_NO_OOPS_CONSISTENCY,
	RING_BUFFER_OOPS_CONSISTENCY,
};
/* Whether cross-cpu ordering relies on an IPI barrier. */
enum lttng_ust_lib_ring_buffer_ipi_types {
	RING_BUFFER_IPI_BARRIER,
	RING_BUFFER_NO_IPI_BARRIER,
};
/* How blocked readers get woken (see the wakeup comment above). */
enum lttng_ust_lib_ring_buffer_wakeup_types {
	RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
	RING_BUFFER_WAKEUP_BY_WRITER,	/*
					 * writer wakes up reader,
					 * not lock-free
					 * (takes spinlock).
					 */
};
179 struct lttng_ust_lib_ring_buffer_config
{
180 enum lttng_ust_lib_ring_buffer_alloc_types alloc
;
181 enum lttng_ust_lib_ring_buffer_sync_types sync
;
182 enum lttng_ust_lib_ring_buffer_mode_types mode
;
183 enum lttng_ust_lib_ring_buffer_output_types output
;
184 enum lttng_ust_lib_ring_buffer_backend_types backend
;
185 enum lttng_ust_lib_ring_buffer_oops_types oops
;
186 enum lttng_ust_lib_ring_buffer_ipi_types ipi
;
187 enum lttng_ust_lib_ring_buffer_wakeup_types wakeup
;
189 * tsc_bits: timestamp bits saved at each record.
190 * 0 and 64 disable the timestamp compression scheme.
192 unsigned int tsc_bits
;
193 struct lttng_ust_lib_ring_buffer_client_cb cb
;
195 * client_type is used by the consumer process (which is in a
196 * different address space) to lookup the appropriate client
197 * callbacks and update the cb pointers.
201 const struct lttng_ust_lib_ring_buffer_client_cb
*cb_ptr
;
202 char padding
[LTTNG_UST_RING_BUFFER_CONFIG_PADDING
];
/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bit of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
219 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
220 #define RING_BUFFER_RFLAG_END (1U << 1)
223 * lib_ring_buffer_check_config() returns 0 on success.
224 * Used internally to check for valid configurations at channel creation.
226 static inline lttng_ust_notrace
227 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config
*config
,
228 unsigned int switch_timer_interval
,
229 unsigned int read_timer_interval
);
231 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config
*config
,
232 unsigned int switch_timer_interval
,
233 unsigned int read_timer_interval
)
235 if (config
->alloc
== RING_BUFFER_ALLOC_GLOBAL
236 && config
->sync
== RING_BUFFER_SYNC_PER_CPU
237 && switch_timer_interval
)
242 #endif /* _LTTNG_RING_BUFFER_CONFIG_H */