Add ring buffer comment about shm
[lttng-ust.git] / libringbuffer / config.h
#ifndef _LINUX_RING_BUFFER_CONFIG_H
#define _LINUX_RING_BUFFER_CONFIG_H

/*
 * linux/ringbuffer/config.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <errno.h>
#include "ust/kcompat/kcompat.h"
#include "ust/align.h"

struct lib_ring_buffer;
struct channel;
struct lib_ring_buffer_config;
struct lib_ring_buffer_ctx;

/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
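
/*
 * Example (illustrative sketch, not part of this header): a minimal client
 * callback set. As noted above, the size and clock callbacks may simply
 * return 0 when the client does not use them; the "client_*" names below are
 * hypothetical.
 *
 *	static u64 client_clock_read(struct channel *chan)
 *	{
 *		return 0;
 *	}
 *
 *	static size_t client_record_header_size(const struct lib_ring_buffer_config *config,
 *						struct channel *chan, size_t offset,
 *						size_t *pre_header_padding,
 *						struct lib_ring_buffer_ctx *ctx)
 *	{
 *		return 0;
 *	}
 *
 *	static size_t client_subbuffer_header_size(void)
 *	{
 *		return 0;
 *	}
 *
 *	static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
 *					unsigned int subbuf_idx)
 *	{
 *	}
 *
 *	static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
 *				      unsigned int subbuf_idx,
 *				      unsigned long data_size)
 *	{
 *	}
 *
 *	static const struct lib_ring_buffer_client_cb client_cb = {
 *		.ring_buffer_clock_read = client_clock_read,
 *		.record_header_size = client_record_header_size,
 *		.subbuffer_header_size = client_subbuffer_header_size,
 *		.buffer_begin = client_buffer_begin,
 *		.buffer_end = client_buffer_end,
 *	};
 */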

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffers with global synchronization. Tracing can be performed with
 *   preemption enabled; writes statistically stay on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodic sub-buffer switching
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with the "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. This gives lower latencies before the reader is woken up, and
 * is mainly suitable for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lib_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lib_ring_buffer_client_cb cb;
};
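
/*
 * Example (illustrative sketch): a per-cpu, discard-mode configuration
 * declared "static const" as recommended above, so the inline fast paths can
 * be optimized. It reuses the hypothetical "client_cb" from the example
 * above; tsc_bits == 27 is just an arbitrary example value.
 *
 *	static const struct lib_ring_buffer_config client_config = {
 *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
 *		.sync = RING_BUFFER_SYNC_PER_CPU,
 *		.mode = RING_BUFFER_DISCARD,
 *		.output = RING_BUFFER_MMAP,
 *		.backend = RING_BUFFER_PAGE,
 *		.oops = RING_BUFFER_NO_OOPS_CONSISTENCY,
 *		.ipi = RING_BUFFER_NO_IPI_BARRIER,
 *		.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
 *		.tsc_bits = 27,
 *		.cb = client_cb,
 *	};
 */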

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 */
static inline
void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
			      struct channel *chan, void *priv,
			      size_t data_size, int largest_align,
			      int cpu)
{
	ctx->chan = chan;
	ctx->priv = priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->cpu = cpu;
	ctx->rflags = 0;
}
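
/*
 * Example (illustrative sketch): initializing a context to reserve space for
 * a single "struct client_event" payload. lib_ring_buffer_reserve() and
 * lib_ring_buffer_commit() are assumed to be provided by the ring buffer API
 * header mentioned at the top of this file; their exact signatures, the
 * "client_config"/"client_event" names and the error handling are
 * illustrative only.
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	int ret;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, client_priv,
 *				 sizeof(struct client_event),
 *				 __alignof__(struct client_event), cpu);
 *	ret = lib_ring_buffer_reserve(&client_config, &ctx);
 *	if (ret)
 *		return ret;
 *	(write the record header and payload at ctx.buf_offset, then commit:)
 *	lib_ring_buffer_commit(&client_config, &ctx);
 */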

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header
 * only needs to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)
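
/*
 * Example (illustrative sketch): client-defined reservation flags, allocated
 * after the last flag reserved by this header, as described above. The flag
 * names are hypothetical.
 *
 *	#define CLIENT_RFLAG_EXTENDED_HEADER	(RING_BUFFER_RFLAG_END << 0)
 *	#define CLIENT_RFLAG_LARGE_PAYLOAD	(RING_BUFFER_RFLAG_END << 1)
 */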

/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif

/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment (in bytes) on which to align the buffer offset.
 */
static inline
void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
			       size_t alignment)
{
	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
						 alignment);
}
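
/*
 * Example (illustrative sketch): aligning the write position on the natural
 * alignment of a u64 field before writing it. When RING_BUFFER_ALIGN is not
 * defined, lib_ring_buffer_align() returns 0 and this is a no-op (packed
 * layout). lib_ring_buffer_write() is assumed to come from the ring buffer
 * API header; "value" and "client_config" are hypothetical.
 *
 *	lib_ring_buffer_align_ctx(&ctx, __alignof__(u64));
 *	lib_ring_buffer_write(&client_config, &ctx, &value, sizeof(value));
 */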

/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 */
static inline
int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
				 unsigned int switch_timer_interval,
				 unsigned int read_timer_interval)
{
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
	    && config->sync == RING_BUFFER_SYNC_PER_CPU
	    && switch_timer_interval)
		return -EINVAL;
	return 0;
}

#include "vatomic.h"

#endif /* _LINUX_RING_BUFFER_CONFIG_H */