Move lttng_ust_object_data and lttng_ust_channel_attr to -abi.h
[lttng-ust.git] / libringbuffer / backend_internal.h
#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * linux/ringbuffer/backend_internal.h
 *
 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <errno.h>	/* EPERM, EAGAIN used by the inline helpers below */
#include <unistd.h>
#include <urcu/compiler.h>

#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size,
			 size_t num_subbuf, struct shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
			  struct shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
				   struct shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to
 * be exchanged atomically.
 *
 * The top half word, except its lowest bit, belongs to the "offset", which
 * is used to count the produced subbuffers. For overwrite mode, this provides
 * the consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for
 * 64-bit systems) concurrently with a single execution of get_subbuf
 * (between offset sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of the top half word belongs to noref. Used only for overwrite
 * mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
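
/*
 * Worked layout example (illustrative only, not normative): assuming
 * CAA_BITS_PER_LONG == 64, so HALF_ULONG_BITS == 32, an overwrite-mode
 * subbuffer ID decomposes as:
 *
 *   bits 63..33: offset counter   (SB_ID_OFFSET_SHIFT == 33)
 *   bit      32: noref flag       (SB_ID_NOREF_SHIFT == 32)
 *   bits 31..0 : subbuffer index
 *
 * e.g. offset 5, noref set, index 3:
 *   (5UL << 33) | (1UL << 32) | 3 == 0xb00000003
 */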

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
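
/*
 * Round-trip usage sketch (illustrative only): an ID built with
 * subbuffer_id() can be decomposed again with the accessors below.
 * In overwrite mode, assuming offset and index fit in their fields:
 *
 *   unsigned long id = subbuffer_id(config, offset, 1, index);
 *
 *   assert(subbuffer_id_get_index(config, id) == index);
 *   assert(subbuffer_id_is_noref(config, id));
 *   assert(subbuffer_id_compare_offset(config, id, offset));
 */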

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by the reader on a subbuffer ID it has exclusive access to.
 * No volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access needed, since the id is already used locally. */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
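
/*
 * Example (illustrative): on a 32-bit system, HALF_ULONG_BITS == 16, so an
 * overwrite-mode channel with num_subbuf == 65536 passes the check, while
 * num_subbuf == 65537 is rejected with -EPERM because the index would no
 * longer fit in the lower half word of the subbuffer ID.
 */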

static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_backend *bufb,
			      struct shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access. */
	_v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because this function resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct shm_handle *handle)
{
	struct lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
	v_set(config, &shmp(handle, pages->shmp)->records_unread,
	      v_read(config, &shmp(handle, pages->shmp)->records_commit));
	v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);

	return overruns;
}
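
/*
 * Delivery-time usage sketch (illustrative only, variable names are
 * hypothetical): the caller must sample the records count before the reset
 * performed by subbuffer_count_records_overrun(), e.g.:
 *
 *   records_count = subbuffer_get_records_count(config, bufb, idx, handle);
 *   records_overrun = subbuffer_count_records_overrun(config, bufb, idx,
 *							handle);
 */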

static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
			     struct lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct shm_handle *handle)
{
	struct lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	shmp(handle, pages->shmp)->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				struct shm_handle *handle)
{
	struct lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct shm_handle *handle)
{
	struct lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct shm_handle *handle)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer id, because we want
	 * to read a coherent version of the id and the associated noref flag.
	 */
	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store/load dependency ordering: the writes to the
			 * subbuffer are ordered after the load and test of
			 * the noref flag, which matches the memory barrier
			 * implied by the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
		if (likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct shm_handle *handle)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because this function is only called by a single thread (the one
	 * which updated the cc_sb value), there are no concurrent updates to
	 * take care of: other writers have not updated cc_sb, so they cannot
	 * set the noref flag, and concurrent readers cannot modify the
	 * pointer because the noref flag is not set yet.
	 * The write memory barrier in lib_ring_buffer_commit() takes care of
	 * ordering writes to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct shm_handle *handle)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * the old id, because the value read will be confirmed by
		 * the following cmpxchg().
		 */
		old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(shmp(handle, bufb->chan),
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id,
					 old_id, bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly. */
		bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
	}
	return 0;
}
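
/*
 * Consumer-side sketch (illustrative only; the real caller lives in the
 * frontend's get_subbuf path, and the variable names here are hypothetical).
 * The -EAGAIN return is expected under concurrent writer activity and is
 * handled by retrying the whole consume attempt:
 *
 *   err = update_read_sb_index(config, &buf->backend, &chan->backend,
 *				consumed_idx, consumed_count, handle);
 *   if (err)
 *           goto retry;
 */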

/*
 * Use the architecture-specific memcpy implementation (expanded inline by
 * the compiler) when the length is known at compile time, but rely on an
 * inline memcpy when the length is statically unknown: a function call to
 * memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)
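
/*
 * Usage sketch (illustrative only; struct event_header is a hypothetical
 * type): in a typical write fast path the length is a compile-time constant,
 * so __builtin_constant_p(sizeof(header)) is true and the compiler emits its
 * builtin memcpy expansion rather than a function call:
 *
 *   struct event_header header;
 *
 *   lib_ring_buffer_do_copy(config, dest, &header, sizeof(header));
 */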

#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */