a88a400395cde93aeeecdea3f6dcfbcb39167bce
#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H
/*
 * linux/ringbuffer/frontend.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

/* Internal helpers */
#include "frontend_internal.h"
/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 */
40 struct shm_handle
*channel_create(const struct lib_ring_buffer_config
*config
,
41 const char *name
, void *priv
,
43 size_t subbuf_size
, size_t num_subbuf
,
44 unsigned int switch_timer_interval
,
45 unsigned int read_timer_interval
,
46 int *shm_fd
, int *wait_fd
,
47 uint64_t *memory_map_size
);
49 /* channel_handle_create - for consumer. */
51 struct shm_handle
*channel_handle_create(int shm_fd
, int wait_fd
,
52 uint64_t memory_map_size
);
54 /* channel_handle_add_stream - for consumer. */
56 int channel_handle_add_stream(struct shm_handle
*handle
,
57 int shm_fd
, int wait_fd
, uint64_t memory_map_size
);
60 * channel_destroy returns the private data pointer. It finalizes all channel's
61 * buffers, waits for readers to release all references, and destroys the
65 void *channel_destroy(struct channel
*chan
, struct shm_handle
*handle
);
68 /* Buffer read operations */
71 * Iteration on channel cpumask needs to issue a read barrier to match the write
72 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
73 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
74 * only performed at channel destruction.
76 #define for_each_channel_cpu(cpu, chan) \
77 for_each_possible_cpu(cpu)
79 extern struct lib_ring_buffer
*channel_get_ring_buffer(
80 const struct lib_ring_buffer_config
*config
,
81 struct channel
*chan
, int cpu
,
82 struct shm_handle
*handle
,
83 int *shm_fd
, int *wait_fd
,
84 uint64_t *memory_map_size
);
85 extern int lib_ring_buffer_open_read(struct lib_ring_buffer
*buf
,
86 struct shm_handle
*handle
);
87 extern void lib_ring_buffer_release_read(struct lib_ring_buffer
*buf
,
88 struct shm_handle
*handle
);
91 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
93 extern int lib_ring_buffer_snapshot(struct lib_ring_buffer
*buf
,
94 unsigned long *consumed
,
95 unsigned long *produced
,
96 struct shm_handle
*handle
);
97 extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer
*buf
,
98 unsigned long consumed_new
,
99 struct shm_handle
*handle
);
101 extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer
*buf
,
102 unsigned long consumed
,
103 struct shm_handle
*handle
);
104 extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer
*buf
,
105 struct shm_handle
*handle
);
108 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
109 * to read sub-buffers sequentially.
111 static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer
*buf
,
112 struct shm_handle
*handle
)
116 ret
= lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
117 &buf
->prod_snapshot
, handle
);
120 ret
= lib_ring_buffer_get_subbuf(buf
, buf
->cons_snapshot
, handle
);
125 void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer
*buf
,
126 struct shm_handle
*handle
)
128 lib_ring_buffer_put_subbuf(buf
, handle
);
129 lib_ring_buffer_move_consumer(buf
, subbuf_align(buf
->cons_snapshot
,
130 shmp(handle
, buf
->backend
.chan
)), handle
);
133 extern void channel_reset(struct channel
*chan
);
134 extern void lib_ring_buffer_reset(struct lib_ring_buffer
*buf
,
135 struct shm_handle
*handle
);
138 unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config
*config
,
139 struct lib_ring_buffer
*buf
)
141 return v_read(config
, &buf
->offset
);
145 unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config
*config
,
146 struct lib_ring_buffer
*buf
)
148 return uatomic_read(&buf
->consumed
);
152 * Must call lib_ring_buffer_is_finalized before reading counters (memory
153 * ordering enforced with respect to trace teardown).
156 int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config
*config
,
157 struct lib_ring_buffer
*buf
)
159 int finalized
= CMM_ACCESS_ONCE(buf
->finalized
);
161 * Read finalized before counters.
168 int lib_ring_buffer_channel_is_finalized(const struct channel
*chan
)
170 return chan
->finalized
;
174 int lib_ring_buffer_channel_is_disabled(const struct channel
*chan
)
176 return uatomic_read(&chan
->record_disabled
);
180 unsigned long lib_ring_buffer_get_read_data_size(
181 const struct lib_ring_buffer_config
*config
,
182 struct lib_ring_buffer
*buf
,
183 struct shm_handle
*handle
)
185 return subbuffer_get_read_data_size(config
, &buf
->backend
, handle
);
189 unsigned long lib_ring_buffer_get_records_count(
190 const struct lib_ring_buffer_config
*config
,
191 struct lib_ring_buffer
*buf
)
193 return v_read(config
, &buf
->records_count
);
197 unsigned long lib_ring_buffer_get_records_overrun(
198 const struct lib_ring_buffer_config
*config
,
199 struct lib_ring_buffer
*buf
)
201 return v_read(config
, &buf
->records_overrun
);
205 unsigned long lib_ring_buffer_get_records_lost_full(
206 const struct lib_ring_buffer_config
*config
,
207 struct lib_ring_buffer
*buf
)
209 return v_read(config
, &buf
->records_lost_full
);
213 unsigned long lib_ring_buffer_get_records_lost_wrap(
214 const struct lib_ring_buffer_config
*config
,
215 struct lib_ring_buffer
*buf
)
217 return v_read(config
, &buf
->records_lost_wrap
);
221 unsigned long lib_ring_buffer_get_records_lost_big(
222 const struct lib_ring_buffer_config
*config
,
223 struct lib_ring_buffer
*buf
)
225 return v_read(config
, &buf
->records_lost_big
);
229 unsigned long lib_ring_buffer_get_records_read(
230 const struct lib_ring_buffer_config
*config
,
231 struct lib_ring_buffer
*buf
)
233 return v_read(config
, &buf
->backend
.records_read
);
#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.053892 seconds and 4 git commands to generate.