#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H

/*
 * linux/ringbuffer/frontend.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* Internal helpers */
#include "frontend_internal.h"

/* Buffer creation/removal and setup operations */

/*
 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
 * padding to let readers get those sub-buffers. Used for live streaming.
 *
 * read_timer_interval is the time interval (in us) to wake up pending readers.
 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
 * be set to NULL for other backends.
 */

extern
struct channel *channel_create(const struct lib_ring_buffer_config *config,
			       const char *name, void *priv,
			       void *buf_addr,
			       size_t subbuf_size, size_t num_subbuf,
			       unsigned int switch_timer_interval,
			       unsigned int read_timer_interval);
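
/*
 * Example (illustrative sketch, not part of this API): creating a channel
 * with 16 sub-buffers of 4 kB each, no periodic sub-buffer switch, and a
 * 100 ms (100000 us) read timer. "client_config" and "priv_data" are
 * hypothetical caller-provided names; buf_addr is NULL because no
 * RING_BUFFER_STATIC mapping is used here.
 *
 *	struct channel *chan;
 *
 *	chan = channel_create(&client_config, "my_chan", priv_data, NULL,
 *			      4096, 16, 0, 100000);
 *	if (!chan)
 *		return -ENOMEM;
 */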

/*
 * channel_destroy returns the private data pointer. It finalizes all of the
 * channel's buffers, waits for readers to release all references, and
 * destroys the channel.
 */
extern
void *channel_destroy(struct channel *chan);
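
/*
 * Example (sketch): tearing down the channel created above and recovering
 * the private data pointer that was passed to channel_create(). The caller
 * remains responsible for freeing priv if it was dynamically allocated.
 *
 *	void *priv = channel_destroy(chan);
 */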

/* Buffer read operations */

/*
 * Iteration on channel cpumask needs to issue a read barrier to match the write
 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
 * only performed at channel destruction.
 */
#define for_each_channel_cpu(cpu, chan)					\
	for ((cpu) = -1;						\
		({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);	\
		   smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)

extern struct lib_ring_buffer *channel_get_ring_buffer(
				const struct lib_ring_buffer_config *config,
				struct channel *chan, int cpu);
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);

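/*
 * Example (sketch): iterating over the per-cpu buffers of a channel and
 * taking a read-side reference on each. Assumes "config" and "chan" are in
 * scope and that lib_ring_buffer_open_read() returns 0 on success; error
 * handling is simplified.
 *
 *	int cpu;
 *
 *	for_each_channel_cpu(cpu, chan) {
 *		struct lib_ring_buffer *buf;
 *
 *		buf = channel_get_ring_buffer(config, chan, cpu);
 *		if (lib_ring_buffer_open_read(buf))
 *			continue;	(buffer already being read)
 *		(... read from buf ...)
 *		lib_ring_buffer_release_read(buf);
 *	}
 */
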
/*
 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
 */
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
				    unsigned long *consumed,
				    unsigned long *produced);
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
					  unsigned long consumed_new);

extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
				      unsigned long consumed);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
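
/*
 * Example (sketch): a consumer loop following the read sequence above.
 * Assumes lib_ring_buffer_snapshot() and lib_ring_buffer_get_subbuf()
 * return 0 on success, and uses subbuf_align() (as in
 * lib_ring_buffer_put_next_subbuf below) to advance to the next sub-buffer
 * boundary. "my_process" is a hypothetical callback.
 *
 *	unsigned long consumed, produced;
 *
 *	if (lib_ring_buffer_snapshot(buf, &consumed, &produced))
 *		return;
 *	while (consumed < produced) {
 *		if (lib_ring_buffer_get_subbuf(buf, consumed))
 *			break;
 *		my_process(buf);
 *		lib_ring_buffer_put_subbuf(buf);
 *		consumed = subbuf_align(consumed, buf->backend.chan);
 *	}
 *	lib_ring_buffer_move_consumer(buf, consumed);
 */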

/*
 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
 * to read sub-buffers sequentially.
 */
static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
{
	int ret;

	ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
				       &buf->prod_snapshot);
	if (ret)
		return ret;
	ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
	return ret;
}

static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_put_subbuf(buf);
	lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
							buf->backend.chan));
}
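
/*
 * Example (sketch): draining a buffer sequentially with the helpers above,
 * stopping when no full sub-buffer is available. "my_process" is a
 * hypothetical callback; a nonzero return from get_next_subbuf is treated
 * as "nothing left to read".
 *
 *	while (!lib_ring_buffer_get_next_subbuf(buf)) {
 *		my_process(buf);
 *		lib_ring_buffer_put_next_subbuf(buf);
 *	}
 */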

extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);

static inline
unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
					 struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->offset);
}

static inline
unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
					   struct lib_ring_buffer *buf)
{
	return atomic_long_read(&buf->consumed);
}

/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	smp_rmb();
	return finalized;
}
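
/*
 * Example (sketch): reading statistics counters only after checking the
 * finalized flag, so the smp_rmb() in lib_ring_buffer_is_finalized() orders
 * the flag read before the counter reads. Uses the accessors declared below.
 *
 *	if (lib_ring_buffer_is_finalized(config, buf)) {
 *		unsigned long lost;
 *
 *		lost = lib_ring_buffer_get_records_lost_full(config, buf)
 *		     + lib_ring_buffer_get_records_lost_wrap(config, buf)
 *		     + lib_ring_buffer_get_records_lost_big(config, buf);
 *		(... report lost ...)
 *	}
 */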

static inline
int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
{
	return chan->finalized;
}

static inline
int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
{
	return atomic_read(&chan->record_disabled);
}

static inline
unsigned long lib_ring_buffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return subbuffer_get_read_data_size(config, &buf->backend);
}

static inline
unsigned long lib_ring_buffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_count);
}

static inline
unsigned long lib_ring_buffer_get_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_overrun);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_full(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_full);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_wrap);
}

static inline
unsigned long lib_ring_buffer_get_records_lost_big(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->records_lost_big);
}

static inline
unsigned long lib_ring_buffer_get_records_read(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	return v_read(config, &buf->backend.records_read);
}

#endif /* _LINUX_RING_BUFFER_FRONTEND_H */