#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * linux/ringbuffer/backend_internal.h
 *
 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

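/*
 * Illustration only (not normative): assuming CAA_BITS_PER_LONG == 32, the
 * definitions above yield HALF_ULONG_BITS == 16 and the following ID word
 * layout:
 *
 *	bits 31..17	offset (15 bits)	SB_ID_OFFSET_MASK == 0xFFFE0000
 *	bit  16		noref  (1 bit)		SB_ID_NOREF_MASK  == 0x00010000
 *	bits 15..0	index  (16 bits)	SB_ID_INDEX_MASK  == 0x0000FFFF
 */
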
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}

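/*
 * Worked example (illustrative values, 32-bit layout as above): in overwrite
 * mode, subbuffer_id(config, 5, 1, 3) == (5 << 17) | (1 << 16) | 3
 * == 0x000b0003.
 */
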
/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

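/*
 * Continuing the example above: for id == 0x000b0003 in overwrite mode,
 * subbuffer_id_get_index() returns 3, subbuffer_id_is_noref() returns 1, and
 * subbuffer_id_compare_offset(config, id, 5) returns 1.
 */
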
/*
 * Only used by the reader on a subbuffer ID it has exclusive access to. No
 * volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access needed, since the id is already used locally. */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *	2^16 on 32-bit architectures
 *	2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

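/*
 * Example: on a 32-bit system (HALF_ULONG_BITS == 16), num_subbuf == 1UL << 16
 * is accepted in overwrite mode, while (1UL << 16) + 1 is refused with -EPERM.
 */
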
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
	v_set(config, &shmp(handle, pages->shmp)->records_unread,
	      v_read(config, &shmp(handle, pages->shmp)->records_commit));
	v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	shmp(handle, pages->shmp)->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx,
				struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	pages = shmp_index(handle, bufb->array, sb_bindex);
	return shmp(handle, pages->shmp)->data_size;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer ID, because we want
	 * to read a coherent version of the ID and the associated noref flag.
	 */
	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because this function is only called by a single thread (the one
	 * which updated the cc_sb value), there are no concurrent updates to
	 * take care of: other writers have not updated cc_sb, so they cannot
	 * set the noref flag, and concurrent readers cannot modify the pointer
	 * because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	CHAN_WARN_ON(shmp(handle, bufb->chan),
		     subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(shmp(handle, bufb->chan),
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
					 bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
	}
	return 0;
}

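/*
 * Illustrative consumer-side usage (a sketch, not taken from the frontend
 * code): sample the consumed position, attempt the exchange, and treat
 * -EAGAIN as "a writer raced us; re-sample and retry or bail out":
 *
 *	if (update_read_sb_index(config, bufb, chanb, consumed_idx,
 *				 consumed_count, handle))
 *		return -EAGAIN;	(caller re-samples consumed_idx/consumed_count)
 */
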
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths not known statically.
 * A function call to memcpy is just way too expensive for the fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		inline_memcpy(dest, src, __len);		\
} while (0)

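/*
 * Usage sketch (illustrative): a compile-time constant length such as
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *
 * takes the memcpy() branch, which the compiler can expand inline, while a
 * runtime-computed length falls back to inline_memcpy().
 */
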
/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

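/*
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, or 0 if no bit is set. For example: lttng_ust_fls(0) == 0,
 * lttng_ust_fls(1) == 1, lttng_ust_fls(0x90) == 8.
 */
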
static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

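/*
 * get_count_order() returns the ceiling of log2(count). For example,
 * get_count_order(24) == 5, since 2^5 == 32 is the smallest power of two
 * >= 24, and get_count_order(32) == 5 exactly.
 */
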
static inline
unsigned int hweight32(unsigned int value)
{
	unsigned int r;

	r = value;
	r = r - ((r >> 1) & 0x55555555);
	r = (r & 0x33333333) + ((r >> 2) & 0x33333333);
	r += r >> 4;
	r &= 0x0F0F0F0F;
	r += r >> 8;
	r += r >> 16;
	r &= 0x000000FF;
	return r;
}

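/*
 * hweight32() is a branchless population count ("Hamming weight"): the number
 * of set bits in a 32-bit word. For example, hweight32(7) == 3 and
 * hweight32(0xF0F0F0F0) == 16.
 */
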
#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */