Commit | Line | Data |
---|---|---|
d3d3857f MJ |
1 | // SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
2 | // SPDX-FileCopyrightText: 2011-2012 Lai Jiangshan <laijs@cn.fujitsu.com> | |
3 | // | |
4 | // SPDX-License-Identifier: LGPL-2.1-or-later | |
5 | ||
8ad4ce58 MD |
6 | #ifndef _URCU_WFCQUEUE_STATIC_H |
7 | #define _URCU_WFCQUEUE_STATIC_H | |
8 | ||
9 | /* | |
8ad4ce58 MD |
10 | * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue |
11 | * | |
07c2a4fd MD |
12 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for |
13 | * linking dynamically with the userspace rcu library. | |
8ad4ce58 MD |
14 | */ |
15 | ||
16 | #include <pthread.h> | |
8ad4ce58 MD |
17 | #include <poll.h> |
18 | #include <stdbool.h> | |
01477510 | 19 | #include <urcu/assert.h> |
8ad4ce58 MD |
20 | #include <urcu/compiler.h> |
21 | #include <urcu/uatomic.h> | |
22 | ||
23 | #ifdef __cplusplus | |
24 | extern "C" { | |
25 | #endif | |
26 | ||
27 | /* | |
28 | * Concurrent queue with wait-free enqueue/blocking dequeue. | |
29 | * | |
ebfd2673 MD |
30 | * This queue has been designed and implemented collaboratively by |
31 | * Mathieu Desnoyers and Lai Jiangshan. Inspired from | |
32 | * half-wait-free/half-blocking queue implementation done by Paul E. | |
33 | * McKenney. | |
8ad4ce58 | 34 | * |
f878b49e MD |
35 | * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API |
36 | * | |
37 | * Synchronization table: | |
38 | * | |
39 | * External synchronization techniques described in the API below is | |
40 | * required between pairs marked with "X". No external synchronization | |
41 | * required between pairs marked with "-". | |
42 | * | |
43 | * Legend: | |
44 | * [1] cds_wfcq_enqueue | |
45 | * [2] __cds_wfcq_splice (destination queue) | |
46 | * [3] __cds_wfcq_dequeue | |
47 | * [4] __cds_wfcq_splice (source queue) | |
48 | * [5] __cds_wfcq_first | |
49 | * [6] __cds_wfcq_next | |
50 | * | |
51 | * [1] [2] [3] [4] [5] [6] | |
52 | * [1] - - - - - - | |
53 | * [2] - - - - - - | |
54 | * [3] - - X X X X | |
55 | * [4] - - X - X X | |
56 | * [5] - - X X - - | |
57 | * [6] - - X X - - | |
8ad4ce58 | 58 | * |
8ad4ce58 MD |
59 | * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock(). |
60 | * | |
61 | * For convenience, cds_wfcq_dequeue_blocking() and | |
62 | * cds_wfcq_splice_blocking() hold the dequeue lock. | |
1fe734e1 MD |
63 | * |
64 | * Besides locking, mutual exclusion of dequeue, splice and iteration | |
65 | * can be ensured by performing all of those operations from a single | |
66 | * thread, without requiring any lock. | |
8ad4ce58 MD |
67 | */ |
68 | ||
69 | #define WFCQ_ADAPT_ATTEMPTS 10 /* Retry if being set */ | |
70 | #define WFCQ_WAIT 10 /* Wait 10 ms if being set */ | |
71 | ||
72 | /* | |
73 | * cds_wfcq_node_init: initialize wait-free queue node. | |
74 | */ | |
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	/* A NULL next pointer marks the node as not linked into any queue. */
	node->next = NULL;
}
79 | ||
80 | /* | |
200d100e MD |
81 | * cds_wfcq_init: initialize wait-free queue (with lock). Pair with |
82 | * cds_wfcq_destroy(). | |
8ad4ce58 MD |
83 | */ |
84 | static inline void _cds_wfcq_init(struct cds_wfcq_head *head, | |
85 | struct cds_wfcq_tail *tail) | |
86 | { | |
87 | int ret; | |
88 | ||
89 | /* Set queue head and tail */ | |
90 | _cds_wfcq_node_init(&head->node); | |
91 | tail->p = &head->node; | |
92 | ret = pthread_mutex_init(&head->lock, NULL); | |
01477510 | 93 | urcu_posix_assert(!ret); |
8ad4ce58 MD |
94 | } |
95 | ||
f637f191 | 96 | /* |
200d100e MD |
97 | * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with |
98 | * cds_wfcq_init(). | |
99 | */ | |
100 | static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head, | |
70469b43 | 101 | struct cds_wfcq_tail *tail __attribute__((unused))) |
200d100e MD |
102 | { |
103 | int ret = pthread_mutex_destroy(&head->lock); | |
01477510 | 104 | urcu_posix_assert(!ret); |
200d100e MD |
105 | } |
106 | ||
107 | /* | |
108 | * __cds_wfcq_init: initialize wait-free queue (without lock). Don't | |
109 | * pair with any destroy function. | |
f637f191 MD |
110 | */ |
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail: tail pointing at the head sentinel
	 * node denotes an empty queue. No lock is initialized here. */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}
118 | ||
8ad4ce58 MD |
119 | /* |
120 | * cds_wfcq_empty: return whether wait-free queue is empty. | |
121 | * | |
122 | * No memory barrier is issued. No mutual exclusion is required. | |
6d5729f7 MD |
123 | * |
124 | * We perform the test on head->node.next to check if the queue is | |
125 | * possibly empty, but we confirm this by checking if the tail pointer | |
126 | * points to the head node because the tail pointer is the linearisation | |
127 | * point of the enqueuers. Just checking the head next pointer could | |
128 | * make a queue appear empty if an enqueuer is preempted for a long time | |
129 | * between xchg() and setting the previous node's next pointer. | |
8ad4ce58 | 130 | */ |
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * Queue is empty if no node is pointed by head->node.next nor
	 * tail->p. Even though the tail->p check is sufficient to find
	 * out if the queue is empty, we first check head->node.next as a
	 * common case to ensure that dequeuers do not frequently access
	 * enqueuer's tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}
145 | ||
146 | static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head, | |
70469b43 | 147 | struct cds_wfcq_tail *tail __attribute__((unused))) |
8ad4ce58 MD |
148 | { |
149 | int ret; | |
150 | ||
151 | ret = pthread_mutex_lock(&head->lock); | |
01477510 | 152 | urcu_posix_assert(!ret); |
8ad4ce58 MD |
153 | } |
154 | ||
155 | static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head, | |
70469b43 | 156 | struct cds_wfcq_tail *tail __attribute__((unused))) |
8ad4ce58 MD |
157 | { |
158 | int ret; | |
159 | ||
160 | ret = pthread_mutex_unlock(&head->lock); | |
01477510 | 161 | urcu_posix_assert(!ret); |
8ad4ce58 MD |
162 | } |
163 | ||
/*
 * ___cds_wfcq_append: wait-free append of the chain [new_head .. new_tail]
 * at the tail of the queue. Common path for both enqueue and splice.
 */
static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}
195 | ||
196 | /* | |
197 | * cds_wfcq_enqueue: enqueue a node into a wait-free queue. | |
198 | * | |
199 | * Issues a full memory barrier before enqueue. No mutual exclusion is | |
200 | * required. | |
23773356 MD |
201 | * |
202 | * Returns false if the queue was empty prior to adding the node. | |
203 | * Returns true otherwise. | |
8ad4ce58 | 204 | */ |
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	/* Single-node append: the node is both head and tail of the chain. */
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}
211 | ||
34b11dd4 EW |
212 | /* |
213 | * CDS_WFCQ_WAIT_SLEEP: | |
214 | * | |
215 | * By default, this sleeps for the given @msec milliseconds. | |
216 | * This is a macro which LGPL users may #define themselves before | |
217 | * including wfcqueue.h to override the default behavior (e.g. | |
218 | * to log a warning or perform other background work). | |
219 | */ | |
220 | #ifndef CDS_WFCQ_WAIT_SLEEP | |
221 | #define CDS_WFCQ_WAIT_SLEEP(msec) ___cds_wfcq_wait_sleep(msec) | |
222 | #endif | |
223 | ||
static inline void ___cds_wfcq_wait_sleep(int msec)
{
	/* poll() with no file descriptors is a portable millisecond sleep. */
	(void) poll(NULL, 0, msec);
}
228 | ||
f878b49e MD |
229 | /* |
230 | * ___cds_wfcq_busy_wait: adaptative busy-wait. | |
231 | * | |
232 | * Returns 1 if nonblocking and needs to block, 0 otherwise. | |
233 | */ | |
234 | static inline bool | |
235 | ___cds_wfcq_busy_wait(int *attempt, int blocking) | |
236 | { | |
237 | if (!blocking) | |
238 | return 1; | |
239 | if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) { | |
34b11dd4 | 240 | CDS_WFCQ_WAIT_SLEEP(WFCQ_WAIT); /* Wait for 10ms */ |
f878b49e MD |
241 | *attempt = 0; |
242 | } else { | |
243 | caa_cpu_relax(); | |
244 | } | |
245 | return 0; | |
246 | } | |
247 | ||
8ad4ce58 MD |
248 | /* |
249 | * Waiting for enqueuer to complete enqueue and return the next node. | |
250 | */ | |
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
	 * The enqueuer has exchanged the tail but not yet linked
	 * node->next; spin (or give up if nonblocking) until it does.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}
267 | ||
static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	/* May return CDS_WFCQ_WOULDBLOCK when nonblocking. */
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
283 | ||
284 | /* | |
47215721 | 285 | * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing. |
8ad4ce58 MD |
286 | * |
287 | * Content written into the node before enqueue is guaranteed to be | |
288 | * consistent, but no other memory ordering is ensured. | |
1fe734e1 MD |
289 | * Dequeue/splice/iteration mutual exclusion should be ensured by the |
290 | * caller. | |
f94061a3 MD |
291 | * |
292 | * Used by for-like iteration macros in urcu/wfqueue.h: | |
293 | * __cds_wfcq_for_each_blocking() | |
294 | * __cds_wfcq_for_each_blocking_safe() | |
131a29a6 MD |
295 | * |
296 | * Returns NULL if queue is empty, first node otherwise. | |
8ad4ce58 MD |
297 | */ |
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	/* Blocking variant: waits for in-flight enqueuers to complete. */
	return ___cds_wfcq_first(head, tail, 1);
}
304 | ||
305 | ||
306 | /* | |
307 | * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing. | |
308 | * | |
309 | * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if | |
310 | * it needs to block. | |
311 | */ | |
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	/* Nonblocking variant: may return CDS_WFCQ_WOULDBLOCK. */
	return ___cds_wfcq_first(head, tail, 0);
}
318 | ||
static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head __attribute__((unused)),
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case to ensure that iteration on nodes
	 * do not frequently access enqueuer's tail->p cache line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;	/* @node is the last node: end of queue. */
		/* An enqueue is in progress: wait for the link to appear. */
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}
344 | ||
345 | /* | |
47215721 | 346 | * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing. |
8ad4ce58 | 347 | * |
8ad4ce58 MD |
348 | * Content written into the node before enqueue is guaranteed to be |
349 | * consistent, but no other memory ordering is ensured. | |
1fe734e1 MD |
350 | * Dequeue/splice/iteration mutual exclusion should be ensured by the |
351 | * caller. | |
47215721 MD |
352 | * |
353 | * Used by for-like iteration macros in urcu/wfqueue.h: | |
354 | * __cds_wfcq_for_each_blocking() | |
355 | * __cds_wfcq_for_each_blocking_safe() | |
131a29a6 MD |
356 | * |
357 | * Returns NULL if reached end of queue, non-NULL next queue node | |
358 | * otherwise. | |
8ad4ce58 MD |
359 | */ |
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	/* Blocking variant: waits for in-flight enqueuers to complete. */
	return ___cds_wfcq_next(head, tail, node, 1);
}
367 | ||
/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}
381 | ||
/*
 * ___cds_wfcq_dequeue_with_state: dequeue the first node, optionally
 * reporting CDS_WFCQ_STATE_LAST in *state when the dequeued node was
 * the last one in the queue. Common path for the blocking and
 * nonblocking dequeue variants.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg
		 * orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}
445 | ||
446 | /* | |
eec791af | 447 | * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state. |
8ad4ce58 | 448 | * |
47215721 MD |
449 | * Content written into the node before enqueue is guaranteed to be |
450 | * consistent, but no other memory ordering is ensured. | |
451 | * It is valid to reuse and free a dequeued node immediately. | |
452 | * Dequeue/splice/iteration mutual exclusion should be ensured by the | |
453 | * caller. | |
8ad4ce58 | 454 | */ |
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	/* Blocking variant: waits for in-flight enqueuers to complete. */
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}
461 | ||
462 | /* | |
463 | * ___cds_wfcq_dequeue_blocking: dequeue node from queue. | |
464 | * | |
465 | * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving | |
466 | * state. | |
467 | */ | |
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	/* State output is not needed: pass NULL. */
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}
474 | ||
475 | /* | |
eec791af | 476 | * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state. |
47215721 MD |
477 | * |
478 | * Same as __cds_wfcq_dequeue_blocking, but returns CDS_WFCQ_WOULDBLOCK | |
479 | * if it needs to block. | |
480 | */ | |
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	/* Nonblocking variant: may return CDS_WFCQ_WOULDBLOCK. */
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}
487 | ||
488 | /* | |
489 | * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue. | |
490 | * | |
491 | * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving | |
492 | * state. | |
493 | */ | |
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	/* State output is not needed: pass NULL. */
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}
500 | ||
f878b49e MD |
501 | /* |
502 | * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q. | |
503 | * | |
504 | * Dequeue all nodes from src_q. | |
505 | * dest_q must be already initialized. | |
506 | * Mutual exclusion for src_q should be ensured by the caller as | |
507 | * specified in the "Synchronisation table". | |
508 | * Returns enum cds_wfcq_ret which indicates the state of the src or | |
509 | * dest queue. | |
510 | */ | |
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		/* An enqueue is in progress on src_q: wait or give up. */
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}
564 | ||
47215721 MD |
565 | /* |
566 | * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. | |
567 | * | |
568 | * Dequeue all nodes from src_q. | |
569 | * dest_q must be already initialized. | |
f878b49e MD |
570 | * Mutual exclusion for src_q should be ensured by the caller as |
571 | * specified in the "Synchronisation table". | |
23773356 MD |
572 | * Returns enum cds_wfcq_ret which indicates the state of the src or |
573 | * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK. | |
47215721 | 574 | */ |
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	/* Blocking variant: never returns CDS_WFCQ_RET_WOULDBLOCK. */
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}
585 | ||
586 | /* | |
587 | * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q. | |
588 | * | |
23773356 MD |
589 | * Same as __cds_wfcq_splice_blocking, but returns |
590 | * CDS_WFCQ_RET_WOULDBLOCK if it needs to block. | |
47215721 | 591 | */ |
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	/* Nonblocking variant: may return CDS_WFCQ_RET_WOULDBLOCK. */
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}
602 | ||
603 | /* | |
eec791af | 604 | * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue. |
8ad4ce58 MD |
605 | * |
606 | * Content written into the node before enqueue is guaranteed to be | |
607 | * consistent, but no other memory ordering is ensured. | |
ffa11a18 | 608 | * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is |
8ad4ce58 MD |
609 | * ensured. |
610 | * It is valid to reuse and free a dequeued node immediately. | |
611 | */ | |
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	/* Hold the dequeue lock across the dequeue for mutual exclusion. */
	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}
624 | ||
/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}
636 | ||
8ad4ce58 MD |
637 | /* |
638 | * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q. | |
639 | * | |
640 | * Dequeue all nodes from src_q. | |
641 | * dest_q must be already initialized. | |
642 | * Content written into the node before enqueue is guaranteed to be | |
643 | * consistent, but no other memory ordering is ensured. | |
ffa11a18 | 644 | * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is |
8ad4ce58 | 645 | * ensured. |
23773356 MD |
646 | * Returns enum cds_wfcq_ret which indicates the state of the src or |
647 | * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK. | |
8ad4ce58 | 648 | */ |
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	/* Hold src_q's dequeue lock across the splice for mutual exclusion. */
	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}
664 | ||
665 | #ifdef __cplusplus | |
666 | } | |
667 | #endif | |
668 | ||
669 | #endif /* _URCU_WFCQUEUE_STATIC_H */ |