cleanup: explicitly mark unused parameters (-Wunused-parameter)
[userspace-rcu.git] include/urcu/static/wfcqueue.h
#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * urcu/static/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. It is inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * External synchronization, as described in the API below, is
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock. An illustrative sketch of both
 * approaches follows this comment.
 */
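
/*
 * Illustrative sketch (not part of this header's API): two common ways
 * to satisfy the synchronization table above when dequeuing. The
 * public wrappers are declared in urcu/wfcqueue.h; the queue variables
 * are hypothetical.
 *
 *	struct cds_wfcq_head head;
 *	struct cds_wfcq_tail tail;
 *	struct cds_wfcq_node *node;
 *
 *	// Option 1: let the convenience API hold the dequeue lock, so
 *	// several threads may dequeue concurrently.
 *	node = cds_wfcq_dequeue_blocking(&head, &tail);
 *
 *	// Option 2: perform all dequeue/splice/iteration operations
 *	// from one dedicated thread and use the lockless __cds_wfcq_*
 *	// variants directly; no lock is required in that case.
 */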

#define WFCQ_ADAPT_ATTEMPTS		10	/* Busy-wait attempts before sleeping */
#define WFCQ_WAIT			10	/* Sleep 10 ms between attempt bursts */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

/*
 * cds_wfcq_init: initialize wait-free queue (with lock). Pair with
 * cds_wfcq_destroy().
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	assert(!ret);
}
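
/*
 * Illustrative sketch (not part of this header's API): declaring and
 * initializing a queue through the public wrappers from
 * urcu/wfcqueue.h. The variable names are hypothetical.
 *
 *	struct cds_wfcq_head myq_head;
 *	struct cds_wfcq_tail myq_tail;
 *
 *	cds_wfcq_init(&myq_head, &myq_tail);
 *	...
 *	cds_wfcq_destroy(&myq_head, &myq_tail);
 */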

/*
 * cds_wfcq_destroy: destroy wait-free queue (with lock). Pair with
 * cds_wfcq_init().
 */
static inline void _cds_wfcq_destroy(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret = pthread_mutex_destroy(&head->lock);
	assert(!ret);
}

/*
 * __cds_wfcq_init: initialize wait-free queue (without lock). Don't
 * pair with any destroy function.
 */
static inline void ___cds_wfcq_init(struct __cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node, because the tail pointer is the
 * linearisation point of the enqueuers. Just checking the head next
 * pointer could make a queue appear empty if an enqueuer is preempted
 * for a long time between xchg() and setting the previous node's next
 * pointer.
 */
static inline bool _cds_wfcq_empty(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	/*
	 * Queue is empty if no node is pointed to by head->node.next
	 * nor tail->p. Even though the tail->p check is sufficient to
	 * find out whether the queue is empty, we first check
	 * head->node.next as a common case, to ensure that dequeuers
	 * do not frequently access the enqueuers' tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail __attribute__((unused)))
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	assert(!ret);
}

static inline bool ___cds_wfcq_append(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to the data structure containing node and setting
	 * node->next to NULL before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if the queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}
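
/*
 * Illustrative sketch (not part of this header's API): enqueuing a
 * user-defined structure by embedding a cds_wfcq_node, using the
 * public wrappers from urcu/wfcqueue.h. "struct my_item" and the queue
 * variables are hypothetical.
 *
 *	struct my_item {
 *		struct cds_wfcq_node qnode;
 *		int value;
 *	};
 *
 *	struct my_item *item = malloc(sizeof(*item));
 *
 *	item->value = 42;
 *	cds_wfcq_node_init(&item->qnode);
 *	(void) cds_wfcq_enqueue(&myq_head, &myq_tail, &item->qnode);
 *
 * On the dequeue side, caa_container_of() (from urcu/compiler.h,
 * included above) recovers the enclosing structure from the node
 * pointer.
 */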

/*
 * CDS_WFCQ_WAIT_SLEEP:
 *
 * By default, this sleeps for the given @msec milliseconds.
 * This is a macro which LGPL users may #define themselves before
 * including wfcqueue.h to override the default behavior (e.g.
 * to log a warning or perform other background work).
 */
#ifndef CDS_WFCQ_WAIT_SLEEP
#define CDS_WFCQ_WAIT_SLEEP(msec) ___cds_wfcq_wait_sleep(msec)
#endif
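
/*
 * Illustrative sketch (not part of this header's API): overriding the
 * wait hook before including the header. log_contention_warning() is a
 * hypothetical user-provided function.
 *
 *	#define CDS_WFCQ_WAIT_SLEEP(msec)			\
 *		do {						\
 *			log_contention_warning();		\
 *			___cds_wfcq_wait_sleep(msec);		\
 *		} while (0)
 *	#include <urcu/wfcqueue.h>
 */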

static inline void ___cds_wfcq_wait_sleep(int msec)
{
	(void) poll(NULL, 0, msec);
}

/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and the caller would need to block, 0
 * otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		CDS_WFCQ_WAIT_SLEEP(WFCQ_WAIT);	/* Wait for 10ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Wait for an enqueuer to complete its enqueue, then return the next
 * node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the enqueuer to complete
	 * its enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(cds_wfcq_head_ptr_t head __attribute__((unused)),
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case, to ensure that iteration on
	 * nodes does not frequently access the enqueuers' tail->p cache
	 * line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

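/*
 * Illustrative sketch (not part of this header's API): iterating over
 * a queue without dequeuing, via the for-each macros from
 * urcu/wfcqueue.h built on the first/next accessors above. Iteration
 * mutual exclusion (see the synchronization table) is assumed to be
 * ensured by the caller; "struct my_item" and do_something() are
 * hypothetical.
 *
 *	struct cds_wfcq_node *node;
 *
 *	__cds_wfcq_for_each_blocking(&myq_head, &myq_tail, node) {
 *		struct my_item *item =
 *			caa_container_of(node, struct my_item, qnode);
 *
 *		do_something(item->value);
 *	}
 */
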
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(cds_wfcq_head_ptr_t u_head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct __cds_wfcq_head *head = u_head._h;
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(__cds_wfcq_head_cast(head), tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue, the
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg
		 * orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}

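/*
 * Illustrative sketch (not part of this header's API): using the state
 * output to detect that the last node was just dequeued. The queue
 * variables are hypothetical.
 *
 *	int state;
 *	struct cds_wfcq_node *node;
 *
 *	node = __cds_wfcq_dequeue_with_state_blocking(&myq_head,
 *			&myq_tail, &state);
 *	if (node && (state & CDS_WFCQ_STATE_LAST)) {
 *		// The queue was emptied by this dequeue operation.
 *	}
 */
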
/*
 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
 * CDS_WFCQ_WOULDBLOCK if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(cds_wfcq_head_ptr_t head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}

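/*
 * Illustrative sketch (not part of this header's API): a nonblocking
 * dequeue must be prepared to see CDS_WFCQ_WOULDBLOCK when an enqueuer
 * is mid-way through its enqueue; the caller can retry later or fall
 * back to other work. The queue variables are hypothetical.
 *
 *	struct cds_wfcq_node *node;
 *
 *	node = __cds_wfcq_dequeue_nonblocking(&myq_head, &myq_tail);
 *	if (node == CDS_WFCQ_WOULDBLOCK) {
 *		// An enqueue is in progress: retry later.
 *	} else if (node == NULL) {
 *		// Queue is empty.
 *	} else {
 *		// Dequeued a node; it may be reused or freed at once.
 *	}
 */
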
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		cds_wfcq_head_ptr_t u_dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t u_src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct __cds_wfcq_head *dest_q_head = u_dest_q_head._h;
	struct __cds_wfcq_head *src_q_head = u_src_q_head._h;
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(__cds_wfcq_head_cast(src_q_head), src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(__cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		cds_wfcq_head_ptr_t dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		cds_wfcq_head_ptr_t src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}

/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(cds_wfcq_head_cast(head),
			tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(cds_wfcq_head_cast(dest_q_head), dest_q_tail,
			cds_wfcq_head_cast(src_q_head), src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}

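/*
 * Illustrative sketch (not part of this header's API): a common
 * consumer pattern splices all pending nodes into a local (initially
 * empty) queue in one operation, then drains the local queue without
 * any further contention with enqueuers. The variable names are
 * hypothetical.
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *	struct cds_wfcq_node *node, *n;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	if (cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&myq_head, &myq_tail) != CDS_WFCQ_RET_SRC_EMPTY) {
 *		__cds_wfcq_for_each_blocking_safe(&local_head,
 *				&local_tail, node, n) {
 *			// Process and possibly free each node: the
 *			// local queue is private to this thread.
 *		}
 *	}
 */
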
#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */