#ifndef _URCU_WFCQUEUE_STATIC_H
#define _URCU_WFCQUEUE_STATIC_H

/*
 * urcu/static/wfcqueue.h
 *
 * Userspace RCU library - Concurrent Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfcqueue.h for
 * linking dynamically with the userspace rcu library.
 *
 * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011-2012 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pthread.h>
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Concurrent queue with wait-free enqueue/blocking dequeue.
 *
 * This queue has been designed and implemented collaboratively by
 * Mathieu Desnoyers and Lai Jiangshan. Inspired by the
 * half-wait-free/half-blocking queue implementation done by Paul E.
 * McKenney.
 *
 * Mutual exclusion of cds_wfcq_* / __cds_wfcq_* API
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 * Legend:
 * [1] cds_wfcq_enqueue
 * [2] __cds_wfcq_splice (destination queue)
 * [3] __cds_wfcq_dequeue
 * [4] __cds_wfcq_splice (source queue)
 * [5] __cds_wfcq_first
 * [6] __cds_wfcq_next
 *
 *     [1] [2] [3] [4] [5] [6]
 * [1]  -   -   -   -   -   -
 * [2]  -   -   -   -   -   -
 * [3]  -   -   X   X   X   X
 * [4]  -   -   X   -   X   X
 * [5]  -   -   X   X   -   -
 * [6]  -   -   X   X   -   -
 *
 * Mutual exclusion can be ensured by holding cds_wfcq_dequeue_lock().
 *
 * For convenience, cds_wfcq_dequeue_blocking() and
 * cds_wfcq_splice_blocking() hold the dequeue lock.
 *
 * Besides locking, mutual exclusion of dequeue, splice and iteration
 * can be ensured by performing all of those operations from a single
 * thread, without requiring any lock, as illustrated in the example
 * below.
 */

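/*
 * Example (illustrative sketch, not part of this header): a queue is a
 * head/tail pair. Per the table above, any number of threads may
 * enqueue concurrently with a single dequeuer thread without any lock;
 * only the "X" pairs need external synchronization, e.g. by holding
 * cds_wfcq_dequeue_lock(). The "myqueue" names are hypothetical.
 *
 *	static struct cds_wfcq_head myqueue_head;
 *	static struct cds_wfcq_tail myqueue_tail;
 *
 *	static void myqueue_setup(void)
 *	{
 *		cds_wfcq_init(&myqueue_head, &myqueue_tail);
 *	}
 */
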
#define WFCQ_ADAPT_ATTEMPTS	10	/* Busy-loop attempts before waiting */
#define WFCQ_WAIT		10	/* Sleep 10 ms if still busy after attempts */

/*
 * cds_wfcq_node_init: initialize wait-free queue node.
 */
static inline void _cds_wfcq_node_init(struct cds_wfcq_node *node)
{
	node->next = NULL;
}

/*
 * cds_wfcq_init: initialize wait-free queue.
 */
static inline void _cds_wfcq_init(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	/* Set queue head and tail */
	_cds_wfcq_node_init(&head->node);
	tail->p = &head->node;
	ret = pthread_mutex_init(&head->lock, NULL);
	assert(!ret);
}

/*
 * cds_wfcq_empty: return whether wait-free queue is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 *
 * We perform the test on head->node.next to check if the queue is
 * possibly empty, but we confirm this by checking if the tail pointer
 * points to the head node, because the tail pointer is the
 * linearisation point of the enqueuers. Just checking the head next
 * pointer could make a queue appear empty if an enqueuer is preempted
 * for a long time between xchg() and setting the previous node's next
 * pointer.
 */
static inline bool _cds_wfcq_empty(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	/*
	 * Queue is empty if no node is pointed to by head->node.next
	 * nor tail->p. Even though the tail->p check is sufficient to
	 * find out if the queue is empty, we first check
	 * head->node.next as a common case, to ensure that dequeuers
	 * do not frequently access the enqueuers' tail->p cache line.
	 */
	return CMM_LOAD_SHARED(head->node.next) == NULL
		&& CMM_LOAD_SHARED(tail->p) == &head->node;
}
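
/*
 * Example (illustrative, not part of this header): the public
 * cds_wfcq_empty() can serve as an inexpensive hint before attempting
 * a dequeue. Since no memory barrier is issued, the result is only a
 * snapshot and may be stale by the time it is acted upon:
 *
 *	if (!cds_wfcq_empty(&myqueue_head, &myqueue_tail))
 *		node = cds_wfcq_dequeue_blocking(&myqueue_head,
 *				&myqueue_tail);
 */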

static inline void _cds_wfcq_dequeue_lock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_lock(&head->lock);
	assert(!ret);
}

static inline void _cds_wfcq_dequeue_unlock(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	int ret;

	ret = pthread_mutex_unlock(&head->lock);
	assert(!ret);
}

static inline bool ___cds_wfcq_append(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_head,
		struct cds_wfcq_node *new_tail)
{
	struct cds_wfcq_node *old_tail;

	/*
	 * Implicit memory barrier before uatomic_xchg() orders earlier
	 * stores to the data structure containing node, and the setting
	 * of node->next to NULL, before publication.
	 */
	old_tail = uatomic_xchg(&tail->p, new_tail);

	/*
	 * Implicit memory barrier after uatomic_xchg() orders store to
	 * q->tail before store to old_tail->next.
	 *
	 * At this point, dequeuers see a NULL tail->p->next, which
	 * indicates that the queue is being appended to. The following
	 * store will append "node" to the queue from a dequeuer
	 * perspective.
	 */
	CMM_STORE_SHARED(old_tail->next, new_head);
	/*
	 * Return false if queue was empty prior to adding the node,
	 * else return true.
	 */
	return old_tail != &head->node;
}
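
/*
 * Added note (illustration): transient queue state during
 * ___cds_wfcq_append(). Between the uatomic_xchg() and the
 * CMM_STORE_SHARED() above, with hypothetical nodes A, B already
 * linked and a node C being appended, a dequeuer walking the chain
 * observes:
 *
 *	head->node -> A -> B -> NULL		tail->p == &C
 *
 * The NULL next pointer combined with tail->p != &B tells dequeuers
 * that an enqueue is in progress; ___cds_wfcq_node_sync_next() below
 * busy-waits precisely on this condition.
 */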

/*
 * cds_wfcq_enqueue: enqueue a node into a wait-free queue.
 *
 * Issues a full memory barrier before enqueue. No mutual exclusion is
 * required.
 *
 * Returns false if the queue was empty prior to adding the node.
 * Returns true otherwise.
 */
static inline bool _cds_wfcq_enqueue(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *new_tail)
{
	return ___cds_wfcq_append(head, tail, new_tail, new_tail);
}

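/*
 * Example (illustrative sketch, not part of this header): enqueuing a
 * node embedded in a user structure through the public API. A "false"
 * return means the queue went from empty to non-empty, which callers
 * commonly use as a hint to wake up a consumer. "struct my_item" and
 * my_wake_consumer() are hypothetical.
 *
 *	struct my_item {
 *		int value;
 *		struct cds_wfcq_node node;
 *	};
 *
 *	static void my_enqueue(struct my_item *item)
 *	{
 *		cds_wfcq_node_init(&item->node);
 *		if (!cds_wfcq_enqueue(&myqueue_head, &myqueue_tail,
 *				&item->node))
 *			my_wake_consumer();
 *	}
 */
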
/*
 * ___cds_wfcq_busy_wait: adaptive busy-wait.
 *
 * Returns 1 if nonblocking and needs to block, 0 otherwise.
 */
static inline bool
___cds_wfcq_busy_wait(int *attempt, int blocking)
{
	if (!blocking)
		return 1;
	if (++(*attempt) >= WFCQ_ADAPT_ATTEMPTS) {
		poll(NULL, 0, WFCQ_WAIT);	/* Wait for 10ms */
		*attempt = 0;
	} else {
		caa_cpu_relax();
	}
	return 0;
}

/*
 * Wait for the enqueuer to complete the enqueue, then return the next
 * node.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_node_sync_next(struct cds_wfcq_node *node, int blocking)
{
	struct cds_wfcq_node *next;
	int attempt = 0;

	/*
	 * Adaptive busy-looping, waiting for the enqueuer to complete
	 * enqueue.
	 */
	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_WOULDBLOCK;
	}

	return next;
}

static inline struct cds_wfcq_node *
___cds_wfcq_first(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		int blocking)
{
	struct cds_wfcq_node *node;

	if (_cds_wfcq_empty(head, tail))
		return NULL;
	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	/* Load head->node.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_first_blocking: get first node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if queue is empty, first node otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 1);
}

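/*
 * Example (illustrative, not part of this header): iterating without
 * dequeuing via the __cds_wfcq_for_each_blocking() macro from
 * urcu/wfcqueue.h, assuming the caller ensures dequeue/splice/iteration
 * mutual exclusion (e.g. a single reader thread). caa_container_of()
 * maps the embedded node back to its enclosing structure; "struct
 * my_item" and my_inspect() are hypothetical.
 *
 *	struct cds_wfcq_node *node;
 *
 *	__cds_wfcq_for_each_blocking(&myqueue_head, &myqueue_tail, node) {
 *		struct my_item *item =
 *			caa_container_of(node, struct my_item, node);
 *
 *		my_inspect(item);
 *	}
 */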

/*
 * __cds_wfcq_first_nonblocking: get first node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_first_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_first_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_first(head, tail, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_next(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node,
		int blocking)
{
	struct cds_wfcq_node *next;

	/*
	 * Even though the following tail->p check is sufficient to find
	 * out if we reached the end of the queue, we first check
	 * node->next as a common case, to ensure that iteration on
	 * nodes does not frequently access the enqueuers' tail->p cache
	 * line.
	 */
	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/* Load node->next before tail->p */
		cmm_smp_rmb();
		if (CMM_LOAD_SHARED(tail->p) == node)
			return NULL;
		next = ___cds_wfcq_node_sync_next(node, blocking);
	}
	/* Load node->next before loading next's content */
	cmm_smp_read_barrier_depends();
	return next;
}

/*
 * __cds_wfcq_next_blocking: get next node of a queue, without dequeuing.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 *
 * Used by for-like iteration macros in urcu/wfcqueue.h:
 * __cds_wfcq_for_each_blocking()
 * __cds_wfcq_for_each_blocking_safe()
 *
 * Returns NULL if reached end of queue, non-NULL next queue node
 * otherwise.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 1);
}

/*
 * __cds_wfcq_next_nonblocking: get next node of a queue, without dequeuing.
 *
 * Same as __cds_wfcq_next_blocking, but returns CDS_WFCQ_WOULDBLOCK if
 * it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_next_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		struct cds_wfcq_node *node)
{
	return ___cds_wfcq_next(head, tail, node, 0);
}

static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail,
		int *state,
		int blocking)
{
	struct cds_wfcq_node *node, *next;

	if (state)
		*state = 0;

	if (_cds_wfcq_empty(head, tail)) {
		return NULL;
	}

	node = ___cds_wfcq_node_sync_next(&head->node, blocking);
	if (!blocking && node == CDS_WFCQ_WOULDBLOCK) {
		return CDS_WFCQ_WOULDBLOCK;
	}

	if ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
		/*
		 * @node is probably the only node in the queue.
		 * Try to move the tail to &q->head.
		 * q->head.next is set to NULL here, and stays
		 * NULL if the cmpxchg succeeds. Should the
		 * cmpxchg fail due to a concurrent enqueue,
		 * q->head.next will be set to the next node.
		 * The implicit memory barrier before
		 * uatomic_cmpxchg() orders load node->next
		 * before loading q->tail.
		 * The implicit memory barrier before uatomic_cmpxchg()
		 * orders load q->head.next before loading node's
		 * content.
		 */
		_cds_wfcq_node_init(&head->node);
		if (uatomic_cmpxchg(&tail->p, node, &head->node) == node) {
			if (state)
				*state |= CDS_WFCQ_STATE_LAST;
			return node;
		}
		next = ___cds_wfcq_node_sync_next(node, blocking);
		/*
		 * In nonblocking mode, if we would need to block to
		 * get node's next, set the head next node pointer
		 * (currently NULL) back to its original value.
		 */
		if (!blocking && next == CDS_WFCQ_WOULDBLOCK) {
			head->node.next = node;
			return CDS_WFCQ_WOULDBLOCK;
		}
	}

	/*
	 * Move queue head forward.
	 */
	head->node.next = next;

	/* Load q->head.next before loading node's content */
	cmm_smp_read_barrier_depends();
	return node;
}

/*
 * __cds_wfcq_dequeue_with_state_blocking: dequeue node from queue, with state.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * It is valid to reuse and free a dequeued node immediately.
 * Dequeue/splice/iteration mutual exclusion should be ensured by the
 * caller.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 1);
}
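
/*
 * Example (illustrative, not part of this header): the state output
 * parameter reports CDS_WFCQ_STATE_LAST when the dequeued node was the
 * last one in the queue, letting a caller detect the empty transition
 * without re-reading the queue. my_note_queue_emptied() is a
 * hypothetical helper.
 *
 *	int state;
 *	struct cds_wfcq_node *node;
 *
 *	node = __cds_wfcq_dequeue_with_state_blocking(&myqueue_head,
 *			&myqueue_tail, &state);
 *	if (node && (state & CDS_WFCQ_STATE_LAST))
 *		my_note_queue_emptied();
 */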

/*
 * ___cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * __cds_wfcq_dequeue_with_state_nonblocking: dequeue node, with state.
 *
 * Same as __cds_wfcq_dequeue_with_state_blocking, but returns
 * CDS_WFCQ_WOULDBLOCK if it needs to block.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_with_state_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	return ___cds_wfcq_dequeue_with_state(head, tail, state, 0);
}

/*
 * ___cds_wfcq_dequeue_nonblocking: dequeue node from queue.
 *
 * Same as __cds_wfcq_dequeue_with_state_nonblocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
___cds_wfcq_dequeue_nonblocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return ___cds_wfcq_dequeue_with_state_nonblocking(head, tail, NULL);
}

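/*
 * Example (illustrative, not part of this header): a nonblocking
 * dequeue returns three distinct values: NULL (queue empty),
 * CDS_WFCQ_WOULDBLOCK (an enqueue is in progress and waiting would be
 * required), or a dequeued node. A caller that must not sleep can
 * simply retry later. my_handle() is hypothetical.
 *
 *	struct cds_wfcq_node *node;
 *
 *	node = __cds_wfcq_dequeue_nonblocking(&myqueue_head,
 *			&myqueue_tail);
 *	if (node == CDS_WFCQ_WOULDBLOCK)
 *		return;		(retry on the next invocation)
 *	if (node)
 *		my_handle(node);
 */
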
/*
 * __cds_wfcq_splice: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail,
		int blocking)
{
	struct cds_wfcq_node *head, *tail;
	int attempt = 0;

	/*
	 * Initial emptiness check to speed up cases where queue is
	 * empty: only require loads to check if queue is empty.
	 */
	if (_cds_wfcq_empty(src_q_head, src_q_tail))
		return CDS_WFCQ_RET_SRC_EMPTY;

	for (;;) {
		/*
		 * Open-coded _cds_wfcq_empty() by testing result of
		 * uatomic_xchg, as well as tail pointer vs head node
		 * address.
		 */
		head = uatomic_xchg(&src_q_head->node.next, NULL);
		if (head)
			break;	/* non-empty */
		if (CMM_LOAD_SHARED(src_q_tail->p) == &src_q_head->node)
			return CDS_WFCQ_RET_SRC_EMPTY;
		if (___cds_wfcq_busy_wait(&attempt, blocking))
			return CDS_WFCQ_RET_WOULDBLOCK;
	}

	/*
	 * Memory barrier implied before uatomic_xchg() orders store to
	 * src_q->head before store to src_q->tail. This is required by
	 * concurrent enqueue on src_q, which exchanges the tail before
	 * updating the previous tail's next pointer.
	 */
	tail = uatomic_xchg(&src_q_tail->p, &src_q_head->node);

	/*
	 * Append the spliced content of src_q into dest_q. Does not
	 * require mutual exclusion on dest_q (wait-free).
	 */
	if (___cds_wfcq_append(dest_q_head, dest_q_tail, head, tail))
		return CDS_WFCQ_RET_DEST_NON_EMPTY;
	else
		return CDS_WFCQ_RET_DEST_EMPTY;
}

/*
 * __cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Mutual exclusion for src_q should be ensured by the caller as
 * specified in the "Synchronization table".
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 1);
}

/*
 * __cds_wfcq_splice_nonblocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Same as __cds_wfcq_splice_blocking, but returns
 * CDS_WFCQ_RET_WOULDBLOCK if it needs to block.
 */
static inline enum cds_wfcq_ret
___cds_wfcq_splice_nonblocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	return ___cds_wfcq_splice(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail, 0);
}

/*
 * cds_wfcq_dequeue_with_state_blocking: dequeue a node from a wait-free queue.
 *
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_splice_blocking and dequeue lock is
 * ensured.
 * It is valid to reuse and free a dequeued node immediately.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_with_state_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail, int *state)
{
	struct cds_wfcq_node *retval;

	_cds_wfcq_dequeue_lock(head, tail);
	retval = ___cds_wfcq_dequeue_with_state_blocking(head, tail, state);
	_cds_wfcq_dequeue_unlock(head, tail);
	return retval;
}

/*
 * cds_wfcq_dequeue_blocking: dequeue node from queue.
 *
 * Same as cds_wfcq_dequeue_with_state_blocking, but without saving
 * state.
 */
static inline struct cds_wfcq_node *
_cds_wfcq_dequeue_blocking(struct cds_wfcq_head *head,
		struct cds_wfcq_tail *tail)
{
	return _cds_wfcq_dequeue_with_state_blocking(head, tail, NULL);
}

/*
 * cds_wfcq_splice_blocking: enqueue all src_q nodes at the end of dest_q.
 *
 * Dequeue all nodes from src_q.
 * dest_q must be already initialized.
 * Content written into the node before enqueue is guaranteed to be
 * consistent, but no other memory ordering is ensured.
 * Mutual exclusion with cds_wfcq_dequeue_blocking and dequeue lock is
 * ensured.
 * Returns enum cds_wfcq_ret which indicates the state of the src or
 * dest queue. Never returns CDS_WFCQ_RET_WOULDBLOCK.
 */
static inline enum cds_wfcq_ret
_cds_wfcq_splice_blocking(
		struct cds_wfcq_head *dest_q_head,
		struct cds_wfcq_tail *dest_q_tail,
		struct cds_wfcq_head *src_q_head,
		struct cds_wfcq_tail *src_q_tail)
{
	enum cds_wfcq_ret ret;

	_cds_wfcq_dequeue_lock(src_q_head, src_q_tail);
	ret = ___cds_wfcq_splice_blocking(dest_q_head, dest_q_tail,
			src_q_head, src_q_tail);
	_cds_wfcq_dequeue_unlock(src_q_head, src_q_tail);
	return ret;
}

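/*
 * Example (illustrative, not part of this header): a common consumer
 * pattern splices all queued nodes into a local queue in one
 * operation, then processes them without further synchronization.
 * Queue names and my_free_item() are hypothetical.
 *
 *	struct cds_wfcq_head local_head;
 *	struct cds_wfcq_tail local_tail;
 *	struct cds_wfcq_node *node, *n;
 *	enum cds_wfcq_ret ret;
 *
 *	cds_wfcq_init(&local_head, &local_tail);
 *	ret = cds_wfcq_splice_blocking(&local_head, &local_tail,
 *			&myqueue_head, &myqueue_tail);
 *	if (ret != CDS_WFCQ_RET_SRC_EMPTY) {
 *		__cds_wfcq_for_each_blocking_safe(&local_head,
 *				&local_tail, node, n)
 *			my_free_item(node);
 *	}
 */
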
#ifdef __cplusplus
}
#endif

#endif /* _URCU_WFCQUEUE_STATIC_H */