1 // SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3 // SPDX-License-Identifier: LGPL-2.1-or-later
5 #ifndef _URCU_WFQUEUE_STATIC_H
6 #define _URCU_WFQUEUE_STATIC_H
/*
 * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
 * dynamically with the userspace rcu library.
 */
#include <pthread.h>
#include <poll.h>

#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
/*
 * Queue with wait-free enqueue/blocking dequeue.
 * This implementation adds a dummy head node when the queue is empty to ensure
 * we can always update the queue locklessly.
 *
 * Inspired from half-wait-free/half-blocking queue implementation done by
 * Paul E. McKenney.
 */
34 #define WFQ_ADAPT_ATTEMPTS 10 /* Retry if being set */
35 #define WFQ_WAIT 10 /* Wait 10 ms if being set */
37 static inline void _cds_wfq_node_init(struct cds_wfq_node
*node
)
42 static inline void _cds_wfq_init(struct cds_wfq_queue
*q
)
46 _cds_wfq_node_init(&q
->dummy
);
47 /* Set queue head and tail */
49 q
->tail
= &q
->dummy
.next
;
50 ret
= pthread_mutex_init(&q
->lock
, NULL
);
51 urcu_posix_assert(!ret
);
54 static inline void _cds_wfq_destroy(struct cds_wfq_queue
*q
)
56 int ret
= pthread_mutex_destroy(&q
->lock
);
57 urcu_posix_assert(!ret
);
60 static inline void _cds_wfq_enqueue(struct cds_wfq_queue
*q
,
61 struct cds_wfq_node
*node
)
63 struct cds_wfq_node
**old_tail
;
66 * uatomic_xchg() implicit memory barrier orders earlier stores to data
67 * structure containing node and setting node->next to NULL before
70 cmm_emit_legacy_smp_mb();
71 old_tail
= uatomic_xchg_mo(&q
->tail
, &node
->next
, CMM_SEQ_CST
);
73 * At this point, dequeuers see a NULL old_tail->next, which indicates
74 * that the queue is being appended to. The following store will append
75 * "node" to the queue from a dequeuer perspective.
77 uatomic_store(old_tail
, node
, CMM_RELEASE
);
81 * Waiting for enqueuer to complete enqueue and return the next node
83 static inline struct cds_wfq_node
*
84 ___cds_wfq_node_sync_next(struct cds_wfq_node
*node
)
86 struct cds_wfq_node
*next
;
90 * Adaptative busy-looping waiting for enqueuer to complete enqueue.
92 while ((next
= uatomic_load(&node
->next
, CMM_CONSUME
)) == NULL
) {
93 if (++attempt
>= WFQ_ADAPT_ATTEMPTS
) {
94 (void) poll(NULL
, 0, WFQ_WAIT
); /* Wait for 10ms */
104 * It is valid to reuse and free a dequeued node immediately.
106 * No need to go on a waitqueue here, as there is no possible state in which the
107 * list could cause dequeue to busy-loop needlessly while waiting for another
108 * thread to be scheduled. The queue appears empty until tail->next is set by
111 static inline struct cds_wfq_node
*
112 ___cds_wfq_dequeue_blocking(struct cds_wfq_queue
*q
)
114 struct cds_wfq_node
*node
, *next
;
117 * Queue is empty if it only contains the dummy node.
119 if (q
->head
== &q
->dummy
&& uatomic_load(&q
->tail
, CMM_CONSUME
) == &q
->dummy
.next
)
123 next
= ___cds_wfq_node_sync_next(node
);
126 * Move queue head forward.
130 * Requeue dummy node if we just dequeued it.
132 if (node
== &q
->dummy
) {
133 _cds_wfq_node_init(node
);
134 _cds_wfq_enqueue(q
, node
);
135 return ___cds_wfq_dequeue_blocking(q
);
140 static inline struct cds_wfq_node
*
141 _cds_wfq_dequeue_blocking(struct cds_wfq_queue
*q
)
143 struct cds_wfq_node
*retnode
;
146 ret
= pthread_mutex_lock(&q
->lock
);
147 urcu_posix_assert(!ret
);
148 retnode
= ___cds_wfq_dequeue_blocking(q
);
149 ret
= pthread_mutex_unlock(&q
->lock
);
150 urcu_posix_assert(!ret
);
158 #endif /* _URCU_WFQUEUE_STATIC_H */