Commit | Line | Data |
---|---|---|
d3d3857f MJ |
1 | // SPDX-FileCopyrightText: 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
2 | // | |
3 | // SPDX-License-Identifier: LGPL-2.1-or-later | |
4 | ||
4d001e96 MD |
5 | #ifndef _URCU_WFQUEUE_STATIC_H |
6 | #define _URCU_WFQUEUE_STATIC_H | |
7 | ||
8 | /* | |
4d001e96 MD |
9 | * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue |
10 | * | |
11 | * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking | |
12 | * dynamically with the userspace rcu library. | |
4d001e96 MD |
13 | */ |
14 | ||
15 | #include <pthread.h> | |
b57aee66 | 16 | #include <poll.h> |
01477510 | 17 | #include <urcu/assert.h> |
4d001e96 | 18 | #include <urcu/compiler.h> |
a2e7bf9c | 19 | #include <urcu/uatomic.h> |
4d001e96 MD |
20 | |
21 | #ifdef __cplusplus | |
22 | extern "C" { | |
23 | #endif | |
24 | ||
25 | /* | |
26 | * Queue with wait-free enqueue/blocking dequeue. | |
27 | * This implementation adds a dummy head node when the queue is empty to ensure | |
28 | * we can always update the queue locklessly. | |
29 | * | |
30 | * Inspired from half-wait-free/half-blocking queue implementation done by | |
31 | * Paul E. McKenney. | |
32 | */ | |
33 | ||
34 | #define WFQ_ADAPT_ATTEMPTS 10 /* Retry if being set */ | |
35 | #define WFQ_WAIT 10 /* Wait 10 ms if being set */ | |
36 | ||
b57aee66 | 37 | static inline void _cds_wfq_node_init(struct cds_wfq_node *node) |
4d001e96 MD |
38 | { |
39 | node->next = NULL; | |
40 | } | |
41 | ||
b57aee66 | 42 | static inline void _cds_wfq_init(struct cds_wfq_queue *q) |
4d001e96 MD |
43 | { |
44 | int ret; | |
45 | ||
16aa9ee8 | 46 | _cds_wfq_node_init(&q->dummy); |
4d001e96 MD |
47 | /* Set queue head and tail */ |
48 | q->head = &q->dummy; | |
49 | q->tail = &q->dummy.next; | |
50 | ret = pthread_mutex_init(&q->lock, NULL); | |
01477510 | 51 | urcu_posix_assert(!ret); |
4d001e96 MD |
52 | } |
53 | ||
200d100e MD |
54 | static inline void _cds_wfq_destroy(struct cds_wfq_queue *q) |
55 | { | |
56 | int ret = pthread_mutex_destroy(&q->lock); | |
01477510 | 57 | urcu_posix_assert(!ret); |
200d100e MD |
58 | } |
59 | ||
b57aee66 PM |
/*
 * Wait-free enqueue of "node" at the tail of "q".
 *
 * The caller must have initialized "node" with _cds_wfq_node_init() (or it
 * must otherwise have node->next == NULL) before calling — the NULL next
 * pointer is what dequeuers use to detect an in-progress append.
 * Lock-free: never blocks and never waits on other enqueuers or dequeuers.
 */
static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
		struct cds_wfq_node *node)
{
	struct cds_wfq_node **old_tail;

	/*
	 * uatomic_xchg() implicit memory barrier orders earlier stores to data
	 * structure containing node and setting node->next to NULL before
	 * publication.
	 */
	cmm_emit_legacy_smp_mb();
	/* Atomically claim the tail slot; old_tail is the previous last
	 * node's next pointer (or &dummy.next when the queue was empty). */
	old_tail = uatomic_xchg_mo(&q->tail, &node->next, CMM_SEQ_CST);
	/*
	 * At this point, dequeuers see a NULL old_tail->next, which indicates
	 * that the queue is being appended to. The following store will append
	 * "node" to the queue from a dequeuer perspective.
	 */
	uatomic_store(old_tail, node, CMM_RELEASE);
}
79 | ||
b9103f30 LJ |
80 | /* |
81 | * Waiting for enqueuer to complete enqueue and return the next node | |
82 | */ | |
83 | static inline struct cds_wfq_node * | |
84 | ___cds_wfq_node_sync_next(struct cds_wfq_node *node) | |
85 | { | |
86 | struct cds_wfq_node *next; | |
87 | int attempt = 0; | |
88 | ||
89 | /* | |
90 | * Adaptative busy-looping waiting for enqueuer to complete enqueue. | |
91 | */ | |
0e2125fb | 92 | while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) { |
b9103f30 | 93 | if (++attempt >= WFQ_ADAPT_ATTEMPTS) { |
124bd5c7 | 94 | (void) poll(NULL, 0, WFQ_WAIT); /* Wait for 10ms */ |
b9103f30 LJ |
95 | attempt = 0; |
96 | } else | |
97 | caa_cpu_relax(); | |
98 | } | |
99 | ||
100 | return next; | |
101 | } | |
102 | ||
4d001e96 MD |
/*
 * It is valid to reuse and free a dequeued node immediately.
 *
 * No need to go on a waitqueue here, as there is no possible state in which the
 * list could cause dequeue to busy-loop needlessly while waiting for another
 * thread to be scheduled. The queue appears empty until tail->next is set by
 * enqueue.
 *
 * Internal dequeue: the caller must serialize dequeuers (hold q->lock or
 * guarantee single-dequeuer usage); only enqueuers may run concurrently.
 * Returns NULL when the queue is empty.
 */
static inline struct cds_wfq_node *
___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
{
	struct cds_wfq_node *node, *next;

	/*
	 * Queue is empty if it only contains the dummy node.
	 */
	if (q->head == &q->dummy && uatomic_load(&q->tail, CMM_CONSUME) == &q->dummy.next)
		return NULL;
	node = q->head;

	/* May block (adaptive spin + sleep) until the enqueuer that claimed
	 * the tail finishes publishing node->next. */
	next = ___cds_wfq_node_sync_next(node);

	/*
	 * Move queue head forward.
	 */
	q->head = next;
	/*
	 * Requeue dummy node if we just dequeued it.
	 * NOTE(review): after re-enqueueing the dummy at the tail, the head is
	 * a real node, so this recursion appears bounded to one level under
	 * serialized dequeuers — confirm against upstream liburcu.
	 */
	if (node == &q->dummy) {
		_cds_wfq_node_init(node);
		_cds_wfq_enqueue(q, node);
		return ___cds_wfq_dequeue_blocking(q);
	}
	return node;
}
139 | ||
b57aee66 | 140 | static inline struct cds_wfq_node * |
16aa9ee8 | 141 | _cds_wfq_dequeue_blocking(struct cds_wfq_queue *q) |
4d001e96 | 142 | { |
16aa9ee8 | 143 | struct cds_wfq_node *retnode; |
4d001e96 MD |
144 | int ret; |
145 | ||
146 | ret = pthread_mutex_lock(&q->lock); | |
01477510 | 147 | urcu_posix_assert(!ret); |
16aa9ee8 | 148 | retnode = ___cds_wfq_dequeue_blocking(q); |
4d001e96 | 149 | ret = pthread_mutex_unlock(&q->lock); |
01477510 | 150 | urcu_posix_assert(!ret); |
4d001e96 MD |
151 | return retnode; |
152 | } | |
153 | ||
154 | #ifdef __cplusplus | |
155 | } | |
156 | #endif | |
157 | ||
158 | #endif /* _URCU_WFQUEUE_STATIC_H */ |