#ifndef _URCU_WAIT_H
#define _URCU_WAIT_H

/*
 * urcu-wait.h
 *
 * Userspace RCU library wait/wakeup management
 *
 * Copyright (c) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>

#include <urcu/uatomic.h>
#include <urcu/wfstack.h>
#include <urcu/futex.h>
#include "urcu-die.h"

/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (futex compares it). */
	URCU_WAIT_WAITING = 0,
	/* Non-zero values are used as bit masks. */
	URCU_WAIT_WAKEUP = (1 << 0),
	URCU_WAIT_RUNNING = (1 << 1),
	URCU_WAIT_TEARDOWN = (1 << 2),
};

struct urcu_wait_node {
	struct cds_wfs_node node;
	int32_t state;	/* enum urcu_wait_state */
};

#define URCU_WAIT_NODE_INIT(name, _state) \
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state) \
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name) \
	struct urcu_wait_node name

struct urcu_wait_queue {
	struct cds_wfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name) \
	{ \
		.stack = { \
			.head = CDS_WFS_END, \
			.lock = PTHREAD_MUTEX_INITIALIZER, \
		}, \
	}

#define DECLARE_URCU_WAIT_QUEUE(name) \
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name) \
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)

struct urcu_waiters {
	struct cds_wfs_head *head;
};

/*
 * Add ourselves atomically to a wait queue. Return false if the queue
 * was previously empty, else return true.
 * A full memory barrier is issued before being added to the wait queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	return cds_wfs_push(&queue->stack, &node->node);
}
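
/*
 * Illustrative sketch, not part of the upstream header: the boolean
 * returned by urcu_wait_add() lets a caller detect whether it is the
 * first waiter on the queue (cds_wfs_push() returns 0 when the stack
 * was previously empty), which is how one thread can be elected to
 * perform batched work on behalf of later arrivals. The helper name
 * example_is_first_waiter() is hypothetical.
 */
static inline
bool example_is_first_waiter(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	/* A false return means the queue was previously empty. */
	return !urcu_wait_add(queue, node);
}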

/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_wfs_pop_all(&queue->stack);
}

static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_wfs_node_init(&node->node);
}

/*
 * Note: urcu_adaptative_wake_up needs its "wait" node argument to stay
 * allocated throughout its execution. In this scheme, the waiter owns
 * the node memory, and we only allow it to free this memory when it
 * receives the URCU_WAIT_TEARDOWN flag.
 */
static inline
void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	cmm_smp_mb();
	assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
	uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
		if (futex_noasync(&wait->state, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
	/* Allow teardown of struct urcu_wait memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
}

/*
 * Caller must initialize the wait node state to URCU_WAIT_WAITING before
 * passing its memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before reading state. */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	while (uatomic_read(&wait->state) == URCU_WAIT_WAITING) {
		if (!futex_noasync(&wait->state, FUTEX_WAIT, URCU_WAIT_WAITING,
				NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code using
			 * the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * URCU_WAIT_WAITING (spurious wakeups). Check
			 * the value again in user-space to validate
			 * whether it really differs from
			 * URCU_WAIT_WAITING.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			goto skip_futex_wait;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
skip_futex_wait:

	/* Tell waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until waker thread lets us know it's ok to tear down
	 * memory allocated for struct urcu_wait.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}
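
/*
 * Illustrative sketch, not part of the upstream header: a waiter
 * allocates its wait node on the stack, initializes it to
 * URCU_WAIT_WAITING, publishes it on the wait queue, then blocks in
 * urcu_adaptative_busy_wait() until some other thread (assumed to act
 * as the waker) calls urcu_adaptative_wake_up() on the node. The
 * helper name example_wait_turn() is hypothetical.
 */
static inline
void example_wait_turn(struct urcu_wait_queue *queue)
{
	struct urcu_wait_node wait;

	/* The node must be in URCU_WAIT_WAITING state when published. */
	urcu_wait_node_init(&wait, URCU_WAIT_WAITING);
	(void) urcu_wait_add(queue, &wait);

	/*
	 * Returns only after the waker sets URCU_WAIT_TEARDOWN, so the
	 * stack-allocated node can safely go out of scope afterwards.
	 */
	urcu_adaptative_busy_wait(&wait);
}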

static inline
void urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_wfs_node *iter, *iter_n;

	/* Wake all waiters in our stack head */
	cds_wfs_for_each_blocking_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		/* Don't wake already running threads */
		if (wait_node->state & URCU_WAIT_RUNNING)
			continue;
		urcu_adaptative_wake_up(wait_node);
	}
}
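
/*
 * Illustrative sketch, not part of the upstream header: the waker side
 * atomically grabs every queued waiter into a local struct urcu_waiters,
 * performs the batched work the waiters are blocked on, then wakes them
 * all. The helper name example_run_batch_and_wake() and the do_work
 * callback are hypothetical.
 */
static inline
void example_run_batch_and_wake(struct urcu_wait_queue *queue,
		void (*do_work)(void))
{
	struct urcu_waiters waiters;

	/* Take ownership of all waiters queued so far. */
	urcu_move_waiters(&waiters, queue);

	/* Perform the work the waiters are blocked on. */
	do_work();

	/*
	 * Wake every waiter moved above. A waiter may free its node as
	 * soon as it observes URCU_WAIT_TEARDOWN, so the nodes must not
	 * be touched after this call.
	 */
	urcu_wake_all_waiters(&waiters);
}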

#endif /* _URCU_WAIT_H */