/*
 * urcu-defer.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <syscall.h>
#include <unistd.h>

#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

#define futex(...)	syscall(__NR_futex, __VA_ARGS__)
#define FUTEX_WAIT	0
#define FUTEX_WAKE	1

void __attribute__((destructor)) urcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * urcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int defer_thread_futex;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static struct defer_queue __thread defer_queue;
static LIST_HEAD(registry);
static pthread_t tid_defer;

static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		pthread_testcancel();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		futex(&defer_thread_futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}

static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	internal_urcu_lock(&urcu_defer_mutex);
	list_for_each_entry(index, &registry, list) {
		head = LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	internal_urcu_unlock(&urcu_defer_mutex);
	return num_items;
}

/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	smp_mb();	/* Write futex before read queue */
	if (rcu_defer_num_callbacks()) {
		smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		smp_rmb();	/* Read queue before read futex */
		if (uatomic_read(&defer_thread_futex) == -1)
			futex(&defer_thread_futex, FUTEX_WAIT, -1,
			      NULL, NULL, 0);
	}
}
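
/*
 * Note on the futex handshake above: wait_defer() (called only by the defer
 * thread) decrements defer_thread_futex to -1 and sleeps on it with
 * FUTEX_WAIT when no callbacks are queued. wake_up_defer() (called by any
 * thread after enqueueing) resets the futex to 0 and issues FUTEX_WAKE only
 * when it observes -1, so the common enqueue path pays no futex syscall.
 */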

/*
 * Must be called after Q.S. (a quiescent state, i.e. a grace period) is
 * reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		smp_rmb();	/* read head before q[]. */
		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}
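
/*
 * The decoding above mirrors the encoding done by _rcu_defer_queue() below:
 * a queue word with the function bit set (DQ_IS_FCT_BIT), or the DQ_FCT_MARK
 * sentinel followed by a raw function pointer, updates last_fct_out; every
 * other word is data passed to the most recently decoded callback.
 */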

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}

void rcu_defer_barrier_thread(void)
{
	internal_urcu_lock(&urcu_defer_mutex);
	_rcu_defer_barrier_thread();
	internal_urcu_unlock(&urcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued RCU callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Such callbacks are only guaranteed to
 * be executed if there is explicit synchronization between the thread
 * adding to the queue and the thread issuing the defer_barrier call.
 */
void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (list_empty(&registry))
		return;

	internal_urcu_lock(&urcu_defer_mutex);
	list_for_each_entry(index, &registry, list) {
		index->last_head = LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	list_for_each_entry(index, &registry, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	internal_urcu_unlock(&urcu_defer_mutex);
}
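
/*
 * Example (illustrative sketch only, not part of the library; old_node is
 * hypothetical): a program can flush all previously queued callbacks, e.g.
 * before freeing a memory pool the callbacks reference:
 *
 *	rcu_defer_queue(free, old_node);
 *	...
 *	rcu_defer_barrier();	// grace period, then run queued callbacks
 */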

/*
 * _rcu_defer_queue - Queue an RCU callback.
 */
void _rcu_defer_queue(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourself. Tail can be modified by the
	 * reclamation thread.
	 */
	head = defer_queue.head;
	tail = LOAD_SHARED(defer_queue.tail);

	/*
	 * If queue is full, empty it ourselves.
	 * Worst-case: must allow 2 supplementary entries for fct pointer.
	 */
	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		assert(head - LOAD_SHARED(defer_queue.tail) == 0);
	}

	if (unlikely(defer_queue.last_fct_in != fct)) {
		defer_queue.last_fct_in = fct;
		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			/*
			 * If the function to encode is not aligned or the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	} else {
		if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
			/*
			 * If the data to encode is not aligned or the marker,
			 * write DQ_FCT_MARK followed by the function pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	}
	_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
	smp_wmb();	/* Publish new pointer (write q[]) before head */
	STORE_SHARED(defer_queue.head, head);
	smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake-up any waiting defer thread.
	 */
	wake_up_defer();
}
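
/*
 * Encoding summary (the example values are made up, for illustration only):
 * each q[] slot normally holds a data pointer for the last function
 * enqueued. When the function changes, its pointer is written first, tagged
 * with the fct bit (DQ_SET_FCT_BIT); when the pointer cannot carry the tag,
 * or when the function or data value would be mistaken for a tagged word or
 * for DQ_FCT_MARK, the two-word escape sequence { DQ_FCT_MARK, fct } is
 * emitted instead. Queueing free(a), free(b), then cb(c) with a hypothetical
 * callback cb would therefore produce:
 *
 *	[ free+bit ] [ a ] [ b ] [ cb+bit ] [ c ]
 */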

void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life by
		 * leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_defer_queue(void (*fct)(void *p), void *p)
{
	_rcu_defer_queue(fct, p);
}
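
/*
 * Typical use (illustrative sketch only; remove_from_structure() and
 * old_node are hypothetical): instead of blocking in synchronize_rcu()
 * before freeing, an updater thread registered with
 * rcu_defer_register_thread() can defer the reclamation:
 *
 *	old_node = remove_from_structure();	// unpublish from readers
 *	rcu_defer_queue(free, old_node);	// freed after a later grace period
 */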

static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}

void rcu_defer_register_thread(void)
{
	int was_empty;

	assert(defer_queue.last_head == 0);
	assert(defer_queue.q == NULL);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	was_empty = list_empty(&registry);
	list_add(&defer_queue.list, &registry);
	internal_urcu_unlock(&urcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void rcu_defer_unregister_thread(void)
{
	int is_empty;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	list_del(&defer_queue.list);
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	is_empty = list_empty(&registry);
	internal_urcu_unlock(&urcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}
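
/*
 * Usage note (illustrative sketch; worker() is hypothetical): any thread
 * calling rcu_defer_queue() must first register with the defer mechanism,
 * and must unregister before exiting so its remaining callbacks are flushed
 * and its queue is freed:
 *
 *	void *worker(void *arg)
 *	{
 *		rcu_defer_register_thread();
 *		... enqueue work with rcu_defer_queue() ...
 *		rcu_defer_unregister_thread();
 *		return NULL;
 *	}
 */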

void urcu_defer_exit(void)
{
	assert(list_empty(&registry));
}