/*
 * urcu-defer.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <linux/futex.h>
#include <sys/time.h>
#include <syscall.h>
#include <unistd.h>

#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

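/*
 * glibc provides no futex(2) wrapper, so invoke it through syscall(2).
 * Arguments follow the kernel ABI: (uaddr, op, val, timeout, uaddr2, val3).
 */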
#define futex(...)	syscall(__NR_futex, __VA_ARGS__)

void __attribute__((destructor)) urcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * urcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

int defer_thread_futex;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
struct defer_queue __thread defer_queue;

/* Thread IDs of registered deferers */
#define INIT_NUM_THREADS 4

struct deferer_registry {
	pthread_t tid;
	struct defer_queue *defer_queue;
	unsigned long last_head;
};

static struct deferer_registry *registry;
static int num_deferers, alloc_deferers;

static pthread_t tid_defer;

/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
 */
void wake_up_defer(void)
{
	if (unlikely(atomic_read(&defer_thread_futex) == -1))
		atomic_set(&defer_thread_futex, 0);
	/*
	 * FUTEX_WAKE takes the number of waiters to wake as its third
	 * argument; it must be at least 1 (0 would wake nobody).
	 */
	futex(&defer_thread_futex, FUTEX_WAKE, 1,
	      NULL, NULL, 0);
}

/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	atomic_dec(&defer_thread_futex);
	if (atomic_read(&defer_thread_futex) == -1)
		futex(&defer_thread_futex, FUTEX_WAIT, -1,
		      NULL, NULL, 0);
}
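
/*
 * How the handshake above avoids lost wakeups (assuming the atomic_*()
 * primitives imply full memory barriers, as in the urcu arch headers):
 * the defer thread advertises that it is about to sleep by decrementing
 * defer_thread_futex to -1. Producers that observe -1 reset it to 0 and
 * call FUTEX_WAKE. FUTEX_WAIT only blocks while the value is still -1,
 * so a producer racing in between the decrement and the wait makes the
 * wait return immediately instead of sleeping through the wakeup:
 *
 *	defer thread			producer
 *	------------			--------
 *	atomic_dec() -> futex == -1
 *					reads -1, sets futex = 0, FUTEX_WAKE
 *	FUTEX_WAIT(futex, -1)		value is 0 != -1: returns at once
 */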

static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		pthread_testcancel();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Must be called after a quiescent state (Q.S.) is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		smp_rmb();	/* read head before q[]. */
		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}
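
/*
 * Queue slot encoding (as assumed from the decode loop above; the encoder
 * lives in urcu-defer-static.h): a slot normally holds the data pointer
 * for the last enqueued function. When the function changes, the encoder
 * stores the new function pointer with its low bit set (DQ_FCT_BIT), or,
 * if the pointer value collides with that bit or with the mark, stores the
 * DQ_FCT_MARK sentinel followed by the raw function pointer. For example,
 * queueing free(a); free(b); other(c) could yield:
 *
 *	[ free|DQ_FCT_BIT ][ a ][ b ][ other|DQ_FCT_BIT ][ c ]
 *
 * which the loop above replays as free(a), free(b), other(c).
 */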

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}

void rcu_defer_barrier_thread(void)
{
	internal_urcu_lock(&urcu_defer_mutex);
	_rcu_defer_barrier_thread();
	internal_urcu_unlock(&urcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Such callbacks are only guaranteed to
 * be executed if there is explicit synchronization between the thread
 * adding to the queue and the thread issuing the defer_barrier call.
 */
void rcu_defer_barrier(void)
{
	struct deferer_registry *index;
	unsigned long num_items = 0;

	if (!registry)
		return;

	internal_urcu_lock(&urcu_defer_mutex);
	for (index = registry; index < registry + num_deferers; index++) {
		index->last_head = LOAD_SHARED(index->defer_queue->head);
		num_items += index->last_head - index->defer_queue->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	for (index = registry; index < registry + num_deferers; index++)
		rcu_defer_barrier_queue(index->defer_queue,
					index->last_head);
end:
	internal_urcu_unlock(&urcu_defer_mutex);
}

void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life
		 * by leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_defer_queue(void (*fct)(void *p), void *p)
{
	_rcu_defer_queue(fct, p);
}
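
/*
 * Usage sketch (illustrative, not part of this file). Assuming the calling
 * thread already did rcu_defer_register_thread(), an updater can swap a
 * shared pointer and defer freeing the old copy until after a grace period:
 *
 *	struct node *old;
 *
 *	old = rcu_dereference(shared_ptr);
 *	rcu_assign_pointer(shared_ptr, new_node);
 *	rcu_defer_queue(free, old);
 *
 * The callback runs later, either from the worker thread (thr_defer) after
 * synchronize_rcu(), or synchronously from rcu_defer_barrier*().
 */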

static void rcu_add_deferer(pthread_t id)
{
	struct deferer_registry *oldarray;

	if (!registry) {
		alloc_deferers = INIT_NUM_THREADS;
		num_deferers = 0;
		registry =
			malloc(sizeof(struct deferer_registry) * alloc_deferers);
	}
	if (alloc_deferers < num_deferers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct deferer_registry)
				* (alloc_deferers << 1));
		memcpy(registry, oldarray,
			sizeof(struct deferer_registry) * alloc_deferers);
		alloc_deferers <<= 1;
		free(oldarray);
	}
	registry[num_deferers].tid = id;
	/* reference to the TLS of _this_ deferer thread. */
	registry[num_deferers].defer_queue = &defer_queue;
	registry[num_deferers].last_head = 0;
	num_deferers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_deferer(pthread_t id)
{
	struct deferer_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_deferers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_deferers - 1],
				sizeof(struct deferer_registry));
			registry[num_deferers - 1].tid = 0;
			registry[num_deferers - 1].defer_queue = NULL;
			registry[num_deferers - 1].last_head = 0;
			num_deferers--;
			return;
		}
	}
	/* Not found: was this thread ever registered? */
	assert(0);
}

static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer,
			NULL);
	assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}

void rcu_defer_register_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	rcu_add_deferer(pthread_self());
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	if (deferers == 1)
		start_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void rcu_defer_unregister_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	rcu_remove_deferer(pthread_self());
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	if (deferers == 0)
		stop_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void urcu_defer_exit(void)
{
	free(registry);
}
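
/*
 * Thread lifecycle sketch (illustrative; the worker body is hypothetical,
 * and the thread must also be registered with the main urcu library, e.g.
 * via urcu_register_thread(), so grace periods track it as a reader):
 *
 *	void *worker(void *arg)
 *	{
 *		rcu_defer_register_thread();	// allocates defer_queue.q,
 *						// starts thr_defer if first
 *		...
 *		rcu_defer_queue(free, p);
 *		...
 *		rcu_defer_unregister_thread();	// flushes this thread's queue,
 *						// stops thr_defer if last
 *		return NULL;
 *	}
 */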