// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_DEFER_IMPL_H
#define _URCU_DEFER_IMPL_H

/*
 * Userspace RCU header - memory reclamation.
 *
 * TO BE INCLUDED ONLY FROM URCU LIBRARY CODE. See urcu-defer.h for linking
 * dynamically with the userspace rcu reclamation library.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>

#include "urcu/futex.h"

#include <urcu/assert.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
#include <urcu/tls-compat.h>
#include "urcu-die.h"
#include "urcu-utils.h"

/*
 * Number of entries in the per-thread defer queue. Must be a power of 2.
 */
#define DEFER_QUEUE_SIZE	(1 << 12)
#define DEFER_QUEUE_MASK	(DEFER_QUEUE_SIZE - 1)

/*
 * Typically, data is aligned at least on the architecture word size.
 * Use the lowest bit to indicate that the current callback is changing.
 * Assumes that (void *)-2L is not used often. Used to encode non-aligned
 * functions and non-aligned data using extra space.
 * We encode the (void *)-2L fct as: -2L, fct, data.
 * We encode the (void *)-2L data as either:
 *   fct | DQ_FCT_BIT, data (if fct is aligned), or
 *   -2L, fct, data (if fct is not aligned).
 * Here, DQ_FCT_MARK == ~DQ_FCT_BIT. Required for the test order.
 */
#define DQ_FCT_BIT		(1 << 0)
#define DQ_IS_FCT_BIT(x)	((unsigned long)(x) & DQ_FCT_BIT)
#define DQ_SET_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) | DQ_FCT_BIT))
#define DQ_CLEAR_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) & ~DQ_FCT_BIT))
#define DQ_FCT_MARK		((void *)(~DQ_FCT_BIT))

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * Defer queue.
 * Contains pointers. Encoded to save space when the same callback is often
 * used. When looking up the next item:
 * - if DQ_FCT_BIT is set, set the current callback to DQ_CLEAR_FCT_BIT(ptr);
 *   the next element contains the pointer to data.
 * - else if item == DQ_FCT_MARK, set the current callback to the next
 *   element's pointer; the element after that contains the pointer to data.
 * - else the current element contains the data.
 */
struct defer_queue {
	unsigned long head;	/* add element at head */
	void *last_fct_in;	/* last fct pointer encoded */
	unsigned long tail;	/* next element to remove at tail */
	void *last_fct_out;	/* last fct pointer encoded */
	void **q;
	/* registry information */
	unsigned long last_head;
	struct cds_list_head list;	/* list of thread queues */
};

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include <urcu/defer.h>

void __attribute__((destructor)) rcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * rcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int32_t defer_thread_futex;
static int32_t defer_thread_stop;

/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static DEFINE_URCU_TLS(struct defer_queue, defer_queue);
static CDS_LIST_HEAD(registry_defer);
static pthread_t tid_defer;

static void mutex_lock_defer(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

/*
 * Wake up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
		uatomic_set(&defer_thread_futex, 0);
		if (futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct defer_queue *index;

	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry_defer, list) {
		head = CMM_LOAD_SHARED(index->head);
		num_items += head - index->tail;
	}
	mutex_unlock(&rcu_defer_mutex);
	return num_items;
}

/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	uatomic_dec(&defer_thread_futex);
	/* Write futex before read queue */
	/* Write futex before read defer_thread_stop */
	cmm_smp_mb();
	if (_CMM_LOAD_SHARED(defer_thread_stop)) {
		uatomic_set(&defer_thread_futex, 0);
		pthread_exit(0);
	}
	if (rcu_defer_num_callbacks()) {
		cmm_smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		uatomic_set(&defer_thread_futex, 0);
	} else {
		cmm_smp_rmb();	/* Read queue before read futex */
		while (uatomic_read(&defer_thread_futex) == -1) {
			if (!futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
				/*
				 * Prior wakeups queued by unrelated code
				 * using the same address can cause futex wait
				 * to return 0 even though the futex value is
				 * still -1 (spurious wakeups). Check the
				 * value again in user-space to validate
				 * whether it really differs from -1.
				 */
				continue;
			}
			switch (errno) {
			case EAGAIN:
				/* Value already changed. */
				return;
			case EINTR:
				/* Retry if interrupted by signal. */
				break;	/* Get out of switch. Check again. */
			default:
				/* Unexpected error. */
				urcu_die(errno);
			}
		}
	}
}

/*
 * Must be called after Q.S. is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
		unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		cmm_smp_rmb();	/* read head before q[]. */
		p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (caa_unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (caa_unlikely(p == DQ_FCT_MARK)) {
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	cmm_smp_mb();	/* push tail after having used q[] */
	CMM_STORE_SHARED(queue->tail, i);
}

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = URCU_TLS(defer_queue).head;
	num_items = head - URCU_TLS(defer_queue).tail;
	if (caa_unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&URCU_TLS(defer_queue), head);
}

void rcu_defer_barrier_thread(void)
{
	mutex_lock_defer(&rcu_defer_mutex);
	_rcu_defer_barrier_thread();
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Callbacks queued by other threads are
 * only guaranteed to be executed if there is explicit synchronization
 * between the thread adding to the queue and the thread issuing the
 * defer_barrier call.
 */

void rcu_defer_barrier(void)
{
	struct defer_queue *index;
	unsigned long num_items = 0;

	if (cds_list_empty(&registry_defer))
		return;

	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_for_each_entry(index, &registry_defer, list) {
		index->last_head = CMM_LOAD_SHARED(index->head);
		num_items += index->last_head - index->tail;
	}
	if (caa_likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	cds_list_for_each_entry(index, &registry_defer, list)
		rcu_defer_barrier_queue(index, index->last_head);
end:
	mutex_unlock(&rcu_defer_mutex);
}

/*
 * _defer_rcu - Queue an RCU callback.
 */
static void _defer_rcu(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourselves. Tail can be modified by the
	 * reclamation thread.
	 */
	head = URCU_TLS(defer_queue).head;
	tail = CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail);

	/*
	 * If the queue is full or has reached the threshold, empty it
	 * ourselves. Worst case: must allow 2 supplementary entries for the
	 * fct pointer.
	 */
	if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		urcu_posix_assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		urcu_posix_assert(head - CMM_LOAD_SHARED(URCU_TLS(defer_queue).tail) == 0);
	}

	/*
	 * Encode:
	 * if the function is not changed and the data is aligned and it is
	 * not the marker:
	 *	store the data
	 * otherwise if the function is aligned and it's not the marker:
	 *	store the function with DQ_FCT_BIT
	 *	store the data
	 * otherwise:
	 *	store the marker (DQ_FCT_MARK)
	 *	store the function
	 *	store the data
	 *
	 * Decode: see the comments before 'struct defer_queue'
	 * or the code in rcu_defer_barrier_queue().
	 */
	if (caa_unlikely(URCU_TLS(defer_queue).last_fct_in != fct
			|| DQ_IS_FCT_BIT(p)
			|| p == DQ_FCT_MARK)) {
		URCU_TLS(defer_queue).last_fct_in = fct;
		if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
					DQ_FCT_MARK);
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
					fct);
		} else {
			DQ_SET_FCT_BIT(fct);
			_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK],
					fct);
		}
	}
	_CMM_STORE_SHARED(URCU_TLS(defer_queue).q[head++ & DEFER_QUEUE_MASK], p);
	cmm_smp_wmb();	/* Publish new pointer before head */
			/* Write q[] before head. */
	CMM_STORE_SHARED(URCU_TLS(defer_queue).head, head);
	cmm_smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake up any waiting defer thread.
	 */
	wake_up_defer();
}

static void *thr_defer(void *args __attribute__((unused)))
{
	for (;;) {
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life
		 * by leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		(void) poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void defer_rcu(void (*fct)(void *p), void *p)
{
	_defer_rcu(fct, p);
}

static void start_defer_thread(void)
{
	int ret;
	sigset_t newmask, oldmask;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	if (ret)
		urcu_die(ret);

	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	_CMM_STORE_SHARED(defer_thread_stop, 1);
	/* Store defer_thread_stop before testing futex */
	cmm_smp_mb();
	wake_up_defer();

	ret = pthread_join(tid_defer, &tret);
	urcu_posix_assert(!ret);

	CMM_STORE_SHARED(defer_thread_stop, 0);
	/* defer thread should always exit when futex value is 0 */
	urcu_posix_assert(uatomic_read(&defer_thread_futex) == 0);
}

int rcu_defer_register_thread(void)
{
	int was_empty;

	urcu_posix_assert(URCU_TLS(defer_queue).last_head == 0);
	urcu_posix_assert(URCU_TLS(defer_queue).q == NULL);
	URCU_TLS(defer_queue).q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	if (!URCU_TLS(defer_queue).q)
		return -ENOMEM;

	mutex_lock_defer(&defer_thread_mutex);
	mutex_lock_defer(&rcu_defer_mutex);
	was_empty = cds_list_empty(&registry_defer);
	cds_list_add(&URCU_TLS(defer_queue).list, &registry_defer);
	mutex_unlock(&rcu_defer_mutex);

	if (was_empty)
		start_defer_thread();
	mutex_unlock(&defer_thread_mutex);
	return 0;
}

void rcu_defer_unregister_thread(void)
{
	int is_empty;

	mutex_lock_defer(&defer_thread_mutex);
	mutex_lock_defer(&rcu_defer_mutex);
	cds_list_del(&URCU_TLS(defer_queue).list);
	_rcu_defer_barrier_thread();
	free(URCU_TLS(defer_queue).q);
	URCU_TLS(defer_queue).q = NULL;
	is_empty = cds_list_empty(&registry_defer);
	mutex_unlock(&rcu_defer_mutex);

	if (is_empty)
		stop_defer_thread();
	mutex_unlock(&defer_thread_mutex);
}

void rcu_defer_exit(void)
{
	urcu_posix_assert(cds_list_empty(&registry_defer));
}

#endif /* _URCU_DEFER_IMPL_H */