/*
 * urcu-defer.c
 *
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

void __attribute__((destructor)) urcu_defer_exit(void);

extern void synchronize_rcu(void);

/*
 * urcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
struct defer_queue __thread defer_queue;
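
/*
 * Added commentary (not part of the original source): head and tail are
 * free-running counters, so the number of queued items is head - tail, and
 * q[] slots are addressed as index & DEFER_QUEUE_MASK, which assumes
 * DEFER_QUEUE_SIZE is a power of two. For example, with
 * DEFER_QUEUE_SIZE == 4096, head == 4100 and tail == 4097 describe a queue
 * holding 3 items in slots 5, 6 and 7 of q[].
 */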

/* Thread IDs of registered deferers */
#define INIT_NUM_THREADS 4

struct deferer_registry {
	pthread_t tid;
	struct defer_queue *defer_queue;
	unsigned long last_head;
};

static struct deferer_registry *registry;
static int num_deferers, alloc_deferers;

static pthread_t tid_defer;
static int exit_defer;

static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
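
/*
 * Design note (added commentary): when DISTRUST_SIGNALS_EXTREME is defined,
 * the lock is taken with a trylock/sleep loop instead of a blocking
 * pthread_mutex_lock(). A plausible motivation is to tolerate heavy signal
 * traffic interrupting lock acquisition; the 10 ms poll() call simply backs
 * off between attempts.
 */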

static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Must be called after a quiescent state (grace period) has been reached,
 * so the queued callbacks can safely run.
 */
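
/*
 * Added commentary on the queue encoding consumed by
 * rcu_defer_barrier_queue(): each q[] entry is either a data pointer for the
 * current callback, or a callback-change record. When a new callback's
 * address has its low bit free, the producer stores the address with
 * DQ_FCT_BIT set, followed by the data pointer. When the address itself
 * could be mistaken for a marked entry, the producer emits the DQ_FCT_MARK
 * sentinel, then the raw function pointer, then the data pointer. For
 * example, the sequence
 *
 *	[fct1 | DQ_FCT_BIT] [p1] [p2] [DQ_FCT_MARK] [fct2] [p3]
 *
 * decodes to the calls fct1(p1), fct1(p2), fct2(p3).
 */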
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		smp_rmb();	/* read head before q[]. */
		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}

static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}


void rcu_defer_barrier_thread(void)
{
	internal_urcu_lock(&urcu_defer_mutex);
	_rcu_defer_barrier_thread();
	internal_urcu_unlock(&urcu_defer_mutex);
}
/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to an rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they may
 * be left for the next batch). Such callbacks are only guaranteed to be
 * executed if there is explicit synchronization between the thread adding
 * them to the queue and the thread issuing the defer_barrier call.
 */
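
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source); "head", "new_node" and "old" are hypothetical:
 *
 *	rcu_defer_register_thread();
 *	...
 *	old = rcu_dereference(head);
 *	rcu_assign_pointer(head, new_node);
 *	rcu_defer_queue(free, old);	// freed after a grace period
 *	...
 *	rcu_defer_barrier();		// force queued callbacks to run now
 *	rcu_defer_unregister_thread();
 */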

void rcu_defer_barrier(void)
{
	struct deferer_registry *index;
	unsigned long num_items = 0;

	if (!registry)
		return;

	internal_urcu_lock(&urcu_defer_mutex);
	for (index = registry; index < registry + num_deferers; index++) {
		index->last_head = LOAD_SHARED(index->defer_queue->head);
		num_items += index->last_head - index->defer_queue->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	for (index = registry; index < registry + num_deferers; index++)
		rcu_defer_barrier_queue(index->defer_queue,
					index->last_head);
end:
	internal_urcu_unlock(&urcu_defer_mutex);
}

void *thr_defer(void *args)
{
	for (;;) {
		if (LOAD_SHARED(exit_defer))
			break;
		poll(NULL, 0, 100);	/* wait for 100ms */
		rcu_defer_barrier();
	}

	return NULL;
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_defer_queue(void (*fct)(void *p), void *p)
{
	_rcu_defer_queue(fct, p);
}
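
/*
 * Added commentary: _rcu_defer_queue() is defined in urcu-defer-static.h,
 * which LGPL-compatible code may include directly; other code links against
 * this exported wrapper symbol instead. This is also why the comment near
 * the top of this file says not to #define _LGPL_SOURCE here: the wrapper
 * must be emitted as a real symbol rather than inlined away.
 */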

static void rcu_add_deferer(pthread_t id)
{
	struct deferer_registry *oldarray;

	if (!registry) {
		alloc_deferers = INIT_NUM_THREADS;
		num_deferers = 0;
		registry =
			malloc(sizeof(struct deferer_registry) * alloc_deferers);
		assert(registry);
	}
	if (alloc_deferers < num_deferers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct deferer_registry)
				* (alloc_deferers << 1));
		assert(registry);
		memcpy(registry, oldarray,
			sizeof(struct deferer_registry) * alloc_deferers);
		alloc_deferers <<= 1;
		free(oldarray);
	}
	registry[num_deferers].tid = id;
	/* Reference to the TLS of _this_ deferer thread. */
	registry[num_deferers].defer_queue = &defer_queue;
	registry[num_deferers].last_head = 0;
	num_deferers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_deferer(pthread_t id)
{
	struct deferer_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_deferers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_deferers - 1],
				sizeof(struct deferer_registry));
			registry[num_deferers - 1].tid = 0;
			registry[num_deferers - 1].defer_queue = NULL;
			registry[num_deferers - 1].last_head = 0;
			num_deferers--;
			return;
		}
	}
	/* Not found: the thread was probably never registered. */
	assert(0);
}

static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer, NULL);
	assert(!ret);
}

static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	STORE_SHARED(exit_defer, 1);
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}

void rcu_defer_register_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	assert(defer_queue.q);
	rcu_add_deferer(pthread_self());
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	if (deferers == 1)
		start_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void rcu_defer_unregister_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	rcu_remove_deferer(pthread_self());
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	if (deferers == 0)
		stop_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}

void urcu_defer_exit(void)
{
	free(registry);
}