/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a low-order count of 1 (RCU_GP_COUNT), to accelerate the
 * reader fast path.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
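/*
 * For reference, the read-side fast path in urcu.h is expected to build
 * on these two variables roughly as follows (illustrative sketch, not a
 * verbatim copy of the header):
 *
 *	tmp = urcu_active_readers;
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))	(outermost nesting level)
 *		urcu_active_readers = urcu_gp_ctr;
 *	else
 *		urcu_active_readers = tmp + RCU_GP_COUNT;
 *	barrier();
 *
 * Pre-adding the count of 1 to urcu_gp_ctr lets the outermost
 * rcu_read_lock() be a single copy rather than a copy plus an add.
 */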

long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
#ifndef DEBUG_FULL_MB
static int sig_done;
#endif

void internal_urcu_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}

#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(pthread_t tid)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else

static void force_mb_single_thread(pthread_t tid)
{
	assert(reader_data);
	sig_done = 0;
	smp_mb();	/* write sig_done before sending the signal */
	pthread_kill(tid, SIGURCU);
	/*
	 * Wait for the sighandler (and thus the mb()) to execute on the
	 * target thread. BUSY-LOOP.
	 */
	while (LOAD_REMOTE(sig_done) < 1)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}

static void force_mb_all_threads(void)
{
	struct reader_data *index;

	/*
	 * Ask each thread to execute an smp_mb(), so that we can consider
	 * the compiler barriers around the RCU read-side critical section
	 * as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	/*
	 * pthread_kill has an smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent caches.
	 * smp_mb(): write sig_done before sending the signals.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for the sighandler (and thus the mb()) to execute on every
	 * thread. BUSY-LOOP.
	 */
	while (LOAD_REMOTE(sig_done) < num_readers)
		cpu_relax();
	smp_mb();	/* read sig_done before ending the barrier */
}
#endif
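
/*
 * Note: in this (!DEBUG_FULL_MB) configuration, the read-side primitives
 * are expected to use plain compiler barriers only, e.g. (sketch):
 *
 *	urcu_active_readers = urcu_gp_ctr;
 *	barrier();	(promoted to a real smp_mb() on demand by
 *			 sigurcu_handler() below)
 *
 * which keeps fence instructions off the read-side fast path entirely.
 */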

void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread's urcu_active_readers to show it is no
	 * longer within a read-side critical section started before the
	 * parity switch.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index->tid);
				wait_loops = 0;
			}
		}
	}
}
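
/*
 * For reference, rcu_old_gp_ongoing() (from urcu.h) conceptually returns
 * true when a reader is both active (non-zero nesting count) and still
 * tagged with the previous parity (illustrative sketch, not a verbatim
 * copy of the header):
 *
 *	v = LOAD_REMOTE(*value);
 *	return (v & RCU_GP_CTR_NEST_MASK) &&
 *		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 */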

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new pointer. Must be done within
	 * internal_urcu_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	smp_mc();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are
	 * always accessing data (no progress).
	 */
	smp_mc();

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	smp_mc();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old pointer
	 * be freed. Must be done within internal_urcu_lock because it
	 * iterates on reader threads.
	 */
	force_mb_all_threads();

	internal_urcu_unlock();
}
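
/*
 * Example writer-side usage (illustrative sketch; "struct foo", "gp" and
 * update_foo() are hypothetical and not part of this library; assumes a
 * single writer or external writer-side locking around the "gp" update).
 * The initial force_mb_all_threads() in synchronize_rcu() orders the
 * pointer publication before the parity switch, so a plain store
 * suffices here:
 *
 *	struct foo *gp;
 *
 *	void update_foo(struct foo *new)
 *	{
 *		struct foo *old = gp;
 *
 *		gp = new;		(publish the new version)
 *		synchronize_rcu();	(wait out pre-existing readers)
 *		free(old);		(no reader can still hold "old")
 *	}
 */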

void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		/* Double the array size; never shrink (see below). */
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			/* Replace this entry with the last array element. */
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Not found: did the caller forget to register this thread? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
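
/*
 * Example reader-side usage (illustrative sketch; rcu_read_lock(),
 * rcu_read_unlock() and rcu_dereference() are expected to be provided by
 * urcu.h, while "gp" and use_foo() are hypothetical):
 *
 *	urcu_register_thread();		(once per reader thread)
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		use_foo(p);
 *	rcu_read_unlock();
 *	...
 *	urcu_unregister_thread();	(before the thread exits)
 */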

#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal
	 * handler. It promotes the read-side barrier() into a real
	 * smp_mb() on whichever thread it runs on.
	 */
	smp_mb();
	atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	/* sa_sigaction handlers require SA_SIGINFO and a valid mask. */
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(reader_data);
}
#endif