/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Expandable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical sections allow readers to perform hash
 *   table lookups and use the returned objects safely, by delaying
 *   memory reclaim by a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation only allows expanding the hash table.
 *   It is triggered either through an API call or automatically by
 *   detecting long chains in the add operation.
 * - A resize operation initiated by long chain detection is executed
 *   by a call_rcu thread, which preserves lock-freedom of add and
 *   remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread that successfully
 *   set the "removed" flag (with a cmpxchg) into a node's next pointer
 *   is considered to have succeeded its removal (and thus owns the
 *   node to reclaim). Because we garbage-collect starting from an
 *   invariant node (the start-of-bucket dummy node) up to the
 *   "removed" node (or find a reverse-hash that is higher), we are
 *   sure that a successful traversal of the chain leads to a chain
 *   that is present in the linked-list (the start node is never
 *   removed) and that it does not contain the "removed" node anymore,
 *   even if concurrent delete/add operations are changing the
 *   structure of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it
 *   wants to add its new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order.
 * - call_rcu is used to garbage-collect the old order table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 */
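
/*
 * Example usage sketch (illustrative only; the struct mynode type, the
 * jhash-based hash function and the memcmp-based compare function are
 * assumptions made for this example, not part of the API):
 *
 *	static unsigned long sample_hash(void *key, size_t length,
 *					 unsigned long seed)
 *	{
 *		return jhash(key, length, seed);
 *	}
 *
 *	static unsigned long sample_compare(void *key1, size_t key1_len,
 *					    void *key2, size_t key2_len)
 *	{
 *		if (key1_len != key2_len)
 *			return -1;
 *		return memcmp(key1, key2, key1_len);
 *	}
 *
 *	ht = ht_new(sample_hash, sample_compare, 0x42UL, 0, call_rcu);
 *
 * Writers set node->key/node->key_len, then add within a read-side
 * critical section:
 *
 *	rcu_read_lock();
 *	node = ht_add_unique(ht, &mynode->node);
 *	rcu_read_unlock();
 *
 * Readers look up and use the returned node under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	if (node)
 *		use(caa_container_of(node, struct mynode, node));
 *	rcu_read_unlock();
 */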

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <poll.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...) printf(fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif

#define CHAIN_LEN_TARGET		4
#define CHAIN_LEN_RESIZE_THRESHOLD	8

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
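
/*
 * Note: stealing the two low-order pointer bits assumes that
 * struct rcu_ht_node pointers are at least 4-byte aligned, which
 * malloc() and the structure layout guarantee on the supported
 * architectures.
 */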

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct _rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
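
/*
 * Example: bit_reverse_u8(0x01) == 0x80, and
 * bit_reverse_u32(0x00000001) == 0x80000000. Ordering nodes by
 * reversed hash bits is what makes the list split-ordered: a node's
 * position in the single ordered list never changes when the table
 * doubles in size, only new dummy nodes get inserted between existing
 * nodes.
 */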

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
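
/*
 * get_count_order returns the smallest order (log2) covering x,
 * rounding non-powers-of-two up: get_count_order_ulong(8) == 3,
 * get_count_order_ulong(9) == 4.
 */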

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

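/*
 * Growth heuristic: once a chain grows past CHAIN_LEN_RESIZE_THRESHOLD,
 * request enough doublings to bring it back near CHAIN_LEN_TARGET. For
 * example, a chain of length 8 yields get_count_order_u32(8 - 3) == 3,
 * i.e. a request to multiply the table size by 2^3.
 */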
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len > 100)
		dbg_printf("rculfhash: WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

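/*
 * Atomically grow *ptr to at least v (monotonic maximum); returns the
 * largest value seen. Used to accumulate concurrent resize targets
 * without locking.
 */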
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}

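/*
 * Bucket lookup arithmetic used below: for a hash index within a table
 * of size 2^k, order = log2(index + 1) rounded up selects the per-order
 * dummy table, and index & ((1UL << (order - 1)) - 1) is the offset
 * inside it. E.g. with size 16, index 5 lands in tbl[3] (buckets 4-7)
 * at offset 1; index 0 is always tbl[0][0].
 */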
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _rcu_ht_node *lookup;
	unsigned long hash, index, order;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
		iter_prev = (struct rcu_ht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy_node = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy_node, node);
	return node;
}

static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	struct _rcu_ht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (unlikely(is_removed(next)))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicates that we (and no other thread)
	 * removed the node from the hash table.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("rculfhash: init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("rculfhash: init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
		for (j = 0; j < len; j++) {
			struct rcu_ht_node *new_node =
				(struct rcu_ht_node *) &t->tbl[i][j];

			dbg_printf("rculfhash: init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _ht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("rculfhash: init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}

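/*
 * Dummy table geometry after initialization, e.g. for a size-8 table:
 * tbl[0] holds bucket 0, tbl[1] holds bucket 1, tbl[2] holds buckets
 * 2-3, tbl[3] holds buckets 4-7. Each expansion appends one order
 * table of 2^(order - 1) buckets; previously populated tables are
 * never moved.
 */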
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;
	unsigned long order;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, 1)) + 1;
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (order * sizeof(struct _rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dbg_printf("rculfhash: lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & ((1UL << (order - 1)) - 1));
	node = (struct rcu_ht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0, 0);
}

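/*
 * Returns the node passed in if the add succeeded, or the existing
 * node holding an identical key if the key was already present (in
 * which case the caller still owns the node it passed in).
 */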
struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1, 0);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	struct _rcu_ht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("rculfhash: delete order %lu i %lu hash %lu\n",
				   order, i,
				   bit_reverse_ulong(t->tbl[order][i].reverse_hash));
			assert(is_dummy(t->tbl[order][i].next));
		}
		free(t->tbl[order]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			(nr_dummy)++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("rculfhash: number of dummy nodes: %lu\n", nr_dummy);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size, old_order, new_order;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	old_order = get_count_order_ulong(old_size) + 1;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size == new_size)
		return;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_order * sizeof(struct _rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct _rcu_ht_node *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

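/*
 * Raise the resize target to size << growth_order if it is not already
 * that large; returns the resulting (possibly pre-existing) target.
 */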
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before queuing the work */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}