| 1 | /* |
| 2 | * rculfhash.c |
| 3 | * |
| 4 | * Userspace RCU library - Lock-Free Resizable RCU Hash Table |
| 5 | * |
| 6 | * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
| 7 | * |
| 8 | * This library is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU Lesser General Public |
| 10 | * License as published by the Free Software Foundation; either |
| 11 | * version 2.1 of the License, or (at your option) any later version. |
| 12 | * |
| 13 | * This library is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 16 | * Lesser General Public License for more details. |
| 17 | * |
| 18 | * You should have received a copy of the GNU Lesser General Public |
| 19 | * License along with this library; if not, write to the Free Software |
| 20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 21 | */ |
| 22 | |
| 23 | /* |
| 24 | * Based on the following articles: |
| 25 | * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free |
| 26 | * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405. |
| 27 | * - Michael, M. M. High performance dynamic lock-free hash tables |
| 28 | * and list-based sets. In Proceedings of the fourteenth annual ACM |
| 29 | * symposium on Parallel algorithms and architectures, ACM Press, |
| 30 | * (2002), 73-82. |
| 31 | * |
| 32 | * Some specificities of this Lock-Free Resizable RCU Hash Table |
| 33 | * implementation: |
| 34 | * |
| 35 | * - RCU read-side critical section allows readers to perform hash |
| 36 | * table lookups and use the returned objects safely by delaying |
 *   memory reclaim for a grace period.
| 38 | * - Add and remove operations are lock-free, and do not need to |
| 39 | * allocate memory. They need to be executed within RCU read-side |
| 40 | * critical section to ensure the objects they read are valid and to |
| 41 | * deal with the cmpxchg ABA problem. |
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
| 45 | * - The resize operation executes concurrently with add/remove/lookup. |
| 46 | * - Hash table nodes are contained within a split-ordered list. This |
| 47 | * list is ordered by incrementing reversed-bits-hash value. |
| 48 | * - An index of dummy nodes is kept. These dummy nodes are the hash |
| 49 | * table "buckets", and they are also chained together in the |
| 50 | * split-ordered list, which allows recursive expansion. |
| 51 | * - The resize operation for small tables only allows expanding the hash table. |
| 52 | * It is triggered automatically by detecting long chains in the add |
| 53 | * operation. |
| 54 | * - The resize operation for larger tables (and available through an |
| 55 | * API) allows both expanding and shrinking the hash table. |
| 56 | * - Per-CPU Split-counters are used to keep track of the number of |
| 57 | * nodes within the hash table for automatic resize triggering. |
| 58 | * - Resize operation initiated by long chain detection is executed by a |
| 59 | * call_rcu thread, which keeps lock-freedom of add and remove. |
| 60 | * - Resize operations are protected by a mutex. |
| 61 | * - The removal operation is split in two parts: first, a "removed" |
| 62 | * flag is set in the next pointer within the node to remove. Then, |
| 63 | * a "garbage collection" is performed in the bucket containing the |
| 64 | * removed node (from the start of the bucket up to the removed node). |
| 65 | * All encountered nodes with "removed" flag set in their next |
| 66 | * pointers are removed from the linked-list. If the cmpxchg used for |
| 67 | * removal fails (due to concurrent garbage-collection or concurrent |
| 68 | * add), we retry from the beginning of the bucket. This ensures that |
| 69 | * the node with "removed" flag set is removed from the hash table |
| 70 | * (not visible to lookups anymore) before the RCU read-side critical |
| 71 | * section held across removal ends. Furthermore, this ensures that |
| 72 | * the node with "removed" flag set is removed from the linked-list |
 *   before its memory is reclaimed. Only the thread that successfully
 *   set the "removed" flag (with a cmpxchg) into a node's next pointer
 *   is considered to have succeeded its removal (and thus owns the node
 *   to reclaim). Because we garbage-collect starting from an invariant
 *   node (the start-of-bucket dummy node) up to the "removed" node (or
 *   until we find a higher reverse-hash), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if
| 82 | * concurrent delete/add operations are changing the structure of the |
| 83 | * list concurrently. |
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with the removed flag set in the bucket where it
 *   wants to add its new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
| 90 | * expanded by the resize operation. This order table allows finding |
| 91 | * the "dummy node" tables. |
| 92 | * - There is one dummy node table per hash index order. The size of |
| 93 | * each dummy node table is half the number of hashes contained in |
| 94 | * this order. |
| 95 | * - call_rcu is used to garbage-collect the old order table. |
| 96 | * - The per-order dummy node tables contain a compact version of the |
| 97 | * hash table nodes. These tables are invariant after they are |
| 98 | * populated into the hash table. |
| 99 | * |
| 100 | * A bit of ascii art explanation: |
| 101 | * |
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special-case.
| 104 | * |
| 105 | * This shows the nodes for a small table ordered by reversed bits: |
| 106 | * |
| 107 | * bits reverse |
| 108 | * 0 000 000 |
| 109 | * 4 100 001 |
| 110 | * 2 010 010 |
| 111 | * 6 110 011 |
| 112 | * 1 001 100 |
| 113 | * 5 101 101 |
| 114 | * 3 011 110 |
| 115 | * 7 111 111 |
| 116 | * |
| 117 | * This shows the nodes in order of non-reversed bits, linked by |
| 118 | * reversed-bit order. |
| 119 | * |
| 120 | * order bits reverse |
| 121 | * 0 0 000 000 |
| 122 | * | |
| 123 | * 1 | 1 001 100 <- <- |
| 124 | * | | | | |
| 125 | * 2 | | 2 010 010 | | |
| 126 | * | | | 3 011 110 | <- | |
| 127 | * | | | | | | | |
| 128 | * 3 -> | | | 4 100 001 | | |
| 129 | * -> | | 5 101 101 | |
| 130 | * -> | 6 110 011 |
| 131 | * -> 7 111 111 |
| 132 | */ |
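
/*
 * A minimal usage sketch (illustrative only, kept out of the build): it
 * assumes a hash table already created with the public cds_lfht_new()
 * wrapper (or _cds_lfht_new() below), and a caller-defined "struct mynode"
 * embedding a struct cds_lfht_node.  All "my"-prefixed names are
 * hypothetical.
 */
#if 0
struct mynode {
	int value;			/* user payload, also used as key */
	struct cds_lfht_node node;	/* chaining within the hash table */
};

static
void mynode_add_if_absent(struct cds_lfht *ht, int key)
{
	struct cds_lfht_node *found;
	struct mynode *n;

	/* Lookups and updates must run within an RCU read-side C.S. */
	rcu_read_lock();
	found = cds_lfht_lookup(ht, &key, sizeof(key));
	if (!found) {
		n = malloc(sizeof(*n));
		assert(n);
		n->value = key;
		/*
		 * Key setup is normally done through the public header
		 * helper; here we set the fields this file uses directly.
		 */
		n->node.key = &n->value;
		n->node.key_len = sizeof(n->value);
		cds_lfht_add(ht, &n->node);
	}
	rcu_read_unlock();
}
#endif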
| 133 | |
| 134 | #define _LGPL_SOURCE |
| 135 | #include <stdlib.h> |
| 136 | #include <errno.h> |
| 137 | #include <assert.h> |
| 138 | #include <stdio.h> |
| 139 | #include <stdint.h> |
| 140 | #include <string.h> |
| 141 | |
| 142 | #include "config.h" |
| 143 | #include <urcu.h> |
| 144 | #include <urcu-call-rcu.h> |
| 145 | #include <urcu/arch.h> |
| 146 | #include <urcu/uatomic.h> |
| 147 | #include <urcu/jhash.h> |
| 148 | #include <urcu/compiler.h> |
| 149 | #include <urcu/rculfhash.h> |
#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <poll.h>
| 152 | |
| 153 | #ifdef DEBUG |
| 154 | #define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args) |
| 155 | #else |
| 156 | #define dbg_printf(fmt, args...) |
| 157 | #endif |
| 158 | |
| 159 | /* |
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize is
 * required. We use the bucket length as an indicator of the need to
 * expand for small tables and for machines lacking per-cpu data support.
| 164 | */ |
| 165 | #define COUNT_COMMIT_ORDER 10 |
| 166 | #define CHAIN_LEN_TARGET 1 |
| 167 | #define CHAIN_LEN_RESIZE_THRESHOLD 3 |
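
/*
 * Worked example of how these thresholds combine (informal): with
 * COUNT_COMMIT_ORDER = 10, each per-cpu split-counter folds its
 * contribution into the global ht->count only once every 2^10 = 1024
 * operations.  With CHAIN_LEN_RESIZE_THRESHOLD = 3, ht_count_add()
 * requests a resize once (count >> 3) reaches the current table size,
 * i.e. roughly 8 nodes per bucket on average, and the resize target is
 * then set close to the node count (CHAIN_LEN_TARGET = 1, about one
 * node per bucket).
 */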
| 168 | |
| 169 | /* |
| 170 | * Define the minimum table size. |
| 171 | */ |
| 172 | #define MIN_TABLE_SIZE 1 |
| 173 | |
| 174 | #if (CAA_BITS_PER_LONG == 32) |
| 175 | #define MAX_TABLE_ORDER 32 |
| 176 | #else |
| 177 | #define MAX_TABLE_ORDER 64 |
| 178 | #endif |
| 179 | |
| 180 | /* |
| 181 | * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink. |
| 182 | */ |
| 183 | #define MIN_PARTITION_PER_THREAD_ORDER 12 |
| 184 | #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER) |
| 185 | |
| 186 | #ifndef min |
| 187 | #define min(a, b) ((a) < (b) ? (a) : (b)) |
| 188 | #endif |
| 189 | |
| 190 | #ifndef max |
| 191 | #define max(a, b) ((a) > (b) ? (a) : (b)) |
| 192 | #endif |
| 193 | |
| 194 | /* |
| 195 | * The removed flag needs to be updated atomically with the pointer. |
| 196 | * It indicates that no node must attach to the node scheduled for |
| 197 | * removal. The gc flag also needs to be updated atomically with the |
| 198 | * pointer. It indicates that node garbage collection must be performed. |
 * The "removed" and "gc" flags are separate for the benefit of the
 * replacement operation.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
| 203 | */ |
| 204 | #define REMOVED_FLAG (1UL << 0) |
| 205 | #define GC_FLAG (1UL << 1) |
| 206 | #define DUMMY_FLAG (1UL << 2) |
| 207 | #define FLAGS_MASK ((1UL << 3) - 1) |
| 208 | |
| 209 | /* Value of the end pointer. Should not interact with flags. */ |
| 210 | #define END_VALUE NULL |
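
/*
 * Illustration of the pointer tagging above (assuming struct cds_lfht_node
 * is at least 8-byte aligned, so the three low-order bits of a next
 * pointer are always zero and can carry flags): the flags stored in a
 * node's own ->next field describe that node itself.
 *
 *	->next value				meaning
 *	succ					live data node
 *	succ | DUMMY_FLAG			this node is a dummy (bucket) node
 *	succ | REMOVED_FLAG			logically deleted, pinned (replacement in progress)
 *	succ | REMOVED_FLAG | GC_FLAG		logically deleted, may be unlinked by gc
 */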
| 211 | |
| 212 | struct ht_items_count { |
| 213 | unsigned long add, del; |
| 214 | } __attribute__((aligned(CAA_CACHE_LINE_SIZE))); |
| 215 | |
| 216 | struct rcu_level { |
| 217 | struct rcu_head head; |
| 218 | struct _cds_lfht_node nodes[0]; |
| 219 | }; |
| 220 | |
| 221 | struct rcu_table { |
| 222 | unsigned long size; /* always a power of 2, shared (RCU) */ |
| 223 | unsigned long resize_target; |
| 224 | int resize_initiated; |
| 225 | struct rcu_level *tbl[MAX_TABLE_ORDER]; |
| 226 | }; |
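
/*
 * Sketch of how a bucket index maps into the per-order dummy node tables
 * above (this is the computation repeated in _cds_lfht_add, _cds_lfht_del
 * and cds_lfht_lookup):
 *
 *	index = hash & (size - 1);
 *	order = get_count_order_ulong(index + 1);
 *	dummy = &tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 *
 * For example, with size = 8: index 0 lives in tbl[0]->nodes[0], index 1
 * in tbl[1]->nodes[0], indexes 2-3 in tbl[2]->nodes[0..1], and indexes
 * 4-7 in tbl[3]->nodes[0..3].  Each order-i table (i > 0) thus holds the
 * 2^(i-1) buckets that appear when the table grows from 2^(i-1) to 2^i
 * entries, which is why these tables are invariant once populated.
 */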
| 227 | |
| 228 | struct cds_lfht { |
| 229 | struct rcu_table t; |
| 230 | cds_lfht_hash_fct hash_fct; |
| 231 | cds_lfht_compare_fct compare_fct; |
| 232 | unsigned long hash_seed; |
| 233 | int flags; |
| 234 | /* |
| 235 | * We need to put the work threads offline (QSBR) when taking this |
| 236 | * mutex, because we use synchronize_rcu within this mutex critical |
| 237 | * section, which waits on read-side critical sections, and could |
| 238 | * therefore cause grace-period deadlock if we hold off RCU G.P. |
| 239 | * completion. |
| 240 | */ |
| 241 | pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */ |
| 242 | unsigned int in_progress_resize, in_progress_destroy; |
| 243 | void (*cds_lfht_call_rcu)(struct rcu_head *head, |
| 244 | void (*func)(struct rcu_head *head)); |
| 245 | void (*cds_lfht_synchronize_rcu)(void); |
| 246 | void (*cds_lfht_rcu_read_lock)(void); |
| 247 | void (*cds_lfht_rcu_read_unlock)(void); |
| 248 | void (*cds_lfht_rcu_thread_offline)(void); |
| 249 | void (*cds_lfht_rcu_thread_online)(void); |
| 250 | void (*cds_lfht_rcu_register_thread)(void); |
| 251 | void (*cds_lfht_rcu_unregister_thread)(void); |
| 252 | pthread_attr_t *resize_attr; /* Resize threads attributes */ |
| 253 | unsigned long count; /* global approximate item count */ |
| 254 | struct ht_items_count *percpu_count; /* per-cpu item count */ |
| 255 | }; |
| 256 | |
| 257 | struct rcu_resize_work { |
| 258 | struct rcu_head head; |
| 259 | struct cds_lfht *ht; |
| 260 | }; |
| 261 | |
| 262 | struct partition_resize_work { |
| 263 | struct rcu_head head; |
| 264 | struct cds_lfht *ht; |
| 265 | unsigned long i, start, len; |
| 266 | void (*fct)(struct cds_lfht *ht, unsigned long i, |
| 267 | unsigned long start, unsigned long len); |
| 268 | }; |
| 269 | |
| 270 | enum add_mode { |
| 271 | ADD_DEFAULT = 0, |
| 272 | ADD_UNIQUE = 1, |
| 273 | ADD_REPLACE = 2, |
| 274 | }; |
| 275 | |
| 276 | static |
| 277 | struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, |
| 278 | unsigned long size, |
| 279 | struct cds_lfht_node *node, |
| 280 | enum add_mode mode, int dummy); |
| 281 | |
| 282 | static |
| 283 | int _cds_lfht_del(struct cds_lfht *ht, unsigned long size, |
| 284 | struct cds_lfht_node *node, |
| 285 | int dummy_removal, int do_gc); |
| 286 | |
| 287 | /* |
| 288 | * Algorithm to reverse bits in a word by lookup table, extended to |
| 289 | * 64-bit words. |
| 290 | * Source: |
| 291 | * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable |
| 292 | * Originally from Public Domain. |
| 293 | */ |
| 294 | |
| 295 | static const uint8_t BitReverseTable256[256] = |
| 296 | { |
| 297 | #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64 |
| 298 | #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16) |
| 299 | #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 ) |
| 300 | R6(0), R6(2), R6(1), R6(3) |
| 301 | }; |
| 302 | #undef R2 |
| 303 | #undef R4 |
| 304 | #undef R6 |
| 305 | |
| 306 | static |
| 307 | uint8_t bit_reverse_u8(uint8_t v) |
| 308 | { |
| 309 | return BitReverseTable256[v]; |
| 310 | } |
| 311 | |
| 312 | static __attribute__((unused)) |
| 313 | uint32_t bit_reverse_u32(uint32_t v) |
| 314 | { |
| 315 | return ((uint32_t) bit_reverse_u8(v) << 24) | |
| 316 | ((uint32_t) bit_reverse_u8(v >> 8) << 16) | |
| 317 | ((uint32_t) bit_reverse_u8(v >> 16) << 8) | |
| 318 | ((uint32_t) bit_reverse_u8(v >> 24)); |
| 319 | } |
| 320 | |
| 321 | static __attribute__((unused)) |
| 322 | uint64_t bit_reverse_u64(uint64_t v) |
| 323 | { |
| 324 | return ((uint64_t) bit_reverse_u8(v) << 56) | |
| 325 | ((uint64_t) bit_reverse_u8(v >> 8) << 48) | |
| 326 | ((uint64_t) bit_reverse_u8(v >> 16) << 40) | |
| 327 | ((uint64_t) bit_reverse_u8(v >> 24) << 32) | |
| 328 | ((uint64_t) bit_reverse_u8(v >> 32) << 24) | |
| 329 | ((uint64_t) bit_reverse_u8(v >> 40) << 16) | |
| 330 | ((uint64_t) bit_reverse_u8(v >> 48) << 8) | |
| 331 | ((uint64_t) bit_reverse_u8(v >> 56)); |
| 332 | } |
| 333 | |
| 334 | static |
| 335 | unsigned long bit_reverse_ulong(unsigned long v) |
| 336 | { |
| 337 | #if (CAA_BITS_PER_LONG == 32) |
| 338 | return bit_reverse_u32(v); |
| 339 | #else |
| 340 | return bit_reverse_u64(v); |
| 341 | #endif |
| 342 | } |
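
/*
 * Reverse-bit ordering is what makes buckets splittable without moving
 * nodes: keys that share a bucket at table size 2^k remain contiguous in
 * the split-ordered list when the size doubles.  Small worked example
 * (3-bit hashes, matching the ascii art in the file header): hashes
 * 1 (001) and 5 (101) both map to bucket 1 while size = 4; their reversed
 * values are 100 and 101, so they are neighbours in the list.  Growing to
 * size = 8 only requires linking the new bucket-5 dummy node so that
 * lookups of hash 5 start there; no existing node is relinked.
 */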
| 343 | |
| 344 | /* |
| 345 | * fls: returns the position of the most significant bit. |
| 346 | * Returns 0 if no bit is set, else returns the position of the most |
| 347 | * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit). |
| 348 | */ |
| 349 | #if defined(__i386) || defined(__x86_64) |
| 350 | static inline |
| 351 | unsigned int fls_u32(uint32_t x) |
| 352 | { |
| 353 | int r; |
| 354 | |
| 355 | asm("bsrl %1,%0\n\t" |
| 356 | "jnz 1f\n\t" |
| 357 | "movl $-1,%0\n\t" |
| 358 | "1:\n\t" |
| 359 | : "=r" (r) : "rm" (x)); |
| 360 | return r + 1; |
| 361 | } |
| 362 | #define HAS_FLS_U32 |
| 363 | #endif |
| 364 | |
| 365 | #if defined(__x86_64) |
| 366 | static inline |
| 367 | unsigned int fls_u64(uint64_t x) |
| 368 | { |
| 369 | long r; |
| 370 | |
| 371 | asm("bsrq %1,%0\n\t" |
| 372 | "jnz 1f\n\t" |
| 373 | "movq $-1,%0\n\t" |
| 374 | "1:\n\t" |
| 375 | : "=r" (r) : "rm" (x)); |
| 376 | return r + 1; |
| 377 | } |
| 378 | #define HAS_FLS_U64 |
| 379 | #endif |
| 380 | |
| 381 | #ifndef HAS_FLS_U64 |
| 382 | static __attribute__((unused)) |
| 383 | unsigned int fls_u64(uint64_t x) |
| 384 | { |
| 385 | unsigned int r = 64; |
| 386 | |
| 387 | if (!x) |
| 388 | return 0; |
| 389 | |
| 390 | if (!(x & 0xFFFFFFFF00000000ULL)) { |
| 391 | x <<= 32; |
| 392 | r -= 32; |
| 393 | } |
| 394 | if (!(x & 0xFFFF000000000000ULL)) { |
| 395 | x <<= 16; |
| 396 | r -= 16; |
| 397 | } |
| 398 | if (!(x & 0xFF00000000000000ULL)) { |
| 399 | x <<= 8; |
| 400 | r -= 8; |
| 401 | } |
| 402 | if (!(x & 0xF000000000000000ULL)) { |
| 403 | x <<= 4; |
| 404 | r -= 4; |
| 405 | } |
| 406 | if (!(x & 0xC000000000000000ULL)) { |
| 407 | x <<= 2; |
| 408 | r -= 2; |
| 409 | } |
| 410 | if (!(x & 0x8000000000000000ULL)) { |
| 411 | x <<= 1; |
| 412 | r -= 1; |
| 413 | } |
| 414 | return r; |
| 415 | } |
| 416 | #endif |
| 417 | |
| 418 | #ifndef HAS_FLS_U32 |
| 419 | static __attribute__((unused)) |
| 420 | unsigned int fls_u32(uint32_t x) |
| 421 | { |
| 422 | unsigned int r = 32; |
| 423 | |
| 424 | if (!x) |
| 425 | return 0; |
| 426 | if (!(x & 0xFFFF0000U)) { |
| 427 | x <<= 16; |
| 428 | r -= 16; |
| 429 | } |
| 430 | if (!(x & 0xFF000000U)) { |
| 431 | x <<= 8; |
| 432 | r -= 8; |
| 433 | } |
| 434 | if (!(x & 0xF0000000U)) { |
| 435 | x <<= 4; |
| 436 | r -= 4; |
| 437 | } |
| 438 | if (!(x & 0xC0000000U)) { |
| 439 | x <<= 2; |
| 440 | r -= 2; |
| 441 | } |
| 442 | if (!(x & 0x80000000U)) { |
| 443 | x <<= 1; |
| 444 | r -= 1; |
| 445 | } |
| 446 | return r; |
| 447 | } |
| 448 | #endif |
| 449 | |
| 450 | unsigned int fls_ulong(unsigned long x) |
| 451 | { |
#if (CAA_BITS_PER_LONG == 32)
| 453 | return fls_u32(x); |
| 454 | #else |
| 455 | return fls_u64(x); |
| 456 | #endif |
| 457 | } |
| 458 | |
| 459 | int get_count_order_u32(uint32_t x) |
| 460 | { |
| 461 | int order; |
| 462 | |
| 463 | order = fls_u32(x) - 1; |
| 464 | if (x & (x - 1)) |
| 465 | order++; |
| 466 | return order; |
| 467 | } |
| 468 | |
| 469 | int get_count_order_ulong(unsigned long x) |
| 470 | { |
| 471 | int order; |
| 472 | |
| 473 | order = fls_ulong(x) - 1; |
| 474 | if (x & (x - 1)) |
| 475 | order++; |
| 476 | return order; |
| 477 | } |
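
/*
 * Worked examples for the helpers above: fls_u32(0) = 0, fls_u32(1) = 1,
 * fls_u32(0x80000000) = 32.  get_count_order_u32(x) returns ceil(log2(x))
 * for x >= 1: get_count_order_u32(1) = 0, get_count_order_u32(4) = 2,
 * get_count_order_u32(5) = 3.  The hash table uses the count order to
 * select which per-order dummy node table a bucket index belongs to.
 */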
| 478 | |
| 479 | #ifdef POISON_FREE |
| 480 | #define poison_free(ptr) \ |
| 481 | do { \ |
| 482 | memset(ptr, 0x42, sizeof(*(ptr))); \ |
| 483 | free(ptr); \ |
| 484 | } while (0) |
| 485 | #else |
| 486 | #define poison_free(ptr) free(ptr) |
| 487 | #endif |
| 488 | |
| 489 | static |
| 490 | void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth); |
| 491 | |
| 492 | /* |
| 493 | * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are |
| 494 | * available, then we support hash table item accounting. |
 * In the unfortunate event that the number of CPUs reported is
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
| 497 | */ |
| 498 | #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) |
| 499 | |
| 500 | static |
| 501 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
| 502 | unsigned long count); |
| 503 | |
| 504 | static long nr_cpus_mask = -1; |
| 505 | |
| 506 | static |
| 507 | struct ht_items_count *alloc_per_cpu_items_count(void) |
| 508 | { |
| 509 | struct ht_items_count *count; |
| 510 | |
| 511 | switch (nr_cpus_mask) { |
| 512 | case -2: |
| 513 | return NULL; |
| 514 | case -1: |
| 515 | { |
| 516 | long maxcpus; |
| 517 | |
| 518 | maxcpus = sysconf(_SC_NPROCESSORS_CONF); |
| 519 | if (maxcpus <= 0) { |
| 520 | nr_cpus_mask = -2; |
| 521 | return NULL; |
| 522 | } |
| 523 | /* |
| 524 | * round up number of CPUs to next power of two, so we |
| 525 | * can use & for modulo. |
| 526 | */ |
| 527 | maxcpus = 1UL << get_count_order_ulong(maxcpus); |
| 528 | nr_cpus_mask = maxcpus - 1; |
| 529 | } |
| 530 | /* Fall-through */ |
| 531 | default: |
| 532 | return calloc(nr_cpus_mask + 1, sizeof(*count)); |
| 533 | } |
| 534 | } |
| 535 | |
| 536 | static |
| 537 | void free_per_cpu_items_count(struct ht_items_count *count) |
| 538 | { |
| 539 | poison_free(count); |
| 540 | } |
| 541 | |
| 542 | static |
| 543 | int ht_get_cpu(void) |
| 544 | { |
| 545 | int cpu; |
| 546 | |
| 547 | assert(nr_cpus_mask >= 0); |
| 548 | cpu = sched_getcpu(); |
| 549 | if (unlikely(cpu < 0)) |
| 550 | return cpu; |
| 551 | else |
| 552 | return cpu & nr_cpus_mask; |
| 553 | } |
| 554 | |
| 555 | static |
| 556 | void ht_count_add(struct cds_lfht *ht, unsigned long size) |
| 557 | { |
| 558 | unsigned long percpu_count; |
| 559 | int cpu; |
| 560 | |
| 561 | if (unlikely(!ht->percpu_count)) |
| 562 | return; |
| 563 | cpu = ht_get_cpu(); |
| 564 | if (unlikely(cpu < 0)) |
| 565 | return; |
| 566 | percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1); |
| 567 | if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { |
| 568 | unsigned long count; |
| 569 | |
| 570 | dbg_printf("add percpu %lu\n", percpu_count); |
| 571 | count = uatomic_add_return(&ht->count, |
| 572 | 1UL << COUNT_COMMIT_ORDER); |
| 573 | /* If power of 2 */ |
| 574 | if (!(count & (count - 1))) { |
| 575 | if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size) |
| 576 | return; |
| 577 | dbg_printf("add set global %lu\n", count); |
| 578 | cds_lfht_resize_lazy_count(ht, size, |
| 579 | count >> (CHAIN_LEN_TARGET - 1)); |
| 580 | } |
| 581 | } |
| 582 | } |
| 583 | |
| 584 | static |
| 585 | void ht_count_del(struct cds_lfht *ht, unsigned long size) |
| 586 | { |
| 587 | unsigned long percpu_count; |
| 588 | int cpu; |
| 589 | |
| 590 | if (unlikely(!ht->percpu_count)) |
| 591 | return; |
| 592 | cpu = ht_get_cpu(); |
| 593 | if (unlikely(cpu < 0)) |
| 594 | return; |
| 595 | percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1); |
| 596 | if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { |
| 597 | unsigned long count; |
| 598 | |
| 599 | dbg_printf("del percpu %lu\n", percpu_count); |
| 600 | count = uatomic_add_return(&ht->count, |
| 601 | -(1UL << COUNT_COMMIT_ORDER)); |
| 602 | /* If power of 2 */ |
| 603 | if (!(count & (count - 1))) { |
| 604 | if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size) |
| 605 | return; |
| 606 | dbg_printf("del set global %lu\n", count); |
| 607 | cds_lfht_resize_lazy_count(ht, size, |
| 608 | count >> (CHAIN_LEN_TARGET - 1)); |
| 609 | } |
| 610 | } |
| 611 | } |
| 612 | |
| 613 | #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ |
| 614 | |
| 615 | static const long nr_cpus_mask = -1; |
| 616 | |
| 617 | static |
| 618 | struct ht_items_count *alloc_per_cpu_items_count(void) |
| 619 | { |
| 620 | return NULL; |
| 621 | } |
| 622 | |
| 623 | static |
| 624 | void free_per_cpu_items_count(struct ht_items_count *count) |
| 625 | { |
| 626 | } |
| 627 | |
| 628 | static |
| 629 | void ht_count_add(struct cds_lfht *ht, unsigned long size) |
| 630 | { |
| 631 | } |
| 632 | |
| 633 | static |
| 634 | void ht_count_del(struct cds_lfht *ht, unsigned long size) |
| 635 | { |
| 636 | } |
| 637 | |
| 638 | #endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ |
| 639 | |
| 640 | |
| 641 | static |
| 642 | void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len) |
| 643 | { |
| 644 | unsigned long count; |
| 645 | |
| 646 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
| 647 | return; |
| 648 | count = uatomic_read(&ht->count); |
| 649 | /* |
| 650 | * Use bucket-local length for small table expand and for |
| 651 | * environments lacking per-cpu data support. |
| 652 | */ |
| 653 | if (count >= (1UL << COUNT_COMMIT_ORDER)) |
| 654 | return; |
| 655 | if (chain_len > 100) |
| 656 | dbg_printf("WARNING: large chain length: %u.\n", |
| 657 | chain_len); |
| 658 | if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) |
| 659 | cds_lfht_resize_lazy(ht, size, |
| 660 | get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1))); |
| 661 | } |
| 662 | |
| 663 | static |
| 664 | struct cds_lfht_node *clear_flag(struct cds_lfht_node *node) |
| 665 | { |
| 666 | return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK); |
| 667 | } |
| 668 | |
| 669 | static |
| 670 | int is_removed(struct cds_lfht_node *node) |
| 671 | { |
| 672 | return ((unsigned long) node) & REMOVED_FLAG; |
| 673 | } |
| 674 | |
| 675 | static |
| 676 | struct cds_lfht_node *flag_removed(struct cds_lfht_node *node) |
| 677 | { |
| 678 | return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG); |
| 679 | } |
| 680 | |
| 681 | static |
| 682 | int is_gc(struct cds_lfht_node *node) |
| 683 | { |
| 684 | return ((unsigned long) node) & GC_FLAG; |
| 685 | } |
| 686 | |
| 687 | static |
| 688 | struct cds_lfht_node *flag_gc(struct cds_lfht_node *node) |
| 689 | { |
| 690 | return (struct cds_lfht_node *) (((unsigned long) node) | GC_FLAG); |
| 691 | } |
| 692 | |
| 693 | static |
| 694 | int is_dummy(struct cds_lfht_node *node) |
| 695 | { |
| 696 | return ((unsigned long) node) & DUMMY_FLAG; |
| 697 | } |
| 698 | |
| 699 | static |
| 700 | struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node) |
| 701 | { |
| 702 | return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG); |
| 703 | } |
| 704 | |
| 705 | static |
| 706 | struct cds_lfht_node *get_end(void) |
| 707 | { |
| 708 | return (struct cds_lfht_node *) END_VALUE; |
| 709 | } |
| 710 | |
| 711 | static |
| 712 | int is_end(struct cds_lfht_node *node) |
| 713 | { |
| 714 | return clear_flag(node) == (struct cds_lfht_node *) END_VALUE; |
| 715 | } |
| 716 | |
| 717 | static |
| 718 | unsigned long _uatomic_max(unsigned long *ptr, unsigned long v) |
| 719 | { |
| 720 | unsigned long old1, old2; |
| 721 | |
| 722 | old1 = uatomic_read(ptr); |
| 723 | do { |
| 724 | old2 = old1; |
| 725 | if (old2 >= v) |
| 726 | return old2; |
| 727 | } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2); |
| 728 | return v; |
| 729 | } |
| 730 | |
| 731 | static |
| 732 | void cds_lfht_free_level(struct rcu_head *head) |
| 733 | { |
| 734 | struct rcu_level *l = |
| 735 | caa_container_of(head, struct rcu_level, head); |
| 736 | poison_free(l); |
| 737 | } |
| 738 | |
| 739 | /* |
| 740 | * Remove all logically deleted nodes from a bucket up to a certain node key. |
| 741 | */ |
| 742 | static |
| 743 | void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) |
| 744 | { |
| 745 | struct cds_lfht_node *iter_prev, *iter, *next, *new_next; |
| 746 | |
| 747 | assert(!is_dummy(dummy)); |
| 748 | assert(!is_gc(dummy)); |
| 749 | assert(!is_removed(dummy)); |
| 750 | assert(!is_dummy(node)); |
| 751 | assert(!is_gc(node)); |
| 752 | assert(!is_removed(node)); |
| 753 | for (;;) { |
| 754 | iter_prev = dummy; |
| 755 | /* We can always skip the dummy node initially */ |
| 756 | iter = rcu_dereference(iter_prev->p.next); |
| 757 | assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); |
| 758 | /* |
| 759 | * We should never be called with dummy (start of chain) |
| 760 | * and logically removed node (end of path compression |
| 761 | * marker) being the actual same node. This would be a |
| 762 | * bug in the algorithm implementation. |
| 763 | */ |
| 764 | assert(dummy != node); |
| 765 | for (;;) { |
| 766 | if (unlikely(is_end(iter))) |
| 767 | return; |
| 768 | if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) |
| 769 | return; |
| 770 | next = rcu_dereference(clear_flag(iter)->p.next); |
| 771 | if (likely(is_gc(next))) |
| 772 | break; |
| 773 | iter_prev = clear_flag(iter); |
| 774 | iter = next; |
| 775 | } |
| 776 | assert(!is_gc(iter)); |
| 777 | if (is_dummy(iter)) |
| 778 | new_next = flag_dummy(clear_flag(next)); |
| 779 | else |
| 780 | new_next = clear_flag(next); |
| 781 | if (is_removed(iter)) |
| 782 | new_next = flag_removed(new_next); |
| 783 | (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); |
| 784 | } |
| 785 | return; |
| 786 | } |
| 787 | |
| 788 | static |
| 789 | struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, |
| 790 | unsigned long size, |
| 791 | struct cds_lfht_node *node, |
| 792 | enum add_mode mode, int dummy) |
| 793 | { |
| 794 | struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, |
| 795 | *dummy_node, *return_node, *replace_pinned = NULL; |
| 796 | struct _cds_lfht_node *lookup; |
| 797 | unsigned long hash, index, order; |
| 798 | |
| 799 | assert(!is_dummy(node)); |
| 800 | assert(!is_gc(node)); |
| 801 | assert(!is_removed(node)); |
| 802 | if (!size) { |
| 803 | assert(dummy); |
| 804 | node->p.next = flag_dummy(get_end()); |
| 805 | return node; /* Initial first add (head) */ |
| 806 | } |
| 807 | hash = bit_reverse_ulong(node->p.reverse_hash); |
| 808 | for (;;) { |
| 809 | uint32_t chain_len; |
| 810 | |
| 811 | retry: |
| 812 | chain_len = 0; |
| 813 | /* |
| 814 | * iter_prev points to the non-removed node prior to the |
| 815 | * insert location. |
| 816 | */ |
| 817 | index = hash & (size - 1); |
| 818 | order = get_count_order_ulong(index + 1); |
		lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
| 820 | iter_prev = (struct cds_lfht_node *) lookup; |
| 821 | /* We can always skip the dummy node initially */ |
| 822 | iter = rcu_dereference(iter_prev->p.next); |
| 823 | assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); |
| 824 | for (;;) { |
| 825 | if (unlikely(is_end(iter))) |
| 826 | goto insert; |
| 827 | if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) |
| 828 | goto insert; |
| 829 | next = rcu_dereference(clear_flag(iter)->p.next); |
| 830 | if (unlikely(is_gc(next))) |
| 831 | goto gc_node; |
| 832 | if (unlikely(replace_pinned)) { |
| 833 | /* |
| 834 | * We're in the retry of a node |
| 835 | * replacement. Only get exact iter |
| 836 | * pointer match. We own it, so it |
| 837 | * _needs_ to be there at some point. |
| 838 | */ |
| 839 | if (clear_flag(iter) == replace_pinned) |
| 840 | goto replace; |
| 841 | } |
| 842 | /* |
| 843 | * Next is removed but not gc'd. We need to |
| 844 | * busy-loop, because a concurrent replacement |
| 845 | * is keeping it temporarily pinned there but we |
| 846 | * cannot attach to it. The easiest solution is |
| 847 | * to retry. |
| 848 | */ |
| 849 | if (unlikely(is_removed(next))) |
| 850 | goto retry; |
| 851 | if ((mode == ADD_UNIQUE || mode == ADD_REPLACE) |
| 852 | && !is_dummy(next) |
| 853 | && !ht->compare_fct(node->key, node->key_len, |
| 854 | clear_flag(iter)->key, |
| 855 | clear_flag(iter)->key_len)) { |
| 856 | if (mode == ADD_UNIQUE) |
| 857 | return clear_flag(iter); |
| 858 | else /* mode == ADD_REPLACE */ |
| 859 | goto replace; |
| 860 | } |
| 861 | /* Only account for identical reverse hash once */ |
| 862 | if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash |
| 863 | && !is_dummy(next)) |
| 864 | check_resize(ht, size, ++chain_len); |
| 865 | iter_prev = clear_flag(iter); |
| 866 | iter = next; |
| 867 | } |
| 868 | |
| 869 | insert: |
| 870 | assert(node != clear_flag(iter)); |
| 871 | assert(!is_removed(iter_prev)); |
| 872 | assert(!is_removed(iter)); |
| 873 | assert(!is_gc(iter_prev)); |
| 874 | assert(!is_gc(iter)); |
| 875 | assert(iter_prev != node); |
| 876 | assert(!replace_pinned); |
| 877 | if (!dummy) |
| 878 | node->p.next = clear_flag(iter); |
| 879 | else |
| 880 | node->p.next = flag_dummy(clear_flag(iter)); |
| 881 | if (is_dummy(iter)) |
| 882 | new_node = flag_dummy(node); |
| 883 | else |
| 884 | new_node = node; |
| 885 | if (uatomic_cmpxchg(&iter_prev->p.next, iter, |
| 886 | new_node) != iter) { |
| 887 | continue; /* retry */ |
| 888 | } else { |
| 889 | if (mode == ADD_REPLACE) |
| 890 | return_node = NULL; |
| 891 | else /* ADD_DEFAULT and ADD_UNIQUE */ |
| 892 | return_node = node; |
| 893 | goto gc_end; |
| 894 | } |
| 895 | |
| 896 | replace: |
| 897 | assert(node != clear_flag(iter)); |
| 898 | assert(!is_removed(iter_prev)); |
| 899 | assert(!is_removed(iter)); |
| 900 | assert(!is_gc(iter_prev)); |
| 901 | assert(!is_gc(iter)); |
| 902 | assert(iter_prev != node); |
| 903 | assert(!dummy); |
| 904 | node->p.next = clear_flag(next); |
| 905 | if (is_dummy(iter)) |
| 906 | new_node = flag_dummy(node); |
| 907 | else |
| 908 | new_node = node; |
| 909 | /* |
| 910 | * Try to delete to-be-replaced node. Don't gc yet. Not |
| 911 | * performing gc here is important, because this lets |
| 912 | * concurrent lookups see the old node until we |
| 913 | * atomically swap the new node into its place. |
| 914 | * |
		 * This algorithm is _not_ strictly lock-free between
		 * _cds_lfht_del and the uatomic_cmpxchg of the
		 * replacement operation, so a thread performing a
		 * replacement must _not_ crash at this point (which
		 * means: don't do replacements if you need strict
		 * lock-free guarantees).
| 920 | */ |
| 921 | if (!replace_pinned) { |
| 922 | if (_cds_lfht_del(ht, size, clear_flag(iter), 0, 0)) |
| 923 | continue; /* concurrently removed. retry. */ |
| 924 | } |
| 925 | /* |
| 926 | * After _cds_lfht_del succeeds, we have pinned the |
| 927 | * to-be-removed node in place by setting its removed |
| 928 | * flag, but not its gc flag. If we fail to cmpxchg our |
| 929 | * new node with this node, we need to retry everything |
| 930 | * from the initial lookup, and only stop when we reach |
| 931 | * the node we pinned into place. |
| 932 | */ |
| 933 | return_node = uatomic_cmpxchg(&iter_prev->p.next, |
| 934 | iter, new_node); |
| 935 | if (return_node != iter) { |
| 936 | /* |
| 937 | * If cmpxchg fails, we need to do path |
| 938 | * compression, but end it by placing our own |
| 939 | * node into place. |
| 940 | */ |
| 941 | replace_pinned = clear_flag(iter); |
| 942 | continue; /* retry */ |
| 943 | } else { |
| 944 | /* |
| 945 | * cmpxchg succeeded. gc unnecessary, because we |
| 946 | * unlinked the return_node ourself with the |
| 947 | * cmpxchg. |
| 948 | */ |
| 949 | return_node = clear_flag(return_node); |
| 950 | goto end; |
| 951 | } |
| 952 | |
| 953 | gc_node: |
| 954 | assert(!is_removed(iter)); |
| 955 | assert(!is_gc(iter)); |
| 956 | if (is_dummy(iter)) |
| 957 | new_next = flag_dummy(clear_flag(next)); |
| 958 | else |
| 959 | new_next = clear_flag(next); |
| 960 | (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); |
| 961 | /* retry */ |
| 962 | } |
| 963 | gc_end: |
| 964 | /* Garbage collect logically removed nodes in the bucket */ |
| 965 | index = hash & (size - 1); |
| 966 | order = get_count_order_ulong(index + 1); |
| 967 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; |
| 968 | dummy_node = (struct cds_lfht_node *) lookup; |
| 969 | _cds_lfht_gc_bucket(dummy_node, node); |
| 970 | end: |
| 971 | return return_node; |
| 972 | } |
| 973 | |
| 974 | static |
| 975 | int _cds_lfht_del(struct cds_lfht *ht, unsigned long size, |
| 976 | struct cds_lfht_node *node, |
| 977 | int dummy_removal, int do_gc) |
| 978 | { |
| 979 | struct cds_lfht_node *dummy, *next, *old; |
| 980 | struct _cds_lfht_node *lookup; |
| 981 | int flagged = 0; |
| 982 | unsigned long hash, index, order; |
| 983 | |
| 984 | /* logically delete the node */ |
| 985 | assert(!is_dummy(node)); |
| 986 | assert(!is_gc(node)); |
| 987 | assert(!is_removed(node)); |
| 988 | old = rcu_dereference(node->p.next); |
| 989 | do { |
| 990 | struct cds_lfht_node *new_next; |
| 991 | |
| 992 | next = old; |
| 993 | if (unlikely(is_removed(next))) |
| 994 | goto end; |
| 995 | if (dummy_removal) |
| 996 | assert(is_dummy(next)); |
| 997 | else |
| 998 | assert(!is_dummy(next)); |
| 999 | new_next = flag_removed(next); |
| 1000 | if (do_gc) |
| 1001 | new_next = flag_gc(new_next); |
| 1002 | old = uatomic_cmpxchg(&node->p.next, next, new_next); |
| 1003 | } while (old != next); |
| 1004 | |
| 1005 | /* We performed the (logical) deletion. */ |
| 1006 | flagged = 1; |
| 1007 | |
| 1008 | if (!do_gc) |
| 1009 | goto end; |
| 1010 | |
| 1011 | /* |
	 * Ensure that the node is not visible to readers anymore: look up
	 * the node, and remove it (along with any other logically removed
	 * nodes) if found.
| 1015 | */ |
| 1016 | hash = bit_reverse_ulong(node->p.reverse_hash); |
| 1017 | assert(size > 0); |
| 1018 | index = hash & (size - 1); |
| 1019 | order = get_count_order_ulong(index + 1); |
| 1020 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; |
| 1021 | dummy = (struct cds_lfht_node *) lookup; |
| 1022 | _cds_lfht_gc_bucket(dummy, node); |
| 1023 | end: |
| 1024 | /* |
	 * Only the flagging action indicates that we (and no other thread)
	 * removed the node from the hash table.
| 1027 | */ |
| 1028 | if (flagged) { |
| 1029 | assert(is_removed(rcu_dereference(node->p.next))); |
| 1030 | return 0; |
| 1031 | } else |
| 1032 | return -ENOENT; |
| 1033 | } |
| 1034 | |
| 1035 | static |
| 1036 | void *partition_resize_thread(void *arg) |
| 1037 | { |
| 1038 | struct partition_resize_work *work = arg; |
| 1039 | |
| 1040 | work->ht->cds_lfht_rcu_register_thread(); |
| 1041 | work->fct(work->ht, work->i, work->start, work->len); |
| 1042 | work->ht->cds_lfht_rcu_unregister_thread(); |
| 1043 | return NULL; |
| 1044 | } |
| 1045 | |
| 1046 | static |
| 1047 | void partition_resize_helper(struct cds_lfht *ht, unsigned long i, |
| 1048 | unsigned long len, |
| 1049 | void (*fct)(struct cds_lfht *ht, unsigned long i, |
| 1050 | unsigned long start, unsigned long len)) |
| 1051 | { |
| 1052 | unsigned long partition_len; |
| 1053 | struct partition_resize_work *work; |
| 1054 | int thread, ret; |
| 1055 | unsigned long nr_threads; |
| 1056 | pthread_t *thread_id; |
| 1057 | |
| 1058 | /* |
	 * Note: nr_cpus_mask + 1 is always a power of 2.
| 1060 | * We spawn just the number of threads we need to satisfy the minimum |
| 1061 | * partition size, up to the number of CPUs in the system. |
| 1062 | */ |
| 1063 | nr_threads = min(nr_cpus_mask + 1, |
| 1064 | len >> MIN_PARTITION_PER_THREAD_ORDER); |
| 1065 | partition_len = len >> get_count_order_ulong(nr_threads); |
| 1066 | work = calloc(nr_threads, sizeof(*work)); |
| 1067 | thread_id = calloc(nr_threads, sizeof(*thread_id)); |
	assert(work);
	assert(thread_id);
| 1069 | for (thread = 0; thread < nr_threads; thread++) { |
| 1070 | work[thread].ht = ht; |
| 1071 | work[thread].i = i; |
| 1072 | work[thread].len = partition_len; |
| 1073 | work[thread].start = thread * partition_len; |
| 1074 | work[thread].fct = fct; |
| 1075 | ret = pthread_create(&thread_id[thread], ht->resize_attr, |
| 1076 | partition_resize_thread, &work[thread]); |
| 1077 | assert(!ret); |
| 1078 | } |
| 1079 | for (thread = 0; thread < nr_threads; thread++) { |
| 1080 | ret = pthread_join(thread_id[thread], NULL); |
| 1081 | assert(!ret); |
| 1082 | } |
| 1083 | free(work); |
| 1084 | free(thread_id); |
| 1085 | } |
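
/*
 * Numeric example for the partitioning above (informal): on a machine
 * with 8 CPUs (nr_cpus_mask = 7) and len = 2^20 dummy nodes to touch,
 * nr_threads = min(8, 2^20 >> 12) = 8 and each thread handles
 * partition_len = 2^20 >> 3 = 2^17 dummy nodes.  Shorter spans are
 * handled by the caller in a single thread (see the len check in
 * init_table_populate() and remove_table()).
 */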
| 1086 | |
| 1087 | /* |
| 1088 | * Holding RCU read lock to protect _cds_lfht_add against memory |
| 1089 | * reclaim that could be performed by other call_rcu worker threads (ABA |
| 1090 | * problem). |
| 1091 | * |
| 1092 | * When we reach a certain length, we can split this population phase over |
| 1093 | * many worker threads, based on the number of CPUs available in the system. |
 * This should therefore take care of not having the expand operation lag
 * behind too many concurrent insertion threads, by using the scheduler's
 * ability to schedule dummy node population fairly with insertions.
| 1097 | */ |
| 1098 | static |
| 1099 | void init_table_populate_partition(struct cds_lfht *ht, unsigned long i, |
| 1100 | unsigned long start, unsigned long len) |
| 1101 | { |
| 1102 | unsigned long j; |
| 1103 | |
| 1104 | ht->cds_lfht_rcu_read_lock(); |
| 1105 | for (j = start; j < start + len; j++) { |
| 1106 | struct cds_lfht_node *new_node = |
| 1107 | (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; |
| 1108 | |
| 1109 | dbg_printf("init populate: i %lu j %lu hash %lu\n", |
| 1110 | i, j, !i ? 0 : (1UL << (i - 1)) + j); |
| 1111 | new_node->p.reverse_hash = |
| 1112 | bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); |
| 1113 | (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)), |
| 1114 | new_node, ADD_DEFAULT, 1); |
| 1115 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
| 1116 | break; |
| 1117 | } |
| 1118 | ht->cds_lfht_rcu_read_unlock(); |
| 1119 | } |
| 1120 | |
| 1121 | static |
| 1122 | void init_table_populate(struct cds_lfht *ht, unsigned long i, |
| 1123 | unsigned long len) |
| 1124 | { |
| 1125 | assert(nr_cpus_mask != -1); |
| 1126 | if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) { |
| 1127 | ht->cds_lfht_rcu_thread_online(); |
| 1128 | init_table_populate_partition(ht, i, 0, len); |
| 1129 | ht->cds_lfht_rcu_thread_offline(); |
| 1130 | return; |
| 1131 | } |
| 1132 | partition_resize_helper(ht, i, len, init_table_populate_partition); |
| 1133 | } |
| 1134 | |
| 1135 | static |
| 1136 | void init_table(struct cds_lfht *ht, |
| 1137 | unsigned long first_order, unsigned long len_order) |
| 1138 | { |
| 1139 | unsigned long i, end_order; |
| 1140 | |
| 1141 | dbg_printf("init table: first_order %lu end_order %lu\n", |
| 1142 | first_order, first_order + len_order); |
| 1143 | end_order = first_order + len_order; |
| 1144 | for (i = first_order; i < end_order; i++) { |
| 1145 | unsigned long len; |
| 1146 | |
| 1147 | len = !i ? 1 : 1UL << (i - 1); |
| 1148 | dbg_printf("init order %lu len: %lu\n", i, len); |
| 1149 | |
| 1150 | /* Stop expand if the resize target changes under us */ |
| 1151 | if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i))) |
| 1152 | break; |
| 1153 | |
| 1154 | ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level) |
| 1155 | + (len * sizeof(struct _cds_lfht_node))); |
| 1156 | assert(ht->t.tbl[i]); |
| 1157 | |
| 1158 | /* |
| 1159 | * Set all dummy nodes reverse hash values for a level and |
| 1160 | * link all dummy nodes into the table. |
| 1161 | */ |
| 1162 | init_table_populate(ht, i, len); |
| 1163 | |
| 1164 | /* |
| 1165 | * Update table size. |
| 1166 | */ |
| 1167 | cmm_smp_wmb(); /* populate data before RCU size */ |
| 1168 | CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i)); |
| 1169 | |
| 1170 | dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i)); |
| 1171 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
| 1172 | break; |
| 1173 | } |
| 1174 | } |
| 1175 | |
| 1176 | /* |
 * Holding RCU read lock to protect _cds_lfht_del against memory
| 1178 | * reclaim that could be performed by other call_rcu worker threads (ABA |
| 1179 | * problem). |
| 1180 | * For a single level, we logically remove and garbage collect each node. |
| 1181 | * |
| 1182 | * As a design choice, we perform logical removal and garbage collection on a |
 * node-per-node basis to simplify this algorithm. We also assume that keeping
 * good cache locality of the operation outweighs the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
| 1186 | * However, this would have to be justified by benchmarks. |
| 1187 | * |
| 1188 | * Concurrent removal and add operations are helping us perform garbage |
| 1189 | * collection of logically removed nodes. We guarantee that all logically |
| 1190 | * removed nodes have been garbage-collected (unlinked) before call_rcu is |
 * invoked to free a whole level of dummy nodes (after a grace period).
| 1192 | * |
| 1193 | * Logical removal and garbage collection can therefore be done in batch or on a |
| 1194 | * node-per-node basis, as long as the guarantee above holds. |
| 1195 | * |
| 1196 | * When we reach a certain length, we can split this removal over many worker |
| 1197 | * threads, based on the number of CPUs available in the system. This should |
 * take care of not letting the resize process lag behind too many concurrent
| 1199 | * updater threads actively inserting into the hash table. |
| 1200 | */ |
| 1201 | static |
| 1202 | void remove_table_partition(struct cds_lfht *ht, unsigned long i, |
| 1203 | unsigned long start, unsigned long len) |
| 1204 | { |
| 1205 | unsigned long j; |
| 1206 | |
| 1207 | ht->cds_lfht_rcu_read_lock(); |
| 1208 | for (j = start; j < start + len; j++) { |
| 1209 | struct cds_lfht_node *fini_node = |
| 1210 | (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; |
| 1211 | |
| 1212 | dbg_printf("remove entry: i %lu j %lu hash %lu\n", |
| 1213 | i, j, !i ? 0 : (1UL << (i - 1)) + j); |
| 1214 | fini_node->p.reverse_hash = |
| 1215 | bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); |
| 1216 | (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)), |
| 1217 | fini_node, 1, 1); |
| 1218 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
| 1219 | break; |
| 1220 | } |
| 1221 | ht->cds_lfht_rcu_read_unlock(); |
| 1222 | } |
| 1223 | |
| 1224 | static |
| 1225 | void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) |
| 1226 | { |
| 1227 | |
| 1228 | assert(nr_cpus_mask != -1); |
| 1229 | if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) { |
| 1230 | ht->cds_lfht_rcu_thread_online(); |
| 1231 | remove_table_partition(ht, i, 0, len); |
| 1232 | ht->cds_lfht_rcu_thread_offline(); |
| 1233 | return; |
| 1234 | } |
| 1235 | partition_resize_helper(ht, i, len, remove_table_partition); |
| 1236 | } |
| 1237 | |
| 1238 | static |
| 1239 | void fini_table(struct cds_lfht *ht, |
| 1240 | unsigned long first_order, unsigned long len_order) |
| 1241 | { |
| 1242 | long i, end_order; |
| 1243 | |
| 1244 | dbg_printf("fini table: first_order %lu end_order %lu\n", |
| 1245 | first_order, first_order + len_order); |
| 1246 | end_order = first_order + len_order; |
| 1247 | assert(first_order > 0); |
| 1248 | for (i = end_order - 1; i >= first_order; i--) { |
| 1249 | unsigned long len; |
| 1250 | |
| 1251 | len = !i ? 1 : 1UL << (i - 1); |
| 1252 | dbg_printf("fini order %lu len: %lu\n", i, len); |
| 1253 | |
| 1254 | /* Stop shrink if the resize target changes under us */ |
| 1255 | if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1))) |
| 1256 | break; |
| 1257 | |
| 1258 | cmm_smp_wmb(); /* populate data before RCU size */ |
| 1259 | CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1)); |
| 1260 | |
| 1261 | /* |
| 1262 | * We need to wait for all add operations to reach Q.S. (and |
| 1263 | * thus use the new table for lookups) before we can start |
| 1264 | * releasing the old dummy nodes. Otherwise their lookup will |
| 1265 | * return a logically removed node as insert position. |
| 1266 | */ |
| 1267 | ht->cds_lfht_synchronize_rcu(); |
| 1268 | |
| 1269 | /* |
| 1270 | * Set "removed" flag in dummy nodes about to be removed. |
| 1271 | * Unlink all now-logically-removed dummy node pointers. |
		 * Concurrent add/remove operations are helping us do
		 * the gc.
| 1274 | */ |
| 1275 | remove_table(ht, i, len); |
| 1276 | |
| 1277 | ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level); |
| 1278 | |
		dbg_printf("fini new size: %lu\n", 1UL << (i - 1));
| 1280 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
| 1281 | break; |
| 1282 | } |
| 1283 | } |
| 1284 | |
| 1285 | struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, |
| 1286 | cds_lfht_compare_fct compare_fct, |
| 1287 | unsigned long hash_seed, |
| 1288 | unsigned long init_size, |
| 1289 | int flags, |
| 1290 | void (*cds_lfht_call_rcu)(struct rcu_head *head, |
| 1291 | void (*func)(struct rcu_head *head)), |
| 1292 | void (*cds_lfht_synchronize_rcu)(void), |
| 1293 | void (*cds_lfht_rcu_read_lock)(void), |
| 1294 | void (*cds_lfht_rcu_read_unlock)(void), |
| 1295 | void (*cds_lfht_rcu_thread_offline)(void), |
| 1296 | void (*cds_lfht_rcu_thread_online)(void), |
| 1297 | void (*cds_lfht_rcu_register_thread)(void), |
| 1298 | void (*cds_lfht_rcu_unregister_thread)(void), |
| 1299 | pthread_attr_t *attr) |
| 1300 | { |
| 1301 | struct cds_lfht *ht; |
| 1302 | unsigned long order; |
| 1303 | |
	/* init_size must be a power of two */
| 1305 | if (init_size && (init_size & (init_size - 1))) |
| 1306 | return NULL; |
| 1307 | ht = calloc(1, sizeof(struct cds_lfht)); |
| 1308 | assert(ht); |
| 1309 | ht->hash_fct = hash_fct; |
| 1310 | ht->compare_fct = compare_fct; |
| 1311 | ht->hash_seed = hash_seed; |
| 1312 | ht->cds_lfht_call_rcu = cds_lfht_call_rcu; |
| 1313 | ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu; |
| 1314 | ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock; |
| 1315 | ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock; |
| 1316 | ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline; |
| 1317 | ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online; |
| 1318 | ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread; |
| 1319 | ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread; |
| 1320 | ht->resize_attr = attr; |
| 1321 | ht->percpu_count = alloc_per_cpu_items_count(); |
| 1322 | /* this mutex should not nest in read-side C.S. */ |
| 1323 | pthread_mutex_init(&ht->resize_mutex, NULL); |
| 1324 | order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1; |
| 1325 | ht->flags = flags; |
| 1326 | ht->cds_lfht_rcu_thread_offline(); |
| 1327 | pthread_mutex_lock(&ht->resize_mutex); |
| 1328 | ht->t.resize_target = 1UL << (order - 1); |
| 1329 | init_table(ht, 0, order); |
| 1330 | pthread_mutex_unlock(&ht->resize_mutex); |
| 1331 | ht->cds_lfht_rcu_thread_online(); |
| 1332 | return ht; |
| 1333 | } |
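
/*
 * Creation sketch (illustrative only, kept out of the build): callers
 * normally use the cds_lfht_new() wrapper from the public header of their
 * RCU flavor, which supplies the RCU callbacks below.  Assuming a flavor
 * providing call_rcu, synchronize_rcu, rcu_read_lock/unlock,
 * rcu_thread_offline/online and rcu_register/unregister_thread (e.g. the
 * QSBR flavor), and hypothetical my_hash()/my_compare() callbacks, a
 * direct call would look roughly like this:
 */
#if 0
	struct cds_lfht *ht;

	ht = _cds_lfht_new(my_hash, my_compare, 0x42UL /* seed */,
			8 /* init_size, must be a power of two */,
			CDS_LFHT_AUTO_RESIZE,
			call_rcu, synchronize_rcu,
			rcu_read_lock, rcu_read_unlock,
			rcu_thread_offline, rcu_thread_online,
			rcu_register_thread, rcu_unregister_thread,
			NULL /* default resize thread attributes */);
#endif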
| 1334 | |
| 1335 | struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len) |
| 1336 | { |
| 1337 | struct cds_lfht_node *node, *next, *dummy_node; |
| 1338 | struct _cds_lfht_node *lookup; |
| 1339 | unsigned long hash, reverse_hash, index, order, size; |
| 1340 | |
| 1341 | hash = ht->hash_fct(key, key_len, ht->hash_seed); |
| 1342 | reverse_hash = bit_reverse_ulong(hash); |
| 1343 | |
| 1344 | size = rcu_dereference(ht->t.size); |
| 1345 | index = hash & (size - 1); |
| 1346 | order = get_count_order_ulong(index + 1); |
	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
| 1348 | dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", |
| 1349 | hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1))); |
| 1350 | dummy_node = (struct cds_lfht_node *) lookup; |
| 1351 | /* We can always skip the dummy node initially */ |
| 1352 | node = rcu_dereference(dummy_node->p.next); |
| 1353 | node = clear_flag(node); |
| 1354 | for (;;) { |
| 1355 | if (unlikely(is_end(node))) { |
| 1356 | node = NULL; |
| 1357 | break; |
| 1358 | } |
| 1359 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
| 1360 | node = NULL; |
| 1361 | break; |
| 1362 | } |
| 1363 | next = rcu_dereference(node->p.next); |
| 1364 | /* |
		 * We consider returning nodes marked removed but not gc'd
		 * as hits, for lookup vs replacement consistency.
| 1367 | */ |
| 1368 | if (likely(!is_gc(next)) |
| 1369 | && !is_dummy(next) |
| 1370 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { |
| 1371 | break; |
| 1372 | } |
| 1373 | node = clear_flag(next); |
| 1374 | } |
| 1375 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); |
| 1376 | return node; |
| 1377 | } |
| 1378 | |
| 1379 | struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, |
| 1380 | struct cds_lfht_node *node) |
| 1381 | { |
| 1382 | struct cds_lfht_node *next; |
| 1383 | unsigned long reverse_hash; |
| 1384 | void *key; |
| 1385 | size_t key_len; |
| 1386 | |
| 1387 | reverse_hash = node->p.reverse_hash; |
| 1388 | key = node->key; |
| 1389 | key_len = node->key_len; |
| 1390 | next = rcu_dereference(node->p.next); |
| 1391 | node = clear_flag(next); |
| 1392 | |
| 1393 | for (;;) { |
| 1394 | if (unlikely(is_end(node))) { |
| 1395 | node = NULL; |
| 1396 | break; |
| 1397 | } |
| 1398 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
| 1399 | node = NULL; |
| 1400 | break; |
| 1401 | } |
| 1402 | next = rcu_dereference(node->p.next); |
| 1403 | /* |
		 * We consider returning nodes marked removed but not gc'd
		 * as hits, for lookup vs replacement consistency.
| 1406 | */ |
| 1407 | if (likely(!is_gc(next)) |
| 1408 | && !is_dummy(next) |
| 1409 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { |
| 1410 | break; |
| 1411 | } |
| 1412 | node = clear_flag(next); |
| 1413 | } |
| 1414 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); |
| 1415 | return node; |
| 1416 | } |
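
/*
 * Iteration sketch over duplicate keys (illustrative only, kept out of
 * the build; ht, key and key_len are supplied by the caller):
 * cds_lfht_lookup() returns the first node matching the key, and
 * cds_lfht_next() the following matches, until NULL.  Both must be called
 * from within an RCU read-side critical section.
 */
#if 0
	struct cds_lfht_node *iter;

	rcu_read_lock();
	for (iter = cds_lfht_lookup(ht, key, key_len);
	     iter != NULL;
	     iter = cds_lfht_next(ht, iter)) {
		/* caa_container_of(iter, ...) gives back the user structure */
	}
	rcu_read_unlock();
#endif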
| 1417 | |
| 1418 | void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) |
| 1419 | { |
| 1420 | unsigned long hash, size; |
| 1421 | |
| 1422 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
| 1423 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
| 1424 | |
| 1425 | size = rcu_dereference(ht->t.size); |
| 1426 | (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0); |
| 1427 | ht_count_add(ht, size); |
| 1428 | } |
| 1429 | |
| 1430 | struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, |
| 1431 | struct cds_lfht_node *node) |
| 1432 | { |
| 1433 | unsigned long hash, size; |
| 1434 | struct cds_lfht_node *ret; |
| 1435 | |
| 1436 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
| 1437 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
| 1438 | |
| 1439 | size = rcu_dereference(ht->t.size); |
| 1440 | ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0); |
| 1441 | if (ret == node) |
| 1442 | ht_count_add(ht, size); |
| 1443 | return ret; |
| 1444 | } |
| 1445 | |
| 1446 | struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht, |
| 1447 | struct cds_lfht_node *node) |
| 1448 | { |
| 1449 | unsigned long hash, size; |
| 1450 | struct cds_lfht_node *ret; |
| 1451 | |
| 1452 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
| 1453 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
| 1454 | |
| 1455 | size = rcu_dereference(ht->t.size); |
| 1456 | ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0); |
| 1457 | if (ret == NULL) |
| 1458 | ht_count_add(ht, size); |
| 1459 | return ret; |
| 1460 | } |
| 1461 | |
| 1462 | int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node) |
| 1463 | { |
| 1464 | unsigned long size; |
| 1465 | int ret; |
| 1466 | |
| 1467 | size = rcu_dereference(ht->t.size); |
| 1468 | ret = _cds_lfht_del(ht, size, node, 0, 1); |
| 1469 | if (!ret) |
| 1470 | ht_count_del(ht, size); |
| 1471 | return ret; |
| 1472 | } |
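
/*
 * Deletion sketch (illustrative only, kept out of the build): a
 * successful cds_lfht_del() returns 0 and only guarantees the node is no
 * longer reachable by new lookups; concurrent readers may still hold a
 * reference, so reclaim must be deferred by a grace period, e.g. with the
 * flavor's call_rcu().  "struct mynode" (embedding a struct rcu_head
 * named rcu_head) and free_mynode_rcu() are hypothetical.
 */
#if 0
	struct cds_lfht_node *found;

	rcu_read_lock();
	found = cds_lfht_lookup(ht, key, key_len);
	if (found && !cds_lfht_del(ht, found)) {
		struct mynode *n = caa_container_of(found, struct mynode, node);

		call_rcu(&n->rcu_head, free_mynode_rcu);
	}
	rcu_read_unlock();
#endif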
| 1473 | |
| 1474 | static |
| 1475 | int cds_lfht_delete_dummy(struct cds_lfht *ht) |
| 1476 | { |
| 1477 | struct cds_lfht_node *node; |
| 1478 | struct _cds_lfht_node *lookup; |
| 1479 | unsigned long order, i, size; |
| 1480 | |
| 1481 | /* Check that the table is empty */ |
| 1482 | lookup = &ht->t.tbl[0]->nodes[0]; |
| 1483 | node = (struct cds_lfht_node *) lookup; |
| 1484 | do { |
| 1485 | node = clear_flag(node)->p.next; |
| 1486 | if (!is_dummy(node)) |
| 1487 | return -EPERM; |
| 1488 | assert(!is_removed(node)); |
| 1489 | assert(!is_gc(node)); |
| 1490 | } while (!is_end(node)); |
| 1491 | /* |
| 1492 | * size accessed without rcu_dereference because hash table is |
| 1493 | * being destroyed. |
| 1494 | */ |
| 1495 | size = ht->t.size; |
| 1496 | /* Internal sanity check: all nodes left should be dummy */ |
| 1497 | for (order = 0; order < get_count_order_ulong(size) + 1; order++) { |
| 1498 | unsigned long len; |
| 1499 | |
| 1500 | len = !order ? 1 : 1UL << (order - 1); |
| 1501 | for (i = 0; i < len; i++) { |
| 1502 | dbg_printf("delete order %lu i %lu hash %lu\n", |
| 1503 | order, i, |
| 1504 | bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash)); |
| 1505 | assert(is_dummy(ht->t.tbl[order]->nodes[i].next)); |
| 1506 | } |
| 1507 | poison_free(ht->t.tbl[order]); |
| 1508 | } |
| 1509 | return 0; |
| 1510 | } |
| 1511 | |
| 1512 | /* |
| 1513 | * Should only be called when no more concurrent readers nor writers can |
| 1514 | * possibly access the table. |
| 1515 | */ |
| 1516 | int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) |
| 1517 | { |
| 1518 | int ret; |
| 1519 | |
| 1520 | /* Wait for in-flight resize operations to complete */ |
| 1521 | CMM_STORE_SHARED(ht->in_progress_destroy, 1); |
| 1522 | while (uatomic_read(&ht->in_progress_resize)) |
| 1523 | poll(NULL, 0, 100); /* wait for 100ms */ |
| 1524 | ret = cds_lfht_delete_dummy(ht); |
| 1525 | if (ret) |
| 1526 | return ret; |
| 1527 | free_per_cpu_items_count(ht->percpu_count); |
| 1528 | if (attr) |
| 1529 | *attr = ht->resize_attr; |
| 1530 | poison_free(ht); |
| 1531 | return ret; |
| 1532 | } |
| 1533 | |
| 1534 | void cds_lfht_count_nodes(struct cds_lfht *ht, |
| 1535 | unsigned long *count, |
| 1536 | unsigned long *removed) |
| 1537 | { |
| 1538 | struct cds_lfht_node *node, *next; |
| 1539 | struct _cds_lfht_node *lookup; |
| 1540 | unsigned long nr_dummy = 0; |
| 1541 | |
| 1542 | *count = 0; |
| 1543 | *removed = 0; |
| 1544 | |
| 1545 | /* Count non-dummy nodes in the table */ |
| 1546 | lookup = &ht->t.tbl[0]->nodes[0]; |
| 1547 | node = (struct cds_lfht_node *) lookup; |
| 1548 | do { |
| 1549 | next = rcu_dereference(node->p.next); |
| 1550 | if (is_removed(next) || is_gc(next)) { |
| 1551 | assert(!is_dummy(next)); |
| 1552 | (*removed)++; |
| 1553 | } else if (!is_dummy(next)) |
| 1554 | (*count)++; |
| 1555 | else |
| 1556 | (nr_dummy)++; |
| 1557 | node = clear_flag(next); |
| 1558 | } while (!is_end(node)); |
| 1559 | dbg_printf("number of dummy nodes: %lu\n", nr_dummy); |
| 1560 | } |
| 1561 | |
| 1562 | /* called with resize mutex held */ |
| 1563 | static |
| 1564 | void _do_cds_lfht_grow(struct cds_lfht *ht, |
| 1565 | unsigned long old_size, unsigned long new_size) |
| 1566 | { |
| 1567 | unsigned long old_order, new_order; |
| 1568 | |
| 1569 | old_order = get_count_order_ulong(old_size) + 1; |
| 1570 | new_order = get_count_order_ulong(new_size) + 1; |
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
| 1573 | assert(new_size > old_size); |
| 1574 | init_table(ht, old_order, new_order - old_order); |
| 1575 | } |
| 1576 | |
| 1577 | /* called with resize mutex held */ |
| 1578 | static |
| 1579 | void _do_cds_lfht_shrink(struct cds_lfht *ht, |
| 1580 | unsigned long old_size, unsigned long new_size) |
| 1581 | { |
| 1582 | unsigned long old_order, new_order; |
| 1583 | |
| 1584 | new_size = max(new_size, MIN_TABLE_SIZE); |
| 1585 | old_order = get_count_order_ulong(old_size) + 1; |
| 1586 | new_order = get_count_order_ulong(new_size) + 1; |
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
| 1589 | assert(new_size < old_size); |
| 1590 | |
| 1591 | /* Remove and unlink all dummy nodes to remove. */ |
| 1592 | fini_table(ht, new_order, old_order - new_order); |
| 1593 | } |
| 1594 | |
| 1595 | |
| 1596 | /* called with resize mutex held */ |
| 1597 | static |
| 1598 | void _do_cds_lfht_resize(struct cds_lfht *ht) |
| 1599 | { |
| 1600 | unsigned long new_size, old_size; |
| 1601 | |
| 1602 | /* |
| 1603 | * Resize table, re-do if the target size has changed under us. |
| 1604 | */ |
| 1605 | do { |
| 1606 | ht->t.resize_initiated = 1; |
| 1607 | old_size = ht->t.size; |
| 1608 | new_size = CMM_LOAD_SHARED(ht->t.resize_target); |
| 1609 | if (old_size < new_size) |
| 1610 | _do_cds_lfht_grow(ht, old_size, new_size); |
| 1611 | else if (old_size > new_size) |
| 1612 | _do_cds_lfht_shrink(ht, old_size, new_size); |
| 1613 | ht->t.resize_initiated = 0; |
| 1614 | /* write resize_initiated before read resize_target */ |
| 1615 | cmm_smp_mb(); |
| 1616 | } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target)); |
| 1617 | } |
| 1618 | |
| 1619 | static |
| 1620 | unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size, |
| 1621 | int growth_order) |
| 1622 | { |
| 1623 | return _uatomic_max(&ht->t.resize_target, |
| 1624 | size << growth_order); |
| 1625 | } |
| 1626 | |
| 1627 | static |
| 1628 | void resize_target_update_count(struct cds_lfht *ht, |
| 1629 | unsigned long count) |
| 1630 | { |
| 1631 | count = max(count, MIN_TABLE_SIZE); |
| 1632 | uatomic_set(&ht->t.resize_target, count); |
| 1633 | } |
| 1634 | |
| 1635 | void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) |
| 1636 | { |
| 1637 | resize_target_update_count(ht, new_size); |
| 1638 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
| 1639 | ht->cds_lfht_rcu_thread_offline(); |
| 1640 | pthread_mutex_lock(&ht->resize_mutex); |
| 1641 | _do_cds_lfht_resize(ht); |
| 1642 | pthread_mutex_unlock(&ht->resize_mutex); |
| 1643 | ht->cds_lfht_rcu_thread_online(); |
| 1644 | } |
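
/*
 * Usage note (sketch): cds_lfht_resize() is the manual resize entry point
 * mentioned in the file header; it can both grow and shrink the table.
 * It takes the resize mutex and may call synchronize_rcu() internally
 * (shrink path), so it must not be called from within an RCU read-side
 * critical section.  For example, cds_lfht_resize(ht, 4096) requests a
 * table of 4096 buckets (the requested size is raised to at least
 * MIN_TABLE_SIZE; the table size itself is always a power of two, see
 * struct rcu_table).
 */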
| 1645 | |
| 1646 | static |
| 1647 | void do_resize_cb(struct rcu_head *head) |
| 1648 | { |
| 1649 | struct rcu_resize_work *work = |
| 1650 | caa_container_of(head, struct rcu_resize_work, head); |
| 1651 | struct cds_lfht *ht = work->ht; |
| 1652 | |
| 1653 | ht->cds_lfht_rcu_thread_offline(); |
| 1654 | pthread_mutex_lock(&ht->resize_mutex); |
| 1655 | _do_cds_lfht_resize(ht); |
| 1656 | pthread_mutex_unlock(&ht->resize_mutex); |
| 1657 | ht->cds_lfht_rcu_thread_online(); |
| 1658 | poison_free(work); |
| 1659 | cmm_smp_mb(); /* finish resize before decrement */ |
| 1660 | uatomic_dec(&ht->in_progress_resize); |
| 1661 | } |
| 1662 | |
| 1663 | static |
| 1664 | void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) |
| 1665 | { |
| 1666 | struct rcu_resize_work *work; |
| 1667 | unsigned long target_size; |
| 1668 | |
| 1669 | target_size = resize_target_update(ht, size, growth); |
| 1670 | /* Store resize_target before read resize_initiated */ |
| 1671 | cmm_smp_mb(); |
| 1672 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) { |
| 1673 | uatomic_inc(&ht->in_progress_resize); |
| 1674 | cmm_smp_mb(); /* increment resize count before calling it */ |
| 1675 | work = malloc(sizeof(*work)); |
| 1676 | work->ht = ht; |
| 1677 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); |
| 1678 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
| 1679 | } |
| 1680 | } |
| 1681 | |
| 1682 | #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) |
| 1683 | |
| 1684 | static |
| 1685 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
| 1686 | unsigned long count) |
| 1687 | { |
| 1688 | struct rcu_resize_work *work; |
| 1689 | |
| 1690 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
| 1691 | return; |
| 1692 | resize_target_update_count(ht, count); |
| 1693 | /* Store resize_target before read resize_initiated */ |
| 1694 | cmm_smp_mb(); |
| 1695 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) { |
| 1696 | uatomic_inc(&ht->in_progress_resize); |
| 1697 | cmm_smp_mb(); /* increment resize count before calling it */ |
| 1698 | work = malloc(sizeof(*work)); |
| 1699 | work->ht = ht; |
| 1700 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); |
| 1701 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
| 1702 | } |
| 1703 | } |
| 1704 | |
| 1705 | #endif |