1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * Based on the following articles:
25 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
26 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
27 * - Michael, M. M. High performance dynamic lock-free hash tables
28 * and list-based sets. In Proceedings of the fourteenth annual ACM
29 * symposium on Parallel algorithms and architectures, ACM Press,
30 * (2002), 73-82.
31 *
32 * Some specificities of this Lock-Free Resizable RCU Hash Table
33 * implementation:
34 *
35 * - RCU read-side critical sections allow readers to perform hash
36 * table lookups and use the returned objects safely by delaying
37 * memory reclamation for a grace period.
38 * - Add and remove operations are lock-free, and do not need to
39 * allocate memory. They need to be executed within an RCU read-side
40 * critical section to ensure the objects they read are valid and to
41 * deal with the cmpxchg ABA problem.
42 * - add and add_unique operations are supported. add_unique checks if
43 * the node key already exists in the hash table. It ensures that no
44 * duplicate key exists. (See the usage sketch following this comment.)
45 * - The resize operation executes concurrently with add/remove/lookup.
46 * - Hash table nodes are contained within a split-ordered list. This
47 * list is ordered by increasing reversed-bit hash value.
48 * - An index of bucket nodes is kept. These bucket nodes are the hash
49 * table "buckets", and they are also chained together in the
50 * split-ordered list, which allows recursive expansion.
51 * - The resize operation for small tables only allows expanding the hash table.
52 * It is triggered automatically by detecting long chains in the add
53 * operation.
54 * - The resize operation for larger tables (and available through an
55 * API) allows both expanding and shrinking the hash table.
56 * - Split-counters are used to keep track of the number of
57 * nodes within the hash table for automatic resize triggering.
58 * - Resize operation initiated by long chain detection is executed by a
59 * call_rcu thread, which keeps lock-freedom of add and remove.
60 * - Resize operations are protected by a mutex.
61 * - The removal operation is split in two parts: first, a "removed"
62 * flag is set in the next pointer within the node to remove. Then,
63 * a "garbage collection" is performed in the bucket containing the
64 * removed node (from the start of the bucket up to the removed node).
65 * All encountered nodes with "removed" flag set in their next
66 * pointers are removed from the linked-list. If the cmpxchg used for
67 * removal fails (due to concurrent garbage-collection or concurrent
68 * add), we retry from the beginning of the bucket. This ensures that
69 * the node with "removed" flag set is removed from the hash table
70 * (not visible to lookups anymore) before the RCU read-side critical
71 * section held across removal ends. Furthermore, this ensures that
72 * the node with "removed" flag set is removed from the linked-list
73 * before its memory is reclaimed. Only the thread whose cmpxchg
74 * successfully set the "removed" flag into a node's next pointer is
75 * considered to have succeeded its removal (and thus owns the node
76 * to reclaim). Because we garbage-collect starting from an invariant
77 * node (the start-of-bucket bucket node) up to the "removed" node
78 * (or until we find a higher reverse-hash), we are sure that a
79 * successful traversal of the chain leads to a chain that is present
80 * in the linked-list (the start node is never removed) and that it
81 * does not contain the "removed" node anymore, even if concurrent
82 * delete/add operations are changing the structure of the list
83 * concurrently.
84 * - The add operation performs garbage collection of buckets if it
85 * encounters nodes with the removed flag set in the bucket where it
86 * wants to add its new node. This ensures lock-freedom of the add
87 * operation by helping the remover unlink nodes from the list rather
88 * than waiting for it to do so.
89 * - An RCU "order table" indexed by log2(hash index) is copied and
90 * expanded by the resize operation. This order table allows finding
91 * the "bucket node" tables.
92 * - There is one bucket node table per hash index order. The size of
93 * each bucket node table is half the number of hashes contained in
94 * this order (except for order 0).
95 * - synchronize_rcu is used to garbage-collect the old bucket node table.
96 * - The per-order bucket node tables contain a compact version of the
97 * hash table nodes. These tables are invariant after they are
98 * populated into the hash table.
99 *
100 * Bucket node tables:
101 *
102 * hash table   hash table   the last      all bucket node tables
103 * order        size         bucket node   0  1  2  3  4  5  6(index)
104 *                           table size
105 * 0            1            1             1
106 * 1            2            1             1  1
107 * 2            4            2             1  1  2
108 * 3            8            4             1  1  2  4
109 * 4            16           8             1  1  2  4  8
110 * 5            32           16            1  1  2  4  8  16
111 * 6            64           32            1  1  2  4  8  16 32
112 *
113 * When growing/shrinking, we only focus on the last bucket node table
114 * whose size is (!order ? 1 : (1 << (order - 1))).
115 *
116 * Example for growing/shrinking:
117 * grow hash table from order 5 to 6: init the index=6 bucket node table
118 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
119 *
120 * A bit of ascii art explanation:
121 *
122 * The order index is off by one compared to the actual power of 2
123 * because we use index 0 to deal with the 0 special-case.
124 *
125 * This shows the nodes for a small table ordered by reversed bits:
126 *
127 * bits reverse
128 * 0 000 000
129 * 4 100 001
130 * 2 010 010
131 * 6 110 011
132 * 1 001 100
133 * 5 101 101
134 * 3 011 110
135 * 7 111 111
136 *
137 * This shows the nodes in order of non-reversed bits, linked by
138 * reversed-bit order.
139 *
140 * order bits reverse
141 * 0 0 000 000
142 * 1 | 1 001 100 <-
143 * 2 | | 2 010 010 <- |
144 * | | | 3 011 110 | <- |
145 * 3 -> | | | 4 100 001 | |
146 * -> | | 5 101 101 |
147 * -> | 6 110 011
148 * -> 7 111 111
149 */
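
/*
 * The sketch below illustrates typical application-level usage of this
 * hash table (create, add, lookup, delete within an RCU read-side
 * critical section). It is not compiled into the library. It assumes
 * the cds_lfht_new() convenience wrapper and cds_lfht_iter_get_node()
 * accessor declared in urcu/rculfhash.h, a match callback of the form
 * int (*)(struct cds_lfht_node *, const void *), and the default urcu
 * flavor (rcu_register_thread, rcu_read_lock, synchronize_rcu, ...).
 * The identity "hash" and the absence of error handling are purely for
 * brevity; a real application would hash its keys properly.
 */
#if 0	/* usage sketch only, not part of the library */
#include <stdlib.h>
#include <urcu.h>		/* RCU flavor: rcu_read_lock() and friends */
#include <urcu/rculfhash.h>	/* cds_lfht_new(), cds_lfht_add(), ... */
#include <urcu/compiler.h>	/* caa_container_of() */

struct mynode {
	unsigned long key;
	struct cds_lfht_node ht_node;	/* chaining within the hash table */
};

/* Match callback: compare a table node against the lookup key. */
static int my_match(struct cds_lfht_node *ht_node, const void *key)
{
	struct mynode *node = caa_container_of(ht_node, struct mynode, ht_node);

	return node->key == *(const unsigned long *) key;
}

static void example(void)
{
	struct cds_lfht *ht;
	struct cds_lfht_iter iter;
	struct mynode *node;
	unsigned long key = 42;

	rcu_register_thread();
	/* 1 initial bucket, 1 bucket minimum, resize automatically. */
	ht = cds_lfht_new(1, 1, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);

	node = malloc(sizeof(*node));
	node->key = key;

	rcu_read_lock();	/* add/lookup/del require a read-side C.S. */
	cds_lfht_add(ht, key /* hash */, &node->ht_node);
	cds_lfht_lookup(ht, key /* hash */, my_match, &key, &iter);
	if (cds_lfht_iter_get_node(&iter))
		(void) cds_lfht_del(ht, &iter);	/* logical removal + gc */
	rcu_read_unlock();

	synchronize_rcu();	/* wait for a grace period before reclaim */
	free(node);
	rcu_unregister_thread();
}
#endif	/* usage sketch */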
150
151 #define _LGPL_SOURCE
152 #include <stdlib.h>
153 #include <errno.h>
154 #include <assert.h>
155 #include <stdio.h>
156 #include <stdint.h>
157 #include <string.h>
158
159 #include "config.h"
160 #include <urcu.h>
161 #include <urcu-call-rcu.h>
162 #include <urcu/arch.h>
163 #include <urcu/uatomic.h>
164 #include <urcu/compiler.h>
165 #include <urcu/rculfhash.h>
167 #include <pthread.h>
168
169 #ifdef DEBUG
170 #define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
171 #else
172 #define dbg_printf(fmt, args...)
173 #endif
174
175 /*
176 * Split-counters lazily update the global counter every 1024
177 * additions/removals. They automatically keep track of whether a resize
178 * is required. We use the bucket length as an indicator of the need to
179 * expand for small tables and for machines lacking per-cpu data support.
180 */
181 #define COUNT_COMMIT_ORDER 10
182 #define DEFAULT_SPLIT_COUNT_MASK 0xFUL
183 #define CHAIN_LEN_TARGET 1
184 #define CHAIN_LEN_RESIZE_THRESHOLD 3
185
186 /*
187 * Define the minimum table size.
188 */
189 #define MIN_TABLE_SIZE 1
190
191 #if (CAA_BITS_PER_LONG == 32)
192 #define MAX_TABLE_ORDER 32
193 #else
194 #define MAX_TABLE_ORDER 64
195 #endif
196
197 /*
198 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
199 */
200 #define MIN_PARTITION_PER_THREAD_ORDER 12
201 #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
202
203 #ifndef min
204 #define min(a, b) ((a) < (b) ? (a) : (b))
205 #endif
206
207 #ifndef max
208 #define max(a, b) ((a) > (b) ? (a) : (b))
209 #endif
210
211 /*
212 * The removed flag needs to be updated atomically with the pointer.
213 * It indicates that no node must attach to the node scheduled for
214 * removal, and that node garbage collection must be performed.
215 * The bucket flag does not need to be updated atomically with the
216 * pointer, but it is added as a pointer low-bit flag to save space.
217 */
218 #define REMOVED_FLAG (1UL << 0)
219 #define BUCKET_FLAG (1UL << 1)
220 #define FLAGS_MASK ((1UL << 2) - 1)
221
222 /* Value of the end pointer. Should not interact with flags. */
223 #define END_VALUE NULL
224
225 /*
226 * ht_items_count: Split-counters counting the number of node additions
227 * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
228 * is set at hash table creation.
229 *
230 * These are free-running counters, never reset to zero. They count the
231 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
232 * operations to update the global counter. We choose a power-of-2 value
233 * for the trigger to deal with 32 or 64-bit overflow of the counter.
234 */
235 struct ht_items_count {
236 unsigned long add, del;
237 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
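
/*
 * For example, with COUNT_COMMIT_ORDER == 10, each split-counter lets its
 * local add/del counts run freely and only touches the global ht->count
 * once every 1024 local operations, pushing 1024 at once (see
 * ht_count_add()/ht_count_del() below). The power-of-2 period keeps the
 * trigger test a simple mask, even when the free-running counters wrap.
 */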
238
239 /*
240 * rcu_level: Contains the per order-index-level bucket node table. The
241 * size of each bucket node table is half the number of hashes contained
242 * in this order (except for order 0). The minimum allocation size
243 * parameter allows combining the bucket node arrays of the lowermost
244 * levels to improve cache locality for small index orders.
245 */
246 struct rcu_level {
247 /* Note: manually update allocation length when adding a field */
248 struct cds_lfht_node nodes[0];
249 };
250
251 /*
252 * rcu_table: Contains the size and desired new size if a resize
253 * operation is in progress, as well as the statically-sized array of
254 * rcu_level pointers.
255 */
256 struct rcu_table {
257 unsigned long size; /* always a power of 2, shared (RCU) */
258 unsigned long resize_target;
259 int resize_initiated;
260 struct rcu_level *tbl[MAX_TABLE_ORDER];
261 };
262
263 /*
264 * cds_lfht: Top-level data structure representing a lock-free hash
265 * table. Defined in the implementation file to make it an opaque
266 * cookie to users.
267 */
268 struct cds_lfht {
269 struct rcu_table t;
270 unsigned long min_alloc_order;
271 unsigned long min_alloc_size;
272 int flags;
273 /*
274 * We need to put the work threads offline (QSBR) when taking this
275 * mutex, because we use synchronize_rcu within this mutex critical
276 * section, which waits on read-side critical sections, and could
277 * therefore cause grace-period deadlock if we hold off RCU G.P.
278 * completion.
279 */
280 pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
281 unsigned int in_progress_resize, in_progress_destroy;
282 void (*cds_lfht_call_rcu)(struct rcu_head *head,
283 void (*func)(struct rcu_head *head));
284 void (*cds_lfht_synchronize_rcu)(void);
285 void (*cds_lfht_rcu_read_lock)(void);
286 void (*cds_lfht_rcu_read_unlock)(void);
287 void (*cds_lfht_rcu_thread_offline)(void);
288 void (*cds_lfht_rcu_thread_online)(void);
289 void (*cds_lfht_rcu_register_thread)(void);
290 void (*cds_lfht_rcu_unregister_thread)(void);
291 pthread_attr_t *resize_attr; /* Resize threads attributes */
292 long count; /* global approximate item count */
293 struct ht_items_count *split_count; /* split item count */
294 };
295
296 /*
297 * rcu_resize_work: Contains arguments passed to RCU worker thread
298 * responsible for performing lazy resize.
299 */
300 struct rcu_resize_work {
301 struct rcu_head head;
302 struct cds_lfht *ht;
303 };
304
305 /*
306 * partition_resize_work: Contains arguments passed to worker threads
307 * executing the hash table resize on partitions of the hash table
308 * assigned to each processor's worker thread.
309 */
310 struct partition_resize_work {
311 pthread_t thread_id;
312 struct cds_lfht *ht;
313 unsigned long i, start, len;
314 void (*fct)(struct cds_lfht *ht, unsigned long i,
315 unsigned long start, unsigned long len);
316 };
317
318 static
319 void _cds_lfht_add(struct cds_lfht *ht,
320 cds_lfht_match_fct match,
321 void *key,
322 unsigned long size,
323 struct cds_lfht_node *node,
324 struct cds_lfht_iter *unique_ret,
325 int bucket);
326
327 /*
328 * Algorithm to reverse bits in a word by lookup table, extended to
329 * 64-bit words.
330 * Source:
331 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
332 * Originally from Public Domain.
333 */
334
335 static const uint8_t BitReverseTable256[256] =
336 {
337 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
338 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
339 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
340 R6(0), R6(2), R6(1), R6(3)
341 };
342 #undef R2
343 #undef R4
344 #undef R6
345
346 static
347 uint8_t bit_reverse_u8(uint8_t v)
348 {
349 return BitReverseTable256[v];
350 }
351
352 static __attribute__((unused))
353 uint32_t bit_reverse_u32(uint32_t v)
354 {
355 return ((uint32_t) bit_reverse_u8(v) << 24) |
356 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
357 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
358 ((uint32_t) bit_reverse_u8(v >> 24));
359 }
360
361 static __attribute__((unused))
362 uint64_t bit_reverse_u64(uint64_t v)
363 {
364 return ((uint64_t) bit_reverse_u8(v) << 56) |
365 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
366 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
367 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
368 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
369 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
370 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
371 ((uint64_t) bit_reverse_u8(v >> 56));
372 }
373
374 static
375 unsigned long bit_reverse_ulong(unsigned long v)
376 {
377 #if (CAA_BITS_PER_LONG == 32)
378 return bit_reverse_u32(v);
379 #else
380 return bit_reverse_u64(v);
381 #endif
382 }
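
/*
 * Quick illustration of the table-driven reversal above (a sketch, not
 * compiled into the library): reversing twice yields the original value,
 * and a single set bit moves to its mirrored position.
 */
#if 0	/* illustration only */
	assert(bit_reverse_u8(0x01) == 0x80);
	assert(bit_reverse_u8(0xA5) == 0xA5);	/* 10100101 is a palindrome */
	assert(bit_reverse_u32(0x00000001U) == 0x80000000U);
	assert(bit_reverse_ulong(bit_reverse_ulong(0xdeadbeefUL)) == 0xdeadbeefUL);
#endif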
383
384 /*
385 * fls: returns the position of the most significant bit.
386 * Returns 0 if no bit is set, else returns the position of the most
387 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
388 */
389 #if defined(__i386) || defined(__x86_64)
390 static inline
391 unsigned int fls_u32(uint32_t x)
392 {
393 int r;
394
395 asm("bsrl %1,%0\n\t"
396 "jnz 1f\n\t"
397 "movl $-1,%0\n\t"
398 "1:\n\t"
399 : "=r" (r) : "rm" (x));
400 return r + 1;
401 }
402 #define HAS_FLS_U32
403 #endif
404
405 #if defined(__x86_64)
406 static inline
407 unsigned int fls_u64(uint64_t x)
408 {
409 long r;
410
411 asm("bsrq %1,%0\n\t"
412 "jnz 1f\n\t"
413 "movq $-1,%0\n\t"
414 "1:\n\t"
415 : "=r" (r) : "rm" (x));
416 return r + 1;
417 }
418 #define HAS_FLS_U64
419 #endif
420
421 #ifndef HAS_FLS_U64
422 static __attribute__((unused))
423 unsigned int fls_u64(uint64_t x)
424 {
425 unsigned int r = 64;
426
427 if (!x)
428 return 0;
429
430 if (!(x & 0xFFFFFFFF00000000ULL)) {
431 x <<= 32;
432 r -= 32;
433 }
434 if (!(x & 0xFFFF000000000000ULL)) {
435 x <<= 16;
436 r -= 16;
437 }
438 if (!(x & 0xFF00000000000000ULL)) {
439 x <<= 8;
440 r -= 8;
441 }
442 if (!(x & 0xF000000000000000ULL)) {
443 x <<= 4;
444 r -= 4;
445 }
446 if (!(x & 0xC000000000000000ULL)) {
447 x <<= 2;
448 r -= 2;
449 }
450 if (!(x & 0x8000000000000000ULL)) {
451 x <<= 1;
452 r -= 1;
453 }
454 return r;
455 }
456 #endif
457
458 #ifndef HAS_FLS_U32
459 static __attribute__((unused))
460 unsigned int fls_u32(uint32_t x)
461 {
462 unsigned int r = 32;
463
464 if (!x)
465 return 0;
466 if (!(x & 0xFFFF0000U)) {
467 x <<= 16;
468 r -= 16;
469 }
470 if (!(x & 0xFF000000U)) {
471 x <<= 8;
472 r -= 8;
473 }
474 if (!(x & 0xF0000000U)) {
475 x <<= 4;
476 r -= 4;
477 }
478 if (!(x & 0xC0000000U)) {
479 x <<= 2;
480 r -= 2;
481 }
482 if (!(x & 0x80000000U)) {
483 x <<= 1;
484 r -= 1;
485 }
486 return r;
487 }
488 #endif
489
490 unsigned int fls_ulong(unsigned long x)
491 {
492 #if (CAA_BITS_PER_LONG == 32)
493 return fls_u32(x);
494 #else
495 return fls_u64(x);
496 #endif
497 }
498
499 /*
500 * Return the minimum order for which x <= (1UL << order).
501 * Return -1 if x is 0.
502 */
503 int get_count_order_u32(uint32_t x)
504 {
505 if (!x)
506 return -1;
507
508 return fls_u32(x - 1);
509 }
510
511 /*
512 * Return the minimum order for which x <= (1UL << order).
513 * Return -1 if x is 0.
514 */
515 int get_count_order_ulong(unsigned long x)
516 {
517 if (!x)
518 return -1;
519
520 return fls_ulong(x - 1);
521 }
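
/*
 * Worked examples for the helpers above (illustration only): the "count
 * order" is the smallest order such that x <= (1UL << order), which is how
 * table sizes are mapped to their level index.
 */
#if 0	/* illustration only */
	assert(get_count_order_ulong(1) == 0);	/* 1 <= 1 << 0 */
	assert(get_count_order_ulong(4) == 2);	/* 4 <= 1 << 2 */
	assert(get_count_order_ulong(5) == 3);	/* 5 needs 1 << 3 */
	assert(get_count_order_ulong(8) == 3);
	assert(get_count_order_ulong(9) == 4);
#endif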
522
523 #ifdef POISON_FREE
524 #define poison_free(ptr) \
525 do { \
526 if (ptr) { \
527 memset(ptr, 0x42, sizeof(*(ptr))); \
528 free(ptr); \
529 } \
530 } while (0)
531 #else
532 #define poison_free(ptr) free(ptr)
533 #endif
534
535 static
536 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
537
538 static
539 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
540 unsigned long count);
541
542 static long nr_cpus_mask = -1;
543 static long split_count_mask = -1;
544
545 #if defined(HAVE_SYSCONF)
546 static void ht_init_nr_cpus_mask(void)
547 {
548 long maxcpus;
549
550 maxcpus = sysconf(_SC_NPROCESSORS_CONF);
551 if (maxcpus <= 0) {
552 nr_cpus_mask = -2;
553 return;
554 }
555 /*
556 * round up number of CPUs to next power of two, so we
557 * can use & for modulo.
558 */
559 maxcpus = 1UL << get_count_order_ulong(maxcpus);
560 nr_cpus_mask = maxcpus - 1;
561 }
562 #else /* #if defined(HAVE_SYSCONF) */
563 static void ht_init_nr_cpus_mask(void)
564 {
565 nr_cpus_mask = -2;
566 }
567 #endif /* #else #if defined(HAVE_SYSCONF) */
568
569 static
570 void alloc_split_items_count(struct cds_lfht *ht)
571 {
572 struct ht_items_count *count;
573
574 if (nr_cpus_mask == -1) {
575 ht_init_nr_cpus_mask();
576 if (nr_cpus_mask < 0)
577 split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
578 else
579 split_count_mask = nr_cpus_mask;
580 }
581
582 assert(split_count_mask >= 0);
583
584 if (ht->flags & CDS_LFHT_ACCOUNTING) {
585 ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
586 assert(ht->split_count);
587 } else {
588 ht->split_count = NULL;
589 }
590 }
591
592 static
593 void free_split_items_count(struct cds_lfht *ht)
594 {
595 poison_free(ht->split_count);
596 }
597
598 #if defined(HAVE_SCHED_GETCPU)
599 static
600 int ht_get_split_count_index(unsigned long hash)
601 {
602 int cpu;
603
604 assert(split_count_mask >= 0);
605 cpu = sched_getcpu();
606 if (caa_unlikely(cpu < 0))
607 return hash & split_count_mask;
608 else
609 return cpu & split_count_mask;
610 }
611 #else /* #if defined(HAVE_SCHED_GETCPU) */
612 static
613 int ht_get_split_count_index(unsigned long hash)
614 {
615 return hash & split_count_mask;
616 }
617 #endif /* #else #if defined(HAVE_SCHED_GETCPU) */
618
619 static
620 void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
621 {
622 unsigned long split_count;
623 int index;
624
625 if (caa_unlikely(!ht->split_count))
626 return;
627 index = ht_get_split_count_index(hash);
628 split_count = uatomic_add_return(&ht->split_count[index].add, 1);
629 if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
630 long count;
631
632 dbg_printf("add split count %lu\n", split_count);
633 count = uatomic_add_return(&ht->count,
634 1UL << COUNT_COMMIT_ORDER);
635 /* If power of 2 */
636 if (!(count & (count - 1))) {
637 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
638 return;
639 dbg_printf("add set global %ld\n", count);
640 cds_lfht_resize_lazy_count(ht, size,
641 count >> (CHAIN_LEN_TARGET - 1));
642 }
643 }
644 }
645
646 static
647 void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
648 {
649 unsigned long split_count;
650 int index;
651
652 if (caa_unlikely(!ht->split_count))
653 return;
654 index = ht_get_split_count_index(hash);
655 split_count = uatomic_add_return(&ht->split_count[index].del, 1);
656 if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
657 long count;
658
659 dbg_printf("del split count %lu\n", split_count);
660 count = uatomic_add_return(&ht->count,
661 -(1UL << COUNT_COMMIT_ORDER));
662 /* If power of 2 */
663 if (!(count & (count - 1))) {
664 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
665 return;
666 dbg_printf("del set global %ld\n", count);
667 /*
668 * Don't shrink table if the number of nodes is below a
669 * certain threshold.
670 */
671 if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
672 return;
673 cds_lfht_resize_lazy_count(ht, size,
674 count >> (CHAIN_LEN_TARGET - 1));
675 }
676 }
677 }
678
679 static
680 void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
681 {
682 unsigned long count;
683
684 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
685 return;
686 count = uatomic_read(&ht->count);
687 /*
688 * Use bucket-local length for small table expand and for
689 * environments lacking per-cpu data support.
690 */
691 if (count >= (1UL << COUNT_COMMIT_ORDER))
692 return;
693 if (chain_len > 100)
694 dbg_printf("WARNING: large chain length: %u.\n",
695 chain_len);
696 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
697 cds_lfht_resize_lazy_grow(ht, size,
698 get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
699 }
700
701 static
702 struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
703 {
704 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
705 }
706
707 static
708 int is_removed(struct cds_lfht_node *node)
709 {
710 return ((unsigned long) node) & REMOVED_FLAG;
711 }
712
713 static
714 struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
715 {
716 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
717 }
718
719 static
720 int is_bucket(struct cds_lfht_node *node)
721 {
722 return ((unsigned long) node) & BUCKET_FLAG;
723 }
724
725 static
726 struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
727 {
728 return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
729 }
730
731 static
732 struct cds_lfht_node *get_end(void)
733 {
734 return (struct cds_lfht_node *) END_VALUE;
735 }
736
737 static
738 int is_end(struct cds_lfht_node *node)
739 {
740 return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
741 }
742
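/*
 * Atomically set *ptr to v, but only if this increases its value.
 * Returns the value *ptr held beforehand (>= v when no store was
 * performed). Used below to make the resize target grow monotonically.
 */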
743 static
744 unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
745 unsigned long v)
746 {
747 unsigned long old1, old2;
748
749 old1 = uatomic_read(ptr);
750 do {
751 old2 = old1;
752 if (old2 >= v)
753 return old2;
754 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
755 return old2;
756 }
757
758 static
759 struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
760 unsigned long hash)
761 {
762 unsigned long index, order;
763
764 assert(size > 0);
765 index = hash & (size - 1);
766
767 if (index < ht->min_alloc_size) {
768 dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
769 hash, index);
770 return &ht->t.tbl[0]->nodes[index];
771 }
772 /*
773 * equivalent to get_count_order_ulong(index + 1), but optimizes
774 * away the non-existing 0 special-case for
775 * get_count_order_ulong.
776 */
777 order = fls_ulong(index);
778 dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
779 hash, index, order, index & ((1UL << (order - 1)) - 1));
780 return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
781 }
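
/*
 * Worked example for lookup_bucket(), assuming min_alloc_size == 1 and
 * size == 8 (so index = hash & 7): index 0 lives in t.tbl[0]->nodes[0];
 * index 1 has order fls(1) == 1 and maps to t.tbl[1]->nodes[0]; indexes
 * 2-3 have order 2 and map to t.tbl[2]->nodes[0..1]; indexes 4-7 have
 * order 3 and map to t.tbl[3]->nodes[0..3]. This matches the per-order
 * bucket node table sizes 1, 1, 2, 4 shown in the header comment.
 */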
782
783 /*
784 * Remove all logically deleted nodes from a bucket up to a certain node key.
785 */
786 static
787 void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
788 {
789 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
790
791 assert(!is_bucket(bucket));
792 assert(!is_removed(bucket));
793 assert(!is_bucket(node));
794 assert(!is_removed(node));
795 for (;;) {
796 iter_prev = bucket;
797 /* We can always skip the bucket node initially */
798 iter = rcu_dereference(iter_prev->next);
799 assert(!is_removed(iter));
800 assert(iter_prev->reverse_hash <= node->reverse_hash);
801 /*
802 * We should never be called with bucket (start of chain)
803 * and logically removed node (end of path compression
804 * marker) being the actual same node. This would be a
805 * bug in the algorithm implementation.
806 */
807 assert(bucket != node);
808 for (;;) {
809 if (caa_unlikely(is_end(iter)))
810 return;
811 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
812 return;
813 next = rcu_dereference(clear_flag(iter)->next);
814 if (caa_likely(is_removed(next)))
815 break;
816 iter_prev = clear_flag(iter);
817 iter = next;
818 }
819 assert(!is_removed(iter));
820 if (is_bucket(iter))
821 new_next = flag_bucket(clear_flag(next));
822 else
823 new_next = clear_flag(next);
824 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
825 }
826 return;
827 }
828
829 static
830 int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
831 struct cds_lfht_node *old_node,
832 struct cds_lfht_node *old_next,
833 struct cds_lfht_node *new_node)
834 {
835 struct cds_lfht_node *bucket, *ret_next;
836
837 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
838 return -ENOENT;
839
840 assert(!is_removed(old_node));
841 assert(!is_bucket(old_node));
842 assert(!is_removed(new_node));
843 assert(!is_bucket(new_node));
844 assert(new_node != old_node);
845 for (;;) {
846 /* Insert after node to be replaced */
847 if (is_removed(old_next)) {
848 /*
849 * Too late, the old node has been removed under us
850 * between lookup and replace. Fail.
851 */
852 return -ENOENT;
853 }
854 assert(!is_bucket(old_next));
855 assert(new_node != clear_flag(old_next));
856 new_node->next = clear_flag(old_next);
857 /*
858 * Here is the whole trick for lock-free replace: we add
859 * the replacement node _after_ the node we want to
860 * replace by atomically setting its next pointer at the
861 * same time we set its removal flag. Given that
862 * the lookups/get next use an iterator aware of the
863 * next pointer, they will either skip the old node due
864 * to the removal flag and see the new node, or use
865 * the old node, but will not see the new one.
866 */
867 ret_next = uatomic_cmpxchg(&old_node->next,
868 old_next, flag_removed(new_node));
869 if (ret_next == old_next)
870 break; /* We performed the replacement. */
871 old_next = ret_next;
872 }
873
874 /*
875 * Ensure that the old node is not visible to readers anymore:
876 * lookup for the node, and remove it (along with any other
877 * logically removed node) if found.
878 */
879 bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
880 _cds_lfht_gc_bucket(bucket, new_node);
881
882 assert(is_removed(rcu_dereference(old_node->next)));
883 return 0;
884 }
885
886 /*
887 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
888 * mode. A NULL unique_ret allows creation of duplicate keys.
889 */
890 static
891 void _cds_lfht_add(struct cds_lfht *ht,
892 cds_lfht_match_fct match,
893 void *key,
894 unsigned long size,
895 struct cds_lfht_node *node,
896 struct cds_lfht_iter *unique_ret,
897 int bucket_flag)
898 {
899 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
900 *return_node;
901 struct cds_lfht_node *bucket;
902
903 assert(!is_bucket(node));
904 assert(!is_removed(node));
905 bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
906 for (;;) {
907 uint32_t chain_len = 0;
908
909 /*
910 * iter_prev points to the non-removed node prior to the
911 * insert location.
912 */
913 iter_prev = bucket;
914 /* We can always skip the bucket node initially */
915 iter = rcu_dereference(iter_prev->next);
916 assert(iter_prev->reverse_hash <= node->reverse_hash);
917 for (;;) {
918 if (caa_unlikely(is_end(iter)))
919 goto insert;
920 if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
921 goto insert;
922
923 /* bucket node is the first node of the identical-hash-value chain */
924 if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
925 goto insert;
926
927 next = rcu_dereference(clear_flag(iter)->next);
928 if (caa_unlikely(is_removed(next)))
929 goto gc_node;
930
931 /* uniquely add */
932 if (unique_ret
933 && !is_bucket(next)
934 && clear_flag(iter)->reverse_hash == node->reverse_hash) {
935 struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
936
937 /*
938 * uniquely adding inserts the node as the first
939 * node of the identical-hash-value node chain.
940 *
941 * This semantic ensures that no duplicated keys
942 * should ever be observable in the table
943 * (including when observing the table node by
944 * node through forward iteration).
945 */
946 cds_lfht_next_duplicate(ht, match, key, &d_iter);
947 if (!d_iter.node)
948 goto insert;
949
950 *unique_ret = d_iter;
951 return;
952 }
953
954 /* Only account for identical reverse hash once */
955 if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
956 && !is_bucket(next))
957 check_resize(ht, size, ++chain_len);
958 iter_prev = clear_flag(iter);
959 iter = next;
960 }
961
962 insert:
963 assert(node != clear_flag(iter));
964 assert(!is_removed(iter_prev));
965 assert(!is_removed(iter));
966 assert(iter_prev != node);
967 if (!bucket_flag)
968 node->next = clear_flag(iter);
969 else
970 node->next = flag_bucket(clear_flag(iter));
971 if (is_bucket(iter))
972 new_node = flag_bucket(node);
973 else
974 new_node = node;
975 if (uatomic_cmpxchg(&iter_prev->next, iter,
976 new_node) != iter) {
977 continue; /* retry */
978 } else {
979 return_node = node;
980 goto end;
981 }
982
983 gc_node:
984 assert(!is_removed(iter));
985 if (is_bucket(iter))
986 new_next = flag_bucket(clear_flag(next));
987 else
988 new_next = clear_flag(next);
989 (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
990 /* retry */
991 }
992 end:
993 if (unique_ret) {
994 unique_ret->node = return_node;
995 /* unique_ret->next left unset, never used. */
996 }
997 }
998
999 static
1000 int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
1001 struct cds_lfht_node *node,
1002 int bucket_removal)
1003 {
1004 struct cds_lfht_node *bucket, *next, *old;
1005
1006 if (!node) /* Return -ENOENT if asked to delete NULL node */
1007 return -ENOENT;
1008
1009 /* logically delete the node */
1010 assert(!is_bucket(node));
1011 assert(!is_removed(node));
1012 old = rcu_dereference(node->next);
1013 do {
1014 struct cds_lfht_node *new_next;
1015
1016 next = old;
1017 if (caa_unlikely(is_removed(next)))
1018 return -ENOENT;
1019 if (bucket_removal)
1020 assert(is_bucket(next));
1021 else
1022 assert(!is_bucket(next));
1023 new_next = flag_removed(next);
1024 old = uatomic_cmpxchg(&node->next, next, new_next);
1025 } while (old != next);
1026 /* We performed the (logical) deletion. */
1027
1028 /*
1029 * Ensure that the node is not visible to readers anymore: lookup for
1030 * the node, and remove it (along with any other logically removed node)
1031 * if found.
1032 */
1033 bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
1034 _cds_lfht_gc_bucket(bucket, node);
1035
1036 assert(is_removed(rcu_dereference(node->next)));
1037 return 0;
1038 }
1039
1040 static
1041 void *partition_resize_thread(void *arg)
1042 {
1043 struct partition_resize_work *work = arg;
1044
1045 work->ht->cds_lfht_rcu_register_thread();
1046 work->fct(work->ht, work->i, work->start, work->len);
1047 work->ht->cds_lfht_rcu_unregister_thread();
1048 return NULL;
1049 }
1050
1051 static
1052 void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
1053 unsigned long len,
1054 void (*fct)(struct cds_lfht *ht, unsigned long i,
1055 unsigned long start, unsigned long len))
1056 {
1057 unsigned long partition_len;
1058 struct partition_resize_work *work;
1059 int thread, ret;
1060 unsigned long nr_threads;
1061
1062 /*
1063 * Note: nr_cpus_mask + 1 is always power of 2.
1064 * We spawn just the number of threads we need to satisfy the minimum
1065 * partition size, up to the number of CPUs in the system.
1066 */
1067 if (nr_cpus_mask > 0) {
1068 nr_threads = min(nr_cpus_mask + 1,
1069 len >> MIN_PARTITION_PER_THREAD_ORDER);
1070 } else {
1071 nr_threads = 1;
1072 }
1073 partition_len = len >> get_count_order_ulong(nr_threads);
1074 work = calloc(nr_threads, sizeof(*work));
1075 assert(work);
1076 for (thread = 0; thread < nr_threads; thread++) {
1077 work[thread].ht = ht;
1078 work[thread].i = i;
1079 work[thread].len = partition_len;
1080 work[thread].start = thread * partition_len;
1081 work[thread].fct = fct;
1082 ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
1083 partition_resize_thread, &work[thread]);
1084 assert(!ret);
1085 }
1086 for (thread = 0; thread < nr_threads; thread++) {
1087 ret = pthread_join(work[thread].thread_id, NULL);
1088 assert(!ret);
1089 }
1090 free(work);
1091 }
1092
1093 /*
1094 * Holding RCU read lock to protect _cds_lfht_add against memory
1095 * reclaim that could be performed by other call_rcu worker threads (ABA
1096 * problem).
1097 *
1098 * When we reach a certain length, we can split this population phase over
1099 * many worker threads, based on the number of CPUs available in the system.
1100 * This should therefore keep the expand from lagging behind too
1101 * many concurrent insertion threads, by relying on the scheduler's ability
1102 * to schedule bucket node population fairly with insertions.
1103 */
1104 static
1105 void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
1106 unsigned long start, unsigned long len)
1107 {
1108 unsigned long j;
1109
1110 assert(i > ht->min_alloc_order);
1111 ht->cds_lfht_rcu_read_lock();
1112 for (j = start; j < start + len; j++) {
1113 struct cds_lfht_node *new_node = &ht->t.tbl[i]->nodes[j];
1114
1115 dbg_printf("init populate: i %lu j %lu hash %lu\n",
1116 i, j, (1UL << (i - 1)) + j);
1117 new_node->reverse_hash =
1118 bit_reverse_ulong((1UL << (i - 1)) + j);
1119 _cds_lfht_add(ht, NULL, NULL, 1UL << (i - 1),
1120 new_node, NULL, 1);
1121 }
1122 ht->cds_lfht_rcu_read_unlock();
1123 }
1124
1125 static
1126 void init_table_populate(struct cds_lfht *ht, unsigned long i,
1127 unsigned long len)
1128 {
1129 assert(nr_cpus_mask != -1);
1130 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
1131 ht->cds_lfht_rcu_thread_online();
1132 init_table_populate_partition(ht, i, 0, len);
1133 ht->cds_lfht_rcu_thread_offline();
1134 return;
1135 }
1136 partition_resize_helper(ht, i, len, init_table_populate_partition);
1137 }
1138
1139 static
1140 void init_table(struct cds_lfht *ht,
1141 unsigned long first_order, unsigned long last_order)
1142 {
1143 unsigned long i;
1144
1145 dbg_printf("init table: first_order %lu last_order %lu\n",
1146 first_order, last_order);
1147 assert(first_order > ht->min_alloc_order);
1148 for (i = first_order; i <= last_order; i++) {
1149 unsigned long len;
1150
1151 len = 1UL << (i - 1);
1152 dbg_printf("init order %lu len: %lu\n", i, len);
1153
1154 /* Stop expand if the resize target changes under us */
1155 if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
1156 break;
1157
1158 ht->t.tbl[i] = calloc(1, len * sizeof(struct cds_lfht_node));
1159 assert(ht->t.tbl[i]);
1160
1161 /*
1162 * Set all bucket nodes reverse hash values for a level and
1163 * link all bucket nodes into the table.
1164 */
1165 init_table_populate(ht, i, len);
1166
1167 /*
1168 * Update table size.
1169 */
1170 cmm_smp_wmb(); /* populate data before RCU size */
1171 CMM_STORE_SHARED(ht->t.size, 1UL << i);
1172
1173 dbg_printf("init new size: %lu\n", 1UL << i);
1174 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1175 break;
1176 }
1177 }
1178
1179 /*
1180 * Holding RCU read lock to protect _cds_lfht_remove against memory
1181 * reclaim that could be performed by other call_rcu worker threads (ABA
1182 * problem).
1183 * For a single level, we logically remove and garbage collect each node.
1184 *
1185 * As a design choice, we perform logical removal and garbage collection on a
1186 * node-per-node basis to simplify this algorithm. We also assume that keeping
1187 * good cache locality of the operation outweighs the possible performance gain
1188 * that could be achieved by batching garbage collection for multiple levels.
1189 * However, this would have to be justified by benchmarks.
1190 *
1191 * Concurrent removal and add operations are helping us perform garbage
1192 * collection of logically removed nodes. We guarantee that all logically
1193 * removed nodes have been garbage-collected (unlinked) before call_rcu is
1194 * invoked to free a whole level of bucket nodes (after a grace period).
1195 *
1196 * Logical removal and garbage collection can therefore be done in batch or on a
1197 * node-per-node basis, as long as the guarantee above holds.
1198 *
1199 * When we reach a certain length, we can split this removal over many worker
1200 * threads, based on the number of CPUs available in the system. This should
1201 * take care of not letting the resize process lag behind too many concurrent
1202 * updater threads actively inserting into the hash table.
1203 */
1204 static
1205 void remove_table_partition(struct cds_lfht *ht, unsigned long i,
1206 unsigned long start, unsigned long len)
1207 {
1208 unsigned long j;
1209
1210 assert(i > ht->min_alloc_order);
1211 ht->cds_lfht_rcu_read_lock();
1212 for (j = start; j < start + len; j++) {
1213 struct cds_lfht_node *fini_node = &ht->t.tbl[i]->nodes[j];
1214
1215 dbg_printf("remove entry: i %lu j %lu hash %lu\n",
1216 i, j, (1UL << (i - 1)) + j);
1217 fini_node->reverse_hash =
1218 bit_reverse_ulong((1UL << (i - 1)) + j);
1219 (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
1220 }
1221 ht->cds_lfht_rcu_read_unlock();
1222 }
1223
1224 static
1225 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
1226 {
1227
1228 assert(nr_cpus_mask != -1);
1229 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
1230 ht->cds_lfht_rcu_thread_online();
1231 remove_table_partition(ht, i, 0, len);
1232 ht->cds_lfht_rcu_thread_offline();
1233 return;
1234 }
1235 partition_resize_helper(ht, i, len, remove_table_partition);
1236 }
1237
1238 static
1239 void fini_table(struct cds_lfht *ht,
1240 unsigned long first_order, unsigned long last_order)
1241 {
1242 long i;
1243 void *free_by_rcu = NULL;
1244
1245 dbg_printf("fini table: first_order %lu last_order %lu\n",
1246 first_order, last_order);
1247 assert(first_order > ht->min_alloc_order);
1248 for (i = last_order; i >= first_order; i--) {
1249 unsigned long len;
1250
1251 len = 1UL << (i - 1);
1252 dbg_printf("fini order %lu len: %lu\n", i, len);
1253
1254 /* Stop shrink if the resize target changes under us */
1255 if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
1256 break;
1257
1258 cmm_smp_wmb(); /* populate data before RCU size */
1259 CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
1260
1261 /*
1262 * We need to wait for all add operations to reach Q.S. (and
1263 * thus use the new table for lookups) before we can start
1264 * releasing the old bucket nodes. Otherwise their lookup will
1265 * return a logically removed node as insert position.
1266 */
1267 ht->cds_lfht_synchronize_rcu();
1268 if (free_by_rcu)
1269 free(free_by_rcu);
1270
1271 /*
1272 * Set "removed" flag in bucket nodes about to be removed.
1273 * Unlink all now-logically-removed bucket node pointers.
1274 * Concurrent add/remove operations are helping us do
1275 * the gc.
1276 */
1277 remove_table(ht, i, len);
1278
1279 free_by_rcu = ht->t.tbl[i];
1280
1281 dbg_printf("fini new size: %lu\n", 1UL << i);
1282 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1283 break;
1284 }
1285
1286 if (free_by_rcu) {
1287 ht->cds_lfht_synchronize_rcu();
1288 free(free_by_rcu);
1289 }
1290 }
1291
1292 static
1293 void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
1294 {
1295 struct cds_lfht_node *prev, *node;
1296 unsigned long order, len, i, j;
1297
1298 ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct cds_lfht_node));
1299 assert(ht->t.tbl[0]);
1300
1301 dbg_printf("create bucket: order %lu index %lu hash %lu\n", 0UL, 0UL, 0UL);
1302 ht->t.tbl[0]->nodes[0].next = flag_bucket(get_end());
1303 ht->t.tbl[0]->nodes[0].reverse_hash = 0;
1304
1305 for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
1306 len = 1UL << (order - 1);
1307 if (order <= ht->min_alloc_order) {
1308 ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
1309 } else {
1310 ht->t.tbl[order] = calloc(1, len * sizeof(struct cds_lfht_node));
1311 assert(ht->t.tbl[order]);
1312 }
1313
1314 i = 0;
1315 prev = ht->t.tbl[i]->nodes;
1316 for (j = 0; j < len; j++) {
1317 if (j & (j - 1)) { /* Between power of 2 */
1318 prev++;
1319 } else if (j) { /* At each power of 2 */
1320 i++;
1321 prev = ht->t.tbl[i]->nodes;
1322 }
1323
1324 node = &ht->t.tbl[order]->nodes[j];
1325 dbg_printf("create bucket: order %lu index %lu hash %lu\n",
1326 order, j, j + len);
1327 node->next = prev->next;
1328 assert(is_bucket(node->next));
1329 node->reverse_hash = bit_reverse_ulong(j + len);
1330 prev->next = flag_bucket(node);
1331 }
1332 }
1333 }
1334
1335 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
1336 unsigned long min_alloc_size,
1337 int flags,
1338 void (*cds_lfht_call_rcu)(struct rcu_head *head,
1339 void (*func)(struct rcu_head *head)),
1340 void (*cds_lfht_synchronize_rcu)(void),
1341 void (*cds_lfht_rcu_read_lock)(void),
1342 void (*cds_lfht_rcu_read_unlock)(void),
1343 void (*cds_lfht_rcu_thread_offline)(void),
1344 void (*cds_lfht_rcu_thread_online)(void),
1345 void (*cds_lfht_rcu_register_thread)(void),
1346 void (*cds_lfht_rcu_unregister_thread)(void),
1347 pthread_attr_t *attr)
1348 {
1349 struct cds_lfht *ht;
1350 unsigned long order;
1351
1352 /* min_alloc_size must be power of two */
1353 if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
1354 return NULL;
1355 /* init_size must be power of two */
1356 if (!init_size || (init_size & (init_size - 1)))
1357 return NULL;
1358 min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
1359 init_size = max(init_size, min_alloc_size);
1360 ht = calloc(1, sizeof(struct cds_lfht));
1361 assert(ht);
1362 ht->flags = flags;
1363 ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
1364 ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
1365 ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
1366 ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
1367 ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
1368 ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
1369 ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
1370 ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
1371 ht->resize_attr = attr;
1372 alloc_split_items_count(ht);
1373 /* this mutex should not nest in read-side C.S. */
1374 pthread_mutex_init(&ht->resize_mutex, NULL);
1375 order = get_count_order_ulong(init_size);
1376 ht->t.resize_target = 1UL << order;
1377 ht->min_alloc_size = min_alloc_size;
1378 ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
1379 cds_lfht_create_bucket(ht, 1UL << order);
1380 ht->t.size = 1UL << order;
1381 return ht;
1382 }
1383
1384 void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
1385 cds_lfht_match_fct match, void *key,
1386 struct cds_lfht_iter *iter)
1387 {
1388 struct cds_lfht_node *node, *next, *bucket;
1389 unsigned long reverse_hash, size;
1390
1391 reverse_hash = bit_reverse_ulong(hash);
1392
1393 size = rcu_dereference(ht->t.size);
1394 bucket = lookup_bucket(ht, size, hash);
1395 /* We can always skip the bucket node initially */
1396 node = rcu_dereference(bucket->next);
1397 node = clear_flag(node);
1398 for (;;) {
1399 if (caa_unlikely(is_end(node))) {
1400 node = next = NULL;
1401 break;
1402 }
1403 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1404 node = next = NULL;
1405 break;
1406 }
1407 next = rcu_dereference(node->next);
1408 assert(node == clear_flag(node));
1409 if (caa_likely(!is_removed(next))
1410 && !is_bucket(next)
1411 && node->reverse_hash == reverse_hash
1412 && caa_likely(match(node, key))) {
1413 break;
1414 }
1415 node = clear_flag(next);
1416 }
1417 assert(!node || !is_bucket(rcu_dereference(node->next)));
1418 iter->node = node;
1419 iter->next = next;
1420 }
1421
1422 void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
1423 void *key, struct cds_lfht_iter *iter)
1424 {
1425 struct cds_lfht_node *node, *next;
1426 unsigned long reverse_hash;
1427
1428 node = iter->node;
1429 reverse_hash = node->reverse_hash;
1430 next = iter->next;
1431 node = clear_flag(next);
1432
1433 for (;;) {
1434 if (caa_unlikely(is_end(node))) {
1435 node = next = NULL;
1436 break;
1437 }
1438 if (caa_unlikely(node->reverse_hash > reverse_hash)) {
1439 node = next = NULL;
1440 break;
1441 }
1442 next = rcu_dereference(node->next);
1443 if (caa_likely(!is_removed(next))
1444 && !is_bucket(next)
1445 && caa_likely(match(node, key))) {
1446 break;
1447 }
1448 node = clear_flag(next);
1449 }
1450 assert(!node || !is_bucket(rcu_dereference(node->next)));
1451 iter->node = node;
1452 iter->next = next;
1453 }
1454
1455 void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1456 {
1457 struct cds_lfht_node *node, *next;
1458
1459 node = clear_flag(iter->next);
1460 for (;;) {
1461 if (caa_unlikely(is_end(node))) {
1462 node = next = NULL;
1463 break;
1464 }
1465 next = rcu_dereference(node->next);
1466 if (caa_likely(!is_removed(next))
1467 && !is_bucket(next)) {
1468 break;
1469 }
1470 node = clear_flag(next);
1471 }
1472 assert(!node || !is_bucket(rcu_dereference(node->next)));
1473 iter->node = node;
1474 iter->next = next;
1475 }
1476
1477 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1478 {
1479 struct cds_lfht_node *lookup;
1480
1481 /*
1482 * Get next after first bucket node. The first bucket node is the
1483 * first node of the linked list.
1484 */
1485 lookup = &ht->t.tbl[0]->nodes[0];
1486 iter->next = lookup->next;
1487 cds_lfht_next(ht, iter);
1488 }
1489
1490 void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
1491 struct cds_lfht_node *node)
1492 {
1493 unsigned long size;
1494
1495 node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
1496 size = rcu_dereference(ht->t.size);
1497 _cds_lfht_add(ht, NULL, NULL, size, node, NULL, 0);
1498 ht_count_add(ht, size, hash);
1499 }
1500
1501 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
1502 unsigned long hash,
1503 cds_lfht_match_fct match,
1504 void *key,
1505 struct cds_lfht_node *node)
1506 {
1507 unsigned long size;
1508 struct cds_lfht_iter iter;
1509
1510 node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
1511 size = rcu_dereference(ht->t.size);
1512 _cds_lfht_add(ht, match, key, size, node, &iter, 0);
1513 if (iter.node == node)
1514 ht_count_add(ht, size, hash);
1515 return iter.node;
1516 }
1517
1518 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
1519 unsigned long hash,
1520 cds_lfht_match_fct match,
1521 void *key,
1522 struct cds_lfht_node *node)
1523 {
1524 unsigned long size;
1525 struct cds_lfht_iter iter;
1526
1527 node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
1528 size = rcu_dereference(ht->t.size);
1529 for (;;) {
1530 _cds_lfht_add(ht, match, key, size, node, &iter, 0);
1531 if (iter.node == node) {
1532 ht_count_add(ht, size, hash);
1533 return NULL;
1534 }
1535
1536 if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
1537 return iter.node;
1538 }
1539 }
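
/*
 * Sketch contrasting the two uniqueness-oriented entry points above
 * (illustration only; ht, key, node and my_match are as in the usage
 * sketch after the header comment, and node2 is a hypothetical second
 * struct mynode carrying the same key). cds_lfht_add_unique() leaves
 * the table unchanged on conflict and returns the pre-existing node,
 * whereas cds_lfht_add_replace() unlinks the conflicting node and
 * returns it. Both calls must run within an RCU read-side critical
 * section.
 */
#if 0	/* illustration only */
	struct cds_lfht_node *ret;

	ret = cds_lfht_add_unique(ht, key /* hash */, my_match, &key, &node->ht_node);
	if (ret != &node->ht_node) {
		/* Key already present: ret is the existing node, ours was not added. */
	}

	ret = cds_lfht_add_replace(ht, key /* hash */, my_match, &key, &node2->ht_node);
	if (ret) {
		/* ret was atomically unlinked; reclaim it only after a grace period. */
	}
#endif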
1540
1541 int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
1542 struct cds_lfht_node *new_node)
1543 {
1544 unsigned long size;
1545
1546 size = rcu_dereference(ht->t.size);
1547 return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
1548 new_node);
1549 }
1550
1551 int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1552 {
1553 unsigned long size, hash;
1554 int ret;
1555
1556 size = rcu_dereference(ht->t.size);
1557 ret = _cds_lfht_del(ht, size, iter->node, 0);
1558 if (!ret) {
1559 hash = bit_reverse_ulong(iter->node->reverse_hash);
1560 ht_count_del(ht, size, hash);
1561 }
1562 return ret;
1563 }
1564
1565 static
1566 int cds_lfht_delete_bucket(struct cds_lfht *ht)
1567 {
1568 struct cds_lfht_node *node;
1569 unsigned long order, i, size;
1570
1571 /* Check that the table is empty */
1572 node = &ht->t.tbl[0]->nodes[0];
1573 do {
1574 node = clear_flag(node)->next;
1575 if (!is_bucket(node))
1576 return -EPERM;
1577 assert(!is_removed(node));
1578 } while (!is_end(node));
1579 /*
1580 * size accessed without rcu_dereference because hash table is
1581 * being destroyed.
1582 */
1583 size = ht->t.size;
1584 /* Internal sanity check: all nodes left should be buckets */
1585 for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
1586 unsigned long len;
1587
1588 len = !order ? 1 : 1UL << (order - 1);
1589 for (i = 0; i < len; i++) {
1590 dbg_printf("delete order %lu i %lu hash %lu\n",
1591 order, i,
1592 bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
1593 assert(is_bucket(ht->t.tbl[order]->nodes[i].next));
1594 }
1595
1596 if (order == ht->min_alloc_order)
1597 poison_free(ht->t.tbl[0]);
1598 else if (order > ht->min_alloc_order)
1599 poison_free(ht->t.tbl[order]);
1600 /* Nothing to delete for order < ht->min_alloc_order */
1601 }
1602 return 0;
1603 }
1604
1605 /*
1606 * Should only be called when no concurrent readers or writers can
1607 * possibly access the table.
1608 */
1609 int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
1610 {
1611 int ret;
1612
1613 /* Wait for in-flight resize operations to complete */
1614 _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
1615 cmm_smp_mb(); /* Store destroy before load resize */
1616 while (uatomic_read(&ht->in_progress_resize))
1617 poll(NULL, 0, 100); /* wait for 100ms */
1618 ret = cds_lfht_delete_bucket(ht);
1619 if (ret)
1620 return ret;
1621 free_split_items_count(ht);
1622 if (attr)
1623 *attr = ht->resize_attr;
1624 poison_free(ht);
1625 return ret;
1626 }
1627
1628 void cds_lfht_count_nodes(struct cds_lfht *ht,
1629 long *approx_before,
1630 unsigned long *count,
1631 unsigned long *removed,
1632 long *approx_after)
1633 {
1634 struct cds_lfht_node *node, *next;
1635 unsigned long nr_bucket = 0;
1636
1637 *approx_before = 0;
1638 if (ht->split_count) {
1639 int i;
1640
1641 for (i = 0; i < split_count_mask + 1; i++) {
1642 *approx_before += uatomic_read(&ht->split_count[i].add);
1643 *approx_before -= uatomic_read(&ht->split_count[i].del);
1644 }
1645 }
1646
1647 *count = 0;
1648 *removed = 0;
1649
1650 /* Count non-bucket nodes in the table */
1651 node = &ht->t.tbl[0]->nodes[0];
1652 do {
1653 next = rcu_dereference(node->next);
1654 if (is_removed(next)) {
1655 if (!is_bucket(next))
1656 (*removed)++;
1657 else
1658 (nr_bucket)++;
1659 } else if (!is_bucket(next))
1660 (*count)++;
1661 else
1662 (nr_bucket)++;
1663 node = clear_flag(next);
1664 } while (!is_end(node));
1665 dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
1666 *approx_after = 0;
1667 if (ht->split_count) {
1668 int i;
1669
1670 for (i = 0; i < split_count_mask + 1; i++) {
1671 *approx_after += uatomic_read(&ht->split_count[i].add);
1672 *approx_after -= uatomic_read(&ht->split_count[i].del);
1673 }
1674 }
1675 }
1676
1677 /* called with resize mutex held */
1678 static
1679 void _do_cds_lfht_grow(struct cds_lfht *ht,
1680 unsigned long old_size, unsigned long new_size)
1681 {
1682 unsigned long old_order, new_order;
1683
1684 old_order = get_count_order_ulong(old_size);
1685 new_order = get_count_order_ulong(new_size);
1686 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1687 old_size, old_order, new_size, new_order);
1688 assert(new_size > old_size);
1689 init_table(ht, old_order + 1, new_order);
1690 }
1691
1692 /* called with resize mutex held */
1693 static
1694 void _do_cds_lfht_shrink(struct cds_lfht *ht,
1695 unsigned long old_size, unsigned long new_size)
1696 {
1697 unsigned long old_order, new_order;
1698
1699 new_size = max(new_size, ht->min_alloc_size);
1700 old_order = get_count_order_ulong(old_size);
1701 new_order = get_count_order_ulong(new_size);
1702 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1703 old_size, old_order, new_size, new_order);
1704 assert(new_size < old_size);
1705
1706 /* Remove and unlink all bucket nodes of the levels being shrunk away. */
1707 fini_table(ht, new_order + 1, old_order);
1708 }
1709
1710
1711 /* called with resize mutex held */
1712 static
1713 void _do_cds_lfht_resize(struct cds_lfht *ht)
1714 {
1715 unsigned long new_size, old_size;
1716
1717 /*
1718 * Resize table, re-do if the target size has changed under us.
1719 */
1720 do {
1721 assert(uatomic_read(&ht->in_progress_resize));
1722 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1723 break;
1724 ht->t.resize_initiated = 1;
1725 old_size = ht->t.size;
1726 new_size = CMM_LOAD_SHARED(ht->t.resize_target);
1727 if (old_size < new_size)
1728 _do_cds_lfht_grow(ht, old_size, new_size);
1729 else if (old_size > new_size)
1730 _do_cds_lfht_shrink(ht, old_size, new_size);
1731 ht->t.resize_initiated = 0;
1732 /* write resize_initiated before read resize_target */
1733 cmm_smp_mb();
1734 } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
1735 }
1736
1737 static
1738 unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
1739 {
1740 return _uatomic_xchg_monotonic_increase(&ht->t.resize_target, new_size);
1741 }
1742
1743 static
1744 void resize_target_update_count(struct cds_lfht *ht,
1745 unsigned long count)
1746 {
1747 count = max(count, ht->min_alloc_size);
1748 uatomic_set(&ht->t.resize_target, count);
1749 }
1750
1751 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
1752 {
1753 resize_target_update_count(ht, new_size);
1754 CMM_STORE_SHARED(ht->t.resize_initiated, 1);
1755 ht->cds_lfht_rcu_thread_offline();
1756 pthread_mutex_lock(&ht->resize_mutex);
1757 _do_cds_lfht_resize(ht);
1758 pthread_mutex_unlock(&ht->resize_mutex);
1759 ht->cds_lfht_rcu_thread_online();
1760 }
1761
1762 static
1763 void do_resize_cb(struct rcu_head *head)
1764 {
1765 struct rcu_resize_work *work =
1766 caa_container_of(head, struct rcu_resize_work, head);
1767 struct cds_lfht *ht = work->ht;
1768
1769 ht->cds_lfht_rcu_thread_offline();
1770 pthread_mutex_lock(&ht->resize_mutex);
1771 _do_cds_lfht_resize(ht);
1772 pthread_mutex_unlock(&ht->resize_mutex);
1773 ht->cds_lfht_rcu_thread_online();
1774 poison_free(work);
1775 cmm_smp_mb(); /* finish resize before decrement */
1776 uatomic_dec(&ht->in_progress_resize);
1777 }
1778
1779 static
1780 void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
1781 {
1782 struct rcu_resize_work *work;
1783
1784 /* Store resize_target before read resize_initiated */
1785 cmm_smp_mb();
1786 if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
1787 uatomic_inc(&ht->in_progress_resize);
1788 cmm_smp_mb(); /* increment resize count before load destroy */
1789 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
1790 uatomic_dec(&ht->in_progress_resize);
1791 return;
1792 }
1793 work = malloc(sizeof(*work));
1794 work->ht = ht;
1795 ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
1796 CMM_STORE_SHARED(ht->t.resize_initiated, 1);
1797 }
1798 }
1799
1800 static
1801 void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
1802 {
1803 unsigned long target_size = size << growth;
1804
1805 if (resize_target_grow(ht, target_size) >= target_size)
1806 return;
1807
1808 __cds_lfht_resize_lazy_launch(ht);
1809 }
1810
1811 /*
1812 * We favor grow operations over shrink. A shrink operation never occurs
1813 * if a grow operation is queued for lazy execution. A grow operation
1814 * cancels any pending shrink lazy execution.
1815 */
1816 static
1817 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
1818 unsigned long count)
1819 {
1820 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
1821 return;
1822 count = max(count, ht->min_alloc_size);
1823 if (count == size)
1824 return; /* Already the right size, no resize needed */
1825 if (count > size) { /* lazy grow */
1826 if (resize_target_grow(ht, count) >= count)
1827 return;
1828 } else { /* lazy shrink */
1829 for (;;) {
1830 unsigned long s;
1831
1832 s = uatomic_cmpxchg(&ht->t.resize_target, size, count);
1833 if (s == size)
1834 break; /* no resize needed */
1835 if (s > size)
1836 return; /* growing is/(was just) in progress */
1837 if (s <= count)
1838 return; /* some other thread does the shrink */
1839 size = s;
1840 }
1841 }
1842 __cds_lfht_resize_lazy_launch(ht);
1843 }