/*
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim for a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key can end up in the table.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash
 *   table. It is triggered automatically by detecting long chains in the
 *   add operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Per-CPU split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread which successfully
 *   set the "removed" flag (with a cmpxchg) into a node's next pointer
 *   is considered to have succeeded its removal (and thus owns the node
 *   to reclaim). Because we garbage-collect starting from an invariant
 *   node (the start-of-bucket dummy node) up to the "removed" node (or
 *   until we find a reverse-hash that is higher), we are sure that a
 *   successful traversal of the chain leads to a chain that is present
 *   in the linked-list (the start node is never removed) and that it
 *   does not contain the "removed" node anymore, even if concurrent
 *   delete/add operations are changing the structure of the list
 *   concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - call_rcu is used to garbage-collect the old order table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * A bit of ascii art explanation:
 *
 * Order index is off by one compared to the actual power of 2 because
 * we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 *  0  000        000
 *  4  100        001
 *  2  010        010
 *  6  110        011
 *  1  001        100
 *  5  101        101
 *  3  011        110
 *  7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 *                 |
 * 1               |  1  001        100             <-    <-
 *                 |  |                              |     |
 * 2               |  |  2  010        010     <-    |     |
 *                 |  |  |  3  011        110   | <- |     |
 * 3               -> |  |  |  4  100        001|  | |     |
 *                    -> |  |     5  101        101 |      |
 *                       -> |        6  110        011     |
 *                          ->          7  111        111
 */
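
/*
 * Reader-side usage sketch (illustration only, not part of the library;
 * "struct mystruct", "lookup_key" and "use()" are hypothetical
 * caller-side names). The lookup and any use of the returned node must
 * stay within a single RCU read-side critical section, as described
 * above:
 *
 *	struct cds_lfht_node *node;
 *
 *	rcu_read_lock();
 *	node = cds_lfht_lookup(ht, lookup_key, sizeof(lookup_key));
 *	if (node)
 *		use(caa_container_of(node, struct mystruct, node));
 *	rcu_read_unlock();
 */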
#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif
/*
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize is
 * required. We use the bucket chain length as the expand indicator for
 * small tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
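
/*
 * Illustration of the lazy counting scheme (matches ht_count_add() and
 * ht_count_remove() below): with COUNT_COMMIT_ORDER == 10, a per-cpu
 * counter only commits to the global approximate counter once every
 * 1024 local operations, e.g.:
 *
 *	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
 *	if (!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
 *		count = uatomic_add_return(&ht->count,
 *					   1UL << COUNT_COMMIT_ORDER);
 */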
/*
 * Define the minimum table size. Protects against hash table resize overload
 * when too many entries are added quickly before the resize can complete.
 * This is especially the case if the table could be shrunk to a size of 1.
 * TODO: we might want to make the add/remove operations help the resize to
 * add or remove dummy nodes when a resize is ongoing to ensure an upper
 * bound on chain length.
 */
#define MIN_TABLE_SIZE			128
#define max(a, b)	((a) > (b) ? (a) : (b))
/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
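
/*
 * Illustration of the pointer-tagging scheme (see clear_flag(),
 * flag_removed() and flag_dummy() below): node pointers are at least
 * 4-byte aligned, so the two low-order bits are free to carry the flags:
 *
 *	tagged = (struct cds_lfht_node *) ((unsigned long) node | REMOVED_FLAG);
 *	node = (struct cds_lfht_node *) ((unsigned long) tagged & ~FLAGS_MASK);
 */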
struct ht_items_count {
	unsigned long add, remove;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct rcu_level {
	struct rcu_head head;
	struct _cds_lfht_node nodes[0];
};

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_level *tbl[0];
};

struct cds_lfht {
	struct rcu_table *t;		/* shared */
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long hash_seed;
	int flags;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
	void (*cds_lfht_synchronize_rcu)(void);
	void (*cds_lfht_rcu_read_lock)(void);
	void (*cds_lfht_rcu_read_unlock)(void);
	unsigned long count;		/* global approximate item count */
	struct ht_items_count *percpu_count;	/* per-cpu item count */
};

struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
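
/*
 * Examples: bit_reverse_u8(0x01) == 0x80 and bit_reverse_u8(0xb4) == 0x2d.
 * Ordering the split-ordered list by reversed hash bits is what lets the
 * table grow without moving nodes: when the number of buckets doubles from
 * 2^k to 2^(k+1), each new dummy node's reversed hash falls exactly in the
 * middle of an existing bucket's reversed-bit range, so the old chain is
 * simply split in two.
 */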
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
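
/*
 * Worked examples (illustration only): fls_u32(0) == 0, fls_u32(1) == 1,
 * fls_u32(0x80000000U) == 32. get_count_order_u32() returns the smallest
 * order such that (1 << order) >= x, e.g. get_count_order_u32(8) == 3 and
 * get_count_order_u32(9) == 4.
 */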
#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event the number of CPUs reported would be
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
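
/*
 * Example: with 6 CPUs reported by sysconf(), nr_cpus_mask is set to 7
 * (next power of two minus one) by alloc_per_cpu_items_count() below, so
 * "cpu & nr_cpus_mask" maps any value returned by sched_getcpu() onto
 * one of the 8 allocated per-cpu counters.
 */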
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
				unsigned long count);

static long nr_cpus_mask = -1;
static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	struct ht_items_count *count;

	switch (nr_cpus_mask) {
	case -2:
		return NULL;
	case -1:
	{
		long maxcpus;

		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
		if (maxcpus <= 0) {
			nr_cpus_mask = -2;
			return NULL;
		}
		/*
		 * round up number of CPUs to next power of two, so we
		 * can use & for modulo.
		 */
		maxcpus = 1UL << get_count_order_ulong(maxcpus);
		nr_cpus_mask = maxcpus - 1;
	}
		/* Fall-through */
	default:
		return calloc(nr_cpus_mask + 1, sizeof(*count));
	}
}
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	poison_free(count);
}
static
int ht_get_cpu(void)
{
	int cpu;

	assert(nr_cpus_mask >= 0);
	cpu = sched_getcpu();
	if (unlikely(cpu < 0))
		return cpu;
	else
		return cpu & nr_cpus_mask;
}
static
void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("add percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
					   1UL << COUNT_COMMIT_ORDER);
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < t->size)
				return;
			dbg_printf("add set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, t,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
static
void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("remove percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
					   -(1UL << COUNT_COMMIT_ORDER));
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= t->size)
				return;
			dbg_printf("remove set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, t,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	return NULL;
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
{
}

static
void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
{
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
static
void check_resize(struct cds_lfht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
static
void cds_lfht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	poison_free(t);
}

static
void cds_lfht_free_level(struct rcu_head *head)
{
	struct rcu_level *l =
		caa_container_of(head, struct rcu_level, head);
	poison_free(l);
}
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_dummy(dummy));
	assert(!is_removed(dummy));
	assert(!is_dummy(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		/*
		 * We should never be called with dummy (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(dummy != node);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
				struct cds_lfht_node *node, int unique, int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, index, order;

	assert(!is_dummy(node));
	assert(!is_removed(node));
	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			/* TODO: check if removed */
			if (unlikely(!clear_flag(iter)))
				goto insert;
			/* TODO: check if removed */
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy_node = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy_node, node);
	return node;
}
static
int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
		struct cds_lfht_node *node, int dummy_removal)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	assert(!is_dummy(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (unlikely(is_removed(next)))
			goto end;
		if (dummy_removal)
			assert(is_dummy(next));
		else
			assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	assert(t->size > 0);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 */
static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(1, sizeof(struct rcu_level)
				+ (len * sizeof(struct _cds_lfht_node)));
		ht->cds_lfht_rcu_read_lock();
		for (j = 0; j < len; j++) {
			struct cds_lfht_node *new_node =
				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];

			dbg_printf("init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _cds_lfht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		ht->cds_lfht_rcu_read_unlock();
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 */
static
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("fini table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	assert(first_order > 0);
	assert(t->size == (1UL << (end_order - 1)));
	for (i = end_order - 1; i >= first_order; i--) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);
		/*
		 * Update table size. Need to shrink this table prior to
		 * removal so gc lookups use non-logically-removed dummy
		 * nodes.
		 */
		t->size = 1UL << (i - 1);
		/* Unlink */
		ht->cds_lfht_rcu_read_lock();
		for (j = 0; j < len; j++) {
			struct cds_lfht_node *fini_node =
				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];

			dbg_printf("fini entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			fini_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _cds_lfht_remove(ht, t, fini_node, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		ht->cds_lfht_rcu_read_unlock();
		ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
		dbg_printf("fini new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}
struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
			cds_lfht_compare_fct compare_fct,
			unsigned long hash_seed,
			unsigned long init_size,
			int flags,
			void (*cds_lfht_call_rcu)(struct rcu_head *head,
					void (*func)(struct rcu_head *head)),
			void (*cds_lfht_synchronize_rcu)(void),
			void (*cds_lfht_rcu_read_lock)(void),
			void (*cds_lfht_rcu_read_unlock)(void))
{
	struct cds_lfht *ht;
	unsigned long order;

	/* init_size must be power of two */
	if (init_size && (init_size & (init_size - 1)))
		return NULL;
	ht = calloc(1, sizeof(struct cds_lfht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
	ht->flags = flags;
	ht->in_progress_resize = 0;
	ht->percpu_count = alloc_per_cpu_items_count();
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (order * sizeof(struct rcu_level *)));
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
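
/*
 * Creation sketch (illustration only; "test_hash" and "test_compare" are
 * hypothetical caller-provided functions, the exact typedefs live in
 * urcu/rculfhash.h; the RCU flavour functions are those of the chosen
 * urcu flavour). The compare function must return 0 on match, as assumed
 * by the lookup and add_unique paths in this file:
 *
 *	static unsigned long test_hash(void *key, size_t length,
 *				       unsigned long seed)
 *	{
 *		const unsigned char *p = key;
 *		unsigned long hash = seed;
 *		size_t i;
 *
 *		for (i = 0; i < length; i++)
 *			hash = hash * 31 + p[i];
 *		return hash;
 *	}
 *
 *	static unsigned long test_compare(void *key1, size_t key1_len,
 *					  void *key2, size_t key2_len)
 *	{
 *		if (key1_len != key2_len)
 *			return 1;
 *		return memcmp(key1, key2, key1_len);
 *	}
 *
 *	ht = cds_lfht_new(test_hash, test_compare, 0x42UL, 0,
 *			  CDS_LFHT_AUTO_RESIZE,
 *			  call_rcu, synchronize_rcu,
 *			  rcu_read_lock, rcu_read_unlock);
 */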
struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
	node = (struct cds_lfht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct cds_lfht_node *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = rcu_dereference(node->p.next);
	node = clear_flag(next);

	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
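
/*
 * Usage sketch for duplicate keys (illustration only; "process()" is a
 * hypothetical caller-side helper): cds_lfht_add() allows duplicates, so
 * all nodes sharing a key can be walked with cds_lfht_lookup() followed
 * by cds_lfht_next(), within one RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	node = cds_lfht_lookup(ht, key, key_len);
 *	while (node) {
 *		process(node);
 *		node = cds_lfht_next(ht, node);
 *	}
 *	rcu_read_unlock();
 */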
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _cds_lfht_add(ht, t, node, 0, 0);
	ht_count_add(ht, t);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;
	struct cds_lfht_node *ret;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	ret = _cds_lfht_add(ht, t, node, 1, 0);
	if (ret == node)
		ht_count_add(ht, t);
	return ret;
}
int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;
	int ret;

	t = rcu_dereference(ht->t);
	ret = _cds_lfht_remove(ht, t, node, 0);
	if (!ret)
		ht_count_remove(ht, t);
	return ret;
}
static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct rcu_table *t;
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(t->tbl[order]->nodes[i].next));
		}
		poison_free(t->tbl[order]);
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free_per_cpu_items_count(ht->percpu_count);
	poison_free(ht);
	return ret;
}
void cds_lfht_count_nodes(struct cds_lfht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;
	struct rcu_table *new_t;

	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
	       old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct rcu_level *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct rcu_level *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}
/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;
	struct rcu_table *new_t;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
	       old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct rcu_level *)));
	assert(new_size < old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       new_order * sizeof(struct rcu_level *));
	new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
	assert(new_t->size == new_size);
	new_t->resize_target = new_t->size;
	new_t->resize_initiated = 0;

	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);

	/*
	 * We need to wait for all add operations to reach Q.S. (and
	 * thus use the new table for lookups) before we can start
	 * releasing the old dummy nodes. Otherwise their lookup will
	 * return a logically removed node as insert position.
	 */
	ht->cds_lfht_synchronize_rcu();

	/* Unlink and remove all now-unused dummy node pointers. */
	fini_table(ht, old_t, new_order, old_order - new_order);
	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}
/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size < new_size)
		_do_cds_lfht_grow(ht, old_t, old_size, new_size);
	else if (old_size > new_size)
		_do_cds_lfht_shrink(ht, old_t, old_size, new_size);
	else
		CMM_STORE_SHARED(old_t->resize_initiated, 0);
}
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}
static
void resize_target_update_count(struct rcu_table *t,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	uatomic_set(&t->resize_target, count);
}
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	struct rcu_table *t = rcu_dereference(ht->t);

	resize_target_update_count(t, new_size);
	CMM_STORE_SHARED(t->resize_initiated, 1);
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
}
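
/*
 * Usage sketch (illustration only): an application may request an
 * explicit table size, e.g.:
 *
 *	cds_lfht_resize(ht, 4096);
 *
 * The target is clamped to at least MIN_TABLE_SIZE by
 * resize_target_update_count() above, and the resize itself runs under
 * ht->resize_mutex.
 */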
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
				unsigned long count)
{
	struct rcu_resize_work *work;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	resize_target_update_count(t, count);
	if (!CMM_LOAD_SHARED(t->resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}

#endif