/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim by a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures no key
 *   duplicate exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Per-CPU split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread whose removal
 *   operation successfully set the "removed" flag (with a cmpxchg) into
 *   a node's next pointer is considered to have succeeded its removal
 *   (and thus owns the node to reclaim). Because we garbage-collect
 *   starting from an invariant node (the start-of-bucket dummy node) up
 *   to the "removed" node (or find a reverse-hash that is higher), we
 *   are sure that a successful traversal of the chain leads to a chain
 *   that is present in the linked-list (the start node is never
 *   removed) and that it does not contain the "removed" node anymore,
 *   even if concurrent delete/add operations are changing the structure
 *   of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - call_rcu is used to garbage-collect the old order table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2 because
 * we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 1  001        100
 * 2  010        010
 * 3  011        110
 * 4  100        001
 * 5  101        101
 * 6  110        011
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |        6  110        011
 *                          ->          7  111        111
 */
#define _GNU_SOURCE	/* for sched_getcpu() */
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>
#include <sched.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif
/*
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals, and automatically keep track of when a resize is
 * required.
 * We use the bucket length as an indicator of the need to expand for
 * small tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3
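/*
 * Worked numbers (derived from the constants above): each per-CPU
 * counter commits batches of 1UL << COUNT_COMMIT_ORDER == 1024 items
 * to the global ht->count, and an add operation observing a bucket
 * chain of length >= CHAIN_LEN_RESIZE_THRESHOLD == 3 triggers a lazy
 * expand for small tables (see check_resize below).
 */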
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_SIZE			1

#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER			32
#else
#define MAX_TABLE_ORDER			64
#endif

/*
 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

#ifndef min
#define min(a, b)	((a) < (b) ? (a) : (b))
#endif

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif
/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal. The gc flag also needs to be updated atomically with the
 * pointer. It indicates that node garbage collection must be performed.
 * "removed" and "gc" flags are separate for the benefit of the node
 * replacement operation.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define GC_FLAG			(1UL << 1)
#define DUMMY_FLAG		(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL
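/*
 * Flag-packing sketch (illustrative only, using the helpers defined
 * later in this file): nodes are assumed aligned on at least 8 bytes,
 * so the three low bits of a next pointer are free to carry state:
 *
 *	struct cds_lfht_node *next;
 *
 *	next = flag_removed(next);	// sets bit 0
 *	assert(is_removed(next));
 *	next = clear_flag(next);	// recovers the plain address
 *
 * Any traversal must clear_flag() a pointer before dereferencing it.
 */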
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct rcu_level {
	struct rcu_head head;
	struct _cds_lfht_node nodes[0];
};
struct rcu_table {
	unsigned long size;	/* always a power of 2, shared (RCU) */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_level *tbl[MAX_TABLE_ORDER];
};
struct cds_lfht {
	struct rcu_table t;
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long hash_seed;
	int flags;
	/*
	 * We need to put the work threads offline (QSBR) when taking this
	 * mutex, because we use synchronize_rcu within this mutex critical
	 * section, which waits on read-side critical sections, and could
	 * therefore cause grace-period deadlock if we hold off RCU G.P.
	 * completion.
	 */
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
	void (*cds_lfht_synchronize_rcu)(void);
	void (*cds_lfht_rcu_read_lock)(void);
	void (*cds_lfht_rcu_read_unlock)(void);
	void (*cds_lfht_rcu_thread_offline)(void);
	void (*cds_lfht_rcu_thread_online)(void);
	void (*cds_lfht_rcu_register_thread)(void);
	void (*cds_lfht_rcu_unregister_thread)(void);
	pthread_attr_t *resize_attr;	/* Resize threads attributes */
	unsigned long count;		/* global approximate item count */
	struct ht_items_count *percpu_count;	/* per-cpu item count */
};
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};
struct partition_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		    unsigned long start, unsigned long len);
};
enum add_mode {
	ADD_DEFAULT = 0,
	ADD_UNIQUE = 1,
	ADD_REPLACE = 2,
};

static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
				unsigned long size,
				struct cds_lfht_node *node,
				enum add_mode mode, int dummy);

static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node,
		int dummy_removal, int do_gc);
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
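/*
 * Sanity-check examples (sketch): bit_reverse_u8(0x01) == 0x80 and
 * bit_reverse_u8(0x0F) == 0xF0. The wider variants below reverse each
 * byte through the table and swap byte positions, so e.g.
 * bit_reverse_u32(0x00000001) == 0x80000000.
 */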
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif
#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif
#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

static
unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
static
int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

static
int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif
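/*
 * Examples (sketch, not exercised by this file): fls_u32(0) == 0,
 * fls_u32(1) == 1, fls_u32(0x80000000U) == 32. get_count_order_ulong()
 * returns the ceiling of log2: get_count_order_ulong(4) == 2, while
 * get_count_order_ulong(5) == 3. It is used below to turn a table size
 * or bucket index into a power-of-two order.
 */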
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event the number of CPUs reported would be
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);

static long nr_cpus_mask = -1;
static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	struct ht_items_count *count;

	switch (nr_cpus_mask) {
	case -2:
		return NULL;
	case -1:
	{
		long maxcpus;

		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
		if (maxcpus <= 0) {
			nr_cpus_mask = -2;
			return NULL;
		}
		/*
		 * round up number of CPUs to next power of two, so we
		 * can use & for modulo.
		 */
		maxcpus = 1UL << get_count_order_ulong(maxcpus);
		nr_cpus_mask = maxcpus - 1;
	}
		/* Fall-through */
	default:
		return calloc(nr_cpus_mask + 1, sizeof(*count));
	}
}
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	poison_free(count);
}

static
int ht_get_cpu(void)
{
	int cpu;

	assert(nr_cpus_mask >= 0);
	cpu = sched_getcpu();
	if (unlikely(cpu < 0))
		return cpu;
	else
		return cpu & nr_cpus_mask;
}
static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("add percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
					   1UL << COUNT_COMMIT_ORDER);
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
				return;
			dbg_printf("add set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
	unsigned long percpu_count;
	int cpu;

	if (unlikely(!ht->percpu_count))
		return;
	cpu = ht_get_cpu();
	if (unlikely(cpu < 0))
		return;
	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1);
	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
		unsigned long count;

		dbg_printf("del percpu %lu\n", percpu_count);
		count = uatomic_add_return(&ht->count,
					   -(1UL << COUNT_COMMIT_ORDER));
		/* If power of 2 */
		if (!(count & (count - 1))) {
			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
				return;
			dbg_printf("del set global %lu\n", count);
			cds_lfht_resize_lazy_count(ht, size,
				count >> (CHAIN_LEN_TARGET - 1));
		}
	}
}
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	return NULL;
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, size,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_gc(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & GC_FLAG;
}

static
struct cds_lfht_node *flag_gc(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | GC_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
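/*
 * Usage note (sketch): _uatomic_max() only ever raises *ptr, so
 * concurrent resize requests can only grow t.resize_target. E.g. a
 * _uatomic_max(&target, 8) racing with _uatomic_max(&target, 16)
 * always leaves target == 16, whichever order they complete in.
 */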
static
void cds_lfht_free_level(struct rcu_head *head)
{
	struct rcu_level *l =
		caa_container_of(head, struct rcu_level, head);
	poison_free(l);
}
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_dummy(dummy));
	assert(!is_gc(dummy));
	assert(!is_removed(dummy));
	assert(!is_dummy(node));
	assert(!is_gc(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		/*
		 * We should never be called with dummy (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(dummy != node);
		for (;;) {
			if (unlikely(is_end(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_gc(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_gc(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		if (is_removed(iter))
			new_next = flag_removed(new_next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
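/*
 * Illustration (sketch): for a bucket chain D -> A -> B -> C where B's
 * next pointer carries the removed and gc flags, the gc walk starts at
 * dummy D, stops when it loads B's flagged next pointer, and
 * cmpxchg()es A's next from B to C, yielding D -> A -> C. B itself
 * remains safe to read for concurrent RCU readers until a grace period
 * has elapsed.
 */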
static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
				unsigned long size,
				struct cds_lfht_node *node,
				enum add_mode mode, int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node, *return_node, *replace_pinned = NULL;
	struct _cds_lfht_node *lookup;
	unsigned long hash, index, order;

	assert(!is_dummy(node));
	assert(!is_gc(node));
	assert(!is_removed(node));
	if (!size) {
		assert(dummy);
		node->p.next = flag_dummy(get_end());
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(is_end(iter)))
				goto insert;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_gc(next)))
				goto gc_node;
			if (unlikely(replace_pinned)) {
				/*
				 * We're in the retry of a node
				 * replacement. Only get exact iter
				 * pointer match. We own it, so it
				 * _needs_ to be there at some point.
				 */
				if (clear_flag(iter) == replace_pinned)
					goto replace;
			}
			/*
			 * Next is removed but not gc'd. We need to
			 * busy-loop, because a concurrent replacement
			 * is keeping it temporarily pinned there but we
			 * cannot attach to it. The easiest solution is
			 * to retry from the start of the bucket.
			 */
			if (unlikely(is_removed(next)))
				goto retry;
			if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len)) {
				if (mode == ADD_UNIQUE)
					return clear_flag(iter);
				else /* mode == ADD_REPLACE */
					goto replace;
			}
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(!is_gc(iter_prev));
		assert(!is_gc(iter));
		assert(iter_prev != node);
		assert(!replace_pinned);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			if (mode == ADD_REPLACE)
				return_node = NULL;
			else	/* ADD_DEFAULT and ADD_UNIQUE */
				return_node = node;
			goto gc_end;
		}

	replace:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(!is_gc(iter_prev));
		assert(!is_gc(iter));
		assert(iter_prev != node);
		assert(!dummy);
		node->p.next = clear_flag(next);
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		/*
		 * Try to delete to-be-replaced node. Don't gc yet. Not
		 * performing gc here is important, because this lets
		 * concurrent lookups see the old node until we
		 * atomically swap the new node into its place.
		 *
		 * This algorithm is _not_ strictly lock-free between
		 * _cds_lfht_del and the uatomic_cmpxchg of the
		 * replacement operation, so a replacement should _not_
		 * crash here (which means: don't do replacements if you
		 * need strict lock-free guarantees).
		 */
		if (!replace_pinned) {
			if (_cds_lfht_del(ht, size, clear_flag(iter), 0, 0))
				continue;	/* concurrently removed. retry. */
		}
		/*
		 * After _cds_lfht_del succeeds, we have pinned the
		 * to-be-removed node in place by setting its removed
		 * flag, but not its gc flag. If we fail to cmpxchg our
		 * new node with this node, we need to retry everything
		 * from the initial lookup, and only stop when we reach
		 * the node we pinned into place.
		 */
		return_node = uatomic_cmpxchg(&iter_prev->p.next,
					      iter, new_node);
		if (return_node != iter) {
			/*
			 * If cmpxchg fails, we need to do path
			 * compression, but end it by placing our own
			 * node in place of the to-be-replaced node.
			 */
			replace_pinned = clear_flag(iter);
			continue;	/* retry */
		} else {
			/*
			 * cmpxchg succeeded. gc unnecessary, because we
			 * unlinked the return_node ourself with the
			 * cmpxchg.
			 */
			return_node = clear_flag(return_node);
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		assert(!is_gc(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	retry:
		continue;	/* retry from the start of the bucket */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy_node = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy_node, node);
end:
	return return_node;
}
static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node,
		int dummy_removal, int do_gc)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	assert(!is_dummy(node));
	assert(!is_gc(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->p.next);
	do {
		struct cds_lfht_node *new_next;

		next = old;
		if (unlikely(is_removed(next)))
			goto end;
		if (dummy_removal)
			assert(is_dummy(next));
		else
			assert(!is_dummy(next));
		new_next = flag_removed(next);
		if (do_gc)
			new_next = flag_gc(new_next);
		old = uatomic_cmpxchg(&node->p.next, next, new_next);
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	if (!do_gc)
		goto end;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	assert(size > 0);
	index = hash & (size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else {
		return -ENOENT;
	}
}
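/*
 * Removal sequence sketch (illustrative): a public cds_lfht_del() call
 * first publishes the removed and gc flags into node->p.next with a
 * cmpxchg (logical deletion), then _cds_lfht_gc_bucket() unlinks the
 * node from its bucket. Only the thread whose cmpxchg set the flags
 * gets 0 back and owns the reclaim (e.g. via call_rcu); all other
 * racing removers get -ENOENT.
 */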
static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->cds_lfht_rcu_register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->cds_lfht_rcu_unregister_thread();
	return NULL;
}
static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int ret;
	unsigned long thread, nr_threads;
	pthread_t *thread_id;

	/*
	 * Note: nr_cpus_mask + 1 is always power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	nr_threads = min(nr_cpus_mask + 1,
			 len >> MIN_PARTITION_PER_THREAD_ORDER);
	partition_len = len >> get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	thread_id = calloc(nr_threads, sizeof(*thread_id));
	assert(work && thread_id);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&thread_id[thread], ht->resize_attr,
				     partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(thread_id[thread], NULL);
		assert(!ret);
	}
	free(work);
	free(thread_id);
}
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expand lagging behind too
 * many concurrent insertion threads by using the scheduler's ability to
 * schedule dummy node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j;

	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *new_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("init populate: i %lu j %lu hash %lu\n",
			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
		new_node->p.reverse_hash =
			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
		(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
				     new_node, ADD_DEFAULT, 1);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	ht->cds_lfht_rcu_read_unlock();
}
static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}
static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	for (i = first_order; i < end_order; i++) {
		unsigned long len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
			break;

		ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
				+ (len * sizeof(struct _cds_lfht_node)));
		assert(ht->t.tbl[i]);

		/*
		 * Set all dummy nodes reverse hash values for a level and
		 * link all dummy nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));

		dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a hole level of dummy nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j;

	ht->cds_lfht_rcu_read_lock();
	for (j = start; j < start + len; j++) {
		struct cds_lfht_node *fini_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("remove entry: i %lu j %lu hash %lu\n",
			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
		fini_node->p.reverse_hash =
			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
		(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
				     fini_node, 1, 1);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	ht->cds_lfht_rcu_read_unlock();
}
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->cds_lfht_rcu_thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->cds_lfht_rcu_thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}
static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("fini table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	assert(first_order > 0);
	for (i = end_order - 1; i >= first_order; i--) {
		unsigned long len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old dummy nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->cds_lfht_synchronize_rcu();

		/*
		 * Set "removed" flag in dummy nodes about to be removed.
		 * Unlink all now-logically-removed dummy node pointers.
		 * Concurrent add/remove operations are helping us do
		 * the gc.
		 */
		remove_table(ht, i, len);

		ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);

		dbg_printf("fini new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
			cds_lfht_compare_fct compare_fct,
			unsigned long hash_seed,
			unsigned long init_size,
			int flags,
			void (*cds_lfht_call_rcu)(struct rcu_head *head,
					void (*func)(struct rcu_head *head)),
			void (*cds_lfht_synchronize_rcu)(void),
			void (*cds_lfht_rcu_read_lock)(void),
			void (*cds_lfht_rcu_read_unlock)(void),
			void (*cds_lfht_rcu_thread_offline)(void),
			void (*cds_lfht_rcu_thread_online)(void),
			void (*cds_lfht_rcu_register_thread)(void),
			void (*cds_lfht_rcu_unregister_thread)(void),
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* init_size must be power of two */
	if (init_size && (init_size & (init_size - 1)))
		return NULL;
	ht = calloc(1, sizeof(struct cds_lfht));
	assert(ht);
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
	ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
	ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
	ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
	ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
	ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
	ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
	ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
	ht->resize_attr = attr;
	ht->percpu_count = alloc_per_cpu_items_count();
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
	ht->flags = flags;
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	ht->t.resize_target = 1UL << (order - 1);
	init_table(ht, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	return ht;
}
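/*
 * Construction sketch (hypothetical caller; test_hash and test_compare
 * are application-provided functions, and the RCU primitives come from
 * the urcu flavor linked in -- normally the cds_lfht_new() wrapper in
 * urcu/rculfhash.h fills these in):
 *
 *	struct cds_lfht *ht;
 *
 *	ht = _cds_lfht_new(test_hash, test_compare, 0x42UL,
 *			0, CDS_LFHT_AUTO_RESIZE,
 *			call_rcu, synchronize_rcu, rcu_read_lock,
 *			rcu_read_unlock, rcu_thread_offline,
 *			rcu_thread_online, rcu_register_thread,
 *			rcu_unregister_thread, NULL);
 */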
struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_node *node, *next, *dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, index, order, size;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	size = rcu_dereference(ht->t.size);
	index = hash & (size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
	dummy_node = (struct cds_lfht_node *) lookup;
	/* We can always skip the dummy node initially */
	node = rcu_dereference(dummy_node->p.next);
	node = clear_flag(node);
	for (;;) {
		if (unlikely(is_end(node))) {
			node = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		/*
		 * We consider return nodes marked removed but not gc as
		 * hits for lookup vs replacement consistency.
		 */
		if (likely(!is_gc(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
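/*
 * Lookup usage sketch (hypothetical caller, assuming a struct mynode
 * embedding a struct cds_lfht_node member named "node"):
 *
 *	struct cds_lfht_node *ht_node;
 *	struct mynode *found;
 *
 *	rcu_read_lock();
 *	ht_node = cds_lfht_lookup(ht, key, key_len);
 *	if (ht_node) {
 *		found = caa_container_of(ht_node, struct mynode, node);
 *		// use found: stays valid until rcu_read_unlock()
 *	}
 *	rcu_read_unlock();
 */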
struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct cds_lfht_node *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = rcu_dereference(node->p.next);
	node = clear_flag(next);

	for (;;) {
		if (unlikely(is_end(node))) {
			node = NULL;
			break;
		}
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		/*
		 * We consider return nodes marked removed but not gc as
		 * hits for lookup vs replacement consistency.
		 */
		if (likely(!is_gc(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long hash, size;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	(void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0);
	ht_count_add(ht, size);
}
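/*
 * Add usage sketch (hypothetical caller): the key must be attached
 * with cds_lfht_node_init() first, and the add must run inside an RCU
 * read-side critical section:
 *
 *	struct mynode *n = malloc(sizeof(*n));
 *
 *	cds_lfht_node_init(&n->node, n->key, sizeof(n->key));
 *	rcu_read_lock();
 *	cds_lfht_add(ht, &n->node);
 *	rcu_read_unlock();
 */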
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_node *ret;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0);
	if (ret == node)
		ht_count_add(ht, size);
	return ret;
}
struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	unsigned long hash, size;
	struct cds_lfht_node *ret;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	size = rcu_dereference(ht->t.size);
	ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0);
	if (ret == NULL)
		ht_count_add(ht, size);
	return ret;
}
int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	unsigned long size;
	int ret;

	size = rcu_dereference(ht->t.size);
	ret = _cds_lfht_del(ht, size, node, 0, 1);
	if (!ret)
		ht_count_del(ht, size);
	return ret;
}
static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i, size;

	/* Check that the table is empty */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
		assert(!is_gc(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->t.size;
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
		}
		poison_free(ht->t.tbl[order]);
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free_per_cpu_items_count(ht->percpu_count);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}
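/*
 * Teardown sketch (hypothetical caller): remove every user node first
 * and wait for their reclamation, then:
 *
 *	ret = cds_lfht_destroy(ht, NULL);
 *	assert(!ret);	// -EPERM: the table still holds user nodes
 */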
void cds_lfht_count_nodes(struct cds_lfht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	/* Count non-dummy nodes in the table */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next) || is_gc(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order, new_order - old_order);
}
/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, MIN_TABLE_SIZE);
	old_order = get_count_order_ulong(old_size) + 1;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all dummy nodes to remove. */
	fini_table(ht, new_order, old_order - new_order);
}
/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		ht->t.resize_initiated = 1;
		old_size = ht->t.size;
		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->t.resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}
static
unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
				   int growth_order)
{
	return _uatomic_max(&ht->t.resize_target,
			    size << growth_order);
}

static
void resize_target_update_count(struct cds_lfht *ht,
				unsigned long count)
{
	count = max(count, MIN_TABLE_SIZE);
	uatomic_set(&ht->t.resize_target, count);
}
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(ht, size, growth);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	struct rcu_resize_work *work;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	resize_target_update_count(ht, count);
	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}

#endif