/*
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <poll.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
//#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif
#define CHAIN_LEN_TARGET		4
#define CHAIN_LEN_RESIZE_THRESHOLD	8

#define max(a, b)	((a) > (b) ? (a) : (b))
/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not require to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct _rcu_ht_node *tbl[0];
};
struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};
struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
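
/*
 * Example (editor's illustration, not part of the original library):
 * expected values of the byte-wise bit reversal above.  Compiled but
 * never called.
 */
static __attribute__((unused))
void bit_reverse_u8_example(void)
{
	assert(bit_reverse_u8(0x01) == 0x80);
	assert(bit_reverse_u8(0x12) == 0x48);	/* 00010010 -> 01001000 */
	assert(bit_reverse_u8(0xF0) == 0x0F);
}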
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif
#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif
#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;
	if (!(x & 0xFFFFFFFF00000000ULL)) { x <<= 32; r -= 32; }
	if (!(x & 0xFFFF000000000000ULL)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF00000000000000ULL)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xF000000000000000ULL)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xC000000000000000ULL)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x8000000000000000ULL)) { x <<= 1;  r -= 1;  }
	return r;
}
#endif
#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF000000U)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xF0000000U)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xC0000000U)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000U)) { x <<= 1;  r -= 1;  }
	return r;
}
#endif
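
/*
 * Example (editor's illustration, not part of the original library):
 * fls() returns 1-based bit positions, and 0 for a zero input.
 */
static __attribute__((unused))
void fls_example(void)
{
	assert(fls_u32(0) == 0);
	assert(fls_u32(1) == 1);
	assert(fls_u32(8) == 4);
	assert(fls_u32(0x80000000U) == 32);
	assert(fls_u64(1ULL << 40) == 41);
}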
static
unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
static
int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
static
int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
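
/*
 * Example (editor's illustration, not part of the original library):
 * get_count_order_*() returns ceil(log2(x)), i.e. the order of the
 * smallest power of two that is >= x.
 */
static __attribute__((unused))
void get_count_order_example(void)
{
	assert(get_count_order_u32(1) == 0);
	assert(get_count_order_u32(8) == 3);	/* exact power of two */
	assert(get_count_order_u32(9) == 4);	/* rounded up */
	assert(get_count_order_ulong(6) == 3);
}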
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len > 100)
		dbg_printf("rculfhash: WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
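
/*
 * Example (editor's illustration, not part of the original library):
 * with CHAIN_LEN_TARGET == 4 and CHAIN_LEN_RESIZE_THRESHOLD == 8, a
 * chain of 8 nodes triggers a resize request of growth order
 * get_count_order_u32(8 - 3) == 3, i.e. 8 times the current size.
 */
static __attribute__((unused))
void chain_growth_example(void)
{
	uint32_t chain_len = CHAIN_LEN_RESIZE_THRESHOLD;

	assert(get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)) == 3);
}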
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}
static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}
static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}
static
int is_dummy(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}
static
struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
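
/*
 * Example (editor's illustration, not part of the original library):
 * the flag helpers above only assume that node pointers are at least
 * 4-byte aligned, leaving the two low-order bits free for flags.
 */
static __attribute__((unused))
void flag_example(void)
{
	static struct rcu_ht_node n;
	struct rcu_ht_node *p;

	p = flag_dummy(&n);
	assert(is_dummy(p) && !is_removed(p) && clear_flag(p) == &n);
	p = flag_removed(&n);
	assert(is_removed(p) && !is_dummy(p) && clear_flag(p) == &n);
}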
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
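
/*
 * Example (editor's illustration, not part of the original library):
 * _uatomic_max() only ever moves the target upwards, which makes
 * concurrent resize requests commute.
 */
static __attribute__((unused))
void uatomic_max_example(void)
{
	unsigned long target = 16;

	(void) _uatomic_max(&target, 64);
	assert(target == 64);
	(void) _uatomic_max(&target, 32);	/* smaller request: no change */
	assert(target == 64);
}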
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
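
/*
 * Illustration (editor's note, not from the original source): given a
 * bucket chain  dummy -> A -> B -> C  where B has been logically
 * deleted (REMOVED_FLAG set in B's own next pointer), calling
 * _ht_gc_bucket(dummy, C) walks from the dummy, stops when it reads
 * B's flagged next pointer, and cmpxchg's A->next from B to C,
 * physically unlinking B.  The outer loop then rescans until no
 * logically deleted node remains at or before C's key.
 */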
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _rcu_ht_node *lookup;
	unsigned long hash, index, order;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
		iter_prev = (struct rcu_ht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy_node = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy_node, node);
	return node;
}
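
/*
 * Worked example (editor's illustration, not part of the original
 * library) of the order/index decomposition used above: with
 * t->size == 8 and hash == 5, the bucket index is 5, its order is
 * get_count_order_ulong(6) == 3, and its dummy node lives in the
 * order-3 dummy array at slot 5 & 3 == 1.
 */
static __attribute__((unused))
void bucket_index_example(void)
{
	unsigned long size = 8, hash = 5, index, order;

	index = hash & (size - 1);
	order = get_count_order_ulong(index + 1);
	assert(index == 5 && order == 3);
	assert((index & ((1UL << (order - 1)) - 1)) == 1);
}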
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	struct _rcu_ht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy = (struct rcu_ht_node *) lookup;
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("rculfhash: init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("rculfhash: init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
		for (j = 0; j < len; j++) {
			dbg_printf("rculfhash: init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			struct rcu_ht_node *new_node =
				(struct rcu_ht_node *) &t->tbl[i][j];
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _ht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("rculfhash: init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;
	unsigned long order;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(max(init_size, 1)) + 1;
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (order * sizeof(struct _rcu_ht_node *)));
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, order);
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dbg_printf("rculfhash: lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & ((1UL << (order - 1)) - 1));
	node = (struct rcu_ht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	unsigned long hash;
	struct rcu_table *t;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0, 0);
}
struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	unsigned long hash;
	struct rcu_table *t;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1, 0);
}
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	struct _rcu_ht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("rculfhash: delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(t->tbl[order][i].reverse_hash));
			assert(is_dummy(t->tbl[order][i].next));
		}
		free(t->tbl[order]);
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}
void ht_count_nodes(struct rcu_ht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	struct _rcu_ht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0][0];
	node = (struct rcu_ht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("rculfhash: number of dummy nodes: %lu\n", nr_dummy);
}
static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}
/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size, old_order, new_order;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	old_order = get_count_order_ulong(old_size) + 1;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size == new_size)
		return;
	new_order = get_count_order_ulong(new_size) + 1;
	printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
	       old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct _rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct _rcu_ht_node *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}
void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}