/*
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <poll.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#define max(a, b)	((a) > (b) ? (a) : (b))

#define REMOVED_FLAG	(1UL << 0)
#define DUMMY_FLAG	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)
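
/*
 * The two least-significant bits of each next pointer are used as flags:
 * REMOVED_FLAG marks a logically deleted node, DUMMY_FLAG marks a bucket
 * (dummy) node.  This works because nodes are at least word-aligned, so
 * those bits are always zero in a real address; clear_flag() below
 * recovers the actual pointer.
 */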

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize;
	void (*ht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
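
/*
 * Nodes are kept in a single linked list ordered by bit-reversed hash
 * value.  With this ordering (the "split-ordered list" technique of
 * Shalev and Shavit), doubling the number of buckets never reorders the
 * list: each new bucket's dummy node simply splits an existing chain,
 * so the table can grow without rehashing existing nodes.
 */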

/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * Source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */

static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

static
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
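
/*
 * Called on the insertion path: when a hash chain grows past
 * CHAIN_LEN_RESIZE_THRESHOLD, request a lazy resize.  The growth order
 * is derived from how far the chain length overshoots CHAIN_LEN_TARGET,
 * so longer chains trigger proportionally larger expansions.
 */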
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
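
/*
 * Atomically update *ptr to max(*ptr, v) with a compare-and-swap retry
 * loop, and return the resulting maximum.  Used to raise the resize
 * target monotonically from concurrent contexts.
 */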
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
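
/*
 * Insert a node in bucket order.  The walk finds the insert position by
 * reverse hash, helping to unlink any logically removed node it
 * encounters, then publishes the new node with a compare-and-swap on
 * the predecessor's next pointer, retrying from the bucket head on
 * failure.
 */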
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	unsigned long hash;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy_node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy_node, node);
	return node;
}
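
/*
 * Removal is done in two steps: first flag the node's next pointer as
 * REMOVED (the logical deletion, which exactly one remover can win),
 * then walk the bucket with _ht_gc_bucket() to unlink it physically.
 */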
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}
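
/*
 * Populate buckets [first, first + len) with dummy nodes.  Buckets are
 * created in index order, so each new dummy node is simply linked into
 * the split-ordered list at its bit-reversed position.
 */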
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0, 1);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
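
/*
 * Example usage (a minimal sketch, not part of the library; my_hash_fct,
 * my_compare_fct, the seed and the initial size are hypothetical, and
 * node->key/key_len are assumed to be set up by the caller):
 *
 *	struct rcu_ht *ht;
 *	struct rcu_ht_node *found;
 *
 *	ht = ht_new(my_hash_fct, my_compare_fct, 0x42UL, 8, call_rcu);
 *	rcu_read_lock();
 *	ht_add(ht, node);
 *	found = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 */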

struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0, 0);
}

struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1, 0);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(is_dummy(t->tbl[i]->p.next));
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes, and logically removed nodes, in the table */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		node = clear_flag(next);
	} while (node);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
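
/*
 * Raise the resize target to size << growth_order.  Using _uatomic_max
 * lets concurrent callers combine their growth requests: only the
 * largest requested target survives.
 */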
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
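
/*
 * Queue a resize to run from a call_rcu worker thread.  The
 * in_progress_resize counter (paired with the barrier above and the
 * busy-wait in ht_destroy()) keeps the table alive until all queued
 * resizes have run.
 */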
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}