/*
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#define max(a, b)	((a) > (b) ? (a) : (b))
struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};
struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};
struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8)  << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
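
/*
 * Example (added for clarity): BitReverseTable256 maps a byte to its
 * bit-for-bit mirror image, e.g. bit_reverse_u8(0x01) == 0x80 and
 * bit_reverse_u8(0x03) == 0xc0.  bit_reverse_ulong() simply picks the
 * 32-bit or 64-bit variant depending on CAA_BITS_PER_LONG.
 */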
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};
static
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}
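
/*
 * Example (added for clarity): log2_u32(13) == 3 (13 == 0b1101), and
 * log2_u32(1U << 17) == 16 + LogTable256[2] == 17.  The result is the
 * floor of log2; log2_u32(0) yields -1 by way of LogTable256[0].
 */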
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}
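
/*
 * Example (added for clarity): with CHAIN_LEN_TARGET == 1, walking a
 * chain of 6 nodes requests a growth order of log2_u32(6 - 1 - 1) == 2,
 * i.e. a resize target of 4x the current number of buckets.
 */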
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}
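
/*
 * Note (added for clarity): the low-order bit of a node's "next" pointer
 * is used as the logical removal flag, which assumes nodes are at least
 * 2-byte aligned so that bit is otherwise always zero.  flag_removed()
 * sets the bit, is_removed() tests it, and clear_flag() strips it before
 * the pointer is dereferenced.
 */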
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
static
void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next;

	if (!t->size)
		return;
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 * iter iterates until it finds the next non-removed
		 * node.
		 */
		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter_prev_next = next = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			iter = next;
			if (unlikely(!clear_flag(iter)))
				break;
			next = rcu_dereference(clear_flag(iter)->next);
			if (unlikely(is_removed(next)))
				continue;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				break;
			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter_prev_next = next;
		}
		assert(node != iter);
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->next = iter;
		if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
				    node) != iter_prev_next)
			continue;	/* retry on concurrent update */
		else
			break;		/* success */
	}
}
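
/*
 * Example (added for clarity): with t->size == 8, a node whose hash is 27
 * is inserted in the chain starting at bucket 27 & 7 == 3, at the position
 * given by its bit-reversed hash, so the whole table remains one single
 * linked list ordered by reverse hash.
 */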
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next, *old;
	unsigned long chain_len;
	int flagged = 0;

retry:
	chain_len = 0;
	/*
	 * iter_prev points to the non-removed node prior to the remove
	 * location.
	 * node is the node to remove.
	 */
	iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	/* We can always skip the dummy node initially */
	iter_prev_next = next = rcu_dereference(iter_prev->next);
	assert(iter_prev->reverse_hash <= node->reverse_hash);
	for (;;) {
		iter = next;
		if (unlikely(!clear_flag(iter)))
			goto end;	/* reached end of chain: not found */
		next = rcu_dereference(clear_flag(iter)->next);
		if (clear_flag(iter) == node)
			break;		/* found the node to remove */
		if (unlikely(is_removed(next)))
			continue;
		if (clear_flag(iter)->reverse_hash > node->reverse_hash)
			goto end;	/* went past the node: not found */
		iter_prev = clear_flag(iter);
		iter_prev_next = next;
	}
	if (!flagged) {
		if (is_removed(next))
			goto end;	/* already removed by someone else */
		/* set deletion flag */
		if ((old = uatomic_cmpxchg(&iter->next, next,
					   flag_removed(next))) != next) {
			if (old == flag_removed(next))
				goto end;	/* lost the flagging race */
			else
				goto retry;	/* concurrent update: retry */
		}
		flagged = 1;
	}
	/*
	 * Remove the element from the list.
	 * - Retry if there has been a concurrent add before us.
	 * - Retry if the prev node has been deleted (its next removed
	 *   flag would be set).
	 * - There cannot be a concurrent delete for our position, because
	 *   we won the deletion flag cmpxchg.
	 * - If there is a concurrent add or remove after us while our
	 *   removed flag is set, it will skip us and link directly after
	 *   the prior non-removed node before us. In this case, the
	 *   retry will not find the node in the list anymore.
	 */
	if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
			    clear_flag(next)) != iter_prev_next)
		goto retry;
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	return flagged ? 0 : -ENOENT;
}
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		_ht_add(ht, t, t->tbl[i]);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}
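
/*
 * Example (added for clarity): in a 4-bucket table, the dummy nodes sort
 * by reverse hash as bit_reverse_ulong(0) == 0, then bit_reverse_ulong(2),
 * bit_reverse_ulong(1) and bit_reverse_ulong(3), so growing the table only
 * inserts new dummy nodes between existing ones; already-linked nodes
 * never move.
 */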
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
			if (unlikely(is_removed(rcu_dereference(node->next))))
				node = NULL;
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	return node;
}
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	_ht_add(ht, t, node);
}
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}
static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}
/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
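
/*
 * Example (added for clarity): growing from 4 to 16 buckets allocates a
 * new table, copies the 4 existing bucket pointers, links dummy nodes
 * 4..15 into the shared list, then publishes the new table with
 * rcu_assign_pointer().  The old table is freed through ht_call_rcu()
 * once a grace period has elapsed, so concurrent lookups never see a
 * dangling table pointer.
 */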
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}
void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}
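
/*
 * Minimal usage sketch (added for illustration; not part of the original
 * file).  "my_hash" and "my_compare" are hypothetical caller-provided
 * callbacks matching ht_hash_fct and ht_compare_fct, and the call_rcu
 * pointer comes from the urcu flavor the caller linked against:
 *
 *	struct rcu_ht *ht;
 *	struct rcu_ht_node *node;
 *
 *	ht = ht_new(my_hash, my_compare, 0x42UL, 8, call_rcu);
 *
 *	node->key = key;
 *	node->key_len = key_len;
 *	rcu_read_lock();
 *	ht_add(ht, node);
 *	rcu_read_unlock();
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 *
 * Removed nodes must only be freed after a grace period (e.g. via
 * call_rcu), and ht_destroy() may only run once all nodes have been
 * removed and no readers remain.
 */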