/*
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _LGPL_SOURCE
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdint.h>
#include <pthread.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#define BUCKET_SIZE_RESIZE_THRESHOLD	5

#define max(a, b)	((a) > (b) ? (a) : (b))
struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	struct rcu_head head;	/* deferred free through ht_call_rcu() */
	struct rcu_ht_node *tbl[0];
};
struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	void *hashseed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned long target_size;
	void (*ht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
};
struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};
static
void ht_resize_lazy(struct rcu_ht *ht, int growth);
static
void check_resize(struct rcu_ht *ht, unsigned long chain_len)
{
	if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, chain_len / BUCKET_SIZE_RESIZE_THRESHOLD);
}
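/*
 * For illustration: with the threshold at 5, a chain of length 5..9
 * requests a growth order of 1 (double the buckets), a chain of 10..14
 * requests order 2 (quadruple), and so on. Chains shorter than the
 * threshold never trigger a resize.
 */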
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
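/*
 * The R2/R4/R6 macros expand to all 256 byte values in bit-reversed
 * order. For example, BitReverseTable256[0x01] == 0x80 and
 * BitReverseTable256[0x0F] == 0xF0.
 */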
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
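/*
 * Example: bit_reverse_u32(0x00000001) == 0x80000000 and
 * bit_reverse_u32(0x80000000) == 0x00000001. Reversing each byte
 * through the table and swapping the byte order is what extends the
 * 8-bit table to a full 32-bit (and below, 64-bit) reverse.
 */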
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}
static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}
static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}
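/*
 * The removed flag lives in the low bit of the next pointer, which
 * assumes struct rcu_ht_node allocations are at least 2-byte aligned.
 * malloc()/calloc() guarantee much stronger alignment, so bit 0 of a
 * node pointer is always free for tagging.
 */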
static
void _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return;	/* *ptr already holds at least v */
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
}
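/*
 * Usage sketch: resize_target_update() below relies on _uatomic_max()
 * so that concurrent resize requests can only grow ht->target_size;
 * a smaller concurrent request never overwrites a larger one.
 */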
static
int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev = NULL, *iter = NULL;

	for (;;) {
		unsigned long chain_len = 0;

		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * Chains are ordered by increasing reverse_hash: walk past
		 * every node that belongs before the new node. iter keeps
		 * the raw (possibly flagged) pointer value, so the cmpxchg
		 * below fails if iter_prev is concurrently removed.
		 */
		for (;;) {
			iter = rcu_dereference(iter_prev->next);
			if (!clear_flag(iter))
				break;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				break;
			iter_prev = clear_flag(iter);
			check_resize(ht, ++chain_len);
		}
		/* add in iter_prev->next */
		if (is_removed(iter))
			continue;	/* iter_prev is removed: retry from head */
		node->next = iter;
		if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
			continue;	/* lost a race: retry from head */
		return 0;
	}
}
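/*
 * Ordering example (sketch): with 4 buckets, a node hashing to 5
 * (binary 101) gets reverse_hash 1010...0 and lands in bucket 1,
 * after dummy 1 (reverse_hash 100...0) and before dummy 3
 * (reverse_hash 110...0). Keeping chains sorted by reverse_hash is
 * what lets a resize split a bucket without moving any node.
 */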
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *old;
	unsigned long chain_len;
	int found, flagged = 0;

retry:
	chain_len = 0;
	found = 0;
	iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	assert(iter_prev->reverse_hash <= node->reverse_hash);
	for (;;) {
		iter = clear_flag(rcu_dereference(iter_prev->next));
		if (!iter)
			break;
		if (iter->reverse_hash > node->reverse_hash)
			break;	/* went past the node's position: not found */
		if (iter == node) {
			found = 1;
			break;
		}
		iter_prev = iter;
		chain_len++;
	}
	if (!found)
		return -ENOENT;
	next = rcu_dereference(iter->next);
	if (!flagged) {
		if (is_removed(next))
			return -ENOENT;	/* already removed by someone else */
		/* set deletion flag */
		if ((old = uatomic_cmpxchg(&iter->next, next,
				flag_removed(next))) != next) {
			if (old == flag_removed(next))
				return -ENOENT;	/* lost the flagging race */
			goto retry;	/* iter->next changed concurrently */
		}
		flagged = 1;
	}
	/*
	 * Remove the element from the list. Retry if there has been a
	 * concurrent add (there cannot be a concurrent delete, because
	 * we won the deletion flag cmpxchg).
	 */
	if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
		goto retry;
	return 0;
}
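/*
 * Removal protocol sketch: flagging iter->next first makes the node
 * logically dead (lookups check is_removed()) and forces any
 * concurrent insert after it to fail its cmpxchg and retry. The
 * second cmpxchg then physically unlinks the node, retrying only if
 * a concurrent add changed iter_prev->next in the meantime.
 */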
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		/* Dummy 0 heads the list: there is nothing to link it after. */
		if (i != 0)
			_ht_add(ht, t, t->tbl[i]);
	}
	t->size = end;
}
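/*
 * Dummy ordering example: in a table of size 4, the dummy nodes appear
 * in the chain ordered by reverse_hash, i.e. 0 (000...), 2 (010...),
 * 1 (100...), 3 (110...). Growing to size 8 only inserts dummies 4..7
 * between the existing ones; nothing is rehashed.
 */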
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      void *hashseed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->hashseed = hashseed;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	init_table(ht, ht->t, 0, max(init_size, 1));
	ht->target_size = ht->t->size;
	ht->ht_call_rcu = ht_call_rcu;
	return ht;
}
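/*
 * Creation sketch (not part of the library): my_hash_fct is a
 * hypothetical caller-provided ht_hash_fct; call_rcu is the
 * urcu-provided callback registration, whose signature matches the
 * ht_call_rcu parameter:
 *
 *	struct rcu_ht *ht;
 *
 *	ht = ht_new(my_hash_fct, NULL, 16, call_rcu);
 */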
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(ht->hashseed, key);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	cmm_smp_read_barrier_depends();	/* read t before size and table */
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (!node)
			break;
		if (node->reverse_hash > reverse_hash) {
			/* Went past the node's position: not found. */
			node = NULL;
			break;
		}
		if (node->key == key) {
			/* A flagged next pointer marks a removed node. */
			if (is_removed(rcu_dereference(node->next)))
				node = NULL;
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	return node;
}
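/*
 * Lookup usage sketch: the returned node is only guaranteed to exist
 * while inside the enclosing RCU read-side critical section
 * (use_node() is a hypothetical caller function):
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key);
 *	if (node)
 *		use_node(node);
 *	rcu_read_unlock();
 */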
int ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(ht->hashseed, node->key);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	cmm_smp_read_barrier_depends();	/* read t before size and table */
	return _ht_add(ht, t, node);
}
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	cmm_smp_read_barrier_depends();	/* read t before size and table */
	return _ht_remove(ht, t, node);
}
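/*
 * Like ht_lookup(), ht_add() and ht_remove() dereference the
 * RCU-protected table pointer, so callers are expected to invoke them
 * from within an RCU read-side critical section as well.
 */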
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = clear_flag(node->next);
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}
static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}
/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(ht->target_size);
	if (old_size == new_size)
		return;
	/*
	 * Concurrent readers may still traverse old_t, so it must stay
	 * valid until the RCU callback below frees it: allocate a new
	 * table and copy the old bucket pointers rather than realloc()ing
	 * the old one, which would free it immediately.
	 */
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	memcpy(new_t, old_t, sizeof(struct rcu_table)
	       + (old_size * sizeof(struct rcu_ht_node *)));
	if (new_size > old_size)
		init_table(ht, new_t, old_size, new_size - old_size);
	cmm_smp_wmb();	/* update content before updating reallocated size */
	CMM_STORE_SHARED(new_t->size, new_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
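/*
 * Publication ordering sketch: new buckets are fully initialized
 * before the size store (cmm_smp_wmb), and the new table is published
 * with rcu_assign_pointer(). A reader that still sees the old table
 * uses the old size; a reader that sees the new table sees, at worst,
 * the old (smaller) size, which is always safe to index.
 */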
static
void resize_target_update(struct rcu_ht *ht, int growth_order)
{
	_uatomic_max(&ht->target_size,
		     CMM_LOAD_SHARED(ht->target_size) << growth_order);
}
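/*
 * Example: with target_size at 8, a growth_order of 1 raises the
 * target to 16 and a growth_order of 2 raises it to 32; concurrent
 * calls settle on the maximum thanks to _uatomic_max().
 */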
void ht_resize(struct rcu_ht *ht, int growth)
{
	resize_target_update(ht, growth);
	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
}
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}
static
void ht_resize_lazy(struct rcu_ht *ht, int growth)
{
	struct rcu_resize_work *work;

	work = malloc(sizeof(*work));
	if (!work)
		return;	/* opportunistic: grow later, when memory allows */
	work->ht = ht;
	resize_target_update(ht, growth);
	ht->ht_call_rcu(&work->head, do_resize_cb);
}
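/*
 * Lazy resize flow (sketch): check_resize() -> ht_resize_lazy() posts
 * the work through ht_call_rcu(); after a grace period, do_resize_cb()
 * runs, takes the resize mutex, and performs the actual table growth
 * in _do_ht_resize(). Updaters therefore never block on a resize.
 */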