/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 | ||
#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <poll.h>
#include <pthread.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
44395fb7 | 40 | |
/*
 * NOTE(review): DEBUG is force-enabled here ("Test"), so dbg_printf()
 * compiles to printf() in all builds — confirm this is intentional
 * before shipping.
 */
#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

/*
 * Bucket-chain length tuning: a resize is requested once a chain reaches
 * CHAIN_LEN_RESIZE_THRESHOLD nodes; CHAIN_LEN_TARGET is the desired
 * average chain length used to compute the growth order.
 */
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif
2ed95849 | 55 | |
395270b6 | 56 | struct rcu_table { |
abc490a1 | 57 | unsigned long size; /* always a power of 2 */ |
f9830efd | 58 | unsigned long resize_target; |
11519af6 | 59 | int resize_initiated; |
abc490a1 | 60 | struct rcu_head head; |
395270b6 MD |
61 | struct rcu_ht_node *tbl[0]; |
62 | }; | |
63 | ||
/*
 * Top-level hash table handle.  The bucket table itself is reached
 * through the RCU-published 't' pointer.
 */
struct rcu_ht {
	struct rcu_table *t;		/* shared; swapped atomically on resize */
	ht_hash_fct hash_fct;		/* key hash function */
	ht_compare_fct compare_fct;	/* key comparison; 0 means "equal" */
	unsigned long hash_seed;	/* seed forwarded to hash_fct */
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize;	/* count of queued/in-flight lazy resizes */
	/* call_rcu flavor injected by the user at ht_new() time */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};
74 | ||
/*
 * Deferred resize request, handed to ht_call_rcu so the actual resize
 * runs from the call_rcu worker context (see do_resize_cb()).
 */
struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};
2ed95849 | 79 | |
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

/* BitReverseTable256[i] is the bit-reversal of the byte value i. */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6
2ed95849 | 98 | |
/* Reverse the bit order of a single byte via the 256-entry lookup table. */
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
ab7d5fc6 | 104 | |
/*
 * Reverse the bit order of a 32-bit word: bit-reverse each byte through
 * the lookup table and mirror the byte positions.
 */
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	uint32_t result = 0;
	unsigned int shift;

	for (shift = 0; shift < 32; shift += 8)
		result |= (uint32_t) bit_reverse_u8(v >> shift) << (24 - shift);
	return result;
}
113 | ||
/*
 * Reverse the bit order of a 64-bit word: bit-reverse each byte through
 * the lookup table and mirror the byte positions.
 */
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	uint64_t result = 0;
	unsigned int shift;

	for (shift = 0; shift < 64; shift += 8)
		result |= (uint64_t) bit_reverse_u8(v >> shift) << (56 - shift);
	return result;
}
126 | ||
/*
 * Reverse the bit order of an unsigned long, dispatching on the word
 * size at compile time.
 */
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
136 | ||
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 *
 * Note: entry [0] is -1 — log2(0) is undefined, callers must avoid it.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};
/*
 * Floor of log2 of a 32-bit value, by byte-wise table lookup.
 *
 * NOTE(review): log2_u32(0) yields (uint32_t) -1 because
 * LogTable256[0] == -1; callers must not pass 0.  Also looks like this
 * was meant to be static — confirm no other translation unit uses it.
 */
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}
163 | ||
164 | static | |
165 | void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth); | |
166 | ||
167 | static | |
168 | void check_resize(struct rcu_ht *ht, struct rcu_table *t, | |
169 | uint32_t chain_len) | |
170 | { | |
3390d470 MD |
171 | if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) |
172 | ht_resize_lazy(ht, t, | |
65e8e729 | 173 | log2_u32(chain_len - CHAIN_LEN_TARGET - 1)); |
f9830efd MD |
174 | } |
175 | ||
/* Strip the low-order "logically removed" flag bit from a next pointer. */
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) ((unsigned long) node & ~1UL);
}
181 | ||
/* Non-zero iff the "logically removed" flag bit is set in the pointer. */
static
int is_removed(struct rcu_ht_node *node)
{
	return (int) ((unsigned long) node & 1UL);
}
187 | ||
/* Set the low-order "logically removed" flag bit on a next pointer. */
static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) ((unsigned long) node | 1UL);
}
193 | ||
/*
 * Atomically raise *ptr to at least v (monotonic max).  Returns the
 * resulting value: the pre-existing value when it was already >= v,
 * v otherwise.
 */
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long cur, prev;

	cur = uatomic_read(ptr);
	for (;;) {
		if (cur >= v)
			return cur;
		prev = cur;
		/* cmpxchg returns the value observed before the attempt */
		cur = uatomic_cmpxchg(ptr, prev, v);
		if (cur == prev)
			return v;
	}
}
207 | ||
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 *
 * Walks the chain from 'dummy' until reaching a node whose reverse hash
 * exceeds node->reverse_hash, unlinking (with cmpxchg) any node whose
 * next pointer carries the removed flag.  Restarts from the bucket head
 * after every successful (or raced) unlink attempt.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				return;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				break;	/* found a logically removed node to unlink */
			iter_prev = iter;
			iter = next;
		}
		assert(!is_removed(iter));
		/* Unlink 'iter'; failure means a concurrent GC/add won — retry. */
		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
	}
}
236 | ||
/*
 * Insert 'node' into the split-ordered list at the position given by its
 * reverse hash.  With 'unique' set, an existing node with an equal key
 * (per ht->compare_fct) is returned instead of inserting; otherwise the
 * inserted node itself is returned.  Logically removed nodes encountered
 * during the walk are garbage collected along the way.
 */
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique)
{
	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
	unsigned long hash;

	if (!t->size) {
		assert(node->dummy);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				goto insert;	/* end of chain */
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				goto insert;	/* insert position found */
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				goto gc_node;	/* unlink, then retry the walk */
			if (unique
			    && !clear_flag(iter)->dummy
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);	/* key already present */
			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->next = iter;
		/* Publish the node; on race, restart the whole walk. */
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
	return node;
}
464a1ec9 | 301 | |
/*
 * Two-phase removal: first atomically set the removed flag on the node's
 * next pointer (logical deletion), then garbage collect the bucket so the
 * node is physically unlinked.  Returns 0 if this caller performed the
 * logical deletion, -ENOENT if the node was already removed by another
 * thread.
 */
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->next);
	do {
		next = old;
		if (is_removed(next))
			goto end;	/* already flagged by a concurrent remover */
		assert(!node->dummy);
		old = uatomic_cmpxchg(&node->next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->next)));
		return 0;
	} else
		return -ENOENT;
}
2ed95849 | 342 | |
/*
 * Populate buckets [first, first + len) with freshly allocated dummy
 * nodes and link them into the split-ordered list.  t->size is bumped at
 * each power of two so that _ht_add() only ever hashes into the portion
 * of the table that is already initialized.
 *
 * NOTE(review): the calloc() result is used unchecked — OOM here would
 * dereference NULL.  Confirm whether abort-on-OOM is the intended policy.
 */
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}
362 | ||
abc490a1 | 363 | struct rcu_ht *ht_new(ht_hash_fct hash_fct, |
732ad076 MD |
364 | ht_compare_fct compare_fct, |
365 | unsigned long hash_seed, | |
abc490a1 MD |
366 | unsigned long init_size, |
367 | void (*ht_call_rcu)(struct rcu_head *head, | |
368 | void (*func)(struct rcu_head *head))) | |
369 | { | |
370 | struct rcu_ht *ht; | |
371 | ||
372 | ht = calloc(1, sizeof(struct rcu_ht)); | |
373 | ht->hash_fct = hash_fct; | |
732ad076 MD |
374 | ht->compare_fct = compare_fct; |
375 | ht->hash_seed = hash_seed; | |
f000907d | 376 | ht->ht_call_rcu = ht_call_rcu; |
848d4088 | 377 | ht->in_progress_resize = 0; |
abc490a1 MD |
378 | /* this mutex should not nest in read-side C.S. */ |
379 | pthread_mutex_init(&ht->resize_mutex, NULL); | |
380 | ht->t = calloc(1, sizeof(struct rcu_table) | |
381 | + (max(init_size, 1) * sizeof(struct rcu_ht_node *))); | |
382 | ht->t->size = 0; | |
f000907d | 383 | pthread_mutex_lock(&ht->resize_mutex); |
abc490a1 | 384 | init_table(ht, ht->t, 0, max(init_size, 1)); |
f000907d | 385 | pthread_mutex_unlock(&ht->resize_mutex); |
abc490a1 MD |
386 | return ht; |
387 | } | |
388 | ||
/*
 * Look up 'key' and return the matching node, or NULL if absent.
 * Skips dummy nodes and logically removed nodes.
 *
 * NOTE(review): uses rcu_dereference throughout, so the caller
 * presumably must be within an RCU read-side critical section — confirm
 * against the public header's contract.
 */
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;	/* end of chain: not found */
		/* Chain is ordered by reverse hash: we can stop early. */
		if (unlikely(node->reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		/* Match: live node, not a dummy, keys compare equal. */
		if (likely(!is_removed(rcu_dereference(node->next)))
		    && !node->dummy
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
				break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	assert(!node || !node->dummy);
	return node;
}
e0ba718a | 417 | |
/*
 * Add 'node' to the table (duplicates of the same key are allowed).
 * Computes and caches the node's reverse hash before insertion.
 */
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0);
}
429 | ||
/*
 * Add 'node' only if no node with an equal key exists.  Returns 'node'
 * when the insertion happened, or the pre-existing node with the same
 * key otherwise (see _ht_add with unique=1).
 */
struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1);
}
441 | ||
/*
 * Remove 'node' from the table.  Returns 0 on success, -ENOENT if the
 * node had already been removed by a concurrent caller (see _ht_remove).
 */
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}
ab7d5fc6 | 449 | |
/*
 * Free all dummy nodes.  Fails with -EPERM if any non-dummy node is
 * still present (the table must be emptied by the caller first).
 * Called from ht_destroy() only, after all readers/updaters are done.
 */
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
		assert(!is_removed(node));
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}
473 | ||
474 | /* | |
475 | * Should only be called when no more concurrent readers nor writers can | |
476 | * possibly access the table. | |
477 | */ | |
5e28c532 | 478 | int ht_destroy(struct rcu_ht *ht) |
674f7a69 | 479 | { |
5e28c532 MD |
480 | int ret; |
481 | ||
848d4088 MD |
482 | /* Wait for in-flight resize operations to complete */ |
483 | while (uatomic_read(&ht->in_progress_resize)) | |
484 | poll(NULL, 0, 100); /* wait for 100ms */ | |
abc490a1 MD |
485 | ret = ht_delete_dummy(ht); |
486 | if (ret) | |
487 | return ret; | |
395270b6 | 488 | free(ht->t); |
674f7a69 | 489 | free(ht); |
5e28c532 | 490 | return ret; |
674f7a69 MD |
491 | } |
492 | ||
/*
 * Walk the whole split-ordered list (from bucket 0) and report the
 * number of live non-dummy nodes in *count and of logically removed
 * nodes not yet garbage collected in *removed.
 */
void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Bucket 0's dummy node heads the entire ordered chain. */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
			assert(!node->dummy);	/* dummies are never removed */
			(*removed)++;
		} else if (!node->dummy)
			(*count)++;
		node = clear_flag(next);
	} while (node);
}
516 | ||
/* call_rcu callback: reclaim a replaced bucket table after a grace period. */
static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}
524 | ||
/* called with resize mutex held */
/*
 * Grow the table to the current resize_target: allocate a larger bucket
 * array, copy the existing bucket pointers, initialize the new buckets,
 * then publish the new table with rcu_assign_pointer and defer freeing
 * of the old one.
 *
 * NOTE(review): the malloc() result is used unchecked (NULL deref on
 * OOM), and the assert shows shrinking is not supported — confirm both
 * are intended.
 */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;	/* target already reached by a previous resize */
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}
550 | ||
/*
 * Atomically raise the resize target to size << growth_order (targets
 * only ever grow).  Returns the resulting target.
 */
static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}
558 | ||
/*
 * Synchronous resize: raise the target by 'growth' orders of magnitude
 * (powers of two) and, if the table is smaller than the new target,
 * perform the resize immediately under the resize mutex.
 */
void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		/* Stops concurrent lazy resizes from queueing duplicates. */
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}
464a1ec9 | 572 | |
/*
 * call_rcu callback executing a deferred resize queued by
 * ht_resize_lazy().  Decrements in_progress_resize last so ht_destroy()
 * can wait for all queued work to drain.
 */
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
587 | ||
abc490a1 | 588 | static |
f000907d | 589 | void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth) |
ab7d5fc6 | 590 | { |
abc490a1 | 591 | struct rcu_resize_work *work; |
f9830efd | 592 | unsigned long target_size; |
abc490a1 | 593 | |
f9830efd | 594 | target_size = resize_target_update(t, growth); |
11519af6 | 595 | if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) { |
848d4088 MD |
596 | uatomic_inc(&ht->in_progress_resize); |
597 | cmm_smp_mb(); /* increment resize count before calling it */ | |
f9830efd MD |
598 | work = malloc(sizeof(*work)); |
599 | work->ht = ht; | |
600 | ht->ht_call_rcu(&work->head, do_resize_cb); | |
11519af6 | 601 | CMM_STORE_SHARED(t->resize_initiated, 1); |
f9830efd | 602 | } |
ab7d5fc6 | 603 | } |