rculfhash: hold rcu read-side lock in resize
1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * Based on the following articles:
25 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
26 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
27 * - Michael, M. M. High performance dynamic lock-free hash tables
28 * and list-based sets. In Proceedings of the fourteenth annual ACM
29 * symposium on Parallel algorithms and architectures, ACM Press,
30 * (2002), 73-82.
31 *
32 * Some specificities of this Lock-Free Resizable RCU Hash Table
33 * implementation:
34 *
35 * - RCU read-side critical sections allow readers to perform hash
36 * table lookups and use the returned objects safely, by delaying
37 * memory reclaim by a grace period.
38 * - Add and remove operations are lock-free, and do not need to
39 * allocate memory. They need to be executed within an RCU read-side
40 * critical section to ensure the objects they read are valid and to
41 * deal with the cmpxchg ABA problem.
42 * - add and add_unique operations are supported. add_unique checks if
43 * the node key already exists in the hash table. It ensures no
44 * duplicate key exists.
45 * - The resize operation executes concurrently with add/remove/lookup.
46 * - Hash table nodes are contained within a split-ordered list. This
47 * list is ordered by incrementing reversed-bits-hash value.
48 * - An index of dummy nodes is kept. These dummy nodes are the hash
49 * table "buckets", and they are also chained together in the
50 * split-ordered list, which allows recursive expansion.
51 * - The resize operation for small tables only allows expanding the hash table.
52 * It is triggered automatically by detecting long chains in the add
53 * operation.
54 * - The resize operation for larger tables (and available through an
55 * API) allows both expanding and shrinking the hash table.
56 * - Per-CPU split-counters are used to keep track of the number of
57 * nodes within the hash table for automatic resize triggering.
58 * - Resize operation initiated by long chain detection is executed by a
59 * call_rcu thread, which keeps lock-freedom of add and remove.
60 * - Resize operations are protected by a mutex.
61 * - The removal operation is split in two parts: first, a "removed"
62 * flag is set in the next pointer within the node to remove. Then,
63 * a "garbage collection" is performed in the bucket containing the
64 * removed node (from the start of the bucket up to the removed node).
65 * All encountered nodes with "removed" flag set in their next
66 * pointers are removed from the linked-list. If the cmpxchg used for
67 * removal fails (due to concurrent garbage-collection or concurrent
68 * add), we retry from the beginning of the bucket. This ensures that
69 * the node with "removed" flag set is removed from the hash table
70 * (not visible to lookups anymore) before the RCU read-side critical
71 * section held across removal ends. Furthermore, this ensures that
72 * the node with "removed" flag set is removed from the linked-list
73 * before its memory is reclaimed. Only the thread that
74 * successfully set the "removed" flag (with a cmpxchg) in a node's
75 * next pointer is considered to have succeeded its removal (and thus
76 * owns the node to reclaim). Because we garbage-collect starting from
77 * an invariant node (the start-of-bucket dummy node) up to the
78 * "removed" node (or until we find a higher reverse-hash), we are sure
79 * that a successful traversal of the chain leads to a chain that is
80 * present in the linked-list (the start node is never removed) and
81 * that it does not contain the "removed" node anymore, even if
82 * concurrent delete/add operations are changing the structure of the
83 * list concurrently.
84 * - The add operation performs garbage collection of buckets if it
85 * encounters nodes with removed flag set in the bucket where it wants
86 * to add its new node. This ensures lock-freedom of the add operation by
87 * helping the remover unlink nodes from the list rather than waiting
88 * for it to do so.
89 * - An RCU "order table" indexed by log2(hash index) is copied and
90 * expanded by the resize operation. This order table allows finding
91 * the "dummy node" tables.
92 * - There is one dummy node table per hash index order. The size of
93 * each dummy node table is half the number of hashes contained in
94 * this order.
95 * - call_rcu is used to garbage-collect the old order table.
96 * - The per-order dummy node tables contain a compact version of the
97 * hash table nodes. These tables are invariant after they are
98 * populated into the hash table.
99 *
100 * A bit of ascii art explanation:
101 *
102 * The order index is off by one compared to the actual power of 2,
103 * because we use index 0 to deal with the 0 special-case.
104 *
105 * This shows the nodes for a small table ordered by reversed bits:
106 *
107 * bits reverse
108 * 0 000 000
109 * 4 100 001
110 * 2 010 010
111 * 6 110 011
112 * 1 001 100
113 * 5 101 101
114 * 3 011 110
115 * 7 111 111
116 *
117 * This shows the nodes in order of non-reversed bits, linked by
118 * reversed-bit order.
119 *
120 * order bits reverse
121 * 0 0 000 000
122 * |
123 * 1 | 1 001 100 <- <-
124 * | | | |
125 * 2 | | 2 010 010 | |
126 * | | | 3 011 110 | <- |
127 * | | | | | | |
128 * 3 -> | | | 4 100 001 | |
129 * -> | | 5 101 101 |
130 * -> | 6 110 011
131 * -> 7 111 111
132 */
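
/*
 * A concrete instance of the order-table layout described above, for a
 * table of size 8:
 *
 *   tbl[0]: 1 dummy node, covering hash index 0
 *   tbl[1]: 1 dummy node, covering hash index 1
 *   tbl[2]: 2 dummy nodes, covering hash indexes 2-3
 *   tbl[3]: 4 dummy nodes, covering hash indexes 4-7
 *
 * Order i (for i > 0) thus holds the 2^(i-1) dummy nodes whose index
 * has its most-significant bit at position i-1: half the 2^i hash
 * values that fit within order i.
 */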
133
134 #define _LGPL_SOURCE
135 #include <stdlib.h>
136 #include <errno.h>
137 #include <assert.h>
138 #include <stdio.h>
139 #include <stdint.h>
140 #include <string.h>
141
142 #include "config.h"
143 #include <urcu.h>
144 #include <urcu-call-rcu.h>
145 #include <urcu/arch.h>
146 #include <urcu/uatomic.h>
147 #include <urcu/jhash.h>
148 #include <urcu/compiler.h>
149 #include <urcu/rculfhash.h>
150 #include <poll.h>
151 #include <pthread.h>
152
153 #ifdef DEBUG
154 #define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
155 #else
156 #define dbg_printf(fmt, args...)
157 #endif
158
159 /* For testing */
160 #define POISON_FREE
161
162 /*
163 * Per-CPU split-counters lazily update the global counter every 1024
164 * additions/removals. They automatically keep track of when a resize
165 * is required. We use the bucket length as the expansion indicator for
166 * small tables and for machines lacking per-cpu data support.
167 */
168 #define COUNT_COMMIT_ORDER 10
169 #define CHAIN_LEN_TARGET 1
170 #define CHAIN_LEN_RESIZE_THRESHOLD 3
171
172 /*
173 * Define the minimum table size. Protects against hash table resize overload
174 * when too many entries are added quickly before the resize can complete.
175 * This is especially the case if the table could be shrunk to a size of 1.
176 * TODO: we might want to make the add/remove operations help the resize to
177 * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
178 * chain length.
179 */
180 #define MIN_TABLE_SIZE 128
181
182 #ifndef max
183 #define max(a, b) ((a) > (b) ? (a) : (b))
184 #endif
185
186 /*
187 * The removed flag needs to be updated atomically with the pointer.
188 * The dummy flag does not require to be updated atomically with the
189 * pointer, but it is added as a pointer low bit flag to save space.
190 */
191 #define REMOVED_FLAG (1UL << 0)
192 #define DUMMY_FLAG (1UL << 1)
193 #define FLAGS_MASK ((1UL << 2) - 1)
194
195 struct ht_items_count {
196 unsigned long add, remove;
197 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
198
199 struct rcu_level {
200 struct rcu_head head;
201 struct _cds_lfht_node nodes[0];
202 };
203
204 struct rcu_table {
205 unsigned long size; /* always a power of 2 */
206 unsigned long resize_target;
207 int resize_initiated;
208 struct rcu_head head;
209 struct rcu_level *tbl[0];
210 };
211
212 struct cds_lfht {
213 struct rcu_table *t; /* shared */
214 cds_lfht_hash_fct hash_fct;
215 cds_lfht_compare_fct compare_fct;
216 unsigned long hash_seed;
217 int flags;
218 pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
219 unsigned int in_progress_resize, in_progress_destroy;
220 void (*cds_lfht_call_rcu)(struct rcu_head *head,
221 void (*func)(struct rcu_head *head));
222 void (*cds_lfht_synchronize_rcu)(void);
223 void (*cds_lfht_rcu_read_lock)(void);
224 void (*cds_lfht_rcu_read_unlock)(void);
225 unsigned long count; /* global approximate item count */
226 struct ht_items_count *percpu_count; /* per-cpu item count */
227 };
228
229 struct rcu_resize_work {
230 struct rcu_head head;
231 struct cds_lfht *ht;
232 };
233
234 /*
235 * Algorithm to reverse bits in a word by lookup table, extended to
236 * 64-bit words.
237 * Source:
238 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
239 * Originally from Public Domain.
240 */
241
242 static const uint8_t BitReverseTable256[256] =
243 {
244 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
245 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
246 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
247 R6(0), R6(2), R6(1), R6(3)
248 };
249 #undef R2
250 #undef R4
251 #undef R6
252
253 static
254 uint8_t bit_reverse_u8(uint8_t v)
255 {
256 return BitReverseTable256[v];
257 }
258
259 static __attribute__((unused))
260 uint32_t bit_reverse_u32(uint32_t v)
261 {
262 return ((uint32_t) bit_reverse_u8(v) << 24) |
263 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
264 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
265 ((uint32_t) bit_reverse_u8(v >> 24));
266 }
267
268 static __attribute__((unused))
269 uint64_t bit_reverse_u64(uint64_t v)
270 {
271 return ((uint64_t) bit_reverse_u8(v) << 56) |
272 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
273 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
274 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
275 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
276 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
277 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
278 ((uint64_t) bit_reverse_u8(v >> 56));
279 }
280
281 static
282 unsigned long bit_reverse_ulong(unsigned long v)
283 {
284 #if (CAA_BITS_PER_LONG == 32)
285 return bit_reverse_u32(v);
286 #else
287 return bit_reverse_u64(v);
288 #endif
289 }
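
/*
 * Illustration only (a sketch, not used by the algorithm): a node's
 * split-order key is simply its bit-reversed hash. With the small-table
 * example from the header comment, bucket value 1 (binary 001) reverses
 * to binary 100..., ordering it after buckets 0, 4, 2 and 6 in the
 * linked list.
 */
static __attribute__((unused))
unsigned long example_split_order_key(unsigned long hash)
{
	return bit_reverse_ulong(hash);
}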
290
291 /*
292 * fls: returns the position of the most significant bit.
293 * Returns 0 if no bit is set, else returns the position of the most
294 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
295 */
296 #if defined(__i386) || defined(__x86_64)
297 static inline
298 unsigned int fls_u32(uint32_t x)
299 {
300 int r;
301
302 asm("bsrl %1,%0\n\t"
303 "jnz 1f\n\t"
304 "movl $-1,%0\n\t"
305 "1:\n\t"
306 : "=r" (r) : "rm" (x));
307 return r + 1;
308 }
309 #define HAS_FLS_U32
310 #endif
311
312 #if defined(__x86_64)
313 static inline
314 unsigned int fls_u64(uint64_t x)
315 {
316 long r;
317
318 asm("bsrq %1,%0\n\t"
319 "jnz 1f\n\t"
320 "movq $-1,%0\n\t"
321 "1:\n\t"
322 : "=r" (r) : "rm" (x));
323 return r + 1;
324 }
325 #define HAS_FLS_U64
326 #endif
327
328 #ifndef HAS_FLS_U64
329 static __attribute__((unused))
330 unsigned int fls_u64(uint64_t x)
331 {
332 unsigned int r = 64;
333
334 if (!x)
335 return 0;
336
337 if (!(x & 0xFFFFFFFF00000000ULL)) {
338 x <<= 32;
339 r -= 32;
340 }
341 if (!(x & 0xFFFF000000000000ULL)) {
342 x <<= 16;
343 r -= 16;
344 }
345 if (!(x & 0xFF00000000000000ULL)) {
346 x <<= 8;
347 r -= 8;
348 }
349 if (!(x & 0xF000000000000000ULL)) {
350 x <<= 4;
351 r -= 4;
352 }
353 if (!(x & 0xC000000000000000ULL)) {
354 x <<= 2;
355 r -= 2;
356 }
357 if (!(x & 0x8000000000000000ULL)) {
358 x <<= 1;
359 r -= 1;
360 }
361 return r;
362 }
363 #endif
364
365 #ifndef HAS_FLS_U32
366 static __attribute__((unused))
367 unsigned int fls_u32(uint32_t x)
368 {
369 unsigned int r = 32;
370
371 if (!x)
372 return 0;
373 if (!(x & 0xFFFF0000U)) {
374 x <<= 16;
375 r -= 16;
376 }
377 if (!(x & 0xFF000000U)) {
378 x <<= 8;
379 r -= 8;
380 }
381 if (!(x & 0xF0000000U)) {
382 x <<= 4;
383 r -= 4;
384 }
385 if (!(x & 0xC0000000U)) {
386 x <<= 2;
387 r -= 2;
388 }
389 if (!(x & 0x80000000U)) {
390 x <<= 1;
391 r -= 1;
392 }
393 return r;
394 }
395 #endif
396
397 unsigned int fls_ulong(unsigned long x)
398 {
399 #if (CAA_BITS_PER_LONG == 32)
400 return fls_u32(x);
401 #else
402 return fls_u64(x);
403 #endif
404 }
405
406 int get_count_order_u32(uint32_t x)
407 {
408 int order;
409
410 order = fls_u32(x) - 1;
411 if (x & (x - 1))
412 order++;
413 return order;
414 }
415
416 int get_count_order_ulong(unsigned long x)
417 {
418 int order;
419
420 order = fls_ulong(x) - 1;
421 if (x & (x - 1))
422 order++;
423 return order;
424 }
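
/*
 * Sanity examples for the helpers above (illustrative only):
 * get_count_order_*() returns ceil(log2(x)), i.e. the order of the
 * next power of two.
 */
static __attribute__((unused))
void example_count_order(void)
{
	assert(fls_u32(0) == 0);
	assert(fls_u32(0x80000000U) == 32);
	assert(get_count_order_u32(1) == 0);
	assert(get_count_order_u32(8) == 3);	/* exact power of 2 */
	assert(get_count_order_u32(9) == 4);	/* rounded up to 16 */
}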
425
426 #ifdef POISON_FREE
427 #define poison_free(ptr) \
428 do { \
429 memset(ptr, 0x42, sizeof(*(ptr))); \
430 free(ptr); \
431 } while (0)
432 #else
433 #define poison_free(ptr) free(ptr)
434 #endif
435
436 static
437 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
438
439 /*
440 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
441 * available, then we support hash table item accounting.
442 * In the unfortunate event that the number of CPUs reported is
443 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
444 */
445 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
446
447 static
448 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
449 unsigned long count);
450
451 static long nr_cpus_mask = -1;
452
453 static
454 struct ht_items_count *alloc_per_cpu_items_count(void)
455 {
456 struct ht_items_count *count;
457
458 switch (nr_cpus_mask) {
459 case -2:
460 return NULL;
461 case -1:
462 {
463 long maxcpus;
464
465 maxcpus = sysconf(_SC_NPROCESSORS_CONF);
466 if (maxcpus <= 0) {
467 nr_cpus_mask = -2;
468 return NULL;
469 }
470 /*
471 * round up number of CPUs to next power of two, so we
472 * can use & for modulo.
473 */
474 maxcpus = 1UL << get_count_order_ulong(maxcpus);
475 nr_cpus_mask = maxcpus - 1;
476 }
477 /* Fall-through */
478 default:
479 return calloc(nr_cpus_mask + 1, sizeof(*count));
480 }
481 }
482
483 static
484 void free_per_cpu_items_count(struct ht_items_count *count)
485 {
486 poison_free(count);
487 }
488
489 static
490 int ht_get_cpu(void)
491 {
492 int cpu;
493
494 assert(nr_cpus_mask >= 0);
495 cpu = sched_getcpu();
496 if (unlikely(cpu < 0))
497 return cpu;
498 else
499 return cpu & nr_cpus_mask;
500 }
501
502 static
503 void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
504 {
505 unsigned long percpu_count;
506 int cpu;
507
508 if (unlikely(!ht->percpu_count))
509 return;
510 cpu = ht_get_cpu();
511 if (unlikely(cpu < 0))
512 return;
513 percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
514 if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
515 unsigned long count;
516
517 dbg_printf("add percpu %lu\n", percpu_count);
518 count = uatomic_add_return(&ht->count,
519 1UL << COUNT_COMMIT_ORDER);
520 /* If power of 2 */
521 if (!(count & (count - 1))) {
522 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
523 < t->size)
524 return;
525 dbg_printf("add set global %lu\n", count);
526 cds_lfht_resize_lazy_count(ht, t,
527 count >> (CHAIN_LEN_TARGET - 1));
528 }
529 }
530 }
531
532 static
533 void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
534 {
535 unsigned long percpu_count;
536 int cpu;
537
538 if (unlikely(!ht->percpu_count))
539 return;
540 cpu = ht_get_cpu();
541 if (unlikely(cpu < 0))
542 return;
543 percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
544 if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
545 unsigned long count;
546
547 dbg_printf("remove percpu %lu\n", percpu_count);
548 count = uatomic_add_return(&ht->count,
549 -(1UL << COUNT_COMMIT_ORDER));
550 /* If power of 2 */
551 if (!(count & (count - 1))) {
552 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
553 >= t->size)
554 return;
555 dbg_printf("remove set global %lu\n", count);
556 cds_lfht_resize_lazy_count(ht, t,
557 count >> (CHAIN_LEN_TARGET - 1));
558 }
559 }
560 }
561
562 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
563
564 static const long nr_cpus_mask = -1;
565
566 static
567 struct ht_items_count *alloc_per_cpu_items_count(void)
568 {
569 return NULL;
570 }
571
572 static
573 void free_per_cpu_items_count(struct ht_items_count *count)
574 {
575 }
576
577 static
578 void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
579 {
580 }
581
582 static
583 void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
584 {
585 }
586
587 #endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
588
589
590 static
591 void check_resize(struct cds_lfht *ht, struct rcu_table *t,
592 uint32_t chain_len)
593 {
594 unsigned long count;
595
596 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
597 return;
598 count = uatomic_read(&ht->count);
599 /*
600 * Use bucket-local length for small table expand and for
601 * environments lacking per-cpu data support.
602 */
603 if (count >= (1UL << COUNT_COMMIT_ORDER))
604 return;
605 if (chain_len > 100)
606 dbg_printf("WARNING: large chain length: %u.\n",
607 chain_len);
608 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
609 cds_lfht_resize_lazy(ht, t,
610 get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
611 }
612
613 static
614 struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
615 {
616 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
617 }
618
619 static
620 int is_removed(struct cds_lfht_node *node)
621 {
622 return ((unsigned long) node) & REMOVED_FLAG;
623 }
624
625 static
626 struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
627 {
628 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
629 }
630
631 static
632 int is_dummy(struct cds_lfht_node *node)
633 {
634 return ((unsigned long) node) & DUMMY_FLAG;
635 }
636
637 static
638 struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
639 {
640 return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
641 }
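
/*
 * Illustration only: the two low-order bits of the next pointer carry
 * the flags, so any node pointer (at least 4-byte aligned, hence with
 * clear low bits) round-trips through the flag helpers unchanged.
 */
static __attribute__((unused))
void example_flag_roundtrip(struct cds_lfht_node *node)
{
	struct cds_lfht_node *p = flag_dummy(flag_removed(node));

	assert(is_removed(p));
	assert(is_dummy(p));
	assert(clear_flag(p) == node);
}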
642
643 static
644 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
645 {
646 unsigned long old1, old2;
647
648 old1 = uatomic_read(ptr);
649 do {
650 old2 = old1;
651 if (old2 >= v)
652 return old2;
653 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
654 return v;
655 }
656
657 static
658 void cds_lfht_free_table_cb(struct rcu_head *head)
659 {
660 struct rcu_table *t =
661 caa_container_of(head, struct rcu_table, head);
662 poison_free(t);
663 }
664
665 static
666 void cds_lfht_free_level(struct rcu_head *head)
667 {
668 struct rcu_level *l =
669 caa_container_of(head, struct rcu_level, head);
670 poison_free(l);
671 }
672
673 /*
674 * Remove all logically deleted nodes from a bucket up to a certain node key.
675 */
676 static
677 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
678 {
679 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
680 struct cds_lfht_node *iter_trace[64];
681 unsigned long trace_idx = 0;
682
683 memset(iter_trace, 0, sizeof(iter_trace));
684 assert(!is_dummy(dummy));
685 assert(!is_removed(dummy));
686 assert(!is_dummy(node));
687 assert(!is_removed(node));
688 for (;;) {
689 iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
690 iter_prev = dummy;
691 /* We can always skip the dummy node initially */
692 iter = rcu_dereference(iter_prev->p.next);
693 iter_trace[trace_idx++ & (64 - 1)] = iter;
694 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
695 /*
696 * We should never be called with dummy (start of chain)
697 * and logically removed node (end of path compression
698 * marker) being the actual same node. This would be a
699 * bug in the algorithm implementation.
700 */
701 assert(dummy != node);
702 for (;;) {
703 if (unlikely(!clear_flag(iter)))
704 return;
705 if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
706 return;
707 next = rcu_dereference(clear_flag(iter)->p.next);
708 if (unlikely(is_removed(next)))
709 break;
710 iter_prev = clear_flag(iter);
711 iter = next;
712 iter_trace[trace_idx++ & (64 - 1)] = iter;
713 }
714 assert(!is_removed(iter));
715 if (is_dummy(iter))
716 new_next = flag_dummy(clear_flag(next));
717 else
718 new_next = clear_flag(next);
719 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
720 iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
721 }
722 }
723
724 static
725 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
726 struct cds_lfht_node *node, int unique, int dummy)
727 {
728 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
729 *dummy_node;
730 struct _cds_lfht_node *lookup;
731 unsigned long hash, index, order;
732
733 assert(!is_dummy(node));
734 assert(!is_removed(node));
735 if (!t->size) {
736 assert(dummy);
737 node->p.next = flag_dummy(NULL);
738 return node; /* Initial first add (head) */
739 }
740 hash = bit_reverse_ulong(node->p.reverse_hash);
741 for (;;) {
742 uint32_t chain_len = 0;
743
744 /*
745 * iter_prev points to the non-removed node prior to the
746 * insert location.
747 */
748 index = hash & (t->size - 1);
749 order = get_count_order_ulong(index + 1);
750 lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
751 iter_prev = (struct cds_lfht_node *) lookup;
752 /* We can always skip the dummy node initially */
753 iter = rcu_dereference(iter_prev->p.next);
754 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
755 for (;;) {
756 /* TODO: check if removed */
757 if (unlikely(!clear_flag(iter)))
758 goto insert;
759 /* TODO: check if removed */
760 if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
761 goto insert;
762 next = rcu_dereference(clear_flag(iter)->p.next);
763 if (unlikely(is_removed(next)))
764 goto gc_node;
765 if (unique
766 && !is_dummy(next)
767 && !ht->compare_fct(node->key, node->key_len,
768 clear_flag(iter)->key,
769 clear_flag(iter)->key_len))
770 return clear_flag(iter);
771 /* Only account for identical reverse hash once */
772 if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
773 && !is_dummy(next))
774 check_resize(ht, t, ++chain_len);
775 iter_prev = clear_flag(iter);
776 iter = next;
777 }
778 insert:
779 assert(node != clear_flag(iter));
780 assert(!is_removed(iter_prev));
781 assert(!is_removed(iter));
782 assert(iter_prev != node);
783 if (!dummy)
784 node->p.next = clear_flag(iter);
785 else
786 node->p.next = flag_dummy(clear_flag(iter));
787 if (is_dummy(iter))
788 new_node = flag_dummy(node);
789 else
790 new_node = node;
791 if (uatomic_cmpxchg(&iter_prev->p.next, iter,
792 new_node) != iter)
793 continue; /* retry */
794 else
795 goto gc_end;
796 gc_node:
797 assert(!is_removed(iter));
798 if (is_dummy(iter))
799 new_next = flag_dummy(clear_flag(next));
800 else
801 new_next = clear_flag(next);
802 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
803 /* retry */
804 }
805 gc_end:
806 /* Garbage collect logically removed nodes in the bucket */
807 index = hash & (t->size - 1);
808 order = get_count_order_ulong(index + 1);
809 lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
810 dummy_node = (struct cds_lfht_node *) lookup;
811 _cds_lfht_gc_bucket(dummy_node, node);
812 return node;
813 }
814
815 static
816 int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
817 struct cds_lfht_node *node, int dummy_removal)
818 {
819 struct cds_lfht_node *dummy, *next, *old;
820 struct _cds_lfht_node *lookup;
821 int flagged = 0;
822 unsigned long hash, index, order;
823
824 /* logically delete the node */
825 assert(!is_dummy(node));
826 assert(!is_removed(node));
827 old = rcu_dereference(node->p.next);
828 do {
829 next = old;
830 if (unlikely(is_removed(next)))
831 goto end;
832 if (dummy_removal)
833 assert(is_dummy(next));
834 else
835 assert(!is_dummy(next));
836 old = uatomic_cmpxchg(&node->p.next, next,
837 flag_removed(next));
838 } while (old != next);
839
840 /* We performed the (logical) deletion. */
841 flagged = 1;
842
843 /*
844 * Ensure that the node is not visible to readers anymore: look up
845 * the node, and remove it (along with any other logically removed nodes)
846 * if found.
847 */
848 hash = bit_reverse_ulong(node->p.reverse_hash);
849 assert(t->size > 0);
850 index = hash & (t->size - 1);
851 order = get_count_order_ulong(index + 1);
852 lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
853 dummy = (struct cds_lfht_node *) lookup;
854 _cds_lfht_gc_bucket(dummy, node);
855 end:
856 /*
857 * Only the flagging action indicates that we (and no other thread)
858 * removed the node from the hash table.
859 */
860 if (flagged) {
861 assert(is_removed(rcu_dereference(node->p.next)));
862 return 0;
863 } else
864 return -ENOENT;
865 }
866
867 /*
868 * Holding RCU read lock to protect _cds_lfht_add against memory
869 * reclaim that could be performed by other call_rcu worker threads (ABA
870 * problem).
871 */
872 static
873 void init_table(struct cds_lfht *ht, struct rcu_table *t,
874 unsigned long first_order, unsigned long len_order)
875 {
876 unsigned long i, end_order;
877
878 dbg_printf("init table: first_order %lu end_order %lu\n",
879 first_order, first_order + len_order);
880 end_order = first_order + len_order;
881 t->size = !first_order ? 0 : (1UL << (first_order - 1));
882 for (i = first_order; i < end_order; i++) {
883 unsigned long j, len;
884
885 len = !i ? 1 : 1UL << (i - 1);
886 dbg_printf("init order %lu len: %lu\n", i, len);
887 t->tbl[i] = calloc(1, sizeof(struct rcu_level)
888 + (len * sizeof(struct _cds_lfht_node)));
889 ht->cds_lfht_rcu_read_lock();
890 for (j = 0; j < len; j++) {
891 struct cds_lfht_node *new_node =
892 (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
893
894 dbg_printf("init entry: i %lu j %lu hash %lu\n",
895 i, j, !i ? 0 : (1UL << (i - 1)) + j);
896 new_node->p.reverse_hash =
897 bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
898 (void) _cds_lfht_add(ht, t, new_node, 0, 1);
899 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
900 break;
901 }
902 ht->cds_lfht_rcu_read_unlock();
903 /* Update table size */
904 t->size = !i ? 1 : (1UL << i);
905 dbg_printf("init new size: %lu\n", t->size);
906 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
907 break;
908 }
909 t->resize_target = t->size;
910 t->resize_initiated = 0;
911 }
912
913 /*
914 * Holding RCU read lock to protect _cds_lfht_remove against memory
915 * reclaim that could be performed by other call_rcu worker threads (ABA
916 * problem).
917 */
918 static
919 void fini_table(struct cds_lfht *ht, struct rcu_table *t,
920 unsigned long first_order, unsigned long len_order)
921 {
922 long i, end_order;
923
924 dbg_printf("fini table: first_order %lu end_order %lu\n",
925 first_order, first_order + len_order);
926 end_order = first_order + len_order;
927 assert(first_order > 0);
928 assert(t->size == (1UL << (end_order - 1)));
929 for (i = end_order - 1; i >= first_order; i--) {
930 unsigned long j, len;
931
932 len = !i ? 1 : 1UL << (i - 1);
933 dbg_printf("fini order %lu len: %lu\n", i, len);
934 /*
935 * Update table size. Need to shrink this table prior to
936 * removal so gc lookups use non-logically-removed dummy
937 * nodes.
938 */
939 t->size = 1UL << (i - 1);
940 /* Unlink */
941 ht->cds_lfht_rcu_read_lock();
942 for (j = 0; j < len; j++) {
943 struct cds_lfht_node *fini_node =
944 (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
945
946 dbg_printf("fini entry: i %lu j %lu hash %lu\n",
947 i, j, !i ? 0 : (1UL << (i - 1)) + j);
948 fini_node->p.reverse_hash =
949 bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
950 (void) _cds_lfht_remove(ht, t, fini_node, 1);
951 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
952 break;
953 }
954 ht->cds_lfht_rcu_read_unlock();
955 ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
956 dbg_printf("fini new size: %lu\n", t->size);
957 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
958 break;
959 }
960 t->resize_target = t->size;
961 t->resize_initiated = 0;
962 }
963
964 struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
965 cds_lfht_compare_fct compare_fct,
966 unsigned long hash_seed,
967 unsigned long init_size,
968 int flags,
969 void (*cds_lfht_call_rcu)(struct rcu_head *head,
970 void (*func)(struct rcu_head *head)),
971 void (*cds_lfht_synchronize_rcu)(void),
972 void (*cds_lfht_rcu_read_lock)(void),
973 void (*cds_lfht_rcu_read_unlock)(void))
974 {
975 struct cds_lfht *ht;
976 unsigned long order;
977
978 /* init_size must be power of two */
979 if (init_size && (init_size & (init_size - 1)))
980 return NULL;
981 ht = calloc(1, sizeof(struct cds_lfht));
982 ht->hash_fct = hash_fct;
983 ht->compare_fct = compare_fct;
984 ht->hash_seed = hash_seed;
985 ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
986 ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
987 ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
988 ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
989 ht->in_progress_resize = 0;
990 ht->percpu_count = alloc_per_cpu_items_count();
991 /* this mutex should not nest in read-side C.S. */
992 pthread_mutex_init(&ht->resize_mutex, NULL);
993 order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
994 ht->t = calloc(1, sizeof(struct rcu_table)
995 + (order * sizeof(struct rcu_level *)));
996 ht->t->size = 0;
997 ht->flags = flags;
998 pthread_mutex_lock(&ht->resize_mutex);
999 init_table(ht, ht->t, 0, order);
1000 pthread_mutex_unlock(&ht->resize_mutex);
1001 return ht;
1002 }
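
/*
 * Example creation (a sketch, assuming the default urcu flavor and
 * caller-provided test_hash/test_compare callbacks, which are not
 * defined in this file):
 *
 *	ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
 *			0, CDS_LFHT_AUTO_RESIZE,
 *			call_rcu, synchronize_rcu,
 *			rcu_read_lock, rcu_read_unlock);
 *
 * The RCU primitives passed in must all belong to the same urcu flavor
 * as the read-side critical sections of the add/remove/lookup callers.
 */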
1003
1004 struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
1005 {
1006 struct rcu_table *t;
1007 struct cds_lfht_node *node, *next;
1008 struct _cds_lfht_node *lookup;
1009 unsigned long hash, reverse_hash, index, order;
1010
1011 hash = ht->hash_fct(key, key_len, ht->hash_seed);
1012 reverse_hash = bit_reverse_ulong(hash);
1013
1014 t = rcu_dereference(ht->t);
1015 index = hash & (t->size - 1);
1016 order = get_count_order_ulong(index + 1);
1017 lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
1018 dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
1019 hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
1020 node = (struct cds_lfht_node *) lookup;
1021 for (;;) {
1022 if (unlikely(!node))
1023 break;
1024 if (unlikely(node->p.reverse_hash > reverse_hash)) {
1025 node = NULL;
1026 break;
1027 }
1028 next = rcu_dereference(node->p.next);
1029 if (likely(!is_removed(next))
1030 && !is_dummy(next)
1031 && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
1032 break;
1033 }
1034 node = clear_flag(next);
1035 }
1036 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1037 return node;
1038 }
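
/*
 * Lookups must be performed within an RCU read-side critical section.
 * A sketch, assuming a caller-defined struct mynode embedding a
 * struct cds_lfht_node member named "node" (hypothetical names):
 *
 *	rcu_read_lock();
 *	node = cds_lfht_lookup(ht, key, key_len);
 *	if (node)
 *		do_something(caa_container_of(node, struct mynode, node));
 *	rcu_read_unlock();
 */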
1039
1040 struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
1041 struct cds_lfht_node *node)
1042 {
1043 struct cds_lfht_node *next;
1044 unsigned long reverse_hash;
1045 void *key;
1046 size_t key_len;
1047
1048 reverse_hash = node->p.reverse_hash;
1049 key = node->key;
1050 key_len = node->key_len;
1051 next = rcu_dereference(node->p.next);
1052 node = clear_flag(next);
1053
1054 for (;;) {
1055 if (unlikely(!node))
1056 break;
1057 if (unlikely(node->p.reverse_hash > reverse_hash)) {
1058 node = NULL;
1059 break;
1060 }
1061 next = rcu_dereference(node->p.next);
1062 if (likely(!is_removed(next))
1063 && !is_dummy(next)
1064 && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
1065 break;
1066 }
1067 node = clear_flag(next);
1068 }
1069 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1070 return node;
1071 }
1072
1073 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
1074 {
1075 struct rcu_table *t;
1076 unsigned long hash;
1077
1078 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
1079 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
1080
1081 t = rcu_dereference(ht->t);
1082 (void) _cds_lfht_add(ht, t, node, 0, 0);
1083 ht_count_add(ht, t);
1084 }
1085
1086 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
1087 struct cds_lfht_node *node)
1088 {
1089 struct rcu_table *t;
1090 unsigned long hash;
1091 struct cds_lfht_node *ret;
1092
1093 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
1094 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
1095
1096 t = rcu_dereference(ht->t);
1097 ret = _cds_lfht_add(ht, t, node, 1, 0);
1098 if (ret == node)
1099 ht_count_add(ht, t);
1100 return ret;
1101 }
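
/*
 * Duplicate detection with cds_lfht_add_unique() (a sketch, using the
 * hypothetical struct mynode above): on success it returns the node it
 * was passed, otherwise the pre-existing node holding the same key.
 * A rejected node was never published, so it may be freed immediately:
 *
 *	ret = cds_lfht_add_unique(ht, &mynode->node);
 *	if (ret != &mynode->node)
 *		free(mynode);	/* key already present, node unused */
 */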
1102
1103 int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
1104 {
1105 struct rcu_table *t;
1106 int ret;
1107
1108 t = rcu_dereference(ht->t);
1109 ret = _cds_lfht_remove(ht, t, node, 0);
1110 if (!ret)
1111 ht_count_remove(ht, t);
1112 return ret;
1113 }
1114
1115 static
1116 int cds_lfht_delete_dummy(struct cds_lfht *ht)
1117 {
1118 struct rcu_table *t;
1119 struct cds_lfht_node *node;
1120 struct _cds_lfht_node *lookup;
1121 unsigned long order, i;
1122
1123 t = ht->t;
1124 /* Check that the table is empty */
1125 lookup = &t->tbl[0]->nodes[0];
1126 node = (struct cds_lfht_node *) lookup;
1127 do {
1128 node = clear_flag(node)->p.next;
1129 if (!is_dummy(node))
1130 return -EPERM;
1131 assert(!is_removed(node));
1132 } while (clear_flag(node));
1133 /* Internal sanity check: all nodes left should be dummy */
1134 for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
1135 unsigned long len;
1136
1137 len = !order ? 1 : 1UL << (order - 1);
1138 for (i = 0; i < len; i++) {
1139 dbg_printf("delete order %lu i %lu hash %lu\n",
1140 order, i,
1141 bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
1142 assert(is_dummy(t->tbl[order]->nodes[i].next));
1143 }
1144 poison_free(t->tbl[order]);
1145 }
1146 return 0;
1147 }
1148
1149 /*
1150 * Should only be called when no more concurrent readers nor writers can
1151 * possibly access the table.
1152 */
1153 int cds_lfht_destroy(struct cds_lfht *ht)
1154 {
1155 int ret;
1156
1157 /* Wait for in-flight resize operations to complete */
1158 CMM_STORE_SHARED(ht->in_progress_destroy, 1);
1159 while (uatomic_read(&ht->in_progress_resize))
1160 poll(NULL, 0, 100); /* wait for 100ms */
1161 ret = cds_lfht_delete_dummy(ht);
1162 if (ret)
1163 return ret;
1164 poison_free(ht->t);
1165 free_per_cpu_items_count(ht->percpu_count);
1166 poison_free(ht);
1167 return ret;
1168 }
1169
1170 void cds_lfht_count_nodes(struct cds_lfht *ht,
1171 unsigned long *count,
1172 unsigned long *removed)
1173 {
1174 struct rcu_table *t;
1175 struct cds_lfht_node *node, *next;
1176 struct _cds_lfht_node *lookup;
1177 unsigned long nr_dummy = 0;
1178
1179 *count = 0;
1180 *removed = 0;
1181
1182 t = rcu_dereference(ht->t);
1183 /* Count non-dummy nodes in the table */
1184 lookup = &t->tbl[0]->nodes[0];
1185 node = (struct cds_lfht_node *) lookup;
1186 do {
1187 next = rcu_dereference(node->p.next);
1188 if (is_removed(next)) {
1189 assert(!is_dummy(next));
1190 (*removed)++;
1191 } else if (!is_dummy(next))
1192 (*count)++;
1193 else
1194 (nr_dummy)++;
1195 node = clear_flag(next);
1196 } while (node);
1197 dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
1198 }
1199
1200 /* called with resize mutex held */
1201 static
1202 void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t,
1203 unsigned long old_size, unsigned long new_size)
1204 {
1205 unsigned long old_order, new_order;
1206 struct rcu_table *new_t;
1207
1208 old_order = get_count_order_ulong(old_size) + 1;
1209 new_order = get_count_order_ulong(new_size) + 1;
1210 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1211 old_size, old_order, new_size, new_order);
1212 new_t = malloc(sizeof(struct rcu_table)
1213 + (new_order * sizeof(struct rcu_level *)));
1214 assert(new_size > old_size);
1215 memcpy(&new_t->tbl, &old_t->tbl,
1216 old_order * sizeof(struct rcu_level *));
1217 init_table(ht, new_t, old_order, new_order - old_order);
1218 /* Changing table and size atomically wrt lookups */
1219 rcu_assign_pointer(ht->t, new_t);
1220 ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
1221 }
1222
1223 /* called with resize mutex held */
1224 static
1225 void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
1226 unsigned long old_size, unsigned long new_size)
1227 {
1228 unsigned long old_order, new_order;
1229 struct rcu_table *new_t;
1230
1231 new_size = max(new_size, MIN_TABLE_SIZE);
1232 old_order = get_count_order_ulong(old_size) + 1;
1233 new_order = get_count_order_ulong(new_size) + 1;
1234 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1235 old_size, old_order, new_size, new_order);
1236 new_t = malloc(sizeof(struct rcu_table)
1237 + (new_order * sizeof(struct rcu_level *)));
1238 assert(new_size < old_size);
1239 memcpy(&new_t->tbl, &old_t->tbl,
1240 new_order * sizeof(struct rcu_level *));
1241 new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
1242 assert(new_t->size == new_size);
1243 new_t->resize_target = new_t->size;
1244 new_t->resize_initiated = 0;
1245
1246 /* Changing table and size atomically wrt lookups */
1247 rcu_assign_pointer(ht->t, new_t);
1248
1249 /*
1250 * We need to wait for all add operations to reach Q.S. (and
1251 * thus use the new table for lookups) before we can start
1252 * releasing the old dummy nodes. Otherwise their lookup will
1253 * return a logically removed node as insert position.
1254 */
1255 ht->cds_lfht_synchronize_rcu();
1256
1257 /* Unlink and remove all now-unused dummy node pointers. */
1258 fini_table(ht, old_t, new_order, old_order - new_order);
1259 ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
1260 }
1261
1262
1263 /* called with resize mutex held */
1264 static
1265 void _do_cds_lfht_resize(struct cds_lfht *ht)
1266 {
1267 unsigned long new_size, old_size;
1268 struct rcu_table *old_t;
1269
1270 old_t = ht->t;
1271 old_size = old_t->size;
1272 new_size = CMM_LOAD_SHARED(old_t->resize_target);
1273 if (old_size < new_size)
1274 _do_cds_lfht_grow(ht, old_t, old_size, new_size);
1275 else if (old_size > new_size)
1276 _do_cds_lfht_shrink(ht, old_t, old_size, new_size);
1277 else
1278 CMM_STORE_SHARED(old_t->resize_initiated, 0);
1279 }
1280
1281 static
1282 unsigned long resize_target_update(struct rcu_table *t,
1283 int growth_order)
1284 {
1285 return _uatomic_max(&t->resize_target,
1286 t->size << growth_order);
1287 }
1288
1289 static
1290 void resize_target_update_count(struct rcu_table *t,
1291 unsigned long count)
1292 {
1293 count = max(count, MIN_TABLE_SIZE);
1294 uatomic_set(&t->resize_target, count);
1295 }
1296
1297 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
1298 {
1299 struct rcu_table *t = rcu_dereference(ht->t);
1300
1301 resize_target_update_count(t, new_size);
1302 CMM_STORE_SHARED(t->resize_initiated, 1);
1303 pthread_mutex_lock(&ht->resize_mutex);
1304 _do_cds_lfht_resize(ht);
1305 pthread_mutex_unlock(&ht->resize_mutex);
1306 }
1307
1308 static
1309 void do_resize_cb(struct rcu_head *head)
1310 {
1311 struct rcu_resize_work *work =
1312 caa_container_of(head, struct rcu_resize_work, head);
1313 struct cds_lfht *ht = work->ht;
1314
1315 pthread_mutex_lock(&ht->resize_mutex);
1316 _do_cds_lfht_resize(ht);
1317 pthread_mutex_unlock(&ht->resize_mutex);
1318 poison_free(work);
1319 cmm_smp_mb(); /* finish resize before decrement */
1320 uatomic_dec(&ht->in_progress_resize);
1321 }
1322
1323 static
1324 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
1325 {
1326 struct rcu_resize_work *work;
1327 unsigned long target_size;
1328
1329 target_size = resize_target_update(t, growth);
1330 if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
1331 uatomic_inc(&ht->in_progress_resize);
1332 cmm_smp_mb(); /* increment resize count before calling it */
1333 work = malloc(sizeof(*work));
1334 work->ht = ht;
1335 ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
1336 CMM_STORE_SHARED(t->resize_initiated, 1);
1337 }
1338 }
1339
1340 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
1341
1342 static
1343 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
1344 unsigned long count)
1345 {
1346 struct rcu_resize_work *work;
1347
1348 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
1349 return;
1350 resize_target_update_count(t, count);
1351 if (!CMM_LOAD_SHARED(t->resize_initiated)) {
1352 uatomic_inc(&ht->in_progress_resize);
1353 cmm_smp_mb(); /* increment resize count before calling it */
1354 work = malloc(sizeof(*work));
1355 work->ht = ht;
1356 ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
1357 CMM_STORE_SHARED(t->resize_initiated, 1);
1358 }
1359 }
1360
1361 #endif