 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   2002, 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 * - RCU read-side critical sections allow readers to perform hash
 *   table lookups, as well as traversals, and to use the returned objects
 *   safely, by allowing memory reclaim to take place only after a grace
 *   period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem (see the usage sketch at the end
 *   of this list).
 * - add and add_unique operations are supported. add_unique checks
 *   whether the node key already exists in the hash table and, if so,
 *   refuses to insert a duplicate.
 * - The resize operation executes concurrently with
 *   add/add_unique/add_replace/remove/lookup/traversal.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets". These buckets are internal nodes that allow
 *   performing a fast hash lookup, similarly to a skip list. These
 *   buckets are chained together in the split-ordered list, which
 *   allows recursive expansion by inserting new buckets between the
 *   existing buckets as the table needs to grow.
 * - The resize operation for small tables only allows expanding the
 *   hash table. It is triggered automatically by detecting long chains
 *   in the add operation.
 * - The resize operation for larger tables (also available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operations initiated by long chain detection are executed by a
 *   worker thread, which preserves the lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with the "removed" flag set in their next
 *   pointers are unlinked from the list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage collection or a concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with the "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across the removal ends. Furthermore, this ensures that
 *   the node with the "removed" flag set is removed from the linked list
 *   before its memory is reclaimed. After the "removed" flag has been set,
 *   only the deleting thread that is first to set the "removal owner"
 *   flag (with an xchg) in the node's next pointer is considered
 *   to have succeeded its removal (and thus owns the node to reclaim).
 *   Because we garbage-collect starting from an invariant node (the
 *   start-of-bucket bucket node) up to the "removed" node (or a node
 *   with a higher reverse-hash), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if delete/add
 *   operations are changing the structure of the list concurrently.
 * - The add operations perform garbage collection of buckets if they
 *   encounter nodes with the removed flag set in the bucket where they want
 *   to add their new node. This ensures lock-freedom of the add operation by
 *   helping the remover unlink nodes from the list rather than waiting
 *   for it to do so.
 * - There are three memory backends for the hash table buckets: the
 *   "order table", the "chunks", and the "mmap".
 * - These bucket containers contain a compact version of the hash table
 *   nodes.
 * - The RCU "order table":
 *   - has a first level table indexed by log2(hash index) which is
 *     copied and expanded by the resize operation. This order table
 *     allows finding the "bucket node" tables.
 *   - There is one bucket node table per hash index order. The size of
 *     each bucket node table is half the number of hashes contained in
 *     this order (except for order 0).
 * - The RCU "chunks" backend is best suited for close interaction with a
 *   page allocator. It uses a linear array as index to "chunks", each
 *   containing the same number of buckets.
 * - The RCU "mmap" memory backend uses a single memory map to hold
 *   all buckets.
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
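 *
 * As a usage illustration of the read-side requirement stated above, here
 * is a minimal write/lookup sketch. It is not part of this file; the RCU
 * flavor lock/unlock calls (lttng_ust_urcu_read_lock()/unlock()) and the
 * my_match_fct callback are assumptions, and reclaim of the deleted node
 * must be deferred by the caller until after a grace period:
 *
 *	lttng_ust_urcu_read_lock();
 *	lttng_ust_lfht_add(ht, hash, &obj->node);
 *	lttng_ust_lfht_lookup(ht, hash, my_match_fct, key, &iter);
 *	if (iter.node && !lttng_ust_lfht_del(ht, iter.node)) {
 *		/* we own the removal: free the node after a grace period */
 *	}
 *	lttng_ust_urcu_read_unlock();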
 *
 * Ordering Guarantees:
 *
 * To discuss these guarantees, we first define a "read" operation as any
 * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
 * lttng_ust_lfht_first, lttng_ust_lfht_next operations, as well as
 * lttng_ust_lfht_add_unique (failure).
 *
 * We define a "read traversal" operation as any of the following
 * groups of operations:
 * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
 *   (and/or lttng_ust_lfht_next, although less common).
 * - lttng_ust_lfht_add_unique (failure) followed by iteration with
 *   lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
 *   common).
 * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
 *   lttng_ust_lfht_next_duplicate, although less common).
 *
 * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
 * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
 *
 * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
 * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
 * fails (returns a node different from the one passed as parameter), it
 * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
 * lttng_ust_lfht_lookup "read" operation; therefore, any ordering guarantee
 * referring to "lookup" applies to either "lookup" or lttng_ust_lfht_add_unique
 * (failure).
 *
 * We define "prior" and "later" nodes as nodes observable by reads and
 * read traversals respectively before and after a write or sequence of
 * write operations.
 *
 * Hash-table operations are often cascaded; for example, the pointer
 * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
 * whose return value might in turn be passed to another hash-table
 * operation. This entire cascaded series of operations must be enclosed
 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
 * operations (see the read-traversal sketch after the guarantee list below).
 *
 * The following ordering guarantees are offered by this hash table:
 *
 * A.1) "read" after "write": if there is ordering between a write and a
 *      later read, then the read is guaranteed to see the write or some
 *      later write.
 * A.2) "read traversal" after "write": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between a write and the first read of the traversal,
 *      then the "read traversal" is guaranteed to see the write or
 *      some later write.
 * B.1) "write" after "read": if there is ordering between a read and a
 *      later write, then the read will never see the write.
 * B.2) "write" after "read traversal": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between the last read of the traversal and a later
 *      write, then the "read traversal" will never see the write.
 * C)   "write" while "read traversal": if a write occurs during a "read
 *      traversal", the traversal may, or may not, see the write.
 * D.1) "write" after "write": if there is ordering between a write and
 *      a later write, then the later write is guaranteed to see the
 *      effects of the first write.
 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
 *      order to any pair of concurrent conflicting writes.
 *      Non-conflicting writes (for example, to different keys) are
 *      unordered.
 * E)   If a grace period separates a "del" or "replace" operation
 *      and a subsequent operation, then that subsequent operation is
 *      guaranteed not to see the removed item.
 * F)   Uniqueness guarantee: given a hash table that does not contain
 *      duplicate items for a given key, there will only be one item in
 *      the hash table after an arbitrary sequence of add_unique and/or
 *      add_replace operations. Note, however, that a pair of
 *      concurrent read operations might well access two different items
 *      with that key.
 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
 *      memory barrier), then the second lookup will return the same
 *      node as the previous lookup, or some later node.
 * G.2) A "read traversal" that starts after the end of a prior "read
 *      traversal" (ordered by memory barriers) is guaranteed to see the
 *      same nodes as the previous traversal, or some later nodes.
 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
 *      example, if a pair of reads to the same key run concurrently
 *      with an insertion of that same key, the reads remain unordered
 *      regardless of their return values. In other words, you cannot
 *      rely on the values returned by the reads to deduce ordering.
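 *
 * A minimal read-traversal sketch illustrating the guarantees above (not
 * part of this file; my_match_fct and handle() are hypothetical, and the
 * lttng_ust_urcu_read_lock()/unlock() calls stand for the RCU flavor in
 * use):
 *
 *	lttng_ust_urcu_read_lock();
 *	lttng_ust_lfht_lookup(ht, hash, my_match_fct, key, &iter);
 *	while (iter.node != NULL) {
 *		handle(iter.node);
 *		lttng_ust_lfht_next_duplicate(ht, my_match_fct, key, &iter);
 *	}
 *	lttng_ust_urcu_read_unlock();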
 *
 * Progress guarantees:
 *
 * * Reads are wait-free. These operations always move forward in the
 *   hash table linked list, and this list has no loop.
 * * Writes are lock-free. Any retry loop performed by a write operation
 *   is triggered by progress made within another update operation.
 *
 * Bucket node tables:
 *
 * hash table   hash table      the last        all bucket node tables
 * order        size            bucket node     0   1   2   3   4   5   6(index)
 *                              table size
 * 0            1               1               1
 * 1            2               1               1   1
 * 2            4               2               1   1   2
 * 3            8               4               1   1   2   4
 * 4            16              8               1   1   2   4   8
 * 5            32              16              1   1   2   4   8  16
 * 6            64              32              1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
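 *
 * For instance, growing from order 5 to order 6 only requires allocating
 * the index=6 bucket node table, whose size is
 * (!order ? 1 : (1 << (order - 1))) = 1 << 5 = 32 buckets; the buckets of
 * orders 0 to 5 stay in place.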
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2
 * because we use index 0 to deal with the 0 special case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 * [reversed-bits ordering diagram not reproduced here]
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order:
 * [bucket-chaining diagram not reproduced here]
 *
 * Note on port to lttng-ust: auto-resize and accounting features are
 * removed.
 */
#define _LGPL_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include <lttng/urcu/pointer.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>

#include "rculfhash.h"
#include "rculfhash-internal.h"
/*
 * Split-counters lazily update the global counter every 1024
 * addition/removal operations. They automatically keep track of when a
 * resize is required. We use the bucket-chain length as an expansion
 * indicator for small tables and for machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)
/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 * The "removal owner" flag is used to detect which of the "del"
 * operations that set the "removed" flag gets to return the removed
 * node to its caller. Note that the replace operation does not need to
 * interact with the "removal owner" flag, because it validates that
 * the "removed" flag is not set before performing its cmpxchg.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define REMOVAL_OWNER_FLAG	(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL
/*
 * ht_items_count: Split-counters counting the number of node additions
 * and removals in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove operations and, every (1 << COUNT_COMMIT_ORDER)
 * operations, trigger an update of the global counter. We choose a
 * power-of-2 value for the trigger to deal with 32- or 64-bit overflow
 * of the counter.
 */
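/*
 * Illustration of the commit trigger described above (a sketch, not the
 * accounting code itself; split_count and global_count are hypothetical
 * names): each thread increments its own free-running split-counter and
 * only propagates to the global approximation every
 * (1 << COUNT_COMMIT_ORDER) local operations:
 *
 *	unsigned long c = ++split_count->add;
 *	if (!(c & ((1UL << COUNT_COMMIT_ORDER) - 1)))
 *		uatomic_add(&global_count, 1UL << COUNT_COMMIT_ORDER);
 */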
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG

static
void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	iter->lfht = ht;
}

#define lttng_ust_lfht_iter_debug_assert(...)	assert(__VA_ARGS__)

#else

static
void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
}

#define lttng_ust_lfht_iter_debug_assert(...)

#endif
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
#if (CAA_BITS_PER_LONG == 32)
static
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
#else
static
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
#endif

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	__asm__ ("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	__asm__ ("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;

	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int lttng_ust_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return lttng_ust_lfht_fls_ulong(x - 1);
}
static
struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(const struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
int is_bucket(struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
int is_removal_owner(struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}

static
struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}

static
struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
}

static
struct lttng_ust_lfht_node *get_end(void)
{
	return (struct lttng_ust_lfht_node *) END_VALUE;
}

static
int is_end(struct lttng_ust_lfht_node *node)
{
	return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
}
static
void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
{
	return ht->mm->alloc_bucket_table(ht, order);
}

/*
 * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
 * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
{
	return ht->mm->free_bucket_table(ht, order);
}

static
struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}

static
struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
		unsigned long hash)
{
	return bucket_at(ht, hash & (size - 1));
}
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
{
	struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_bucket(bucket));
	assert(!is_removed(bucket));
	assert(!is_removal_owner(bucket));
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));
	for (;;) {
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = lttng_ust_rcu_dereference(iter_prev->next);
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * We should never be called with bucket (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(bucket != node);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				return;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				return;
			next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
			if (caa_likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
	}
}
static
int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
		struct lttng_ust_lfht_node *old_node,
		struct lttng_ust_lfht_node *old_next,
		struct lttng_ust_lfht_node *new_node)
{
	struct lttng_ust_lfht_node *bucket, *ret_next;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_removal_owner(old_node));
	assert(!is_bucket(old_node));
	assert(!is_removed(new_node));
	assert(!is_removal_owner(new_node));
	assert(!is_bucket(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(old_next == clear_flag(old_next));
		assert(new_node != old_next);
		/*
		 * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
		 * flag. It is either set atomically at the same time
		 * (replace) or after (del).
		 */
		assert(!is_removal_owner(old_next));
		new_node->next = old_next;
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 * This is a replacement of a node with another node
		 * that has the same value: we are therefore not
		 * removing a value from the hash table. We set both the
		 * REMOVED and REMOVAL_OWNER flags atomically so we own
		 * the node after successful cmpxchg.
		 */
		ret_next = uatomic_cmpxchg(&old_node->next,
			old_next, flag_removed_or_removal_owner(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
	_lttng_ust_lfht_gc_bucket(bucket, new_node);

	assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
	return 0;
}
/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		unsigned long size,
		struct lttng_ust_lfht_node *node,
		struct lttng_ust_lfht_iter *unique_ret,
		int bucket_flag)
{
	struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct lttng_ust_lfht_node *bucket;

	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));
	bucket = lookup_bucket(ht, size, hash);
	for (;;) {
		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = lttng_ust_rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				goto insert;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				goto insert;

			/* bucket node is the first node of the identical-hash-value chain */
			if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
				goto insert;

			next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_bucket(next)
			    && clear_flag(iter)->reverse_hash == node->reverse_hash) {
				struct lttng_ust_lfht_iter d_iter = {
					.node = node,
					.next = iter,
#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
					.lfht = ht,
#endif
				};

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including traversing the table node by
				 * node by forward iterations)
				 */
				lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removal_owner(iter_prev));
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		assert(iter_prev != node);
		if (!bucket_flag)
			node->next = clear_flag(iter);
		else
			node->next = flag_bucket(clear_flag(iter));
		if (is_bucket(iter))
			new_node = flag_bucket(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}
static
int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
		struct lttng_ust_lfht_node *node)
{
	struct lttng_ust_lfht_node *bucket, *next;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));

	/*
	 * We are first checking if the node had previously been
	 * logically removed (this check is not atomic with setting the
	 * logical removal flag). Return -ENOENT if the node had
	 * previously been removed.
	 */
	next = CMM_LOAD_SHARED(node->next);	/* next is not dereferenced */
	if (caa_unlikely(is_removed(next)))
		return -ENOENT;

	assert(!is_bucket(next));

	/*
	 * The del operation semantic guarantees a full memory barrier
	 * before the uatomic_or atomic commit of the deletion flag.
	 */
	cmm_smp_mb__before_uatomic_or();
	/*
	 * We set the REMOVED_FLAG unconditionally. Note that there may
	 * be more than one concurrent thread setting this flag.
	 * Knowing which wins the race will be known after the garbage
	 * collection phase, stay tuned!
	 */
	uatomic_or(&node->next, REMOVED_FLAG);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	_lttng_ust_lfht_gc_bucket(bucket, node);

	assert(is_removed(CMM_LOAD_SHARED(node->next)));
	/*
	 * Last phase: atomically exchange node->next with a version
	 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
	 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
	 * the node and win the removal race.
	 * It is interesting to note that all "add" paths are forbidden
	 * to change the next pointer starting from the point where the
	 * REMOVED_FLAG is set, so here using a load followed by an
	 * xchg() suffices to guarantee that the xchg() will only ever
	 * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
	 * was already set).
	 */
	if (!is_removal_owner(uatomic_xchg(&node->next,
			flag_removal_owner(node->next))))
		return 0;
	else
		return -ENOENT;
}
/*
 * Never called with size < 1.
 */
static
void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
{
	struct lttng_ust_lfht_node *prev, *node;
	unsigned long order, len, i;
	int bucket_order;

	lttng_ust_lfht_alloc_bucket_table(ht, 0);

	dbg_printf("create bucket: order 0 index 0 hash 0\n");
	node = bucket_at(ht, 0);
	node->next = flag_bucket(get_end());
	node->reverse_hash = 0;

	bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
	assert(bucket_order >= 0);

	for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
		len = 1UL << (order - 1);
		lttng_ust_lfht_alloc_bucket_table(ht, order);

		for (i = 0; i < len; i++) {
			/*
			 * Now, we are trying to init the node with the
			 * hash=(len+i) (which is also a bucket with the
			 * index=(len+i)) and insert it into the hash table,
			 * so this node has to be inserted after the bucket
			 * with the index=(len+i)&(len-1)=i. And because there
			 * is no other non-bucket node nor bucket node with
			 * a larger index/hash inserted yet, the bucket node
			 * being inserted should be linked directly after
			 * the bucket node with index=i.
			 */
			prev = bucket_at(ht, i);
			node = bucket_at(ht, len + i);

			dbg_printf("create bucket: order %lu index %lu hash %lu\n",
				   order, len + i, len + i);
			node->reverse_hash = bit_reverse_ulong(len + i);

			/* insert after prev */
			assert(is_bucket(prev->next));
			node->next = prev->next;
			prev->next = flag_bucket(node);
		}
	}
}
#if (CAA_BITS_PER_LONG > 32)
/*
 * For 64-bit architectures, with max number of buckets small enough not to
 * use the entire 64-bit memory mapping space (and allowing a fair number of
 * hash table instances), use the mmap allocator, which is faster. Otherwise,
 * fall back to the order allocator.
 */
static
const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
{
	if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
		return &lttng_ust_lfht_mm_mmap;
	else
		return &lttng_ust_lfht_mm_order;
}
#else
/*
 * For 32-bit architectures, use the order allocator.
 */
static
const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
{
	return &lttng_ust_lfht_mm_order;
}
#endif
struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct lttng_ust_lfht_mm_type *mm)
{
	struct lttng_ust_lfht *ht;
	unsigned long order;

	/* min_nr_alloc_buckets must be power of two */
	if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
		return NULL;

	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;

	/*
	 * Memory management plugin default.
	 */
	if (!mm)
		mm = get_mm_type(max_nr_buckets);

	/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &lttng_ust_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);

	/* max_nr_buckets must be power of two */
	if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
		return NULL;

	if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
		return NULL;

	min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
	init_size = max(init_size, MIN_TABLE_SIZE);
	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
	init_size = min(init_size, max_nr_buckets);

	ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
	assert(ht);
	assert(ht->mm == mm);
	assert(ht->bucket_at == mm->bucket_at);

	ht->flags = flags;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = lttng_ust_lfht_get_count_order_ulong(init_size);
	ht->resize_target = 1UL << order;
	lttng_ust_lfht_create_bucket(ht, 1UL << order);
	ht->size = 1UL << order;
	return ht;
}
void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
		lttng_ust_lfht_match_fct match, const void *key,
		struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next, *bucket;
	unsigned long reverse_hash, size;

	lttng_ust_lfht_iter_debug_set_ht(ht, iter);

	reverse_hash = bit_reverse_ulong(hash);

	size = lttng_ust_rcu_dereference(ht->size);
	bucket = lookup_bucket(ht, size, hash);
	/* We can always skip the bucket node initially */
	node = lttng_ust_rcu_dereference(bucket->next);
	node = clear_flag(node);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		assert(node == clear_flag(node));
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && node->reverse_hash == reverse_hash
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}
void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht, lttng_ust_lfht_match_fct match,
		const void *key, struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next;
	unsigned long reverse_hash;

	lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
	node = iter->node;
	reverse_hash = node->reverse_hash;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}
void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next;

	lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
	node = clear_flag(iter->next);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}
void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	lttng_ust_lfht_iter_debug_set_ht(ht, iter);
	/*
	 * Get next after first bucket node. The first bucket node is the
	 * first node of the linked list.
	 */
	iter->next = bucket_at(ht, 0)->next;
	lttng_ust_lfht_next(ht, iter);
}
void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	_lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
}
struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;
	struct lttng_ust_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	_lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
	return iter.node;
}
struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;
	struct lttng_ust_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	for (;;) {
		_lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
		if (iter.node == node) {
			return NULL;
		}

		if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}
int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
		struct lttng_ust_lfht_iter *old_iter,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *new_node)
{
	unsigned long size;

	new_node->reverse_hash = bit_reverse_ulong(hash);
	if (!old_iter->node)
		return -ENOENT;
	if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
		return -EINVAL;
	if (caa_unlikely(!match(old_iter->node, key)))
		return -EINVAL;
	size = lttng_ust_rcu_dereference(ht->size);
	return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}
int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
{
	unsigned long size;

	size = lttng_ust_rcu_dereference(ht->size);
	return _lttng_ust_lfht_del(ht, size, node);
}

int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
{
	return is_removed(CMM_LOAD_SHARED(node->next));
}
static
int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
{
	struct lttng_ust_lfht_node *node;
	unsigned long order, i, size;

	/* Check that the table is empty */
	node = bucket_at(ht, 0);
	do {
		node = clear_flag(node)->next;
		if (!is_bucket(node))
			return -EPERM;
		assert(!is_removed(node));
		assert(!is_removal_owner(node));
	} while (!is_end(node));
	/*
	 * size accessed without lttng_ust_rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->size;
	/* Internal sanity check: all nodes left should be buckets */
	for (i = 0; i < size; i++) {
		node = bucket_at(ht, i);
		dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
			   i, i, bit_reverse_ulong(node->reverse_hash));
		assert(is_bucket(node->next));
	}

	for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
		lttng_ust_lfht_free_bucket_table(ht, order);

	return 0;
}
/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
{
	int ret;

	ret = lttng_ust_lfht_delete_bucket(ht);
	if (ret)
		return ret;
	ret = pthread_mutex_destroy(&ht->resize_mutex);