/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - An RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely, by delaying
 *   memory reclaim for a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the
 *   hash table. It is triggered automatically by detecting long chains
 *   in the add operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Per-CPU split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - A resize operation initiated by long chain detection is executed by
 *   a call_rcu thread, which preserves the lock-freedom of add and
 *   remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread that successfully
 *   set the "removed" flag (with a cmpxchg) in a node's next pointer is
 *   considered to have succeeded its removal (and thus owns the node to
 *   reclaim). Because we garbage-collect starting from an invariant
 *   node (the start-of-bucket dummy node) up to the "removed" node (or
 *   find a reverse-hash that is higher), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old dummy node
 *   table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * Dummy node tables:
 *
 * hash table	hash table	the last	all dummy node tables
 * order	size		dummy node	0   1   2   3   4   5   6(index)
 *				table size
 * 0		1		1		1
 * 1		2		1		1   1
 * 2		4		2		1   1   2
 * 3		8		4		1   1   2   4
 * 4		16		8		1   1   2   4   8
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last dummy node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 dummy node table
 * shrink hash table from order 6 to 5: fini the index=6 dummy node table
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |        6  110        011
 *                          ->          7  111        111
 */
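
/*
 * Illustration only (a sketch, not part of the implementation): the
 * split ordering above can be checked with plain bit arithmetic. For a
 * 3-bit bucket index, reversing the bits swaps bit 0 and bit 2, so
 * bucket 4 (100) maps to position 1 (001), placing it right after
 * bucket 0 in the linked list, as in the table above. The helper name
 * below is hypothetical.
 */
static __attribute__((unused))
unsigned long example_reverse_3bit(unsigned long v)
{
        /* e.g. 6 (110) -> 3 (011), 1 (001) -> 4 (100) */
        return ((v & 1UL) << 2) | (v & 2UL) | ((v >> 2) & 1UL);
}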

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)     printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif

/*
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize
 * is required. We use the bucket length as an indicator of the need to
 * expand, for small tables and for machines lacking per-cpu data
 * support.
 */
#define COUNT_COMMIT_ORDER              10
#define CHAIN_LEN_TARGET                1
#define CHAIN_LEN_RESIZE_THRESHOLD      3

/*
 * Define the minimum table size.
 */
#define MIN_TABLE_SIZE                  1

#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER                 32
#else
#define MAX_TABLE_ORDER                 64
#endif

/*
 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER  12
#define MIN_PARTITION_PER_THREAD        (1UL << MIN_PARTITION_PER_THREAD_ORDER)

#ifndef min
#define min(a, b)       ((a) < (b) ? (a) : (b))
#endif

#ifndef max
#define max(a, b)       ((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG            (1UL << 0)
#define DUMMY_FLAG              (1UL << 1)
#define FLAGS_MASK              ((1UL << 2) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE               NULL

struct ht_items_count {
        unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct rcu_level {
        /* Note: manually update allocation length when adding a field */
        struct _cds_lfht_node nodes[0];
};

struct rcu_table {
        unsigned long size;     /* always a power of 2, shared (RCU) */
        unsigned long resize_target;
        int resize_initiated;
        struct rcu_level *tbl[MAX_TABLE_ORDER];
};

struct cds_lfht {
        struct rcu_table t;
        cds_lfht_hash_fct hash_fct;
        cds_lfht_compare_fct compare_fct;
        unsigned long min_alloc_order;
        unsigned long min_alloc_size;
        unsigned long hash_seed;
        int flags;
        /*
         * We need to put the work threads offline (QSBR) when taking this
         * mutex, because we use synchronize_rcu within this mutex critical
         * section, which waits on read-side critical sections, and could
         * therefore cause grace-period deadlock if we hold off RCU G.P.
         * completion.
         */
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
        void (*cds_lfht_synchronize_rcu)(void);
        void (*cds_lfht_rcu_read_lock)(void);
        void (*cds_lfht_rcu_read_unlock)(void);
        void (*cds_lfht_rcu_thread_offline)(void);
        void (*cds_lfht_rcu_thread_online)(void);
        void (*cds_lfht_rcu_register_thread)(void);
        void (*cds_lfht_rcu_unregister_thread)(void);
        pthread_attr_t *resize_attr;    /* Resize threads attributes */
        long count;                     /* global approximate item count */
        struct ht_items_count *percpu_count;    /* per-cpu item count */
};

struct rcu_resize_work {
        struct rcu_head head;
        struct cds_lfht *ht;
};

struct partition_resize_work {
        pthread_t thread_id;
        struct cds_lfht *ht;
        unsigned long i, start, len;
        void (*fct)(struct cds_lfht *ht, unsigned long i,
                    unsigned long start, unsigned long len);
};

static
void _cds_lfht_add(struct cds_lfht *ht,
                unsigned long size,
                struct cds_lfht_node *node,
                struct cds_lfht_iter *unique_ret,
                int dummy);

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
        R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
        return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
        return ((uint32_t) bit_reverse_u8(v) << 24) |
                ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
                ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
                ((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
        return ((uint64_t) bit_reverse_u8(v) << 56) |
                ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
                ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
                ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
                ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
                ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
                ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
                ((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
        return bit_reverse_u32(v);
#else
        return bit_reverse_u64(v);
#endif
}

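/*
 * Illustration only (a sketch, not used by the implementation): the
 * split-order table from the header comment can be reproduced with the
 * helpers above; reversing a 3-bit index through the byte-wide lookup
 * table is bit_reverse_u8(v) >> 5. The helper name is hypothetical.
 */
static __attribute__((unused))
void example_print_split_order(void)
{
        unsigned long v;

        for (v = 0; v < 8; v++)
                printf("%lu -> %u\n", v,
                        (unsigned int) (bit_reverse_u8(v) >> 5));
        /* Prints 0->0, 1->4, 2->2, 3->6, 4->1, 5->5, 6->3, 7->7. */
}
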
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
        int r;

        asm("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n\t"
            "1:\n\t"
            : "=r" (r) : "rm" (x));
        return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
        long r;

        asm("bsrq %1,%0\n\t"
            "jnz 1f\n\t"
            "movq $-1,%0\n\t"
            "1:\n\t"
            : "=r" (r) : "rm" (x));
        return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
        unsigned int r = 64;

        if (!x)
                return 0;

        if (!(x & 0xFFFFFFFF00000000ULL)) {
                x <<= 32;
                r -= 32;
        }
        if (!(x & 0xFFFF000000000000ULL)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xFF00000000000000ULL)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xF000000000000000ULL)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xC000000000000000ULL)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x8000000000000000ULL)) {
                x <<= 1;
                r -= 1;
        }
        return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
        unsigned int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xFFFF0000U)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xFF000000U)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xF0000000U)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xC0000000U)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000U)) {
                x <<= 1;
                r -= 1;
        }
        return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
        return fls_u32(x);
#else
        return fls_u64(x);
#endif
}

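/*
 * Illustration only (a sketch, not used by the implementation): a few
 * sample values for the fls helpers above. The helper name is
 * hypothetical.
 */
static __attribute__((unused))
void example_fls_values(void)
{
        assert(fls_u32(0) == 0);                /* no bit set */
        assert(fls_u32(1) == 1);                /* bit 0 is the MSB */
        assert(fls_u32(8) == 4);                /* 0b1000: MSB at position 4 */
        assert(fls_u64(1ULL << 63) == 64);
}
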
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_u32(uint32_t x)
{
        if (!x)
                return -1;

        return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_ulong(unsigned long x)
{
        if (!x)
                return -1;

        return fls_ulong(x - 1);
}

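/*
 * Illustration only (a sketch, not used by the implementation): the
 * count order rounds a requested size up to the next power of two,
 * which is how table sizes map to orders. The helper name is
 * hypothetical.
 */
static __attribute__((unused))
void example_count_order_values(void)
{
        assert(get_count_order_ulong(1) == 0);  /* 1 <= 1 << 0 */
        assert(get_count_order_ulong(2) == 1);  /* 2 <= 1 << 1 */
        assert(get_count_order_ulong(3) == 2);  /* rounds up: 3 <= 1 << 2 */
        assert(get_count_order_ulong(8) == 3);
}
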
#ifdef POISON_FREE
#define poison_free(ptr)                                \
        do {                                            \
                memset(ptr, 0x42, sizeof(*(ptr)));      \
                free(ptr);                              \
        } while (0)
#else
#define poison_free(ptr)        free(ptr)
#endif

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we support hash table item accounting.
 * In the unfortunate event that the number of CPUs reported is
 * inaccurate, we use modulo arithmetic on the number of CPUs we got.
 */
#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
                                unsigned long count);

static long nr_cpus_mask = -1;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
        struct ht_items_count *count;

        switch (nr_cpus_mask) {
        case -2:
                return NULL;
        case -1:
        {
                long maxcpus;

                maxcpus = sysconf(_SC_NPROCESSORS_CONF);
                if (maxcpus <= 0) {
                        nr_cpus_mask = -2;
                        return NULL;
                }
                /*
                 * round up number of CPUs to next power of two, so we
                 * can use & for modulo.
                 */
                maxcpus = 1UL << get_count_order_ulong(maxcpus);
                nr_cpus_mask = maxcpus - 1;
        }
                /* Fall-through */
        default:
                return calloc(nr_cpus_mask + 1, sizeof(*count));
        }
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
        poison_free(count);
}

static
int ht_get_cpu(void)
{
        int cpu;

        assert(nr_cpus_mask >= 0);
        cpu = sched_getcpu();
        if (unlikely(cpu < 0))
                return cpu;
        else
                return cpu & nr_cpus_mask;
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
        unsigned long percpu_count;
        int cpu;

        if (unlikely(!ht->percpu_count))
                return;
        cpu = ht_get_cpu();
        if (unlikely(cpu < 0))
                return;
        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
        if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
                long count;

                dbg_printf("add percpu %lu\n", percpu_count);
                count = uatomic_add_return(&ht->count,
                                1UL << COUNT_COMMIT_ORDER);
                /* If power of 2 */
                if (!(count & (count - 1))) {
                        if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
                                return;
                        dbg_printf("add set global %ld\n", count);
                        cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
        }
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
        unsigned long percpu_count;
        int cpu;

        if (unlikely(!ht->percpu_count))
                return;
        cpu = ht_get_cpu();
        if (unlikely(cpu < 0))
                return;
        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
        if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
                long count;

                dbg_printf("del percpu %lu\n", percpu_count);
                count = uatomic_add_return(&ht->count,
                                -(1UL << COUNT_COMMIT_ORDER));
                /* If power of 2 */
                if (!(count & (count - 1))) {
                        if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
                                return;
                        dbg_printf("del set global %ld\n", count);
                        /*
                         * Don't shrink table if the number of nodes is below a
                         * certain threshold.
                         */
                        if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
                                return;
                        cds_lfht_resize_lazy_count(ht, size,
                                count >> (CHAIN_LEN_TARGET - 1));
                }
        }
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static const long nr_cpus_mask = -2;

static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
        return NULL;
}

static
void free_per_cpu_items_count(struct ht_items_count *count)
{
}

static
void ht_count_add(struct cds_lfht *ht, unsigned long size)
{
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size)
{
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */


static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
        unsigned long count;

        if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
                return;
        count = uatomic_read(&ht->count);
        /*
         * Use bucket-local length for small table expand and for
         * environments lacking per-cpu data support.
         */
        if (count >= (1UL << COUNT_COMMIT_ORDER))
                return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
        if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
                cds_lfht_resize_lazy(ht, size,
                        get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

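/*
 * Worked example (illustration only): with CHAIN_LEN_TARGET == 1 and
 * CHAIN_LEN_RESIZE_THRESHOLD == 3, a bucket chain of length 3 yields a
 * growth order of get_count_order_u32(3 - (1 - 1)) == 2, so the lazy
 * resize above requests a table four times larger (assuming the growth
 * argument is interpreted as a left-shift amount on the current size).
 */
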
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
        return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
        return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
        return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
        return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
        return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
        return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
        return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}

static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
        unsigned long old1, old2;

        old1 = uatomic_read(ptr);
        do {
                old2 = old1;
                if (old2 >= v)
                        return old2;
        } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
        return v;
}

static
struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
                unsigned long hash)
{
        unsigned long index, order;

        assert(size > 0);
        index = hash & (size - 1);
        /*
         * equivalent to get_count_order_ulong(index + 1), but optimizes
         * away the non-existing 0 special-case for
         * get_count_order_ulong.
         */
        order = fls_ulong(index);

        dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
                   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));

        return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
}

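/*
 * Worked example (illustration only): for size 8 and hash 13,
 * index = 13 & 7 = 5, order = fls_ulong(5) = 3, and the offset within
 * the order-3 dummy node table is 5 & ((1UL << 2) - 1) = 1, so the
 * bucket is ht->t.tbl[3]->nodes[1]. This matches the dummy node table
 * layout shown in the header comment.
 */
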
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

        assert(!is_dummy(dummy));
        assert(!is_removed(dummy));
        assert(!is_dummy(node));
        assert(!is_removed(node));
        for (;;) {
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(!is_removed(iter));
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                /*
                 * We should never be called with dummy (start of chain)
                 * and logically removed node (end of path compression
                 * marker) being the actual same node. This would be a
                 * bug in the algorithm implementation.
                 */
                assert(dummy != node);
                for (;;) {
                        if (unlikely(is_end(iter)))
                                return;
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                return;
                        next = rcu_dereference(clear_flag(iter)->p.next);
                        if (likely(is_removed(next)))
                                break;
                        iter_prev = clear_flag(iter);
                        iter = next;
                }
                assert(!is_removed(iter));
                if (is_dummy(iter))
                        new_next = flag_dummy(clear_flag(next));
                else
                        new_next = clear_flag(next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
        }
        return;
}

static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
                struct cds_lfht_node *old_node,
                struct cds_lfht_node *old_next,
                struct cds_lfht_node *new_node)
{
        struct cds_lfht_node *dummy, *ret_next;
        struct _cds_lfht_node *lookup;

        if (!old_node)  /* Return -ENOENT if asked to replace NULL node */
                return -ENOENT;

        assert(!is_removed(old_node));
        assert(!is_dummy(old_node));
        assert(!is_removed(new_node));
        assert(!is_dummy(new_node));
        assert(new_node != old_node);
        for (;;) {
                /* Insert after node to be replaced */
                if (is_removed(old_next)) {
                        /*
                         * Too late, the old node has been removed under us
                         * between lookup and replace. Fail.
                         */
                        return -ENOENT;
                }
                assert(!is_dummy(old_next));
                assert(new_node != clear_flag(old_next));
                new_node->p.next = clear_flag(old_next);
                /*
                 * Here is the whole trick for lock-free replace: we add
                 * the replacement node _after_ the node we want to
                 * replace by atomically setting its next pointer at the
                 * same time we set its removal flag. Given that
                 * the lookups/get next use an iterator aware of the
                 * next pointer, they will either skip the old node due
                 * to the removal flag and see the new node, or use
                 * the old node, but will not see the new one.
                 */
                ret_next = uatomic_cmpxchg(&old_node->p.next,
                              old_next, flag_removed(new_node));
                if (ret_next == old_next)
                        break;          /* We performed the replacement. */
                old_next = ret_next;
        }

        /*
         * Ensure that the old node is not visible to readers anymore:
         * lookup for the node, and remove it (along with any other
         * logically removed node) if found.
         */
        lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
        dummy = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy, new_node);

        assert(is_removed(rcu_dereference(old_node->p.next)));
        return 0;
}

/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
                unsigned long size,
                struct cds_lfht_node *node,
                struct cds_lfht_iter *unique_ret,
                int dummy)
{
        struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
                        *return_node;
        struct _cds_lfht_node *lookup;

        assert(!is_dummy(node));
        assert(!is_removed(node));
        lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
        for (;;) {
                uint32_t chain_len = 0;

                /*
                 * iter_prev points to the non-removed node prior to the
                 * insert location.
                 */
                iter_prev = (struct cds_lfht_node *) lookup;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
                        if (unlikely(is_end(iter)))
                                goto insert;
                        if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;

                        /* dummy node is the first node of the identical-hash-value chain */
                        if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
                                goto insert;

                        next = rcu_dereference(clear_flag(iter)->p.next);
                        if (unlikely(is_removed(next)))
                                goto gc_node;

                        /* uniquely add */
                        if (unique_ret
                            && !is_dummy(next)
                            && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
                                struct cds_lfht_iter d_iter = { .node = node, .next = iter, };

                                /*
                                 * uniquely adding inserts the node as the first
                                 * node of the identical-hash-value node chain.
                                 *
                                 * This semantic ensures no duplicated keys
                                 * should ever be observable in the table
                                 * (including when observing the table node by
                                 * node via forward iteration).
                                 */
                                cds_lfht_next_duplicate(ht, &d_iter);
                                if (!d_iter.node)
                                        goto insert;

                                *unique_ret = d_iter;
                                return;
                        }

                        /* Only account for identical reverse hash once */
                        if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
                            && !is_dummy(next))
                                check_resize(ht, size, ++chain_len);
                        iter_prev = clear_flag(iter);
                        iter = next;
                }

        insert:
                assert(node != clear_flag(iter));
                assert(!is_removed(iter_prev));
                assert(!is_removed(iter));
                assert(iter_prev != node);
                if (!dummy)
                        node->p.next = clear_flag(iter);
                else
                        node->p.next = flag_dummy(clear_flag(iter));
                if (is_dummy(iter))
                        new_node = flag_dummy(node);
                else
                        new_node = node;
                if (uatomic_cmpxchg(&iter_prev->p.next, iter,
                                    new_node) != iter) {
                        continue;       /* retry */
                } else {
                        return_node = node;
                        goto end;
                }

        gc_node:
                assert(!is_removed(iter));
                if (is_dummy(iter))
                        new_next = flag_dummy(clear_flag(next));
                else
                        new_next = clear_flag(next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
                /* retry */
        }
end:
        if (unique_ret) {
                unique_ret->node = return_node;
                /* unique_ret->next left unset, never used. */
        }
}

static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
                struct cds_lfht_node *node,
                int dummy_removal)
{
        struct cds_lfht_node *dummy, *next, *old;
        struct _cds_lfht_node *lookup;

        if (!node)      /* Return -ENOENT if asked to delete NULL node */
                return -ENOENT;

        /* logically delete the node */
        assert(!is_dummy(node));
        assert(!is_removed(node));
        old = rcu_dereference(node->p.next);
        do {
                struct cds_lfht_node *new_next;

                next = old;
                if (unlikely(is_removed(next)))
                        return -ENOENT;
                if (dummy_removal)
                        assert(is_dummy(next));
                else
                        assert(!is_dummy(next));
                new_next = flag_removed(next);
                old = uatomic_cmpxchg(&node->p.next, next, new_next);
        } while (old != next);
        /* We performed the (logical) deletion. */

        /*
         * Ensure that the node is not visible to readers anymore: lookup for
         * the node, and remove it (along with any other logically removed node)
         * if found.
         */
        lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
        dummy = (struct cds_lfht_node *) lookup;
        _cds_lfht_gc_bucket(dummy, node);

        assert(is_removed(rcu_dereference(node->p.next)));
        return 0;
}

static
void *partition_resize_thread(void *arg)
{
        struct partition_resize_work *work = arg;

        work->ht->cds_lfht_rcu_register_thread();
        work->fct(work->ht, work->i, work->start, work->len);
        work->ht->cds_lfht_rcu_unregister_thread();
        return NULL;
}

static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
                unsigned long len,
                void (*fct)(struct cds_lfht *ht, unsigned long i,
                        unsigned long start, unsigned long len))
{
        unsigned long partition_len;
        struct partition_resize_work *work;
        int thread, ret;
        unsigned long nr_threads;

        /*
         * Note: nr_cpus_mask + 1 is always power of 2.
         * We spawn just the number of threads we need to satisfy the minimum
         * partition size, up to the number of CPUs in the system.
         */
        if (nr_cpus_mask > 0) {
                nr_threads = min(nr_cpus_mask + 1,
                                 len >> MIN_PARTITION_PER_THREAD_ORDER);
        } else {
                nr_threads = 1;
        }
        partition_len = len >> get_count_order_ulong(nr_threads);
        work = calloc(nr_threads, sizeof(*work));
        assert(work);
        for (thread = 0; thread < nr_threads; thread++) {
                work[thread].ht = ht;
                work[thread].i = i;
                work[thread].len = partition_len;
                work[thread].start = thread * partition_len;
                work[thread].fct = fct;
                ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
                                     partition_resize_thread, &work[thread]);
                assert(!ret);
        }
        for (thread = 0; thread < nr_threads; thread++) {
                ret = pthread_join(work[thread].thread_id, NULL);
                assert(!ret);
        }
        free(work);
}

/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not letting the expand lag behind too
 * many concurrent insertion threads, by using the scheduler's ability to
 * schedule dummy node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
                                   unsigned long start, unsigned long len)
{
        unsigned long j;

        assert(i > ht->min_alloc_order);
        ht->cds_lfht_rcu_read_lock();
        for (j = start; j < start + len; j++) {
                struct cds_lfht_node *new_node =
                        (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

                dbg_printf("init populate: i %lu j %lu hash %lu\n",
                           i, j, (1UL << (i - 1)) + j);
                new_node->p.reverse_hash =
                        bit_reverse_ulong((1UL << (i - 1)) + j);
                _cds_lfht_add(ht, 1UL << (i - 1),
                                new_node, NULL, 1);
        }
        ht->cds_lfht_rcu_read_unlock();
}

static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
                         unsigned long len)
{
        assert(nr_cpus_mask != -1);
        if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
                ht->cds_lfht_rcu_thread_online();
                init_table_populate_partition(ht, i, 0, len);
                ht->cds_lfht_rcu_thread_offline();
                return;
        }
        partition_resize_helper(ht, i, len, init_table_populate_partition);
}

static
void init_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long last_order)
{
        unsigned long i;

        dbg_printf("init table: first_order %lu last_order %lu\n",
                   first_order, last_order);
        assert(first_order > ht->min_alloc_order);
        for (i = first_order; i <= last_order; i++) {
                unsigned long len;

                len = 1UL << (i - 1);
                dbg_printf("init order %lu len: %lu\n", i, len);

                /* Stop expand if the resize target changes under us */
                if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
                        break;

                ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
                assert(ht->t.tbl[i]);

                /*
                 * Set all dummy nodes reverse hash values for a level and
                 * link all dummy nodes into the table.
                 */
                init_table_populate(ht, i, len);

                /*
                 * Update table size.
                 */
                cmm_smp_wmb();  /* populate data before RCU size */
                CMM_STORE_SHARED(ht->t.size, 1UL << i);

                dbg_printf("init new size: %lu\n", 1UL << i);
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                        break;
        }
}

/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of dummy nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
                            unsigned long start, unsigned long len)
{
        unsigned long j;

        assert(i > ht->min_alloc_order);
        ht->cds_lfht_rcu_read_lock();
        for (j = start; j < start + len; j++) {
                struct cds_lfht_node *fini_node =
                        (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

                dbg_printf("remove entry: i %lu j %lu hash %lu\n",
                           i, j, (1UL << (i - 1)) + j);
                fini_node->p.reverse_hash =
                        bit_reverse_ulong((1UL << (i - 1)) + j);
                (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
        }
        ht->cds_lfht_rcu_read_unlock();
}

static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{

        assert(nr_cpus_mask != -1);
        if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
                ht->cds_lfht_rcu_thread_online();
                remove_table_partition(ht, i, 0, len);
                ht->cds_lfht_rcu_thread_offline();
                return;
        }
        partition_resize_helper(ht, i, len, remove_table_partition);
}

static
void fini_table(struct cds_lfht *ht,
                unsigned long first_order, unsigned long last_order)
{
        long i;
        void *free_by_rcu = NULL;

        dbg_printf("fini table: first_order %lu last_order %lu\n",
                   first_order, last_order);
        assert(first_order > ht->min_alloc_order);
        for (i = last_order; i >= first_order; i--) {
                unsigned long len;

                len = 1UL << (i - 1);
                dbg_printf("fini order %lu len: %lu\n", i, len);

                /* Stop shrink if the resize target changes under us */
                if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
                        break;

                cmm_smp_wmb();  /* populate data before RCU size */
                CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));

                /*
                 * We need to wait for all add operations to reach Q.S. (and
                 * thus use the new table for lookups) before we can start
                 * releasing the old dummy nodes. Otherwise their lookup will
                 * return a logically removed node as insert position.
                 */
                ht->cds_lfht_synchronize_rcu();
                if (free_by_rcu)
                        free(free_by_rcu);

                /*
                 * Set "removed" flag in dummy nodes about to be removed.
                 * Unlink all now-logically-removed dummy node pointers.
                 * Concurrent add/remove operations are helping us do
                 * the gc.
                 */
                remove_table(ht, i, len);

                free_by_rcu = ht->t.tbl[i];

                dbg_printf("fini new size: %lu\n", 1UL << i);
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                        break;
        }

        if (free_by_rcu) {
                ht->cds_lfht_synchronize_rcu();
                free(free_by_rcu);
        }
}

static
void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
{
        struct _cds_lfht_node *prev, *node;
        unsigned long order, len, i, j;

        ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
        assert(ht->t.tbl[0]);

        dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0UL, 0UL, 0UL);
        ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
        ht->t.tbl[0]->nodes[0].reverse_hash = 0;

        for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
                len = 1UL << (order - 1);
                if (order <= ht->min_alloc_order) {
                        ht->t.tbl[order] = (void *)(ht->t.tbl[0]->nodes + len);
                } else {
                        ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
                        assert(ht->t.tbl[order]);
                }

                i = 0;
                prev = ht->t.tbl[i]->nodes;
                for (j = 0; j < len; j++) {
                        if (j & (j - 1)) {      /* Between power of 2 */
                                prev++;
                        } else if (j) {         /* At each power of 2 */
                                i++;
                                prev = ht->t.tbl[i]->nodes;
                        }

                        node = &ht->t.tbl[order]->nodes[j];
                        dbg_printf("create dummy: order %lu index %lu hash %lu\n",
                                   order, j, j + len);
                        node->next = prev->next;
                        assert(is_dummy(node->next));
                        node->reverse_hash = bit_reverse_ulong(j + len);
                        prev->next = flag_dummy((struct cds_lfht_node *)node);
                }
        }
}

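/*
 * Worked example (illustration only): for order 2 (len 2), the loop
 * above creates the dummy nodes for hashes 2 and 3 (j + len for
 * j = 0, 1) and splices them right after the order-0 and order-1 dummy
 * nodes respectively: dummy 2 follows dummy 0 and dummy 3 follows
 * dummy 1, matching the reversed-bit split order from the header
 * comment.
 */
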
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
                        cds_lfht_compare_fct compare_fct,
                        unsigned long hash_seed,
                        unsigned long init_size,
                        unsigned long min_alloc_size,
                        int flags,
                        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                                        void (*func)(struct rcu_head *head)),
                        void (*cds_lfht_synchronize_rcu)(void),
                        void (*cds_lfht_rcu_read_lock)(void),
                        void (*cds_lfht_rcu_read_unlock)(void),
                        void (*cds_lfht_rcu_thread_offline)(void),
                        void (*cds_lfht_rcu_thread_online)(void),
                        void (*cds_lfht_rcu_register_thread)(void),
                        void (*cds_lfht_rcu_unregister_thread)(void),
                        pthread_attr_t *attr)
{
        struct cds_lfht *ht;
        unsigned long order;

        /* min_alloc_size must be power of two */
        if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
                return NULL;
        /* init_size must be power of two */
        if (!init_size || (init_size & (init_size - 1)))
                return NULL;
        min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
        init_size = max(init_size, min_alloc_size);
        ht = calloc(1, sizeof(struct cds_lfht));
        assert(ht);
        ht->hash_fct = hash_fct;
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
        ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
        ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
        ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
        ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
        ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
        ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
        ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
        ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
        ht->resize_attr = attr;
        ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
        ht->flags = flags;
        order = get_count_order_ulong(init_size);
        ht->t.resize_target = 1UL << order;
        cds_lfht_create_dummy(ht, 1UL << order);
        ht->t.size = 1UL << order;
        ht->min_alloc_size = min_alloc_size;
        ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
        return ht;
}

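/*
 * Usage sketch (illustration only, with assumptions flagged): callers
 * normally go through the cds_lfht_new() wrapper in urcu/rculfhash.h,
 * which fills in the RCU flavor's primitives. Wired by hand against
 * the QSBR flavor (urcu-qsbr), it would look roughly like:
 *
 *      ht = _cds_lfht_new(app_hash, app_compare, 0x42UL,
 *                      1UL << 8, 1UL, CDS_LFHT_AUTO_RESIZE,
 *                      call_rcu, synchronize_rcu,
 *                      rcu_read_lock, rcu_read_unlock,
 *                      rcu_thread_offline, rcu_thread_online,
 *                      rcu_register_thread, rcu_unregister_thread,
 *                      NULL);
 *
 * where app_hash and app_compare are hypothetical application-provided
 * functions matching cds_lfht_hash_fct and cds_lfht_compare_fct.
 */
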
adc0de68 MD |
1365 | void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len, |
1366 | struct cds_lfht_iter *iter) | |
2ed95849 | 1367 | { |
bb7b2f26 | 1368 | struct cds_lfht_node *node, *next, *dummy_node; |
14044b37 | 1369 | struct _cds_lfht_node *lookup; |
f4a9cc0b | 1370 | unsigned long hash, reverse_hash, size; |
2ed95849 | 1371 | |
732ad076 | 1372 | hash = ht->hash_fct(key, key_len, ht->hash_seed); |
abc490a1 | 1373 | reverse_hash = bit_reverse_ulong(hash); |
464a1ec9 | 1374 | |
4105056a | 1375 | size = rcu_dereference(ht->t.size); |
f4a9cc0b | 1376 | lookup = lookup_bucket(ht, size, hash); |
bb7b2f26 MD |
1377 | dummy_node = (struct cds_lfht_node *) lookup; |
1378 | /* We can always skip the dummy node initially */ | |
1379 | node = rcu_dereference(dummy_node->p.next); | |
bb7b2f26 | 1380 | node = clear_flag(node); |
2ed95849 | 1381 | for (;;) { |
bb7b2f26 | 1382 | if (unlikely(is_end(node))) { |
96ad1112 | 1383 | node = next = NULL; |
abc490a1 | 1384 | break; |
bb7b2f26 | 1385 | } |
cc4fcb10 | 1386 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
96ad1112 | 1387 | node = next = NULL; |
abc490a1 | 1388 | break; |
2ed95849 | 1389 | } |
1b81fe1a | 1390 | next = rcu_dereference(node->p.next); |
adc0de68 | 1391 | if (likely(!is_removed(next)) |
1b81fe1a | 1392 | && !is_dummy(next) |
ae450da3 | 1393 | && clear_flag(node)->p.reverse_hash == reverse_hash |
49c2e2d6 | 1394 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { |
273399de | 1395 | break; |
2ed95849 | 1396 | } |
1b81fe1a | 1397 | node = clear_flag(next); |
2ed95849 | 1398 | } |
1b81fe1a | 1399 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); |
adc0de68 MD |
1400 | iter->node = node; |
1401 | iter->next = next; | |
abc490a1 | 1402 | } |
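
/*
 * Usage sketch (illustration only): looking up a key. Lookups must
 * run inside an RCU read-side critical section, and the returned node
 * may only be dereferenced within it. "struct mynode" is a
 * hypothetical user structure embedding a cds_lfht_node, in the style
 * of the test programs shipped with userspace RCU.
 */
struct mynode {
	int value;			/* user payload, example only */
	struct cds_lfht_node node;	/* chaining in the hash table */
	struct rcu_head head;		/* for call_rcu() reclaim */
};

static struct mynode *example_lookup(struct cds_lfht *ht,
		void *key, size_t key_len)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;

	/* Caller must hold rcu_read_lock(). */
	cds_lfht_lookup(ht, key, key_len, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	if (!ht_node)
		return NULL;
	return caa_container_of(ht_node, struct mynode, node);
}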
e0ba718a | 1403 | |
3883c0e5 | 1404 | void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter) |
a481e5ff | 1405 | { |
adc0de68 | 1406 | struct cds_lfht_node *node, *next; |
a481e5ff MD |
1407 | unsigned long reverse_hash; |
1408 | void *key; | |
1409 | size_t key_len; | |
1410 | ||
adc0de68 | 1411 | node = iter->node; |
a481e5ff MD |
1412 | reverse_hash = node->p.reverse_hash; |
1413 | key = node->key; | |
1414 | key_len = node->key_len; | |
adc0de68 | 1415 | next = iter->next; |
a481e5ff MD |
1416 | node = clear_flag(next); |
1417 | ||
1418 | for (;;) { | |
bb7b2f26 | 1419 | if (unlikely(is_end(node))) { |
96ad1112 | 1420 | node = next = NULL; |
a481e5ff | 1421 | break; |
bb7b2f26 | 1422 | } |
a481e5ff | 1423 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
96ad1112 | 1424 | node = next = NULL; |
a481e5ff MD |
1425 | break; |
1426 | } | |
1427 | next = rcu_dereference(node->p.next); | |
adc0de68 | 1428 | if (likely(!is_removed(next)) |
a481e5ff MD |
1429 | && !is_dummy(next) |
1430 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { | |
1431 | break; | |
1432 | } | |
1433 | node = clear_flag(next); | |
1434 | } | |
1435 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); | |
adc0de68 MD |
1436 | iter->node = node; |
1437 | iter->next = next; | |
a481e5ff MD |
1438 | } |
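
/*
 * Usage sketch (illustration only): visiting every node that shares a
 * key by chaining cds_lfht_lookup() with cds_lfht_next_duplicate()
 * until the iterator runs out, all within one read-side critical
 * section. Reuses the hypothetical struct mynode from the lookup
 * sketch above.
 */
static void example_visit_duplicates(struct cds_lfht *ht,
		void *key, size_t key_len)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;

	rcu_read_lock();
	cds_lfht_lookup(ht, key, key_len, &iter);
	while ((ht_node = cds_lfht_iter_get_node(&iter)) != NULL) {
		struct mynode *n;

		n = caa_container_of(ht_node, struct mynode, node);
		(void) n;	/* process one duplicate here */
		cds_lfht_next_duplicate(ht, &iter);
	}
	rcu_read_unlock();
}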
1439 | ||
4e9b9fbf MD |
1440 | void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) |
1441 | { | |
1442 | struct cds_lfht_node *node, *next; | |
1443 | ||
853395e1 | 1444 | node = clear_flag(iter->next); |
4e9b9fbf MD |
1445 | for (;;) { |
1446 | if (unlikely(is_end(node))) { | |
1447 | node = next = NULL; | |
1448 | break; | |
1449 | } | |
1450 | next = rcu_dereference(node->p.next); | |
1451 | if (likely(!is_removed(next)) | |
1452 | && !is_dummy(next)) { | |
1453 | break; | |
1454 | } | |
1455 | node = clear_flag(next); | |
1456 | } | |
1457 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); | |
1458 | iter->node = node; | |
1459 | iter->next = next; | |
1460 | } | |
1461 | ||
1462 | void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter) | |
1463 | { | |
1464 | struct _cds_lfht_node *lookup; | |
1465 | ||
1466 | /* | |
1467 | * Get next after first dummy node. The first dummy node is the | |
1468 | * first node of the linked list. | |
1469 | */ | |
1470 | lookup = &ht->t.tbl[0]->nodes[0]; | |
853395e1 | 1471 | iter->next = lookup->next; |
4e9b9fbf MD |
1472 | cds_lfht_next(ht, iter); |
1473 | } | |
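
/*
 * Usage sketch (illustration only): a whole-table traversal with
 * cds_lfht_first()/cds_lfht_next(). The iteration yields only user
 * nodes (dummy bucket nodes are skipped) and must stay within a
 * single read-side critical section.
 */
static unsigned long example_walk(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;
	unsigned long seen = 0;

	rcu_read_lock();
	cds_lfht_first(ht, &iter);
	while ((ht_node = cds_lfht_iter_get_node(&iter)) != NULL) {
		seen++;		/* process the node here */
		cds_lfht_next(ht, &iter);
	}
	rcu_read_unlock();
	return seen;
}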
1474 | ||
14044b37 | 1475 | void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) |
abc490a1 | 1476 | { |
4105056a | 1477 | unsigned long hash, size; |
ab7d5fc6 | 1478 | |
49c2e2d6 | 1479 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
cc4fcb10 | 1480 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
2ed95849 | 1481 | |
4105056a | 1482 | size = rcu_dereference(ht->t.size); |
83beee94 | 1483 | _cds_lfht_add(ht, size, node, NULL, 0); |
4105056a | 1484 | ht_count_add(ht, size); |
3eca1b8c MD |
1485 | } |
1486 | ||
14044b37 | 1487 | struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, |
48ed1c18 | 1488 | struct cds_lfht_node *node) |
3eca1b8c | 1489 | { |
4105056a | 1490 | unsigned long hash, size; |
83beee94 | 1491 | struct cds_lfht_iter iter; |
3eca1b8c | 1492 | |
49c2e2d6 | 1493 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
cc4fcb10 | 1494 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
3eca1b8c | 1495 | |
4105056a | 1496 | size = rcu_dereference(ht->t.size); |
83beee94 MD |
1497 | _cds_lfht_add(ht, size, node, &iter, 0); |
1498 | if (iter.node == node) | |
4105056a | 1499 | ht_count_add(ht, size); |
83beee94 | 1500 | return iter.node; |
2ed95849 MD |
1501 | } |
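
/*
 * Usage sketch (illustration only): inserting a node. cds_lfht_add()
 * always inserts (duplicate keys allowed); cds_lfht_add_unique(),
 * shown here, keeps the first node for a given key and returns the
 * pre-existing node on conflict. Both must be called between
 * rcu_read_lock() and rcu_read_unlock(). Reuses the hypothetical
 * struct mynode above; the key is assumed to stay valid as long as
 * the node is in the table.
 */
static int example_insert_unique(struct cds_lfht *ht, struct mynode *n,
		void *key, size_t key_len)
{
	struct cds_lfht_node *ret_node;

	cds_lfht_node_init(&n->node, key, key_len);
	rcu_read_lock();
	ret_node = cds_lfht_add_unique(ht, &n->node);
	rcu_read_unlock();
	/* ret_node == &n->node means the node was added. */
	return ret_node == &n->node;
}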
1502 | ||
9357c415 | 1503 | struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht, |
48ed1c18 MD |
1504 | struct cds_lfht_node *node) |
1505 | { | |
1506 | unsigned long hash, size; | |
83beee94 | 1507 | struct cds_lfht_iter iter; |
48ed1c18 MD |
1508 | |
1509 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); | |
1510 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); | |
1511 | ||
1512 | size = rcu_dereference(ht->t.size); | |
83beee94 MD |
1513 | for (;;) { |
1514 | _cds_lfht_add(ht, size, node, &iter, 0); | |
1515 | if (iter.node == node) { | |
1516 | ht_count_add(ht, size); | |
1517 | return NULL; | |
1518 | } | |
1519 | ||
1520 | if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node)) | |
1521 | return iter.node; | |
1522 | } | |
48ed1c18 MD |
1523 | } |
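
/*
 * Usage sketch (illustration only): cds_lfht_add_replace() either
 * inserts the node (and returns NULL) or atomically replaces the node
 * holding the same key (and returns the replaced node). The replaced
 * node may still be read by concurrent readers, so its memory must
 * only be reclaimed after a grace period; free_mynode_cb() is a
 * hypothetical call_rcu() callback.
 */
#include <stdlib.h>

static void free_mynode_cb(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, head));
}

static void example_upsert(struct cds_lfht *ht, struct mynode *n,
		void *key, size_t key_len)
{
	struct cds_lfht_node *old_node;
	struct mynode *old;

	cds_lfht_node_init(&n->node, key, key_len);
	rcu_read_lock();
	old_node = cds_lfht_add_replace(ht, &n->node);
	rcu_read_unlock();
	if (old_node) {
		old = caa_container_of(old_node, struct mynode, node);
		call_rcu(&old->head, free_mynode_cb);	/* deferred free */
	}
}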
1524 | ||
9357c415 MD |
1525 | int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter, |
1526 | struct cds_lfht_node *new_node) | |
1527 | { | |
1528 | unsigned long size; | |
1529 | ||
1530 | size = rcu_dereference(ht->t.size); | |
1531 | return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next, | |
1532 | new_node); | |
1533 | } | |
1534 | ||
1535 | int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter) | |
2ed95849 | 1536 | { |
4105056a | 1537 | unsigned long size; |
df44348d | 1538 | int ret; |
abc490a1 | 1539 | |
4105056a | 1540 | size = rcu_dereference(ht->t.size); |
9357c415 | 1541 | ret = _cds_lfht_del(ht, size, iter->node, 0); |
df44348d | 1542 | if (!ret) |
860d07e8 | 1543 | ht_count_del(ht, size); |
df44348d | 1544 | return ret; |
2ed95849 | 1545 | } |
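
/*
 * Usage sketch (illustration only): removing a node found by lookup.
 * cds_lfht_del() returns 0 when this caller performed the logical
 * removal, and a negative value such as -ENOENT when the node was
 * already removed; the memory may only be reclaimed after a grace
 * period, hence call_rcu() with the hypothetical free_mynode_cb()
 * from the sketch above.
 */
#include <errno.h>

static int example_delete(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;
	int ret = -ENOENT;

	rcu_read_lock();
	cds_lfht_lookup(ht, key, key_len, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	if (ht_node)
		ret = cds_lfht_del(ht, &iter);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&caa_container_of(ht_node, struct mynode,
				node)->head, free_mynode_cb);
	return ret;
}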
ab7d5fc6 | 1546 | |
abc490a1 | 1547 | static |
14044b37 | 1548 | int cds_lfht_delete_dummy(struct cds_lfht *ht) |
674f7a69 | 1549 | { |
14044b37 MD |
1550 | struct cds_lfht_node *node; |
1551 | struct _cds_lfht_node *lookup; | |
4105056a | 1552 | unsigned long order, i, size; |
674f7a69 | 1553 | |
abc490a1 | 1554 | /* Check that the table is empty */ |
4105056a | 1555 | lookup = &ht->t.tbl[0]->nodes[0]; |
14044b37 | 1556 | node = (struct cds_lfht_node *) lookup; |
abc490a1 | 1557 | do { |
1b81fe1a MD |
1558 | node = clear_flag(node)->p.next; |
1559 | if (!is_dummy(node)) | |
abc490a1 | 1560 | return -EPERM; |
273399de | 1561 | assert(!is_removed(node)); |
bb7b2f26 | 1562 | } while (!is_end(node)); |
4105056a MD |
1563 | /* |
1564 | * size accessed without rcu_dereference because hash table is | |
1565 | * being destroyed. | |
1566 | */ | |
1567 | size = ht->t.size; | |
abc490a1 | 1568 | /* Internal sanity check: all remaining nodes should be dummy nodes */
4105056a | 1569 | for (order = 0; order < get_count_order_ulong(size) + 1; order++) { |
24365af7 MD |
1570 | unsigned long len; |
1571 | ||
1572 | len = !order ? 1 : 1UL << (order - 1); | |
1573 | for (i = 0; i < len; i++) { | |
f0c29ed7 | 1574 | dbg_printf("delete order %lu i %lu hash %lu\n", |
24365af7 | 1575 | order, i, |
4105056a MD |
1576 | bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash)); |
1577 | assert(is_dummy(ht->t.tbl[order]->nodes[i].next)); | |
24365af7 | 1578 | } |
5488222b LJ |
1579 | |
1580 | if (order == ht->min_alloc_order) | |
1581 | poison_free(ht->t.tbl[0]); | |
1582 | else if (order > ht->min_alloc_order) | |
1583 | poison_free(ht->t.tbl[order]); | |
1584 | /* Nothing to delete for order < ht->min_alloc_order */ | |
674f7a69 | 1585 | } |
abc490a1 | 1586 | return 0; |
674f7a69 MD |
1587 | } |
1588 | ||
1589 | /* | |
1590 | * Should only be called when no concurrent readers or writers can |
1591 | * possibly access the table. | |
1592 | */ | |
b7d619b0 | 1593 | int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) |
674f7a69 | 1594 | { |
5e28c532 MD |
1595 | int ret; |
1596 | ||
848d4088 | 1597 | /* Wait for in-flight resize operations to complete */ |
24953e08 MD |
1598 | _CMM_STORE_SHARED(ht->in_progress_destroy, 1); |
1599 | cmm_smp_mb(); /* Store destroy before load resize */ | |
848d4088 MD |
1600 | while (uatomic_read(&ht->in_progress_resize)) |
1601 | poll(NULL, 0, 100); /* wait for 100ms */ | |
14044b37 | 1602 | ret = cds_lfht_delete_dummy(ht); |
abc490a1 MD |
1603 | if (ret) |
1604 | return ret; | |
df44348d | 1605 | free_per_cpu_items_count(ht->percpu_count); |
b7d619b0 MD |
1606 | if (attr) |
1607 | *attr = ht->resize_attr; | |
98808fb1 | 1608 | poison_free(ht); |
5e28c532 | 1609 | return ret; |
674f7a69 MD |
1610 | } |
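
/*
 * Usage sketch (illustration only): tearing down a table.
 * cds_lfht_destroy() refuses (-EPERM) if user nodes remain, so the
 * table is emptied first; quiescing all other readers and writers
 * beforehand is the caller's responsibility. Reuses the hypothetical
 * struct mynode and free_mynode_cb() from the sketches above.
 */
static int example_teardown(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;

	rcu_read_lock();
	cds_lfht_first(ht, &iter);
	while ((ht_node = cds_lfht_iter_get_node(&iter)) != NULL) {
		if (!cds_lfht_del(ht, &iter))
			call_rcu(&caa_container_of(ht_node,
					struct mynode, node)->head,
				free_mynode_cb);
		cds_lfht_next(ht, &iter);
	}
	rcu_read_unlock();
	return cds_lfht_destroy(ht, NULL);	/* NULL: discard attr */
}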
1611 | ||
14044b37 | 1612 | void cds_lfht_count_nodes(struct cds_lfht *ht, |
d933dd0e | 1613 | long *approx_before, |
273399de | 1614 | unsigned long *count, |
973e5e1b | 1615 | unsigned long *removed, |
d933dd0e | 1616 | long *approx_after) |
273399de | 1617 | { |
14044b37 MD |
1618 | struct cds_lfht_node *node, *next; |
1619 | struct _cds_lfht_node *lookup; | |
24365af7 | 1620 | unsigned long nr_dummy = 0; |
273399de | 1621 | |
7ed7682f | 1622 | *approx_before = 0; |
973e5e1b MD |
1623 | if (nr_cpus_mask >= 0) { |
1624 | int i; | |
1625 | ||
1626 | for (i = 0; i < nr_cpus_mask + 1; i++) { | |
1627 | *approx_before += uatomic_read(&ht->percpu_count[i].add); | |
1628 | *approx_before -= uatomic_read(&ht->percpu_count[i].del); | |
1629 | } | |
1630 | } | |
1631 | ||
273399de MD |
1632 | *count = 0; |
1633 | *removed = 0; | |
1634 | ||
24365af7 | 1635 | /* Count non-dummy nodes in the table */ |
4105056a | 1636 | lookup = &ht->t.tbl[0]->nodes[0]; |
14044b37 | 1637 | node = (struct cds_lfht_node *) lookup; |
273399de | 1638 | do { |
cc4fcb10 | 1639 | next = rcu_dereference(node->p.next); |
b198f0fd | 1640 | if (is_removed(next)) { |
973e5e1b MD |
1641 | if (!is_dummy(next)) |
1642 | (*removed)++; | |
1643 | else | |
1644 | (nr_dummy)++; | |
1b81fe1a | 1645 | } else if (!is_dummy(next)) |
273399de | 1646 | (*count)++; |
24365af7 MD |
1647 | else |
1648 | (nr_dummy)++; | |
273399de | 1649 | node = clear_flag(next); |
bb7b2f26 | 1650 | } while (!is_end(node)); |
f0c29ed7 | 1651 | dbg_printf("number of dummy nodes: %lu\n", nr_dummy); |
7ed7682f | 1652 | *approx_after = 0; |
973e5e1b MD |
1653 | if (nr_cpus_mask >= 0) { |
1654 | int i; | |
1655 | ||
1656 | for (i = 0; i < nr_cpus_mask + 1; i++) { | |
1657 | *approx_after += uatomic_read(&ht->percpu_count[i].add); | |
1658 | *approx_after -= uatomic_read(&ht->percpu_count[i].del); | |
1659 | } | |
1660 | } | |
273399de MD |
1661 | } |
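
/*
 * Usage sketch (illustration only): reading table statistics. The
 * approx_before/approx_after values are snapshots of the per-CPU
 * split-counters (approximate by design); count and removed come from
 * an actual list walk, so the call belongs in a read-side critical
 * section.
 */
#include <stdio.h>

static void example_stats(struct cds_lfht *ht)
{
	long approx_before, approx_after;
	unsigned long count, removed;

	rcu_read_lock();
	cds_lfht_count_nodes(ht, &approx_before, &count, &removed,
			&approx_after);
	rcu_read_unlock();
	printf("split-counter approximation %ld -> %ld, "
		"walked %lu live nodes, %lu logically removed\n",
		approx_before, approx_after, count, removed);
}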
1662 | ||
1475579c | 1663 | /* called with resize mutex held */ |
abc490a1 | 1664 | static |
4105056a | 1665 | void _do_cds_lfht_grow(struct cds_lfht *ht, |
1475579c | 1666 | unsigned long old_size, unsigned long new_size) |
abc490a1 | 1667 | { |
1475579c | 1668 | unsigned long old_order, new_order; |
1475579c | 1669 | |
93d46c39 LJ |
1670 | old_order = get_count_order_ulong(old_size); |
1671 | new_order = get_count_order_ulong(new_size); | |
1a401918 LJ |
1672 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", |
1673 | old_size, old_order, new_size, new_order); | |
1475579c | 1674 | assert(new_size > old_size); |
93d46c39 | 1675 | init_table(ht, old_order + 1, new_order); |
abc490a1 MD |
1676 | } |
1677 | ||
1678 | /* called with resize mutex held */ | |
1679 | static | |
4105056a | 1680 | void _do_cds_lfht_shrink(struct cds_lfht *ht, |
1475579c | 1681 | unsigned long old_size, unsigned long new_size) |
464a1ec9 | 1682 | { |
1475579c | 1683 | unsigned long old_order, new_order; |
464a1ec9 | 1684 | |
5488222b | 1685 | new_size = max(new_size, ht->min_alloc_size); |
93d46c39 LJ |
1686 | old_order = get_count_order_ulong(old_size); |
1687 | new_order = get_count_order_ulong(new_size); | |
1a401918 LJ |
1688 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", |
1689 | old_size, old_order, new_size, new_order); | |
1475579c | 1690 | assert(new_size < old_size); |
1475579c | 1691 | |
4105056a | 1692 | /* Unlink and remove all dummy nodes of the orders being shrunk away. */
93d46c39 | 1693 | fini_table(ht, new_order + 1, old_order); |
464a1ec9 MD |
1694 | } |
1695 | ||
1475579c MD |
1696 | |
1697 | /* called with resize mutex held */ | |
1698 | static | |
1699 | void _do_cds_lfht_resize(struct cds_lfht *ht) | |
1700 | { | |
1701 | unsigned long new_size, old_size; | |
4105056a MD |
1702 | |
1703 | /* | |
1704 | * Resize the table; redo if the target size has changed under us.
1705 | */ | |
1706 | do { | |
d2be3620 MD |
1707 | assert(uatomic_read(&ht->in_progress_resize)); |
1708 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) | |
1709 | break; | |
4105056a MD |
1710 | ht->t.resize_initiated = 1; |
1711 | old_size = ht->t.size; | |
1712 | new_size = CMM_LOAD_SHARED(ht->t.resize_target); | |
1713 | if (old_size < new_size) | |
1714 | _do_cds_lfht_grow(ht, old_size, new_size); | |
1715 | else if (old_size > new_size) | |
1716 | _do_cds_lfht_shrink(ht, old_size, new_size); | |
1717 | ht->t.resize_initiated = 0; | |
1718 | /* write resize_initiated before read resize_target */ | |
1719 | cmm_smp_mb(); | |
4d676753 | 1720 | } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target)); |
1475579c MD |
1721 | } |
1722 | ||
abc490a1 | 1723 | static |
4105056a | 1724 | unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size, |
f9830efd | 1725 | int growth_order) |
464a1ec9 | 1726 | { |
4105056a MD |
1727 | return _uatomic_max(&ht->t.resize_target, |
1728 | size << growth_order); | |
464a1ec9 MD |
1729 | } |
1730 | ||
1475579c | 1731 | static |
4105056a | 1732 | void resize_target_update_count(struct cds_lfht *ht, |
b8af5011 | 1733 | unsigned long count) |
1475579c | 1734 | { |
5488222b | 1735 | count = max(count, ht->min_alloc_size); |
4105056a | 1736 | uatomic_set(&ht->t.resize_target, count); |
1475579c MD |
1737 | } |
1738 | ||
1739 | void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) | |
464a1ec9 | 1740 | { |
4105056a MD |
1741 | resize_target_update_count(ht, new_size); |
1742 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); | |
5f511391 | 1743 | ht->cds_lfht_rcu_thread_offline(); |
1475579c MD |
1744 | pthread_mutex_lock(&ht->resize_mutex); |
1745 | _do_cds_lfht_resize(ht); | |
1746 | pthread_mutex_unlock(&ht->resize_mutex); | |
5f511391 | 1747 | ht->cds_lfht_rcu_thread_online(); |
abc490a1 | 1748 | } |
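
/*
 * Usage sketch (illustration only): forcing a resize. The call blocks
 * until the resize completes; it takes the resize mutex and puts the
 * calling thread RCU-offline meanwhile, so it must not be invoked
 * from within a read-side critical section. The requested size is
 * effectively rounded to a power of two by the order computation, and
 * never goes below min_alloc_size.
 */
static void example_force_resize(struct cds_lfht *ht)
{
	cds_lfht_resize(ht, 4096);	/* grow (or shrink) to 4096 buckets */
}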
464a1ec9 | 1749 | |
abc490a1 MD |
1750 | static |
1751 | void do_resize_cb(struct rcu_head *head) | |
1752 | { | |
1753 | struct rcu_resize_work *work = | |
1754 | caa_container_of(head, struct rcu_resize_work, head); | |
14044b37 | 1755 | struct cds_lfht *ht = work->ht; |
abc490a1 | 1756 | |
5f511391 | 1757 | ht->cds_lfht_rcu_thread_offline(); |
abc490a1 | 1758 | pthread_mutex_lock(&ht->resize_mutex); |
14044b37 | 1759 | _do_cds_lfht_resize(ht); |
abc490a1 | 1760 | pthread_mutex_unlock(&ht->resize_mutex); |
5f511391 | 1761 | ht->cds_lfht_rcu_thread_online(); |
98808fb1 | 1762 | poison_free(work); |
848d4088 MD |
1763 | cmm_smp_mb(); /* finish resize before decrement */ |
1764 | uatomic_dec(&ht->in_progress_resize); | |
464a1ec9 MD |
1765 | } |
1766 | ||
abc490a1 | 1767 | static |
4105056a | 1768 | void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) |
ab7d5fc6 | 1769 | { |
abc490a1 | 1770 | struct rcu_resize_work *work; |
f9830efd | 1771 | unsigned long target_size; |
abc490a1 | 1772 | |
4105056a MD |
1773 | target_size = resize_target_update(ht, size, growth); |
1774 | /* Store resize_target before read resize_initiated */ | |
1775 | cmm_smp_mb(); | |
1776 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) { | |
848d4088 | 1777 | uatomic_inc(&ht->in_progress_resize); |
59290e9d | 1778 | cmm_smp_mb(); /* increment resize count before load destroy */ |
ed35e6d8 MD |
1779 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) { |
1780 | uatomic_dec(&ht->in_progress_resize); | |
59290e9d | 1781 | return; |
ed35e6d8 | 1782 | } |
f9830efd MD |
1783 | work = malloc(sizeof(*work)); |
1784 | work->ht = ht; | |
14044b37 | 1785 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); |
4105056a | 1786 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
f9830efd | 1787 | } |
ab7d5fc6 | 1788 | } |
3171717f | 1789 | |
f8994aee MD |
1790 | #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) |
1791 | ||
3171717f | 1792 | static |
4105056a | 1793 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
3171717f MD |
1794 | unsigned long count) |
1795 | { | |
1796 | struct rcu_resize_work *work; | |
3171717f | 1797 | |
b8af5011 MD |
1798 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
1799 | return; | |
4105056a MD |
1800 | resize_target_update_count(ht, count); |
1801 | /* Store resize_target before read resize_initiated */ | |
1802 | cmm_smp_mb(); | |
1803 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) { | |
3171717f | 1804 | uatomic_inc(&ht->in_progress_resize); |
59290e9d | 1805 | cmm_smp_mb(); /* increment resize count before load destroy */ |
ed35e6d8 MD |
1806 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) { |
1807 | uatomic_dec(&ht->in_progress_resize); | |
59290e9d | 1808 | return; |
ed35e6d8 | 1809 | } |
3171717f MD |
1810 | work = malloc(sizeof(*work)); |
1811 | work->ht = ht; | |
1812 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); | |
4105056a | 1813 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
3171717f MD |
1814 | } |
1815 | } | |
f8994aee MD |
1816 | |
1817 | #endif |