/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <assert.h>
#include <inttypes.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop on reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following array is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the node size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */
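/*
 * Worked example (illustration, not part of the original comment): on
 * 64-bit, a node holding 30 children no longer fits the largest linear
 * type (28 children max), so it is stored in the first pool type; it
 * only shrinks back once its population drops below that pool type's
 * min_child, which provides the hysteresis described above.
 */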
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}
/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
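/*
 * Layout sketch for a linear node (derived from the accessors below):
 * u.data[0] holds nr_child, u.data[1..] holds the child values, and the
 * child pointers start at the next pointer-aligned offset past
 * values[max_linear_child].
 */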
#define DECLARE_LINEAR_NODE(index)							\
	struct {									\
		uint8_t nr_child;							\
		uint8_t child_value[ja_type_## index ##_max_linear_child];		\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)							\
	struct {									\
		struct {								\
			uint8_t nr_child;						\
			uint8_t child_value[ja_type_## index ##_max_linear_child];	\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];			\
	}
struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

static
unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
static
unsigned long nr_nodes_allocated, nr_nodes_freed;
static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	unsigned long type_index = ja_node_type(node);
	const struct cds_ja_type *type;

	type = &ja_types[type_index];
	switch (type->type_class) {
	case RCU_JA_LINEAR:	/* fall-through */
	case RCU_JA_PIGEON:	/* fall-through */
	case RCU_JA_NULL:	/* fall-through */
	default:		/* fall-through */
		return _ja_node_mask_ptr(node);
	case RCU_JA_POOL:
		switch (type->nr_pool_order) {
		case 1:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
		case 2:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
		default:
			assert(0);
			return NULL;
		}
	}
}
static
struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&nr_nodes_allocated);
	return p;
}

static
void free_cds_ja_node(struct cds_ja_inode *node)
{
	uatomic_inc(&nr_nodes_freed);
	free(node);
}
#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
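/*
 * For example, JA_ALIGN(13, 8) rounds 13 up to 16 and JA_FLOOR(13, 8)
 * rounds it down to 8; "align" must be a power of two.
 */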
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}
/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		struct cds_ja_inode_flag **child_node_flag_v,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(child_node_flag_ptr) && ptr)
		*child_node_flag_ptr = &pointers[i];
	if (caa_unlikely(child_node_flag_v) && ptr)
		*child_node_flag_v = ptr;
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}
static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}
static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		struct cds_ja_inode_flag **child_node_flag_v,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
			child_node_flag_v, node_flag_ptr, n);
}
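/*
 * Worked example for the 2-dimension pool index computed above (an
 * illustration, not from the original source): with bit selectors
 * bitsel[0] = 5 and bitsel[1] = 2, the key byte n = 0x24 (0b00100100)
 * has both bits set, so index[0] = 1 << 1 and index[1] = 1, giving
 * rindex = 3: the child is looked up in the fourth linear sub-pool.
 */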
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		struct cds_ja_inode_flag **child_node_flag_v,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag;
	struct cds_ja_inode_flag *child_node_flag_read;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag_read = rcu_dereference(*child_node_flag);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag);
	if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
		*child_node_flag_ptr = child_node_flag;
	if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
		*child_node_flag_v = child_node_flag_read;
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag;
	return child_node_flag_read;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
}
/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		struct cds_ja_inode_flag **child_node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				child_node_flag_ptr, child_node_flag,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				child_node_flag_ptr, child_node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				child_node_flag_ptr, child_node_flag,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}
static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}
/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}
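/*
 * Error summary for the set_nth helpers above: -EEXIST means the value
 * is already present, -ERANGE asks the caller to recompact keeping the
 * same node size (reusable NULL slots exist), and -ENOSPC asks for a
 * recompaction to the next (larger) node type.
 */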
static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		if (type->type_class == RCU_JA_LINEAR
				&& shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}
static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}
/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}
}
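/*
 * Error summary for the clear_ptr helpers above: -ENOENT means the
 * value was not found, and -EFBIG asks the caller to recompact the node
 * to a smaller type (see ja_node_clear_ptr below).
 */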
/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as equal a number
 * of elements as possible.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		uint8_t nr_child;
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		nr_child = shadow_node->nr_child;
		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}
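/*
 * Example (illustration only, not from the original source): for child
 * values { 0x00, 0x01, 0x02, 0x03 }, bit 0 and bit 1 each have two ones
 * out of four children, so their distance |2*2 - 4| is 0 and bit 0 is
 * selected, splitting the children evenly across the two linear pools.
 */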
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as equal a number of elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_11[bit_i][bit_j]++;
						}
						if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_10[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_01[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		uint8_t nr_child;
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		nr_child = shadow_node->nr_child;
		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_11[bit_i][bit_j]++;
				}
				if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_10[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_01[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_00[bit_i][bit_j]++;
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worst distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worst distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}
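/*
 * Illustration (not from the original source): with 8 children, a
 * perfectly balanced bit pair leaves 2 children in each of the four
 * sub-classes, giving a distance of (2 << 2) - 8 = 0; a pair that puts
 * 4 children in one sub-class scores (4 << 2) - 8 = 8 and loses to any
 * pair whose fullest sub-class is smaller.
 */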
/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node = NULL;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
	int ret;
	int fallback = 0;

	old_node_flag = *old_node_flag_ptr;

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		if (old_type->type_class == RCU_JA_POOL) {
			/*
			 * For pool type, try redistributing
			 * into a different distribution of same
			 * size if we have not reached limits.
			 */
			if (shadow_node->nr_child + 1 > old_type->max_child) {
				new_type_index = old_type_index + 1;
			} else if (shadow_node->nr_child + 1 < old_type->min_child) {
				new_type_index = old_type_index - 1;
			} else {
				new_type_index = old_type_index;
			}
		} else {
			new_type_index = old_type_index;
		}
		break;
	case JA_RECOMPACT_ADD_NEXT:
		if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
			new_type_index = 0;
		} else {
			if (old_type->type_class == RCU_JA_POOL) {
				/*
				 * For pool type, try redistributing
				 * into a different distribution of same
				 * size if we have not reached limits.
				 */
				if (shadow_node->nr_child + 1 > old_type->max_child) {
					new_type_index = old_type_index + 1;
				} else {
					new_type_index = old_type_index;
				}
			} else {
				new_type_index = old_type_index + 1;
			}
		}
		break;
	case JA_RECOMPACT_DEL:
		if (old_type_index == 0) {
			new_type_index = NODE_INDEX_NULL;
		} else {
			if (old_type->type_class == RCU_JA_POOL) {
				/*
				 * For pool type, try redistributing
				 * into a different distribution of same
				 * size if we have not reached limits.
				 */
				if (shadow_node->nr_child - 1 < old_type->min_child) {
					new_type_index = old_type_index - 1;
				} else {
					new_type_index = old_type_index;
				}
			} else {
				new_type_index = old_type_index - 1;
			}
		}
		break;
	default:
		assert(0);
		return -EINVAL;
	}

retry:		/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
			old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	if (new_type_index != NODE_INDEX_NULL) {
		new_node = alloc_cds_ja_node(new_type);
		if (!new_node) {
			return -ENOMEM;
		}

		if (new_type->type_class == RCU_JA_POOL) {
			switch (new_type->nr_pool_order) {
			case 1:
			{
				unsigned int node_distrib_bitsel;

				node_distrib_bitsel =
					ja_node_sum_distribution_1d(mode, ja,
						old_type_index, old_type,
						old_node, shadow_node,
						n, child_node_flag,
						nullify_node_flag_ptr);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				new_node_flag = ja_node_flag_pool_1d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			case 2:
			{
				unsigned int node_distrib_bitsel[2];

				ja_node_sum_distribution_2d(mode, ja,
					old_type_index, old_type,
					old_node, shadow_node,
					n, child_node_flag,
					nullify_node_flag_ptr,
					node_distrib_bitsel);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
				new_node_flag = ja_node_flag_pool_2d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			default:
				new_node_flag = NULL;
				assert(0);
			}
		} else {
			new_node_flag = ja_node_flag(new_node, new_type_index);
		}

		dbg_printf("Recompact inherit lock from %p\n", shadow_node);
		new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
		if (!new_shadow_node) {
			free_cds_ja_node(new_node);
			return -ENOMEM;
		}
		if (fallback)
			new_shadow_node->fallback_removal_count =
						JA_FALLBACK_REMOVAL_COUNT;
	} else {
		new_node = NULL;
		new_node_flag = NULL;
	}

	assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);

	if (new_type_index == NODE_INDEX_NULL)
		goto skip_copy;

	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		uint8_t nr_child;
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		nr_child = shadow_node->nr_child;
		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		return -EINVAL;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* add node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (old_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}

	return 0;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);
		break;

	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		} else {
			new_type_index++;
			dbg_printf("Add fallback to type %d\n", new_type_index);
			uatomic_inc(&ja->nr_fallback);
			fallback = 1;
			goto retry;
		}
	}

	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	if (ret == -ENOSPC) {
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL);
	} else if (ret == -ERANGE) {
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL);
	}
	return ret;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr,	/* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr);
	}
	return ret;
}
struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;
	struct cds_hlist_head head = { NULL };

	if (caa_unlikely(key > ja->key_max))
		return head;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return head;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
			iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return head;
	}

	/* Last level lookup succeeded. We got an actual match. */
	head.next = (struct cds_hlist_node *) node_flag;
	return head;
}
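/*
 * Minimal lookup usage sketch (illustration only; "my_entry" and its
 * embedded "node" member are hypothetical caller-side names):
 *
 *	struct cds_hlist_head head;
 *	struct my_entry *entry;
 *	struct cds_hlist_node *pos;
 *
 *	rcu_read_lock();
 *	head = cds_ja_lookup(ja, key);
 *	cds_hlist_for_each_entry_rcu(entry, pos, &head, node.list)
 *		use(entry);
 *	rcu_read_unlock();
 */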
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag *parent_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode *node = ja_node_ptr(node_flag);
	struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
	struct cds_hlist_head head;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
			level, node, node_flag);

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag);
	if (!shadow_node) {
		ret = -EAGAIN;
		goto end;
	}
	if (parent_node) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	CDS_INIT_HLIST_HEAD(&head);
	cds_hlist_add_head_rcu(&child_node->list, &head);
	iter_node_flag = (struct cds_ja_inode_flag *) head.next;

	for (i = ja->tree_depth; i > (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
		dbg_printf("branch creation level %d, key %u\n",
				i - 1, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
				iter_key,
				iter_node_flag,
				NULL);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	if (level > 1) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
				iter_key,
				iter_node_flag,
				shadow_node);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish new branch */
	dbg_printf("Publish branch %p, replacing %p\n",
			iter_node_flag, *attach_node_flag_ptr);
	rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}
/*
 * Lock the parent containing the hlist head pointer, and add node to list of
 * duplicates. Failure can happen if concurrent update changes the
 * parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_hlist_head *head,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_add_head_rcu(&node->list, head);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**node_flag_ptr;
	struct cds_ja_inode_flag *node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*attach_node_flag;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, new_node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	attach_node_flag_ptr = &ja->root;
	attach_node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;
	node_flag = rcu_dereference(ja->root);

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
				attach_node_flag_ptr, node_flag_ptr, node_flag);
		if (!ja_node_ptr(node_flag)) {
			ret = ja_attach_node(ja, attach_node_flag_ptr,
					attach_node_flag,
					node_flag_ptr,
					parent_node_flag,
					parent2_node_flag,
					key, i, new_node);
			if (ret == -EAGAIN || ret == -EEXIST)
				goto retry;
			else
				goto end;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&attach_node_flag_ptr,
				&attach_node_flag,
				&node_flag_ptr,
				iter_key);
		dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
				(unsigned int) iter_key, node_flag,
				attach_node_flag_ptr,
				node_flag_ptr);
	}

	/*
	 * We reached bottom of tree, simply add node to last internal
	 * level, or chain it if key is already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
				attach_node_flag_ptr, node_flag_ptr, node_flag);
		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				node_flag_ptr, parent_node_flag,
				parent2_node_flag, key, i, new_node);
	} else {
		ret = ja_chain_node(ja,
			parent_node_flag,
			node_flag_ptr,
			node_flag,
			(struct cds_hlist_head *) attach_node_flag_ptr,
			new_node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;
end:
	return ret;
}
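/*
 * Like cds_ja_del below, cds_ja_add is expected to be called from
 * within an RCU read-side critical section; on -EAGAIN/-EEXIST from the
 * helpers it retries the descent from the root.
 */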
/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the RCU read-side lock, and our node rules
 * ensure that when a matching value -> pointer pair is found in a node,
 * it is _NEVER_ changed for that node without recompaction, and
 * recompaction reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */
	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old/new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}
static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_hlist_node *hlist_node;
	struct cds_hlist_head hlist_head;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
	/*
	 * Retry if another thread removed all but one of duplicates
	 * since check (this check was performed without lock).
	 * Ensure that the node we are about to remove is still in the
	 * list (while holding lock).
	 */
	cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
		count++;
		/* FIXME: currently a work-around */
		hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
		if (hlist_node == &node->list)
			found++;
	}
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_del_rcu(&node->list);
	/*
	 * Validate that we indeed removed the node from linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
				node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&prev_node_flag_ptr,
				NULL,
				&node_flag_ptr,
				iter_key);
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
				(unsigned int) iter_key, node_flag,
				prev_node_flag_ptr);
	}

	/*
	 * We reached bottom of tree, try to find the node we are trying
	 * to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
				key);
		return -ENOENT;
	} else {
		struct cds_hlist_head hlist_head;
		struct cds_hlist_node *hlist_node;
		struct cds_ja_node *entry, *match = NULL;
		int count = 0;

		hlist_head.next =
			(struct cds_hlist_node *) ja_node_ptr(node_flag);
		cds_hlist_for_each_entry_rcu(entry,
				hlist_node,
				&hlist_head,
				list) {
			dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
			if (entry == node)
				match = entry;
			count++;
		}
		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leafs).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja);
	if (!root_shadow_node) {
		goto ht_node_error;
	}
	root_shadow_node->level = 0;

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}
/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*free_node_cb)(struct rcu_head *head))
{
	const struct rcu_flavor_struct *flavor;
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_hlist_head head;
				struct cds_ja_node *entry;
				struct cds_hlist_node *pos;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, node, j, &v, &iter);
				if (!iter)
					continue;
				head.next = (struct cds_hlist_node *) iter;
				cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
					flavor->update_call_rcu(&entry->head, free_node_cb);
				}
			}
		}
		break;
	}
	case RCU_JA_NULL:
		break;
	case RCU_JA_PIGEON:
	{
		uint8_t nr_child;
		unsigned int i;

		nr_child = shadow_node->nr_child;
		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	default:
		assert(0);
	}
}
static
void print_debug_fallback_distribution(void)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, "	%3u: %4lu\n",
			i, node_fallback_count_distribution[i]);
	}
}
/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*free_node_cb)(struct rcu_head *head))
{
	int ret;

	rcuja_shadow_prune(ja->ht,
			RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
			free_node_cb);
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;
	if (uatomic_read(&ja->nr_fallback))
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s)\n",
			uatomic_read(&ja->nr_fallback));
	fprintf(stderr, "Nodes allocated: %lu, Nodes freed: %lu. Fallback ratio: %g\n",
		uatomic_read(&nr_nodes_allocated),
		uatomic_read(&nr_nodes_freed),
		(double) uatomic_read(&ja->nr_fallback) / (double) uatomic_read(&nr_nodes_allocated));
	print_debug_fallback_distribution();
	free(ja);
	return 0;
}