/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};
struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The index of a node within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * node size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */
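/*
 * Editorial worked example of the hysteresis described above, using
 * the 64-bit tables below: a linear node of the type with
 * max_child == 28 that grows to 29 children is reallocated as the
 * first pool type (min_child == 22). Shrinking back to 28 children
 * does not trigger reallocation, since 28 is still at or above that
 * pool type's min_child of 22; only dropping below 22 children moves
 * the node back to a smaller type. The min/max overlap is what
 * prevents reallocation ping-pong on cyclic add/removal around a
 * type boundary.
 */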
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}
/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
#define DECLARE_LINEAR_NODE(index) \
	struct { \
		uint8_t nr_child; \
		uint8_t child_value[ja_type_## index ##_max_linear_child]; \
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index) \
	struct { \
		struct { \
			uint8_t nr_child; \
			uint8_t child_value[ja_type_## index ##_max_linear_child]; \
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order]; \
	}
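/*
 * Editorial sketch of the resulting layout, assuming 64-bit pointers
 * and ja_type_1_max_linear_child == 3:
 *
 *	DECLARE_LINEAR_NODE(1):
 *	  offset 0: uint8_t nr_child;
 *	  offset 1: uint8_t child_value[3];
 *	  offset 8: struct cds_ja_inode_flag *child_ptr[3]; (pointer-aligned)
 *
 * The child_value[] array is scanned byte-wise for a matching key
 * byte; a matching index is then used in the pointer-aligned
 * child_ptr[] array. Pool nodes simply repeat this linear layout
 * (1 << nr_pool_order) times.
 */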
struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};
enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};
static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}
unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	unsigned long type_index = ja_node_type(node);
	const struct cds_ja_type *type;

	type = &ja_types[type_index];
	switch (type->type_class) {
	case RCU_JA_LINEAR:	/* fall-through */
	case RCU_JA_PIGEON:	/* fall-through */
	case RCU_JA_NULL:	/* fall-through */
	default:		/* fall-through */
		return _ja_node_mask_ptr(node);
	case RCU_JA_POOL:
		switch (type->nr_pool_order) {
		case 1:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
		case 2:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
		default:
			assert(0);
			return NULL;
		}
	}
}
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}
void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	free(node);
	uatomic_inc(&ja->nr_nodes_freed);
}
#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}
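/*
 * Editorial example: JA_ALIGN rounds up to the next multiple of a
 * power-of-two alignment, JA_FLOOR rounds down. align_ptr_size() uses
 * JA_ALIGN to locate the pointer array that follows the byte-sized
 * value array in linear nodes. This check function is illustrative
 * only and is not part of the original library.
 */
static inline __attribute__((unused))
void align_example_check(void)
{
	assert(JA_ALIGN(13UL, 8UL) == 16UL);	/* round up */
	assert(JA_ALIGN(16UL, 8UL) == 16UL);	/* already aligned */
	assert(JA_FLOOR(13UL, 8UL) == 8UL);	/* round down */
}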
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}
/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}
static
struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i, match_idx;
	int match_v = -1;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (v < n && (int) v > match_v) {
			match_v = v;
			match_idx = i;
		}
	}
	if (match_v < 0) {
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}
static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}
static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}
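/*
 * Editorial worked example of the 2D pool selection above: with
 * bitsel[0] = 5 and bitsel[1] = 2, child value n = 0x24 (binary
 * 00100100) yields index[0] = (((0x24 >> 5) & 1) << 1) = 2 and
 * index[1] = (0x24 >> 2) & 1 = 1, so rindex = 3: the child lives in
 * the fourth of the four linear pools. The two selected bits are
 * chosen at recompaction time (see ja_node_sum_distribution_2d below)
 * so that the four pools stay balanced.
 */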
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}
static
struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	unsigned int pool_nr;
	int match_v = -1;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (v < n && (int) v > match_v) {
				match_v = v;
				match_node_flag = iter;
			}
		}
	}
	return match_node_flag;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);

	/* n - 1 is first value left of n */
	for (i = n - 1; i >= 0; i--) {
		child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
		child_node_flag = rcu_dereference(*child_node_flag_ptr);
		if (child_node_flag) {
			dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
				child_node_flag);
			return child_node_flag;
		}
	}
	return NULL;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}
/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
		unsigned int n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_left(type, node, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_left(type, node, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_left(type, node, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
{
	return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
}
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;	/* reuse position with NULL pointer */
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
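/*
 * Editorial note on ordering: the write barrier documented above
 * ("write pointer and value before nr_child") pairs with the
 * cmm_smp_rmb() in ja_linear_node_get_nth(): the writer publishes the
 * new value and pointer before incrementing nr_child, so a reader
 * that observes the incremented nr_child is guaranteed to also see
 * the matching value/pointer pair.
 */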
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}
static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}
/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}
static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}
static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}
/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}
}
/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as equal a number
 * of elements as possible.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}
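/*
 * Editorial worked example for the 1D selection above: for child
 * values { 0x01, 0x03, 0x05 } (distrib_nr_child = 3), bit 0 is set in
 * all three values (nr_one[0] = 3, distance |6 - 3| = 3), bit 1 only
 * in 0x03 (nr_one[1] = 1, distance |2 - 3| = 1), and bit 2 only in
 * 0x05 (distance 1 as well). Bit 1 is encountered first with the
 * minimal distance and is selected: it splits the children 1/2, the
 * best balance achievable for an odd population.
 */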
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as equal a number of elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_11[bit_i][bit_j]++;
						}
						if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_10[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_01[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_11[bit_i][bit_j]++;
				}
				if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_10[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_01[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_00[bit_i][bit_j]++;
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worst distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worst distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}
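/*
 * Editorial note: unlike the 1D case, each candidate bit pair above is
 * scored by the _worst_ of its four sub-distribution distances
 * (11/10/01/00), and the pair minimizing that worst case wins. This
 * keeps any single one of the four linear pools from filling up long
 * before the other three do.
 */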
static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	for (;;) {
		type = &ja_types[type_index];
		if (nr_nodes < type->min_child)
			type_index--;
		else if (nr_nodes > type->max_child)
			type_index++;
		else
			break;
	}
	return type_index;
}
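/*
 * Editorial worked example (64-bit tables): starting from the linear
 * type with max_child == 28 and asking for room for 30 children walks
 * forward to the first pool type (max_child == 54); conversely,
 * dropping to 20 children from that pool type walks back, since 20 is
 * below its min_child of 22. The min/max overlap between adjacent
 * types provides the hysteresis described near the type tables above.
 */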
/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		int level)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node = NULL;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
	int ret;
	int fallback = 0;

	old_node_flag = *old_node_flag_ptr;

	/*
	 * Need to find nearest type index even for ADD_SAME, because
	 * this recompaction, when applied to linear nodes, will garbage
	 * collect dummy (NULL) entries, and can therefore cause a few
	 * linear representations to be skipped.
	 */
	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child + 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child + 1);
		break;
	case JA_RECOMPACT_ADD_NEXT:
		if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
			new_type_index = 0;
			dbg_printf("Recompact for NULL\n");
		} else {
			new_type_index = find_nearest_type_index(old_type_index,
				shadow_node->nr_child + 1);
			dbg_printf("Recompact for node with %u children\n",
				shadow_node->nr_child + 1);
		}
		break;
	case JA_RECOMPACT_DEL:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child - 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child - 1);
		break;
	default:
		assert(0);
	}

retry:		/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
			old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	if (new_type_index != NODE_INDEX_NULL) {
		new_node = alloc_cds_ja_node(ja, new_type);
		if (!new_node)
			return -ENOMEM;

		if (new_type->type_class == RCU_JA_POOL) {
			switch (new_type->nr_pool_order) {
			case 1:
			{
				unsigned int node_distrib_bitsel;

				node_distrib_bitsel =
					ja_node_sum_distribution_1d(mode, ja,
						old_type_index, old_type,
						old_node, shadow_node,
						n, child_node_flag,
						nullify_node_flag_ptr);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				new_node_flag = ja_node_flag_pool_1d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			case 2:
			{
				unsigned int node_distrib_bitsel[2];

				ja_node_sum_distribution_2d(mode, ja,
					old_type_index, old_type,
					old_node, shadow_node,
					n, child_node_flag,
					nullify_node_flag_ptr,
					node_distrib_bitsel);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
				new_node_flag = ja_node_flag_pool_2d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			default:
				assert(0);
			}
		} else {
			new_node_flag = ja_node_flag(new_node, new_type_index);
		}

		dbg_printf("Recompact inherit lock from %p\n", shadow_node);
		new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
		if (!new_shadow_node) {
			free_cds_ja_node(ja, new_node);
			return -ENOMEM;
		}
		if (fallback)
			new_shadow_node->fallback_removal_count =
						JA_FALLBACK_REMOVAL_COUNT;
	} else {
		new_node = NULL;
		new_node_flag = NULL;
	}

	assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);

	if (new_type_index == NODE_INDEX_NULL)
		goto skip_copy;

	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		return -EINVAL;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* add node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (old_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}

	return 0;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);

		/* Fall-through */
	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		} else {
			new_type_index++;
			dbg_printf("Add fallback to type %d\n", new_type_index);
			uatomic_inc(&ja->nr_fallback);
			fallback = 1;
			goto retry;
		}
		break;
	}
	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node,
		int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	switch (ret) {
	case -ENOSPC:
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	case -ERANGE:
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	}
	return ret;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr,	/* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n, int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr, level);
	}
	return ret;
}
struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;
	struct cds_hlist_head head = { NULL };

	if (caa_unlikely(key > ja->key_max))
		return head;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return head;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return head;
	}

	/* Last level lookup succeeded. We got an actual match. */
	head.next = (struct cds_hlist_node *) node_flag;
	return head;
}
struct cds_hlist_head cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
{
	int tree_depth, level;
	struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
	struct cds_hlist_head head = { NULL };

	if (caa_unlikely(key > ja->key_max || !key))
		return head;

	memset(cur_node_depth, 0, sizeof(cur_node_depth));
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);
	cur_node_depth[0] = node_flag;

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return head;

	for (level = 1; level < tree_depth; level++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		if (!ja_node_ptr(node_flag))
			break;
		cur_node_depth[level] = node_flag;
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got an equal match. */
		head.next = (struct cds_hlist_node *) node_flag;
		return head;
	}

	/*
	 * Find highest value left of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left of
	 * ours, go one level up, seek highest value left of current
	 * (recursively), and when we find one, get the rightmost child
	 * of its rightmost child (recursively).
	 */
	for (; level > 0; level--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_left(cur_node_depth[level - 1],
				iter_key);
		/* If found left sibling, find rightmost child. */
		if (ja_node_ptr(node_flag))
			break;
	}

	if (!level) {
		/* Reached the root and could not find a left sibling. */
		return head;
	}

	level++;
	/* Find rightmost child of rightmost child (recursively). */
	for (; level < tree_depth; level++) {
		node_flag = ja_node_get_rightmost(node_flag);
		/* Stop if no child is present. */
		if (!ja_node_ptr(node_flag))
			break;
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got a "lower than" match. */
		head.next = (struct cds_hlist_node *) node_flag;
		return head;
	}

	/* No match found. */
	return head;
}
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag *parent_attach_node_flag,
		struct cds_ja_inode_flag **old_node_flag_ptr,
		struct cds_ja_inode_flag *old_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_hlist_head head;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
		level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);

	assert(!old_node_flag);
	if (attach_node_flag) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
	}
	if (parent_attach_node_flag) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_attach_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/*
	 * Perform a lookup query to handle the case where
	 * old_node_flag_ptr is NULL. We cannot use it to check if the
	 * node has been populated between RCU lookup and mutex
	 * acquisition.
	 */
	if (!old_node_flag_ptr) {
		uint8_t iter_key;
		struct cds_ja_inode_flag *lookup_node_flag;
		struct cds_ja_inode_flag **lookup_node_flag_ptr;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		lookup_node_flag = ja_node_get_nth(attach_node_flag,
			&lookup_node_flag_ptr,
			iter_key);
		if (lookup_node_flag) {
			ret = -EEXIST;
			goto unlock_parent;
		}
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	CDS_INIT_HLIST_HEAD(&head);
	cds_hlist_add_head_rcu(&child_node->list, &head);
	iter_node_flag = (struct cds_ja_inode_flag *) head.next;

	for (i = ja->tree_depth - 1; i >= (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
		dbg_printf("branch creation level %d, key %u\n",
				i, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL, i);
		if (ret) {
			dbg_printf("branch creation error %d\n", ret);
			goto check_error;
		}
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}
	assert(level > 0);

	/* Publish branch */
	if (level == 1) {
		/*
		 * Attaching to root node.
		 */
		rcu_assign_pointer(ja->root, iter_node_flag);
	} else {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		dbg_printf("publish branch at level %d, key %u\n",
				level - 1, (unsigned int) iter_key);
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = attach_node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node, level - 1);
		if (ret) {
			dbg_printf("branch publish error %d\n", ret);
			goto check_error;
		}
		/*
		 * Attach branch
		 */
		rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
	}

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}
/*
 * Lock the parent containing the hlist head pointer, and add node to list of
 * duplicates. Failure can happen if concurrent update changes the
 * parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_add_head_rcu(&node->list, (struct cds_hlist_head *) node_flag_ptr);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, new_node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, new_node);
	} else {
		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
			parent_attach_node_flag,
			attach_node_flag_ptr,
			attach_node_flag,
			new_node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;

	return ret;
}
int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	return _cds_ja_add(ja, key, new_node, NULL);
}
struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, new_node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return new_node;
}
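/*
 * Editorial usage sketch for the add/lookup API above. This fragment
 * is illustrative only and not part of the library; it assumes a Judy
 * array created with cds_ja_new() (declared in urcu/rcuja.h), an RCU
 * read-side critical section around the lookup, and embeds
 * struct cds_ja_node in a caller-defined structure:
 *
 *	struct my_item {
 *		struct cds_ja_node ja_node;
 *		uint64_t key;
 *	};
 *
 *	struct my_item *item = calloc(1, sizeof(*item));
 *	item->key = 42;
 *	ret = cds_ja_add(ja, item->key, &item->ja_node);
 *
 *	rcu_read_lock();
 *	head = cds_ja_lookup(ja, 42);
 *	// iterate the duplicates chained on the returned hlist head
 *	rcu_read_unlock();
 */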
/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -EAGAIN;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -EAGAIN;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -EAGAIN;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old/new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n, nr_branch - 1);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}
static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_hlist_node *hlist_node;
	struct cds_hlist_head hlist_head;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
	/*
	 * Retry if another thread removed all but one of duplicates
	 * since check (this check was performed without lock).
	 * Ensure that the node we are about to remove is still in the
	 * list (while holding lock).
	 */
	cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
		count++;
		/* FIXME: currently a work-around */
		hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
		if (hlist_node == &node->list)
			found++;
	}
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_del_rcu(&node->list);
	/*
	 * Validate that we indeed removed the node from linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
			**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);
	nr_snapshot = 0;

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
			node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			prev_node_flag_ptr);
	}

	/*
	 * We reached bottom of tree, try to find the node we are trying
	 * to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
			key);
		return -ENOENT;
	} else {
		struct cds_hlist_head hlist_head;
		struct cds_hlist_node *hlist_node;
		struct cds_ja_node *entry, *match = NULL;
		int count = 0;

		hlist_head.next =
			(struct cds_hlist_node *) ja_node_ptr(node_flag);
		cds_hlist_for_each_entry_rcu(entry,
				hlist_node,
				&hlist_head,
				list) {
			dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
			if (entry == node)
				match = entry;
			count++;
		}
		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		assert(count > 0);
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
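
/*
 * Deletion usage sketch (illustrative, reusing the hypothetical
 * struct my_item from the insertion example above): cds_ja_del only
 * unlinks the node; reclamation is deferred by the caller until a
 * grace period has elapsed, e.g. through the flavor's call_rcu().
 * struct cds_ja_node is assumed to embed a struct rcu_head named
 * "head", as used by rcuja_free_all_children() below.
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &item->ja_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&item->ja_node.head, free_my_item_cb);
 */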
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja, 0);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}
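
/*
 * Creation sketch (illustrative): key_bits selects the key space and
 * thus the tree depth (one level per byte of key, plus the root
 * pointer level). Assuming the public wrapper in urcu/rcuja.h
 * forwards here with the RCU flavor selected at compile time:
 *
 *	struct cds_ja *ja;
 *
 *	ja = cds_ja_new(64);	// keys in [0, UINT64_MAX], tree_depth 9
 *	if (!ja)
 *		abort();	// hypothetical error handling
 */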
/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*free_node_cb)(struct rcu_head *head))
{
	const struct rcu_flavor_struct *flavor;
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos, *tmp;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_hlist_head head;
				struct cds_ja_node *entry;
				struct cds_hlist_node *pos, *tmp;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
				if (!iter)
					continue;
				head.next = (struct cds_hlist_node *) iter;
				cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
					flavor->update_call_rcu(&entry->head, free_node_cb);
				}
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos, *tmp;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	case RCU_JA_NULL:
	default:
		assert(0);
	}
}
static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	unsigned int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, " %3u: %4lu\n",
			i, ja->node_fallback_count_distribution[i]);
	}
}
static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			nr_fallback,
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);

	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) (na - nf), na, nf);
		ret = -1;
	}
	return ret;
}
/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*free_node_cb)(struct rcu_head *head))
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
		free_node_cb);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}
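
/*
 * Teardown sketch (illustrative): the caller must first stop all
 * concurrent additions (see the comment above cds_ja_destroy), then
 * provide a callback that frees its enclosing structure.
 * "struct my_item" and free_my_item_cb are hypothetical, matching the
 * earlier usage sketches.
 *
 *	static void free_my_item_cb(struct rcu_head *head)
 *	{
 *		struct my_item *item =
 *			caa_container_of(head, struct my_item, ja_node.head);
 *
 *		free(item);
 *	}
 *
 *	...
 *	ret = cds_ja_destroy(ja, free_my_item_cb);
 *	assert(!ret);
 */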