/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
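/*
 * Editor's note: abs_int(-3) == 3, abs_int(5) == 5. The cast to int
 * matters: the argument may be an unsigned expression, such as
 * ((nr_one[bit_i] << 1U) - distrib_nr_child) in the bit distribution
 * code below, which must be reinterpreted as signed before taking the
 * absolute value.
 */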
enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};
struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The index of a node within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};
const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};
const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}
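/*
 * Editor's note: illustrative sketch, not part of the original code.
 * This is how an updater conceptually walks ja_types[] to pick the
 * smallest node type able to hold a given number of children, per the
 * comment above the arrays. The real code does this incrementally in
 * find_nearest_type_index() below, which also honors the min_child
 * hysteresis.
 */
#if 0
static
unsigned int example_pick_type_index(unsigned int nr_children)
{
	unsigned int idx;

	for (idx = 0; idx < CAA_ARRAY_SIZE(ja_types); idx++) {
		if (ja_types[idx].max_child >= nr_children)
			return idx;	/* smallest type that fits */
	}
	return NODE_INDEX_NULL;	/* unreachable: largest type holds 256 */
}
#endif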
/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
#define DECLARE_LINEAR_NODE(index)							\
	struct {									\
		uint8_t nr_child;							\
		uint8_t child_value[ja_type_## index ##_max_linear_child];		\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)							\
	struct {									\
		struct {								\
			uint8_t nr_child;						\
			uint8_t child_value[ja_type_## index ##_max_linear_child];	\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];			\
	}
struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};
enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,
	JA_LOOKUP_AE,
};

enum ja_direction {
	JA_LEFT,
	JA_RIGHT,
	JA_LEFTMOST,
	JA_RIGHTMOST,
};
static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}
static
unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}
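/*
 * Editor's note: node type and pointer share a single word. Nodes are
 * aligned on their own size, so the JA_TYPE_BITS low-order bits of the
 * node address are zero and can carry the type index. For example
 * (hypothetical address), a flag built by ja_node_flag(node, 2) on a
 * node at 0x7f0000001000 yields 0x7f0000001002: ja_node_type() extracts
 * 2 and _ja_node_mask_ptr() recovers 0x7f0000001000. Pool-type flags
 * additionally carry the bit-selector fields read by
 * ja_node_pool_1d_bitsel()/ja_node_pool_2d_bitsel() below.
 */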
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}
static
void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	free(node);
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
}
#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}
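/*
 * Editor's note: JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8.
 * align_ptr_size() rounds an address up to the next pointer-size
 * boundary; it is used below to locate the pointer array that follows
 * the (byte-granularity) value array inside linear nodes.
 */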
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}
/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}
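/*
 * Editor's note: layout of a linear node (or of one linear sub-pool of
 * a pool node), as read above:
 *
 *	u.data[0]			nr_child (one byte)
 *	u.data[1 .. max_linear_child]	child_value[] bytes
 *	(padding up to pointer alignment, see align_ptr_size())
 *	child_ptr[]			max_linear_child pointers
 */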
static
struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;
	int match_idx = -1, match_v;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (dir == JA_LEFT) {
			if ((int) v < n && (int) v > match_v) {
				match_v = v;
				match_idx = i;
			}
		} else {
			if ((int) v > n && (int) v < match_v) {
				match_v = v;
				match_idx = i;
			}
		}
	}

	if (match_idx < 0) {
		return NULL;
	}
	assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);

	*result_key = (uint8_t) match_v;
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}
static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}
static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}
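/*
 * Editor's note, worked example: for a 1D pool with bitsel == 3 and
 * n == 0x2A (0b101010), index = (0x2A >> 3) & 0x1 == 1, so the lookup
 * proceeds in the second linear sub-pool, located at
 * &node->u.data[1 << pool_size_order]. The 2D case combines two such
 * bits into a 2-bit index selecting one of four sub-pools.
 */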
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}
static
struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int pool_nr;
	int match_v;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (dir == JA_LEFT) {
				if ((int) v < n && (int) v > match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			} else {
				if ((int) v > n && (int) v < match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			}
		}
	}

	if (match_node_flag)
		*result_key = (uint8_t) match_v;
	return match_node_flag;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		/* n - 1 is first value left of n */
		for (i = n - 1; i >= 0; i--) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
					child_node_flag);
				*result_key = (uint8_t) i;
				return child_node_flag;
			}
		}
	} else {
		/* n + 1 is first value right of n */
		for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
					child_node_flag);
				*result_key = (uint8_t) i;
				return child_node_flag;
			}
		}
	}
	return NULL;
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}
/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_POOL:
		return ja_pool_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_direction(type, node, n, result_key, dir);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
		unsigned int n, uint8_t *result_key,
		enum ja_direction dir)
{
	return ja_node_get_direction(node_flag, n, result_key, dir);
}
static
struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
		uint8_t *result_key,
		enum ja_direction dir)
{
	switch (dir) {
	case JA_LEFTMOST:
		return ja_node_get_direction(node_flag,
				-1, result_key, JA_RIGHT);
	case JA_RIGHTMOST:
		return ja_node_get_direction(node_flag,
				JA_ENTRY_PER_NODE, result_key, JA_LEFT);
	default:
		assert(0);
		return (void *) -1UL;
	}
}
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
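/*
 * Editor's note: the cmm_smp_wmb() above pairs with the cmm_smp_rmb()
 * in ja_linear_node_get_nth(): a reader that observes the incremented
 * nr_child is guaranteed to also observe the value and pointer stores
 * that were published before it.
 */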
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}
static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}
/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}
static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}
static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}
static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}
/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}
}
/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as close to the
 * same number of elements as possible.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}
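/*
 * Editor's note, worked example: with children at keys
 * { 0x00, 0x01, 0x02, 0x03 }, bits 0 and 1 each see two ones
 * (nr_one == 2, distrib_nr_child == 4, distance |2*2 - 4| == 0), so
 * either splits the population exactly in half; bits 2 to 7 see zero
 * ones (distance 4) and would put every child in the same sub-pool.
 */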
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as close to the same number of elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (v & (1U << bit_i)) {
						if (v & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (v & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if (v & (1U << bit_i)) {
							if (v & (1U << bit_j)) {
								nr_2d_11[bit_i][bit_j]++;
							} else {
								nr_2d_10[bit_i][bit_j]++;
							}
						} else {
							if (v & (1U << bit_j)) {
								nr_2d_01[bit_i][bit_j]++;
							} else {
								nr_2d_00[bit_i][bit_j]++;
							}
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (i & (1U << bit_i)) {
						if (i & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (i & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if (n & (1U << bit_i)) {
					if (n & (1U << bit_j)) {
						nr_2d_11[bit_i][bit_j]++;
					} else {
						nr_2d_10[bit_i][bit_j]++;
					}
				} else {
					if (n & (1U << bit_j)) {
						nr_2d_01[bit_i][bit_j]++;
					} else {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worse distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worse distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}
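/*
 * Editor's note: this generalizes the 1D selector. Each candidate bit
 * pair (bit_i, bit_j) maps a key byte to one of four linear sub-pools
 * (counted in nr_2d_00/01/10/11), and the pair whose most-populated
 * quarter deviates least from a quarter of the population is kept.
 */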
static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	for (;;) {
		type = &ja_types[type_index];
		if (nr_nodes < type->min_child)
			type_index--;
		else if (nr_nodes > type->max_child)
			type_index++;
		else
			break;
	}
	return type_index;
}
/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		int level)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node = NULL;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
	int ret;
	int fallback = 0;

	old_node_flag = *old_node_flag_ptr;

	/*
	 * Need to find nearest type index even for ADD_SAME, because
	 * this recompaction, when applied to linear nodes, will garbage
	 * collect dummy (NULL) entries, and can therefore cause a few
	 * linear representations to be skipped.
	 */
	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child + 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child + 1);
		break;
	case JA_RECOMPACT_ADD_NEXT:
		if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
			new_type_index = 0;
			dbg_printf("Recompact for NULL\n");
		} else {
			new_type_index = find_nearest_type_index(old_type_index,
				shadow_node->nr_child + 1);
			dbg_printf("Recompact for node with %u children\n",
				shadow_node->nr_child + 1);
		}
		break;
	case JA_RECOMPACT_DEL:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child - 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child - 1);
		break;
	default:
		assert(0);
	}

retry:		/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
			old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	if (new_type_index != NODE_INDEX_NULL) {
		new_node = alloc_cds_ja_node(ja, new_type);
		if (!new_node)
			return -ENOMEM;

		if (new_type->type_class == RCU_JA_POOL) {
			switch (new_type->nr_pool_order) {
			case 1:
			{
				unsigned int node_distrib_bitsel;

				node_distrib_bitsel =
					ja_node_sum_distribution_1d(mode, ja,
						old_type_index, old_type,
						old_node, shadow_node,
						n, child_node_flag,
						nullify_node_flag_ptr);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				new_node_flag = ja_node_flag_pool_1d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			case 2:
			{
				unsigned int node_distrib_bitsel[2];

				ja_node_sum_distribution_2d(mode, ja,
					old_type_index, old_type,
					old_node, shadow_node,
					n, child_node_flag,
					nullify_node_flag_ptr,
					node_distrib_bitsel);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
				new_node_flag = ja_node_flag_pool_2d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			default:
				assert(0);
			}
		} else {
			new_node_flag = ja_node_flag(new_node, new_type_index);
		}

		dbg_printf("Recompact inherit lock from %p\n", shadow_node);
		new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
		if (!new_shadow_node) {
			free_cds_ja_node(ja, new_node);
			return -ENOMEM;
		}
		if (fallback)
			new_shadow_node->fallback_removal_count =
						JA_FALLBACK_REMOVAL_COUNT;
	} else {
		new_node = NULL;
		new_node_flag = NULL;
	}

	assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);

	if (new_type_index == NODE_INDEX_NULL)
		goto skip_copy;

	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		return -EINVAL;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* add node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (old_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}
	return 0;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);

		/* Fall-through */
	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		} else {
			new_type_index++;
			dbg_printf("Add fallback to type %d\n", new_type_index);
			uatomic_inc(&ja->nr_fallback);
			fallback = 1;
			goto retry;
		}
	}
	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node,
		int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	switch (ret) {
	case -ENOSPC:
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	case -ERANGE:
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	}
	return ret;
}
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n, int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr, level);
	}
	return ret;
}
struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_ja_node *) node_flag;
}
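/*
 * Editor's note: usage sketch, not part of the original code. Callers
 * embed struct cds_ja_node in their own type and look entries up under
 * the RCU read-side lock; "struct my_entry" and "my_lookup" are
 * hypothetical names.
 */
#if 0
struct my_entry {
	struct cds_ja_node ja_node;	/* embedded Judy array node */
	uint64_t key;
};

static
struct my_entry *my_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node;

	/* Caller must hold rcu_read_lock() across lookup and use. */
	node = cds_ja_lookup(ja, key);
	if (!node)
		return NULL;
	return caa_container_of(node, struct my_entry, ja_node);
}
#endif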
static
struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
		uint64_t *result_key, enum ja_lookup_inequality mode)
{
	int tree_depth, level;
	struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
	uint8_t cur_key[JA_MAX_DEPTH];
	uint64_t _result_key = 0;
	enum ja_direction dir;

	switch (mode) {
	case JA_LOOKUP_BE:
		if (caa_unlikely(key > ja->key_max || key == 0))
			return NULL;
		break;
	case JA_LOOKUP_AE:
		if (caa_unlikely(key >= ja->key_max))
			return NULL;
		break;
	default:
		return NULL;
	}

	memset(cur_node_depth, 0, sizeof(cur_node_depth));
	memset(cur_key, 0, sizeof(cur_key));
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);
	cur_node_depth[0] = node_flag;

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (level = 1; level < tree_depth; level++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		if (!ja_node_ptr(node_flag))
			break;
		cur_key[level - 1] = iter_key;
		cur_node_depth[level] = node_flag;
		dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got an equal match. */
		if (result_key)
			*result_key = key;
		return (struct cds_ja_node *) node_flag;
	}

	/*
	 * Find highest value left/right of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left/right
	 * of ours, go one level up, seek highest value left/right of
	 * current (recursively), and when we find one, get the
	 * rightmost/leftmost child of its rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_LEFT;
		break;
	case JA_LOOKUP_AE:
		dir = JA_RIGHT;
		break;
	default:
		assert(0);
	}
	for (; level > 0; level--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
				iter_key, &cur_key[level - 1], dir);
		/* If found left/right sibling, find rightmost/leftmost child. */
		if (ja_node_ptr(node_flag))
			break;
	}

	if (!level) {
		/* Reached the root and could not find a left/right sibling. */
		return NULL;
	}

	level++;

	/*
	 * From this point, we are guaranteed to be able to find a
	 * "below than"/"above than" match. ja_attach_node() and
	 * ja_detach_node() both guarantee that it is not possible for a
	 * lookup to reach a dead-end.
	 */

	/*
	 * Find rightmost/leftmost child of rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_RIGHTMOST;
		break;
	case JA_LOOKUP_AE:
		dir = JA_LEFTMOST;
		break;
	default:
		assert(0);
	}
	for (; level < tree_depth; level++) {
		node_flag = ja_node_get_minmax(node_flag, &cur_key[level - 1], dir);
		if (!ja_node_ptr(node_flag))
			break;
	}

	assert(level == tree_depth);

	for (level = 1; level < tree_depth; level++) {
		_result_key |= ((uint64_t) cur_key[level - 1])
				<< (JA_BITS_PER_BYTE * (tree_depth - level - 1));
	}
	*result_key = _result_key;
	return (struct cds_ja_node *) node_flag;
}
struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja,
		uint64_t key, uint64_t *result_key)
{
	return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_BE);
}

struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja,
		uint64_t key, uint64_t *result_key)
{
	return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_AE);
}
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 *
 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: before attaching a branch, the entire content of
 * the new branch is populated, thus creating a cluster, before
 * attaching the cluster to the rest of the tree, thus making it visible
 * to readers.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag *parent_attach_node_flag,
		struct cds_ja_inode_flag **old_node_flag_ptr,
		struct cds_ja_inode_flag *old_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
		level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);

	assert(!old_node_flag);
	if (attach_node_flag) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
	}
	if (parent_attach_node_flag) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_attach_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/*
	 * Perform a lookup query to handle the case where
	 * old_node_flag_ptr is NULL. We cannot use it to check if the
	 * node has been populated between RCU lookup and mutex
	 * acquisition.
	 */
	if (!old_node_flag_ptr) {
		uint8_t iter_key;
		struct cds_ja_inode_flag *lookup_node_flag;
		struct cds_ja_inode_flag **lookup_node_flag_ptr;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		lookup_node_flag = ja_node_get_nth(attach_node_flag,
			&lookup_node_flag_ptr,
			iter_key);
		if (lookup_node_flag) {
			ret = -EEXIST;
			goto unlock_parent;
		}
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	iter_node_flag = (struct cds_ja_inode_flag *) child_node;

	for (i = ja->tree_depth - 1; i >= (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
		dbg_printf("branch creation level %d, key %u\n",
				i, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL, i);
		if (ret) {
			dbg_printf("branch creation error %d\n", ret);
			goto check_error;
		}
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish branch */
	if (level == 1) {
		/*
		 * Attaching to root node.
		 */
		rcu_assign_pointer(ja->root, iter_node_flag);
	} else {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		dbg_printf("publish branch at level %d, key %u\n",
				level - 1, (unsigned int) iter_key);
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = attach_node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node, level - 1);
		if (ret) {
			dbg_printf("branch publish error %d\n", ret);
			goto check_error;
		}
		/* Attach branch */
		rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
	}

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}
/*
 * Lock the parent containing the pointer to list of duplicates, and add
 * node to this list. Failure can happen if concurrent update changes
 * the parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Add node to head of list. Safe against concurrent RCU read
	 * traversals.
	 */
	node->next = (struct cds_ja_node *) node_flag;
	rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
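/*
 * Editor's note: after chaining, the duplicates for a key form an RCU
 * list headed by the pointer stored in the parent node. Under the RCU
 * read-side lock, a reader can walk it along the lines of:
 *
 *	for (iter = node; iter; iter = rcu_dereference(iter->next))
 *		...;
 */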
static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, node);
	} else {
		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
			parent_attach_node_flag,
			attach_node_flag_ptr,
			attach_node_flag,
			node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;

	return ret;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	return _cds_ja_add(ja, key, node, NULL);
}

struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return node;
}
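
/*
 * Usage sketch (not part of this file; struct my_entry and its fields
 * are hypothetical). Nodes are embedded into caller-owned structures,
 * and updates are expected to run under the RCU read-side lock:
 *
 *	struct my_entry {
 *		struct cds_ja_node node;	// must stay valid while in the array
 *		int value;
 *	};
 *
 *	struct my_entry *entry = calloc(1, sizeof(*entry));
 *
 *	entry->value = 42;
 *	rcu_read_lock();
 *	ret = cds_ja_add(ja, 1234ULL, &entry->node);	// duplicate keys allowed
 *	rcu_read_unlock();
 *
 * cds_ja_add_unique() instead returns the pre-existing node when the
 * key is already populated, and the new node itself when the insertion
 * succeeded.
 */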

/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 *
 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: when removing a branch, it makes sure to perform
 * the "cut" at the highest node that has only one child, effectively
 * replacing it with a NULL pointer.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
			node_flag_ptr,		/* Pointer to location to nullify */
			&iter_node_flag,	/* Old and new parent ptr in its parent */
			shadow_nodes[nr_branch - 1],	/* of parent */
			n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
			iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}

static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;

	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Find the previous node's next pointer pointing to our node,
	 * so we can update it. Retry if another thread removed all but
	 * one of the duplicates since the check (that check was
	 * performed without the lock). Ensure that the node we are
	 * about to remove is still in the list (while holding the
	 * lock). No need for RCU traversal here since we hold the lock
	 * on the parent.
	 */
	iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
	iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
	cds_ja_for_each_duplicate(iter_node) {
		count++;
		if (iter_node == node) {
			prev_node_ptr = iter_node_ptr;
			found++;
		}
		iter_node_ptr = &iter_node->next;
	}
	assert(found <= 1);
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	CMM_STORE_SHARED(*prev_node_ptr, node->next);
	/*
	 * Validate that we indeed removed the node from the linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
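
/*
 * Illustration (not from the original source): unchaining the middle
 * duplicate B from a list of three rewrites C's next pointer:
 *
 *	before:	parent slot -> C -> B -> A
 *	after:	parent slot -> C -> A
 *
 * The single-pointer store uses CMM_STORE_SHARED() so concurrent RCU
 * traversals of the duplicate list remain safe; removal of the last
 * remaining duplicate goes through ja_detach_node() instead.
 */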

/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
			key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
				node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&node_flag_ptr,
				iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
				(unsigned int) iter_key, node_flag,
				prev_node_flag_ptr);
	}

	/*
	 * We reached the bottom of the tree: try to find the node we
	 * are trying to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
				key);
		return -ENOENT;
	} else {
		struct cds_ja_node *iter_node, *match = NULL;
		int count = 0;

		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node) {
			dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
			if (iter_node == node)
				match = iter_node;
			count++;
		}

		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		assert(count > 0);
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}

struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}

/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*rcu_free_node)(struct cds_ja_node *node))
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_ja_node *node_iter, *n;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
				node_iter = (struct cds_ja_node *) iter;
				cds_ja_for_each_duplicate_safe(node_iter, n) {
					rcu_free_node(node_iter);
				}
			}
		}
		break;
	}
	case RCU_JA_NULL:
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	default:
		assert(0);
	}
}

static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, "\t%3u: %4lu\n",
			i, ja->node_fallback_count_distribution[i]);
	}
}

static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			uatomic_read(&ja->nr_fallback),
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);

	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) na - nf, na, nf);
		ret = -1;
	}
	return ret;
}

/*
 * There should be no more concurrent add, delete, nor look-up performed
 * on the Judy array while it is being destroyed (ensured by the
 * caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*free_node_cb)(struct cds_ja_node *node))
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
		free_node_cb);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}