/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright (C) 2000 - 2002 Hewlett-Packard Company
 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The index of a node type within the following arrays is represented
 * on 3 bits. It identifies the node type, min/max number of children,
 * and the size order.
 * The max_child values for RCU_JA_POOL below result from statistical
 * approximation: over a million sample populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */
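/*
 * Illustrative sketch (not part of the original implementation): picking
 * the smallest node type able to hold a given number of children by
 * scanning ja_types[] (defined below) until .max_child fits, stopping at
 * the final 256-child entry. The hysteresis described above comes from
 * each min_child overlapping the previous entry's max_child, so a node
 * whose population oscillates around a size boundary is not reallocated
 * on every add/removal. The helper name is hypothetical.
 */
#if 0	/* example only, ja_types[] is defined below */
static unsigned int example_pick_type_index(unsigned int nr_children)
{
	unsigned int idx = 0;

	while (ja_types[idx].max_child < nr_children
			&& ja_types[idx].max_child != 256)
		idx++;
	return idx;
}
#endif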
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};
const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};
const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}
/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
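/*
 * Layout sketch (derived from the accessors below, shown here for
 * illustration): a linear node, or one linear entry of a pool node, is
 * laid out in node->u.data as
 *
 *   data[0]                       : uint8_t nr_child
 *   data[1 .. max_linear_child]   : uint8_t child_value[]
 *   (padding up to pointer alignment)
 *   struct cds_ja_inode_flag *child_ptr[max_linear_child]
 *
 * which matches ja_linear_node_get_nth(): values = &node->u.data[1] and
 * pointers = align_ptr_size(&values[type->max_linear_child]).
 */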
#define DECLARE_LINEAR_NODE(index)							\
	struct {									\
		uint8_t nr_child;							\
		uint8_t child_value[ja_type_## index ##_max_linear_child];		\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)							\
	struct {									\
		struct {								\
			uint8_t nr_child;						\
			uint8_t child_value[ja_type_## index ##_max_linear_child];	\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];			\
	}
struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};
enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,
	JA_LOOKUP_AE,
};
static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}
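/*
 * Illustration (an assumption based on the masks used above, not taken
 * from the original comments): nodes are allocated aligned to their own
 * size, which is at least 8 bytes, so the low JA_TYPE_BITS bits of a
 * node pointer are always zero and can carry the node type index. For
 * example, a linear node at address 0x1000 with type index 2 would be
 * encoded as the flag pointer 0x1002; masking with JA_PTR_MASK recovers
 * 0x1000 and masking with JA_TYPE_MASK recovers 2. Pool node types
 * additionally encode their bit-selector in higher flag bits (see
 * JA_POOL_1D_MASK / JA_POOL_2D_MASK uses below).
 */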
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
	free(node);
}
#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
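/*
 * Worked example (illustration only): with align == 8, JA_ALIGN(13, 8)
 * == 16 and JA_FLOOR(13, 8) == 8; values that are already aligned are
 * unchanged, e.g. JA_ALIGN(16, 8) == 16.
 */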
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}
/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
329 struct cds_ja_inode_flag
*ja_linear_node_get_nth(const struct cds_ja_type
*type
,
330 struct cds_ja_inode
*node
,
331 struct cds_ja_inode_flag
***node_flag_ptr
,
336 struct cds_ja_inode_flag
**pointers
;
337 struct cds_ja_inode_flag
*ptr
;
340 assert(type
->type_class
== RCU_JA_LINEAR
|| type
->type_class
== RCU_JA_POOL
);
342 nr_child
= ja_linear_node_get_nr_child(type
, node
);
343 cmm_smp_rmb(); /* read nr_child before values and pointers */
344 assert(nr_child
<= type
->max_linear_child
);
345 assert(type
->type_class
!= RCU_JA_LINEAR
|| nr_child
>= type
->min_child
);
347 values
= &node
->u
.data
[1];
348 for (i
= 0; i
< nr_child
; i
++) {
349 if (CMM_LOAD_SHARED(values
[i
]) == n
)
353 if (caa_unlikely(node_flag_ptr
))
354 *node_flag_ptr
= NULL
;
357 pointers
= (struct cds_ja_inode_flag
**) align_ptr_size(&values
[type
->max_linear_child
]);
358 ptr
= rcu_dereference(pointers
[i
]);
359 if (caa_unlikely(node_flag_ptr
))
360 *node_flag_ptr
= &pointers
[i
];
365 struct cds_ja_inode_flag
*ja_linear_node_get_direction(const struct cds_ja_type
*type
,
366 struct cds_ja_inode
*node
,
367 int n
, uint8_t *result_key
,
368 enum ja_direction dir
)
372 struct cds_ja_inode_flag
**pointers
;
373 struct cds_ja_inode_flag
*ptr
, *match_ptr
= NULL
;
377 assert(type
->type_class
== RCU_JA_LINEAR
|| type
->type_class
== RCU_JA_POOL
);
378 assert(dir
== JA_LEFT
|| dir
== JA_RIGHT
);
380 if (dir
== JA_LEFT
) {
383 match_v
= JA_ENTRY_PER_NODE
;
386 nr_child
= ja_linear_node_get_nr_child(type
, node
);
387 cmm_smp_rmb(); /* read nr_child before values and pointers */
388 assert(nr_child
<= type
->max_linear_child
);
389 assert(type
->type_class
!= RCU_JA_LINEAR
|| nr_child
>= type
->min_child
);
391 values
= &node
->u
.data
[1];
392 pointers
= (struct cds_ja_inode_flag
**) align_ptr_size(&values
[type
->max_linear_child
]);
393 for (i
= 0; i
< nr_child
; i
++) {
396 v
= CMM_LOAD_SHARED(values
[i
]);
397 ptr
= CMM_LOAD_SHARED(pointers
[i
]);
400 if (dir
== JA_LEFT
) {
401 if ((int) v
< n
&& (int) v
> match_v
) {
406 if ((int) v
> n
&& (int) v
< match_v
) {
416 assert(match_v
>= 0 && match_v
< JA_ENTRY_PER_NODE
);
418 *result_key
= (uint8_t) match_v
;
423 void ja_linear_node_get_ith_pos(const struct cds_ja_type
*type
,
424 struct cds_ja_inode
*node
,
427 struct cds_ja_inode_flag
**iter
)
430 struct cds_ja_inode_flag
**pointers
;
432 assert(type
->type_class
== RCU_JA_LINEAR
|| type
->type_class
== RCU_JA_POOL
);
433 assert(i
< ja_linear_node_get_nr_child(type
, node
));
435 values
= &node
->u
.data
[1];
437 pointers
= (struct cds_ja_inode_flag
**) align_ptr_size(&values
[type
->max_linear_child
]);
442 struct cds_ja_inode_flag
*ja_pool_node_get_nth(const struct cds_ja_type
*type
,
443 struct cds_ja_inode
*node
,
444 struct cds_ja_inode_flag
*node_flag
,
445 struct cds_ja_inode_flag
***node_flag_ptr
,
448 struct cds_ja_inode
*linear
;
450 assert(type
->type_class
== RCU_JA_POOL
);
452 switch (type
->nr_pool_order
) {
455 unsigned long bitsel
, index
;
457 bitsel
= ja_node_pool_1d_bitsel(node_flag
);
458 assert(bitsel
< CHAR_BIT
);
459 index
= ((unsigned long) n
>> bitsel
) & 0x1;
460 linear
= (struct cds_ja_inode
*) &node
->u
.data
[index
<< type
->pool_size_order
];
465 unsigned long bitsel
[2], index
[2], rindex
;
467 ja_node_pool_2d_bitsel(node_flag
, bitsel
);
468 assert(bitsel
[0] < CHAR_BIT
);
469 assert(bitsel
[1] < CHAR_BIT
);
470 index
[0] = ((unsigned long) n
>> bitsel
[0]) & 0x1;
472 index
[1] = ((unsigned long) n
>> bitsel
[1]) & 0x1;
473 rindex
= index
[0] | index
[1];
474 linear
= (struct cds_ja_inode
*) &node
->u
.data
[rindex
<< type
->pool_size_order
];
481 return ja_linear_node_get_nth(type
, linear
, node_flag_ptr
, n
);
485 struct cds_ja_inode
*ja_pool_node_get_ith_pool(const struct cds_ja_type
*type
,
486 struct cds_ja_inode
*node
,
489 assert(type
->type_class
== RCU_JA_POOL
);
490 return (struct cds_ja_inode
*)
491 &node
->u
.data
[(unsigned int) i
<< type
->pool_size_order
];
495 struct cds_ja_inode_flag
*ja_pool_node_get_direction(const struct cds_ja_type
*type
,
496 struct cds_ja_inode
*node
,
497 int n
, uint8_t *result_key
,
498 enum ja_direction dir
)
500 unsigned int pool_nr
;
502 struct cds_ja_inode_flag
*match_node_flag
= NULL
;
504 assert(type
->type_class
== RCU_JA_POOL
);
505 assert(dir
== JA_LEFT
|| dir
== JA_RIGHT
);
507 if (dir
== JA_LEFT
) {
510 match_v
= JA_ENTRY_PER_NODE
;
513 for (pool_nr
= 0; pool_nr
< (1U << type
->nr_pool_order
); pool_nr
++) {
514 struct cds_ja_inode
*pool
=
515 ja_pool_node_get_ith_pool(type
,
518 ja_linear_node_get_nr_child(type
, pool
);
521 for (j
= 0; j
< nr_child
; j
++) {
522 struct cds_ja_inode_flag
*iter
;
525 ja_linear_node_get_ith_pos(type
, pool
,
529 if (dir
== JA_LEFT
) {
530 if ((int) v
< n
&& (int) v
> match_v
) {
532 match_node_flag
= iter
;
535 if ((int) v
> n
&& (int) v
< match_v
) {
537 match_node_flag
= iter
;
543 *result_key
= (uint8_t) match_v
;
544 return match_node_flag
;
548 struct cds_ja_inode_flag
*ja_pigeon_node_get_nth(const struct cds_ja_type
*type
,
549 struct cds_ja_inode
*node
,
550 struct cds_ja_inode_flag
***node_flag_ptr
,
553 struct cds_ja_inode_flag
**child_node_flag_ptr
;
554 struct cds_ja_inode_flag
*child_node_flag
;
556 assert(type
->type_class
== RCU_JA_PIGEON
);
557 child_node_flag_ptr
= &((struct cds_ja_inode_flag
**) node
->u
.data
)[n
];
558 child_node_flag
= rcu_dereference(*child_node_flag_ptr
);
559 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
560 child_node_flag_ptr
);
561 if (caa_unlikely(node_flag_ptr
))
562 *node_flag_ptr
= child_node_flag_ptr
;
563 return child_node_flag
;
567 struct cds_ja_inode_flag
*ja_pigeon_node_get_direction(const struct cds_ja_type
*type
,
568 struct cds_ja_inode
*node
,
569 int n
, uint8_t *result_key
,
570 enum ja_direction dir
)
572 struct cds_ja_inode_flag
**child_node_flag_ptr
;
573 struct cds_ja_inode_flag
*child_node_flag
;
576 assert(type
->type_class
== RCU_JA_PIGEON
);
577 assert(dir
== JA_LEFT
|| dir
== JA_RIGHT
);
579 if (dir
== JA_LEFT
) {
580 /* n - 1 is first value left of n */
581 for (i
= n
- 1; i
>= 0; i
--) {
582 child_node_flag_ptr
= &((struct cds_ja_inode_flag
**) node
->u
.data
)[i
];
583 child_node_flag
= rcu_dereference(*child_node_flag_ptr
);
584 if (child_node_flag
) {
585 dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
587 *result_key
= (uint8_t) i
;
588 return child_node_flag
;
592 /* n + 1 is first value right of n */
593 for (i
= n
+ 1; i
< JA_ENTRY_PER_NODE
; i
++) {
594 child_node_flag_ptr
= &((struct cds_ja_inode_flag
**) node
->u
.data
)[i
];
595 child_node_flag
= rcu_dereference(*child_node_flag_ptr
);
596 if (child_node_flag
) {
597 dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
599 *result_key
= (uint8_t) i
;
600 return child_node_flag
;
608 struct cds_ja_inode_flag
*ja_pigeon_node_get_ith_pos(const struct cds_ja_type
*type
,
609 struct cds_ja_inode
*node
,
612 return ja_pigeon_node_get_nth(type
, node
, NULL
, i
);
/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
620 struct cds_ja_inode_flag
*ja_node_get_nth(struct cds_ja_inode_flag
*node_flag
,
621 struct cds_ja_inode_flag
***node_flag_ptr
,
624 unsigned int type_index
;
625 struct cds_ja_inode
*node
;
626 const struct cds_ja_type
*type
;
628 node
= ja_node_ptr(node_flag
);
629 assert(node
!= NULL
);
630 type_index
= ja_node_type(node_flag
);
631 type
= &ja_types
[type_index
];
633 switch (type
->type_class
) {
635 return ja_linear_node_get_nth(type
, node
,
638 return ja_pool_node_get_nth(type
, node
, node_flag
,
641 return ja_pigeon_node_get_nth(type
, node
,
645 return (void *) -1UL;
650 struct cds_ja_inode_flag
*ja_node_get_direction(struct cds_ja_inode_flag
*node_flag
,
651 int n
, uint8_t *result_key
,
652 enum ja_direction dir
)
654 unsigned int type_index
;
655 struct cds_ja_inode
*node
;
656 const struct cds_ja_type
*type
;
658 node
= ja_node_ptr(node_flag
);
659 assert(node
!= NULL
);
660 type_index
= ja_node_type(node_flag
);
661 type
= &ja_types
[type_index
];
663 switch (type
->type_class
) {
665 return ja_linear_node_get_direction(type
, node
, n
, result_key
, dir
);
667 return ja_pool_node_get_direction(type
, node
, n
, result_key
, dir
);
669 return ja_pigeon_node_get_direction(type
, node
, n
, result_key
, dir
);
672 return (void *) -1UL;
677 struct cds_ja_inode_flag
*ja_node_get_leftright(struct cds_ja_inode_flag
*node_flag
,
678 unsigned int n
, uint8_t *result_key
,
679 enum ja_direction dir
)
681 return ja_node_get_direction(node_flag
, n
, result_key
, dir
);
685 struct cds_ja_inode_flag
*ja_node_get_minmax(struct cds_ja_inode_flag
*node_flag
,
687 enum ja_direction dir
)
691 return ja_node_get_direction(node_flag
,
692 -1, result_key
, JA_RIGHT
);
694 return ja_node_get_direction(node_flag
,
695 JA_ENTRY_PER_NODE
, result_key
, JA_LEFT
);
702 int ja_linear_node_set_nth(const struct cds_ja_type
*type
,
703 struct cds_ja_inode
*node
,
704 struct cds_ja_shadow_node
*shadow_node
,
706 struct cds_ja_inode_flag
*child_node_flag
)
709 uint8_t *values
, *nr_child_ptr
;
710 struct cds_ja_inode_flag
**pointers
;
711 unsigned int i
, unused
= 0;
713 assert(type
->type_class
== RCU_JA_LINEAR
|| type
->type_class
== RCU_JA_POOL
);
715 nr_child_ptr
= &node
->u
.data
[0];
716 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
717 (unsigned int) n
, nr_child_ptr
);
718 nr_child
= *nr_child_ptr
;
719 assert(nr_child
<= type
->max_linear_child
);
721 values
= &node
->u
.data
[1];
722 pointers
= (struct cds_ja_inode_flag
**) align_ptr_size(&values
[type
->max_linear_child
]);
723 /* Check if node value is already populated */
724 for (i
= 0; i
< nr_child
; i
++) {
725 if (values
[i
] == n
) {
735 if (i
== nr_child
&& nr_child
>= type
->max_linear_child
) {
737 return -ERANGE
; /* recompact node */
739 return -ENOSPC
; /* No space left in this node type */
742 assert(pointers
[i
] == NULL
);
743 rcu_assign_pointer(pointers
[i
], child_node_flag
);
744 /* If we expanded the nr_child, increment it */
746 CMM_STORE_SHARED(values
[nr_child
], n
);
747 /* write pointer and value before nr_child */
749 CMM_STORE_SHARED(*nr_child_ptr
, nr_child
+ 1);
751 shadow_node
->nr_child
++;
752 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
753 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr
),
754 (unsigned int) shadow_node
->nr_child
,
761 int ja_pool_node_set_nth(const struct cds_ja_type
*type
,
762 struct cds_ja_inode
*node
,
763 struct cds_ja_inode_flag
*node_flag
,
764 struct cds_ja_shadow_node
*shadow_node
,
766 struct cds_ja_inode_flag
*child_node_flag
)
768 struct cds_ja_inode
*linear
;
770 assert(type
->type_class
== RCU_JA_POOL
);
772 switch (type
->nr_pool_order
) {
775 unsigned long bitsel
, index
;
777 bitsel
= ja_node_pool_1d_bitsel(node_flag
);
778 assert(bitsel
< CHAR_BIT
);
779 index
= ((unsigned long) n
>> bitsel
) & 0x1;
780 linear
= (struct cds_ja_inode
*) &node
->u
.data
[index
<< type
->pool_size_order
];
785 unsigned long bitsel
[2], index
[2], rindex
;
787 ja_node_pool_2d_bitsel(node_flag
, bitsel
);
788 assert(bitsel
[0] < CHAR_BIT
);
789 assert(bitsel
[1] < CHAR_BIT
);
790 index
[0] = ((unsigned long) n
>> bitsel
[0]) & 0x1;
792 index
[1] = ((unsigned long) n
>> bitsel
[1]) & 0x1;
793 rindex
= index
[0] | index
[1];
794 linear
= (struct cds_ja_inode
*) &node
->u
.data
[rindex
<< type
->pool_size_order
];
802 return ja_linear_node_set_nth(type
, linear
, shadow_node
,
807 int ja_pigeon_node_set_nth(const struct cds_ja_type
*type
,
808 struct cds_ja_inode
*node
,
809 struct cds_ja_shadow_node
*shadow_node
,
811 struct cds_ja_inode_flag
*child_node_flag
)
813 struct cds_ja_inode_flag
**ptr
;
815 assert(type
->type_class
== RCU_JA_PIGEON
);
816 ptr
= &((struct cds_ja_inode_flag
**) node
->u
.data
)[n
];
819 rcu_assign_pointer(*ptr
, child_node_flag
);
820 shadow_node
->nr_child
++;
/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
829 int _ja_node_set_nth(const struct cds_ja_type
*type
,
830 struct cds_ja_inode
*node
,
831 struct cds_ja_inode_flag
*node_flag
,
832 struct cds_ja_shadow_node
*shadow_node
,
834 struct cds_ja_inode_flag
*child_node_flag
)
836 switch (type
->type_class
) {
838 return ja_linear_node_set_nth(type
, node
, shadow_node
, n
,
841 return ja_pool_node_set_nth(type
, node
, node_flag
, shadow_node
, n
,
844 return ja_pigeon_node_set_nth(type
, node
, shadow_node
, n
,
857 int ja_linear_node_clear_ptr(const struct cds_ja_type
*type
,
858 struct cds_ja_inode
*node
,
859 struct cds_ja_shadow_node
*shadow_node
,
860 struct cds_ja_inode_flag
**node_flag_ptr
)
863 uint8_t *nr_child_ptr
;
865 assert(type
->type_class
== RCU_JA_LINEAR
|| type
->type_class
== RCU_JA_POOL
);
867 nr_child_ptr
= &node
->u
.data
[0];
868 nr_child
= *nr_child_ptr
;
869 assert(nr_child
<= type
->max_linear_child
);
871 if (type
->type_class
== RCU_JA_LINEAR
) {
872 assert(!shadow_node
->fallback_removal_count
);
873 if (shadow_node
->nr_child
<= type
->min_child
) {
874 /* We need to try recompacting the node */
878 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr
);
879 assert(*node_flag_ptr
!= NULL
);
880 rcu_assign_pointer(*node_flag_ptr
, NULL
);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
888 shadow_node
->nr_child
--;
889 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
890 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr
),
891 (unsigned int) shadow_node
->nr_child
,
897 int ja_pool_node_clear_ptr(const struct cds_ja_type
*type
,
898 struct cds_ja_inode
*node
,
899 struct cds_ja_inode_flag
*node_flag
,
900 struct cds_ja_shadow_node
*shadow_node
,
901 struct cds_ja_inode_flag
**node_flag_ptr
,
904 struct cds_ja_inode
*linear
;
906 assert(type
->type_class
== RCU_JA_POOL
);
908 if (shadow_node
->fallback_removal_count
) {
909 shadow_node
->fallback_removal_count
--;
911 /* We should try recompacting the node */
912 if (shadow_node
->nr_child
<= type
->min_child
)
916 switch (type
->nr_pool_order
) {
919 unsigned long bitsel
, index
;
921 bitsel
= ja_node_pool_1d_bitsel(node_flag
);
922 assert(bitsel
< CHAR_BIT
);
923 index
= ((unsigned long) n
>> bitsel
) & type
->nr_pool_order
;
924 linear
= (struct cds_ja_inode
*) &node
->u
.data
[index
<< type
->pool_size_order
];
929 unsigned long bitsel
[2], index
[2], rindex
;
931 ja_node_pool_2d_bitsel(node_flag
, bitsel
);
932 assert(bitsel
[0] < CHAR_BIT
);
933 assert(bitsel
[1] < CHAR_BIT
);
934 index
[0] = ((unsigned long) n
>> bitsel
[0]) & 0x1;
936 index
[1] = ((unsigned long) n
>> bitsel
[1]) & 0x1;
937 rindex
= index
[0] | index
[1];
938 linear
= (struct cds_ja_inode
*) &node
->u
.data
[rindex
<< type
->pool_size_order
];
946 return ja_linear_node_clear_ptr(type
, linear
, shadow_node
, node_flag_ptr
);
950 int ja_pigeon_node_clear_ptr(const struct cds_ja_type
*type
,
951 struct cds_ja_inode
*node
,
952 struct cds_ja_shadow_node
*shadow_node
,
953 struct cds_ja_inode_flag
**node_flag_ptr
)
955 assert(type
->type_class
== RCU_JA_PIGEON
);
957 if (shadow_node
->fallback_removal_count
) {
958 shadow_node
->fallback_removal_count
--;
960 /* We should try recompacting the node */
961 if (shadow_node
->nr_child
<= type
->min_child
)
964 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr
);
965 rcu_assign_pointer(*node_flag_ptr
, NULL
);
966 shadow_node
->nr_child
--;
/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
975 int _ja_node_clear_ptr(const struct cds_ja_type
*type
,
976 struct cds_ja_inode
*node
,
977 struct cds_ja_inode_flag
*node_flag
,
978 struct cds_ja_shadow_node
*shadow_node
,
979 struct cds_ja_inode_flag
**node_flag_ptr
,
982 switch (type
->type_class
) {
984 return ja_linear_node_clear_ptr(type
, node
, shadow_node
, node_flag_ptr
);
986 return ja_pool_node_clear_ptr(type
, node
, node_flag
, shadow_node
, node_flag_ptr
, n
);
988 return ja_pigeon_node_clear_ptr(type
, node
, shadow_node
, node_flag_ptr
);
/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution in two sub-distributions containing as close to the
 * same number of elements as possible.
 */
1005 unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode
,
1007 unsigned int type_index
,
1008 const struct cds_ja_type
*type
,
1009 struct cds_ja_inode
*node
,
1010 struct cds_ja_shadow_node
*shadow_node
,
1012 struct cds_ja_inode_flag
*child_node_flag
,
1013 struct cds_ja_inode_flag
**nullify_node_flag_ptr
)
1015 uint8_t nr_one
[JA_BITS_PER_BYTE
];
1016 unsigned int bitsel
= 0, bit_i
, overall_best_distance
= UINT_MAX
;
1017 unsigned int distrib_nr_child
= 0;
1019 memset(nr_one
, 0, sizeof(nr_one
));
1021 switch (type
->type_class
) {
1025 ja_linear_node_get_nr_child(type
, node
);
1028 for (i
= 0; i
< nr_child
; i
++) {
1029 struct cds_ja_inode_flag
*iter
;
1032 ja_linear_node_get_ith_pos(type
, node
, i
, &v
, &iter
);
1035 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1037 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1038 if (v
& (1U << bit_i
))
1047 unsigned int pool_nr
;
1049 for (pool_nr
= 0; pool_nr
< (1U << type
->nr_pool_order
); pool_nr
++) {
1050 struct cds_ja_inode
*pool
=
1051 ja_pool_node_get_ith_pool(type
,
1054 ja_linear_node_get_nr_child(type
, pool
);
1057 for (j
= 0; j
< nr_child
; j
++) {
1058 struct cds_ja_inode_flag
*iter
;
1061 ja_linear_node_get_ith_pos(type
, pool
,
1065 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1067 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1068 if (v
& (1U << bit_i
))
1080 assert(mode
== JA_RECOMPACT_DEL
);
1081 for (i
= 0; i
< JA_ENTRY_PER_NODE
; i
++) {
1082 struct cds_ja_inode_flag
*iter
;
1084 iter
= ja_pigeon_node_get_ith_pos(type
, node
, i
);
1087 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1089 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1090 if (i
& (1U << bit_i
))
1098 assert(mode
== JA_RECOMPACT_ADD_NEXT
);
1105 if (mode
== JA_RECOMPACT_ADD_NEXT
|| mode
== JA_RECOMPACT_ADD_SAME
) {
1106 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1107 if (n
& (1U << bit_i
))
	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
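	/*
	 * Worked example (illustration only): with distrib_nr_child == 8
	 * and a bit for which nr_one[bit_i] == 4, the distance is
	 * abs_int((4 << 1) - 8) == 0, a perfect split; a bit set in 7 of
	 * the 8 children gives abs_int((7 << 1) - 8) == 6, a poor split.
	 */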
1119 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1120 unsigned int distance_to_best
;
1122 distance_to_best
= abs_int(((unsigned int) nr_one
[bit_i
] << 1U) - distrib_nr_child
);
1123 if (distance_to_best
< overall_best_distance
) {
1124 overall_best_distance
= distance_to_best
;
1128 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel
);
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution in four sub-distributions
 * containing as close to the same number of elements as possible.
 */
1138 void ja_node_sum_distribution_2d(enum ja_recompact mode
,
1140 unsigned int type_index
,
1141 const struct cds_ja_type
*type
,
1142 struct cds_ja_inode
*node
,
1143 struct cds_ja_shadow_node
*shadow_node
,
1145 struct cds_ja_inode_flag
*child_node_flag
,
1146 struct cds_ja_inode_flag
**nullify_node_flag_ptr
,
1147 unsigned int *_bitsel
)
1149 uint8_t nr_2d_11
[JA_BITS_PER_BYTE
][JA_BITS_PER_BYTE
],
1150 nr_2d_10
[JA_BITS_PER_BYTE
][JA_BITS_PER_BYTE
],
1151 nr_2d_01
[JA_BITS_PER_BYTE
][JA_BITS_PER_BYTE
],
1152 nr_2d_00
[JA_BITS_PER_BYTE
][JA_BITS_PER_BYTE
];
1153 unsigned int bitsel
[2] = { 0, 1 };
1154 unsigned int bit_i
, bit_j
;
1155 int overall_best_distance
= INT_MAX
;
1156 unsigned int distrib_nr_child
= 0;
1158 memset(nr_2d_11
, 0, sizeof(nr_2d_11
));
1159 memset(nr_2d_10
, 0, sizeof(nr_2d_10
));
1160 memset(nr_2d_01
, 0, sizeof(nr_2d_01
));
1161 memset(nr_2d_00
, 0, sizeof(nr_2d_00
));
1163 switch (type
->type_class
) {
1167 ja_linear_node_get_nr_child(type
, node
);
1170 for (i
= 0; i
< nr_child
; i
++) {
1171 struct cds_ja_inode_flag
*iter
;
1174 ja_linear_node_get_ith_pos(type
, node
, i
, &v
, &iter
);
1177 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1179 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1180 for (bit_j
= 0; bit_j
< bit_i
; bit_j
++) {
1181 if (v
& (1U << bit_i
)) {
1182 if (v
& (1U << bit_j
)) {
1183 nr_2d_11
[bit_i
][bit_j
]++;
1185 nr_2d_10
[bit_i
][bit_j
]++;
1188 if (v
& (1U << bit_j
)) {
1189 nr_2d_01
[bit_i
][bit_j
]++;
1191 nr_2d_00
[bit_i
][bit_j
]++;
1202 unsigned int pool_nr
;
1204 for (pool_nr
= 0; pool_nr
< (1U << type
->nr_pool_order
); pool_nr
++) {
1205 struct cds_ja_inode
*pool
=
1206 ja_pool_node_get_ith_pool(type
,
1209 ja_linear_node_get_nr_child(type
, pool
);
1212 for (j
= 0; j
< nr_child
; j
++) {
1213 struct cds_ja_inode_flag
*iter
;
1216 ja_linear_node_get_ith_pos(type
, pool
,
1220 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1222 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1223 for (bit_j
= 0; bit_j
< bit_i
; bit_j
++) {
1224 if (v
& (1U << bit_i
)) {
1225 if (v
& (1U << bit_j
)) {
1226 nr_2d_11
[bit_i
][bit_j
]++;
1228 nr_2d_10
[bit_i
][bit_j
]++;
1231 if (v
& (1U << bit_j
)) {
1232 nr_2d_01
[bit_i
][bit_j
]++;
1234 nr_2d_00
[bit_i
][bit_j
]++;
1248 assert(mode
== JA_RECOMPACT_DEL
);
1249 for (i
= 0; i
< JA_ENTRY_PER_NODE
; i
++) {
1250 struct cds_ja_inode_flag
*iter
;
1252 iter
= ja_pigeon_node_get_ith_pos(type
, node
, i
);
1255 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1257 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1258 for (bit_j
= 0; bit_j
< bit_i
; bit_j
++) {
1259 if (i
& (1U << bit_i
)) {
1260 if (i
& (1U << bit_j
)) {
1261 nr_2d_11
[bit_i
][bit_j
]++;
1263 nr_2d_10
[bit_i
][bit_j
]++;
1266 if (i
& (1U << bit_j
)) {
1267 nr_2d_01
[bit_i
][bit_j
]++;
1269 nr_2d_00
[bit_i
][bit_j
]++;
1279 assert(mode
== JA_RECOMPACT_ADD_NEXT
);
1286 if (mode
== JA_RECOMPACT_ADD_NEXT
|| mode
== JA_RECOMPACT_ADD_SAME
) {
1287 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1288 for (bit_j
= 0; bit_j
< bit_i
; bit_j
++) {
1289 if (n
& (1U << bit_i
)) {
1290 if (n
& (1U << bit_j
)) {
1291 nr_2d_11
[bit_i
][bit_j
]++;
1293 nr_2d_10
[bit_i
][bit_j
]++;
1296 if (n
& (1U << bit_j
)) {
1297 nr_2d_01
[bit_i
][bit_j
]++;
1299 nr_2d_00
[bit_i
][bit_j
]++;
	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
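	/*
	 * Worked example (illustration only): with distrib_nr_child == 16
	 * and a bit pair splitting the children evenly, each of
	 * nr_2d_11/10/01/00 is 4 and every distance is
	 * (4 << 2) - 16 == 0, a perfect four-way split.
	 */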
1314 for (bit_i
= 0; bit_i
< JA_BITS_PER_BYTE
; bit_i
++) {
1315 for (bit_j
= 0; bit_j
< bit_i
; bit_j
++) {
1316 int distance_to_best
[4];
1318 distance_to_best
[0] = ((unsigned int) nr_2d_11
[bit_i
][bit_j
] << 2U) - distrib_nr_child
;
1319 distance_to_best
[1] = ((unsigned int) nr_2d_10
[bit_i
][bit_j
] << 2U) - distrib_nr_child
;
1320 distance_to_best
[2] = ((unsigned int) nr_2d_01
[bit_i
][bit_j
] << 2U) - distrib_nr_child
;
1321 distance_to_best
[3] = ((unsigned int) nr_2d_00
[bit_i
][bit_j
] << 2U) - distrib_nr_child
;
			/* Consider worst distance above best */
1324 if (distance_to_best
[1] > 0 && distance_to_best
[1] > distance_to_best
[0])
1325 distance_to_best
[0] = distance_to_best
[1];
1326 if (distance_to_best
[2] > 0 && distance_to_best
[2] > distance_to_best
[0])
1327 distance_to_best
[0] = distance_to_best
[2];
1328 if (distance_to_best
[3] > 0 && distance_to_best
[3] > distance_to_best
[0])
1329 distance_to_best
[0] = distance_to_best
[3];
			/*
			 * If our worst distance is better than overall,
			 * we become the new best candidate.
			 */
1335 if (distance_to_best
[0] < overall_best_distance
) {
1336 overall_best_distance
= distance_to_best
[0];
1343 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel
[0], bitsel
[1]);
1345 /* Return our bit selection */
1346 _bitsel
[0] = bitsel
[0];
1347 _bitsel
[1] = bitsel
[1];
1351 unsigned int find_nearest_type_index(unsigned int type_index
,
1352 unsigned int nr_nodes
)
1354 const struct cds_ja_type
*type
;
1356 assert(type_index
!= NODE_INDEX_NULL
);
1358 return NODE_INDEX_NULL
;
1360 type
= &ja_types
[type_index
];
1361 if (nr_nodes
< type
->min_child
)
1363 else if (nr_nodes
> type
->max_child
)
/*
 * ja_node_recompact: recompact a node, adding a new child or removing
 * one. Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
1377 int ja_node_recompact(enum ja_recompact mode
,
1379 unsigned int old_type_index
,
1380 const struct cds_ja_type
*old_type
,
1381 struct cds_ja_inode
*old_node
,
1382 struct cds_ja_shadow_node
*shadow_node
,
1383 struct cds_ja_inode_flag
**old_node_flag_ptr
, uint8_t n
,
1384 struct cds_ja_inode_flag
*child_node_flag
,
1385 struct cds_ja_inode_flag
**nullify_node_flag_ptr
,
1388 unsigned int new_type_index
;
1389 struct cds_ja_inode
*new_node
;
1390 struct cds_ja_shadow_node
*new_shadow_node
= NULL
;
1391 const struct cds_ja_type
*new_type
;
1392 struct cds_ja_inode_flag
*new_node_flag
, *old_node_flag
;
1396 old_node_flag
= *old_node_flag_ptr
;
	/*
	 * Need to find nearest type index even for ADD_SAME, because
	 * this recompaction, when applied to linear nodes, will garbage
	 * collect dummy (NULL) entries, and can therefore cause a few
	 * linear representations to be skipped.
	 */
1405 case JA_RECOMPACT_ADD_SAME
:
1406 new_type_index
= find_nearest_type_index(old_type_index
,
1407 shadow_node
->nr_child
+ 1);
1408 dbg_printf("Recompact for node with %u children\n",
1409 shadow_node
->nr_child
+ 1);
1411 case JA_RECOMPACT_ADD_NEXT
:
1412 if (!shadow_node
|| old_type_index
== NODE_INDEX_NULL
) {
1414 dbg_printf("Recompact for NULL\n");
1416 new_type_index
= find_nearest_type_index(old_type_index
,
1417 shadow_node
->nr_child
+ 1);
1418 dbg_printf("Recompact for node with %u children\n",
1419 shadow_node
->nr_child
+ 1);
1422 case JA_RECOMPACT_DEL
:
1423 new_type_index
= find_nearest_type_index(old_type_index
,
1424 shadow_node
->nr_child
- 1);
1425 dbg_printf("Recompact for node with %u children\n",
1426 shadow_node
->nr_child
- 1);
1432 retry
: /* for fallback */
1433 dbg_printf("Recompact from type %d to type %d\n",
1434 old_type_index
, new_type_index
);
1435 new_type
= &ja_types
[new_type_index
];
1436 if (new_type_index
!= NODE_INDEX_NULL
) {
1437 new_node
= alloc_cds_ja_node(ja
, new_type
);
1441 if (new_type
->type_class
== RCU_JA_POOL
) {
1442 switch (new_type
->nr_pool_order
) {
1445 unsigned int node_distrib_bitsel
;
1447 node_distrib_bitsel
=
1448 ja_node_sum_distribution_1d(mode
, ja
,
1449 old_type_index
, old_type
,
1450 old_node
, shadow_node
,
1452 nullify_node_flag_ptr
);
1453 assert(!((unsigned long) new_node
& JA_POOL_1D_MASK
));
1454 new_node_flag
= ja_node_flag_pool_1d(new_node
,
1455 new_type_index
, node_distrib_bitsel
);
1460 unsigned int node_distrib_bitsel
[2];
1462 ja_node_sum_distribution_2d(mode
, ja
,
1463 old_type_index
, old_type
,
1464 old_node
, shadow_node
,
1466 nullify_node_flag_ptr
,
1467 node_distrib_bitsel
);
1468 assert(!((unsigned long) new_node
& JA_POOL_1D_MASK
));
1469 assert(!((unsigned long) new_node
& JA_POOL_2D_MASK
));
1470 new_node_flag
= ja_node_flag_pool_2d(new_node
,
1471 new_type_index
, node_distrib_bitsel
);
1478 new_node_flag
= ja_node_flag(new_node
, new_type_index
);
1481 dbg_printf("Recompact inherit lock from %p\n", shadow_node
);
1482 new_shadow_node
= rcuja_shadow_set(ja
->ht
, new_node_flag
, shadow_node
, ja
, level
);
1483 if (!new_shadow_node
) {
1484 free_cds_ja_node(ja
, new_node
);
1488 new_shadow_node
->fallback_removal_count
=
1489 JA_FALLBACK_REMOVAL_COUNT
;
1492 new_node_flag
= NULL
;
1495 assert(mode
!= JA_RECOMPACT_ADD_NEXT
|| old_type
->type_class
!= RCU_JA_PIGEON
);
1497 if (new_type_index
== NODE_INDEX_NULL
)
1500 switch (old_type
->type_class
) {
1504 ja_linear_node_get_nr_child(old_type
, old_node
);
1507 for (i
= 0; i
< nr_child
; i
++) {
1508 struct cds_ja_inode_flag
*iter
;
1511 ja_linear_node_get_ith_pos(old_type
, old_node
, i
, &v
, &iter
);
1514 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1516 ret
= _ja_node_set_nth(new_type
, new_node
, new_node_flag
,
1519 if (new_type
->type_class
== RCU_JA_POOL
&& ret
) {
1520 goto fallback_toosmall
;
1528 unsigned int pool_nr
;
1530 for (pool_nr
= 0; pool_nr
< (1U << old_type
->nr_pool_order
); pool_nr
++) {
1531 struct cds_ja_inode
*pool
=
1532 ja_pool_node_get_ith_pool(old_type
,
1535 ja_linear_node_get_nr_child(old_type
, pool
);
1538 for (j
= 0; j
< nr_child
; j
++) {
1539 struct cds_ja_inode_flag
*iter
;
1542 ja_linear_node_get_ith_pos(old_type
, pool
,
1546 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1548 ret
= _ja_node_set_nth(new_type
, new_node
, new_node_flag
,
1551 if (new_type
->type_class
== RCU_JA_POOL
1553 goto fallback_toosmall
;
1561 assert(mode
== JA_RECOMPACT_ADD_NEXT
);
1567 assert(mode
== JA_RECOMPACT_DEL
);
1568 for (i
= 0; i
< JA_ENTRY_PER_NODE
; i
++) {
1569 struct cds_ja_inode_flag
*iter
;
1571 iter
= ja_pigeon_node_get_ith_pos(old_type
, old_node
, i
);
1574 if (mode
== JA_RECOMPACT_DEL
&& *nullify_node_flag_ptr
== iter
)
1576 ret
= _ja_node_set_nth(new_type
, new_node
, new_node_flag
,
1579 if (new_type
->type_class
== RCU_JA_POOL
&& ret
) {
1580 goto fallback_toosmall
;
1593 if (mode
== JA_RECOMPACT_ADD_NEXT
|| mode
== JA_RECOMPACT_ADD_SAME
) {
1595 ret
= _ja_node_set_nth(new_type
, new_node
, new_node_flag
,
1597 n
, child_node_flag
);
1598 if (new_type
->type_class
== RCU_JA_POOL
&& ret
) {
1599 goto fallback_toosmall
;
1605 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1606 new_shadow_node
->nr_child
, old_type_index
, mode
== JA_RECOMPACT_ADD_NEXT
? "add_next" :
1607 (mode
== JA_RECOMPACT_DEL
? "del" : "add_same"));
1608 uatomic_inc(&ja
->node_fallback_count_distribution
[new_shadow_node
->nr_child
]);
1611 /* Return pointer to new recompacted node through old_node_flag_ptr */
1612 *old_node_flag_ptr
= new_node_flag
;
1616 flags
= RCUJA_SHADOW_CLEAR_FREE_NODE
;
	/*
	 * It is OK to free the lock associated with a node
	 * going to NULL, since we are holding the parent lock.
	 * This synchronizes removal with re-add of that node.
	 */
1622 if (new_type_index
== NODE_INDEX_NULL
)
1623 flags
|= RCUJA_SHADOW_CLEAR_FREE_LOCK
;
1624 ret
= rcuja_shadow_clear(ja
->ht
, old_node_flag
, shadow_node
,
1634 /* fallback if next pool is too small */
1635 assert(new_shadow_node
);
1636 ret
= rcuja_shadow_clear(ja
->ht
, new_node_flag
, new_shadow_node
,
1637 RCUJA_SHADOW_CLEAR_FREE_NODE
);
1641 case JA_RECOMPACT_ADD_SAME
:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
1650 case JA_RECOMPACT_ADD_NEXT
:
1652 const struct cds_ja_type
*next_type
;
		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
1660 assert(new_type
->type_class
== RCU_JA_POOL
);
1661 next_type
= &ja_types
[new_type_index
+ 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
1667 if (shadow_node
->nr_child
+ 1 >= next_type
->min_child
1668 && shadow_node
->nr_child
+ 1 <= next_type
->max_child
) {
1673 dbg_printf("Add fallback to type %d\n", new_type_index
);
1674 uatomic_inc(&ja
->nr_fallback
);
1680 case JA_RECOMPACT_DEL
:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
1689 assert(new_type
->type_class
== RCU_JA_POOL
);
1690 new_type_index
= old_type_index
;
1691 dbg_printf("Delete fallback keeping type %d\n", new_type_index
);
1692 uatomic_inc(&ja
->nr_fallback
);
	/*
	 * Last resort fallback: pigeon.
	 */
1703 new_type_index
= (1UL << JA_TYPE_BITS
) - 1;
1704 dbg_printf("Fallback to type %d\n", new_type_index
);
1705 uatomic_inc(&ja
->nr_fallback
);
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
1715 int ja_node_set_nth(struct cds_ja
*ja
,
1716 struct cds_ja_inode_flag
**node_flag
, uint8_t n
,
1717 struct cds_ja_inode_flag
*child_node_flag
,
1718 struct cds_ja_shadow_node
*shadow_node
,
1722 unsigned int type_index
;
1723 const struct cds_ja_type
*type
;
1724 struct cds_ja_inode
*node
;
1726 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1727 (unsigned int) n
, ja_node_ptr(*node_flag
), shadow_node
);
1729 node
= ja_node_ptr(*node_flag
);
1730 type_index
= ja_node_type(*node_flag
);
1731 type
= &ja_types
[type_index
];
1732 ret
= _ja_node_set_nth(type
, node
, *node_flag
, shadow_node
,
1733 n
, child_node_flag
);
1736 /* Not enough space in node, need to recompact to next type. */
1737 ret
= ja_node_recompact(JA_RECOMPACT_ADD_NEXT
, ja
, type_index
, type
, node
,
1738 shadow_node
, node_flag
, n
, child_node_flag
, NULL
, level
);
1741 /* Node needs to be recompacted. */
1742 ret
= ja_node_recompact(JA_RECOMPACT_ADD_SAME
, ja
, type_index
, type
, node
,
1743 shadow_node
, node_flag
, n
, child_node_flag
, NULL
, level
);
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
1754 int ja_node_clear_ptr(struct cds_ja
*ja
,
1755 struct cds_ja_inode_flag
**node_flag_ptr
, /* Pointer to location to nullify */
1756 struct cds_ja_inode_flag
**parent_node_flag_ptr
, /* Address of parent ptr in its parent */
1757 struct cds_ja_shadow_node
*shadow_node
, /* of parent */
1758 uint8_t n
, int level
)
1761 unsigned int type_index
;
1762 const struct cds_ja_type
*type
;
1763 struct cds_ja_inode
*node
;
1765 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1766 ja_node_ptr(*parent_node_flag_ptr
), shadow_node
, node_flag_ptr
);
1768 node
= ja_node_ptr(*parent_node_flag_ptr
);
1769 type_index
= ja_node_type(*parent_node_flag_ptr
);
1770 type
= &ja_types
[type_index
];
1771 ret
= _ja_node_clear_ptr(type
, node
, *parent_node_flag_ptr
, shadow_node
, node_flag_ptr
, n
);
1772 if (ret
== -EFBIG
) {
1773 /* Should try recompaction. */
1774 ret
= ja_node_recompact(JA_RECOMPACT_DEL
, ja
, type_index
, type
, node
,
1775 shadow_node
, parent_node_flag_ptr
, n
, NULL
,
1776 node_flag_ptr
, level
);
1781 struct cds_ja_node
*cds_ja_lookup(struct cds_ja
*ja
, uint64_t key
)
1783 unsigned int tree_depth
, i
;
1784 struct cds_ja_inode_flag
*node_flag
;
1786 if (caa_unlikely(key
> ja
->key_max
|| key
== UINT64_MAX
))
1788 tree_depth
= ja
->tree_depth
;
1789 node_flag
= rcu_dereference(ja
->root
);
1791 /* level 0: root node */
1792 if (!ja_node_ptr(node_flag
))
1795 for (i
= 1; i
< tree_depth
; i
++) {
1798 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (tree_depth
- i
- 1)));
1799 node_flag
= ja_node_get_nth(node_flag
, NULL
, iter_key
);
1800 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1801 (unsigned int) iter_key
, node_flag
);
1802 if (!ja_node_ptr(node_flag
))
	/* Last level lookup succeeded. We got an actual match. */
1807 return (struct cds_ja_node
*) node_flag
;
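/*
 * Usage sketch (illustration only, not part of this file): callers embed
 * struct cds_ja_node in their own structure and perform lookups from
 * within an RCU read-side critical section. The structure name
 * "my_entry" and the helper name are hypothetical.
 */
#if 0	/* example only */
struct my_entry {
	struct cds_ja_node ja_node;	/* embedded Judy array node */
	uint64_t key;
};

static struct my_entry *my_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node;

	/* Caller must hold the RCU read-side lock. */
	node = cds_ja_lookup(ja, key);
	if (!node)
		return NULL;
	return caa_container_of(node, struct my_entry, ja_node);
}
#endif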
1811 struct cds_ja_node
*cds_ja_lookup_inequality(struct cds_ja
*ja
, uint64_t key
,
1812 uint64_t *result_key
, enum ja_lookup_inequality mode
)
1814 int tree_depth
, level
;
1815 struct cds_ja_inode_flag
*node_flag
, *cur_node_depth
[JA_MAX_DEPTH
];
1816 uint8_t cur_key
[JA_MAX_DEPTH
];
1817 uint64_t _result_key
= 0;
1818 enum ja_direction dir
;
1823 if (caa_unlikely(key
> ja
->key_max
|| key
== UINT64_MAX
))
1830 memset(cur_node_depth
, 0, sizeof(cur_node_depth
));
1831 memset(cur_key
, 0, sizeof(cur_key
));
1832 tree_depth
= ja
->tree_depth
;
1833 node_flag
= rcu_dereference(ja
->root
);
1834 cur_node_depth
[0] = node_flag
;
1836 /* level 0: root node */
1837 if (!ja_node_ptr(node_flag
))
1840 for (level
= 1; level
< tree_depth
; level
++) {
1843 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (tree_depth
- level
- 1)));
1844 node_flag
= ja_node_get_nth(node_flag
, NULL
, iter_key
);
1845 if (!ja_node_ptr(node_flag
))
1847 cur_key
[level
- 1] = iter_key
;
1848 cur_node_depth
[level
] = node_flag
;
1849 dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
1850 (unsigned int) iter_key
, node_flag
);
1853 if (level
== tree_depth
) {
		/* Last level lookup succeeded. We got an equal match. */
1857 return (struct cds_ja_node
*) node_flag
;
	/*
	 * Find highest value left/right of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left/right
	 * of ours, go one level up, seek highest value left/right of
	 * current (recursively), and when we find one, get the
	 * rightmost/leftmost child of its rightmost/leftmost child,
	 * and so on down to the leaf level.
	 */
1879 for (; level
> 0; level
--) {
1882 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (tree_depth
- level
- 1)));
1883 node_flag
= ja_node_get_leftright(cur_node_depth
[level
- 1],
1884 iter_key
, &cur_key
[level
- 1], dir
);
1885 dbg_printf("cds_ja_lookup_inequality find sibling from %u at %u finds node_flag %p\n",
1886 (unsigned int) iter_key
, (unsigned int) cur_key
[level
- 1],
1888 /* If found left/right sibling, find rightmost/leftmost child. */
1889 if (ja_node_ptr(node_flag
))
1894 /* Reached the root and could not find a left/right sibling. */
	/*
	 * From this point, we are guaranteed to be able to find a
	 * "below or equal"/"above or equal" match. ja_attach_node() and
	 * ja_detach_node() both guarantee that it is not possible for a
	 * lookup to reach a dead-end.
	 */
1908 * Find rightmost/leftmost child of rightmost/leftmost child
1921 for (; level
< tree_depth
; level
++) {
1922 node_flag
= ja_node_get_minmax(node_flag
, &cur_key
[level
- 1], dir
);
1923 dbg_printf("cds_ja_lookup_inequality find minmax at %u finds node_flag %p\n",
1924 (unsigned int) cur_key
[level
- 1],
1926 if (!ja_node_ptr(node_flag
))
1930 assert(level
== tree_depth
);
1933 for (level
= 1; level
< tree_depth
; level
++) {
1934 _result_key
|= ((uint64_t) cur_key
[level
- 1])
1935 << (JA_BITS_PER_BYTE
* (tree_depth
- level
- 1));
1937 *result_key
= _result_key
;
1939 return (struct cds_ja_node
*) node_flag
;
1942 struct cds_ja_node
*cds_ja_lookup_below_equal(struct cds_ja
*ja
,
1943 uint64_t key
, uint64_t *result_key
)
1945 dbg_printf("cds_ja_lookup_below_equal key %" PRIu64
"\n", key
);
1946 return cds_ja_lookup_inequality(ja
, key
, result_key
, JA_LOOKUP_BE
);
1949 struct cds_ja_node
*cds_ja_lookup_above_equal(struct cds_ja
*ja
,
1950 uint64_t key
, uint64_t *result_key
)
1952 dbg_printf("cds_ja_lookup_above_equal key %" PRIu64
"\n", key
);
1953 return cds_ja_lookup_inequality(ja
, key
, result_key
, JA_LOOKUP_AE
);
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 *
 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: before attaching a branch, the entire content of
 * the new branch is populated, thus creating a cluster, before
 * attaching the cluster to the rest of the tree, thus making it visible
 * to lookups.
 */
1975 int ja_attach_node(struct cds_ja
*ja
,
1976 struct cds_ja_inode_flag
**attach_node_flag_ptr
,
1977 struct cds_ja_inode_flag
*attach_node_flag
,
1978 struct cds_ja_inode_flag
*parent_attach_node_flag
,
1979 struct cds_ja_inode_flag
**old_node_flag_ptr
,
1980 struct cds_ja_inode_flag
*old_node_flag
,
1983 struct cds_ja_node
*child_node
)
1985 struct cds_ja_shadow_node
*shadow_node
= NULL
,
1986 *parent_shadow_node
= NULL
;
1987 struct cds_ja_inode_flag
*iter_node_flag
, *iter_dest_node_flag
;
1989 struct cds_ja_inode_flag
*created_nodes
[JA_MAX_DEPTH
];
1990 int nr_created_nodes
= 0;
1992 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1993 level
, old_node_flag
, attach_node_flag_ptr
, attach_node_flag
, parent_attach_node_flag
);
1995 assert(!old_node_flag
);
1996 if (attach_node_flag
) {
1997 shadow_node
= rcuja_shadow_lookup_lock(ja
->ht
, attach_node_flag
);
2003 if (parent_attach_node_flag
) {
2004 parent_shadow_node
= rcuja_shadow_lookup_lock(ja
->ht
,
2005 parent_attach_node_flag
);
2006 if (!parent_shadow_node
) {
2012 if (old_node_flag_ptr
&& ja_node_ptr(*old_node_flag_ptr
)) {
2014 * Target node has been updated between RCU lookup and
2015 * lock acquisition. We need to re-try lookup and
2023 * Perform a lookup query to handle the case where
2024 * old_node_flag_ptr is NULL. We cannot use it to check if the
2025 * node has been populated between RCU lookup and mutex
2028 if (!old_node_flag_ptr
) {
2030 struct cds_ja_inode_flag
*lookup_node_flag
;
2031 struct cds_ja_inode_flag
**lookup_node_flag_ptr
;
2033 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (ja
->tree_depth
- level
)));
2034 lookup_node_flag
= ja_node_get_nth(attach_node_flag
,
2035 &lookup_node_flag_ptr
,
2037 if (lookup_node_flag
) {
2043 if (attach_node_flag_ptr
&& ja_node_ptr(*attach_node_flag_ptr
) !=
2044 ja_node_ptr(attach_node_flag
)) {
2046 * Target node has been updated between RCU lookup and
2047 * lock acquisition. We need to re-try lookup and
2054 /* Create new branch, starting from bottom */
2055 iter_node_flag
= (struct cds_ja_inode_flag
*) child_node
;
2057 for (i
= ja
->tree_depth
- 1; i
>= (int) level
; i
--) {
2060 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (ja
->tree_depth
- i
- 1)));
2061 dbg_printf("branch creation level %d, key %u\n",
2062 i
, (unsigned int) iter_key
);
2063 iter_dest_node_flag
= NULL
;
2064 ret
= ja_node_set_nth(ja
, &iter_dest_node_flag
,
2069 dbg_printf("branch creation error %d\n", ret
);
2072 created_nodes
[nr_created_nodes
++] = iter_dest_node_flag
;
2073 iter_node_flag
= iter_dest_node_flag
;
2077 /* Publish branch */
2080 * Attaching to root node.
2082 rcu_assign_pointer(ja
->root
, iter_node_flag
);
2086 iter_key
= (uint8_t) (key
>> (JA_BITS_PER_BYTE
* (ja
->tree_depth
- level
)));
2087 dbg_printf("publish branch at level %d, key %u\n",
2088 level
- 1, (unsigned int) iter_key
);
2089 /* We need to use set_nth on the previous level. */
2090 iter_dest_node_flag
= attach_node_flag
;
2091 ret
= ja_node_set_nth(ja
, &iter_dest_node_flag
,
2094 shadow_node
, level
- 1);
2096 dbg_printf("branch publish error %d\n", ret
);
2102 rcu_assign_pointer(*attach_node_flag_ptr
, iter_dest_node_flag
);
2110 for (i
= 0; i
< nr_created_nodes
; i
++) {
2114 flags
= RCUJA_SHADOW_CLEAR_FREE_LOCK
;
2116 flags
|= RCUJA_SHADOW_CLEAR_FREE_NODE
;
2117 tmpret
= rcuja_shadow_clear(ja
->ht
,
2125 if (parent_shadow_node
)
2126 rcuja_shadow_unlock(parent_shadow_node
);
2129 rcuja_shadow_unlock(shadow_node
);
/*
 * Lock the parent containing the pointer to list of duplicates, and add
 * node to this list. Failure can happen if concurrent update changes
 * the parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
2141 int ja_chain_node(struct cds_ja
*ja
,
2142 struct cds_ja_inode_flag
*parent_node_flag
,
2143 struct cds_ja_inode_flag
**node_flag_ptr
,
2144 struct cds_ja_inode_flag
*node_flag
,
2145 struct cds_ja_node
*last_node
,
2146 struct cds_ja_node
*node
)
2148 struct cds_ja_shadow_node
*shadow_node
;
2149 struct cds_ja_node
*iter_node
;
2150 int ret
= 0, found
= 0;
2152 shadow_node
= rcuja_shadow_lookup_lock(ja
->ht
, parent_node_flag
);
	/*
	 * Ensure that previous node is still there at end of list.
	 */
2159 iter_node
= (struct cds_ja_node
*) ja_node_ptr(node_flag
);
2160 if ((struct cds_ja_node
*) ja_node_ptr(*node_flag_ptr
) != iter_node
) {
2164 cds_ja_for_each_duplicate(iter_node
) {
2165 if (iter_node
== last_node
)
	/*
	 * Add node to tail of list to ensure that RCU traversals will
	 * always see either the prior node or the newly added if
	 * executed concurrently with a sequence of add followed by del
	 * on the same key. Safe against concurrent RCU read traversals.
	 */
2179 rcu_assign_pointer(last_node
->next
, node
);
2181 rcuja_shadow_unlock(shadow_node
);
static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max || key == UINT64_MAX)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
			key, node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
				&node_flag_ptr,
				iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, node);
	} else {	/* Key already present: chain a duplicate. */
		struct cds_ja_node *iter_node, *last_node = NULL;

		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		/* Find last duplicate */
		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node)
			last_node = iter_node;

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
				parent_attach_node_flag,
				attach_node_flag_ptr,
				attach_node_flag,
				last_node,
				node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;
	return ret;
}
int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	return _cds_ja_add(ja, key, node, NULL);
}
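
/*
 * Example usage of cds_ja_add() (sketch only, not part of this file):
 * struct cds_ja_node is meant to be embedded in the caller's own
 * structure, and the call must run under the RCU read-side lock.
 * "struct my_item" and its fields are hypothetical names.
 *
 *	struct my_item {
 *		struct cds_ja_node ja_node;
 *		int value;
 *	};
 *
 *	struct my_item *item = calloc(1, sizeof(*item));
 *
 *	rcu_read_lock();
 *	ret = cds_ja_add(ja, key, &item->ja_node);
 *	rcu_read_unlock();
 *	if (ret)
 *		free(item);
 */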
struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return node;
}
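
/*
 * Sketch of cds_ja_add_unique() usage (illustrative): the return value
 * lets the caller detect a pre-existing key without a separate lookup,
 * following _cds_ja_add()'s unique_node_ret handling above.
 *
 *	rcu_read_lock();
 *	ret_node = cds_ja_add_unique(ja, key, &item->ja_node);
 *	rcu_read_unlock();
 *
 * If ret_node != &item->ja_node, the key was already present and
 * ret_node points to the existing entry; item was not inserted.
 */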
/*
 * Note: there is no need to look up the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 *
 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: when removing a branch, it makes sure to perform
 * the "cut" at the highest node that has only one child, effectively
 * replacing it with a NULL pointer.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
		*parent_node_flag = NULL,
		**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}
			break;	/* Highest node to modify has been found. */
		}
	}
	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free the lock here, because the RCU read lock is
	 * held, and the free is only performed in call_rcu.
	 */
	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				NULL,
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old/new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
			iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}
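
/*
 * Illustration of the snapshot arrays consumed above (hypothetical
 * layout, 16-bit key space, so tree_depth == 3 and
 * nr_snapshot == tree_depth + 1 == 4):
 *
 *	snapshot[0]  address of ja->root (used only as shadow-node lookup key)
 *	snapshot[1]  first-level internal node, child slot selected by the
 *	             high key byte (snapshot_n[2])
 *	snapshot[2]  second-level internal node, child slot selected by the
 *	             low key byte (snapshot_n[3])
 *	snapshot[3]  the matching leaf, pushed by cds_ja_del() just before
 *	             calling ja_detach_node()
 *
 * ja_detach_node() walks this array bottom-up, locking each level, so
 * that the branch is cut at the highest node left with a single child.
 */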
static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Find the previous node's next pointer pointing to our node,
	 * so we can update it. Retry if another thread removed all but
	 * one of the duplicates since the check (that check was performed
	 * without the lock). Ensure that the node we are about to remove
	 * is still in the list (while holding the lock). No need for RCU
	 * traversal here since we hold the lock on the parent.
	 */
	iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
	iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
	cds_ja_for_each_duplicate(iter_node) {
		count++;
		if (iter_node == node) {
			prev_node_ptr = iter_node_ptr;
			found++;
		}
		iter_node_ptr = &iter_node->next;
	}
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	CMM_STORE_SHARED(*prev_node_ptr, node->next);
	/*
	 * Validate that we indeed removed the node from the linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
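
/*
 * Sketch (illustrative): with three duplicates A -> B -> C chained on
 * the same key and node == B, the loop above leaves
 * prev_node_ptr == &A->next, so
 *
 *	CMM_STORE_SHARED(*prev_node_ptr, node->next);
 *
 * switches A->next from B to C in a single store. Concurrent RCU
 * readers traversing the duplicate list therefore see either
 * A -> B -> C or A -> C, never a torn list; B itself must only be
 * reclaimed by the caller after a grace period.
 */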
/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max || key == UINT64_MAX))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
			key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
				node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&node_flag_ptr,
				iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
				(unsigned int) iter_key, node_flag,
				prev_node_flag_ptr);
	}
	/*
	 * We reached the bottom of the tree, try to find the node we are
	 * trying to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
				key);
		return -ENOENT;
	} else {
		struct cds_ja_node *iter_node, *match = NULL;
		int count = 0;

		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node) {
			dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
			if (iter_node == node)
				match = iter_node;
			count++;
		}
		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
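
/*
 * Example removal sequence (sketch, hypothetical caller code): the
 * deletion must run under the RCU read-side lock, and the removed
 * node's memory may only be reclaimed after a grace period, e.g. via
 * synchronize_rcu() or call_rcu():
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &item->ja_node);
 *	rcu_read_unlock();
 *	if (!ret) {
 *		synchronize_rcu();
 *		free(item);
 *	}
 */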
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		return NULL;

	if (key_bits < 64)
		ja->key_max = (1ULL << key_bits) - 1;
	else
		ja->key_max = UINT64_MAX;

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht) {
		free(ja);
		return NULL;
	}
	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja);
	if (!root_shadow_node) {
		ret = rcuja_delete_ht(ja->ht);
		assert(!ret);
		free(ja);
		return NULL;
	}

	return ja;
}
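
/*
 * Worked example of the sizing above (values follow directly from the
 * code): with key_bits == 64, key_max == UINT64_MAX and
 * tree_depth == (64 >> JA_LOG2_BITS_PER_BYTE) + 1 == 8 + 1 == 9,
 * i.e. one level for the pointer to the root node plus eight internal
 * levels, each consuming one byte of the key. With key_bits == 16,
 * key_max == 0xffff and tree_depth == 3.
 */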
static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, " %3u: %4lu\n",
				i, ja->node_fallback_count_distribution[i]);
	}
}
static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			uatomic_read(&ja->nr_fallback),
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);
	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) na - nf, na, nf);
		ret = -1;
	}
	return ret;
}
/*
 * There should be no more concurrent add, delete, nor look-up performed
 * on the Judy array while it is being destroyed (ensured by the
 * caller).
 */
int cds_ja_destroy(struct cds_ja *ja)
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
			RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}
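
/*
 * Lifecycle sketch (illustrative, assuming the flavor-specific
 * cds_ja_new() wrapper declared in urcu/rcuja.h): every user node must
 * be removed, and its grace period waited for, before destruction,
 * since cds_ja_destroy() only reclaims internal nodes, never the
 * caller-owned cds_ja_node structures.
 *
 *	struct cds_ja *ja = cds_ja_new(64);
 *
 *	rcu_read_lock();
 *	(void) cds_ja_add(ja, 42, &item->ja_node);
 *	(void) cds_ja_del(ja, 42, &item->ja_node);
 *	rcu_read_unlock();
 *	synchronize_rcu();
 *	free(item);
 *
 *	ret = cds_ja_destroy(ja);
 */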