/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"

#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};

#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

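/*
 * Illustrative sanity check (added commentary, not in the original
 * source): each type's min_child stays at or below the previous type's
 * max_child, which is the hysteresis described above. A node whose
 * population oscillates around a size boundary therefore does not get
 * reallocated on every add/removal cycle.
 */
static inline __attribute__((unused))
void ja_types_hysteresis_check(void)
{
	unsigned int i;

	/* Skip the trailing RCU_JA_NULL entry. */
	for (i = 1; i < CAA_ARRAY_SIZE(ja_types) - 1; i++)
		assert(ja_types[i].min_child <= ja_types[i - 1].max_child);
}
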
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)								\
	struct {										\
		uint8_t nr_child;								\
		uint8_t child_value[ja_type_## index ##_max_linear_child];			\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child];	\
	}

#define DECLARE_POOL_NODE(index)								\
	struct {										\
		struct {									\
			uint8_t nr_child;							\
			uint8_t child_value[ja_type_## index ##_max_linear_child];		\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];				\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,	/* Below or equal */
	JA_LOOKUP_AE,	/* Above or equal */
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

static
unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

static
void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	free(node);
	uatomic_inc(&ja->nr_nodes_freed);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

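/*
 * Worked example (added commentary, not in the original source):
 * JA_ALIGN rounds up to a power-of-two boundary and JA_FLOOR rounds
 * down, e.g. JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8. The mask
 * arithmetic only holds for power-of-two alignments.
 */
static inline __attribute__((unused))
void ja_align_example_check(void)
{
	CAA_BUILD_BUG_ON(JA_ALIGN(13UL, 8UL) != 16UL);
	CAA_BUILD_BUG_ON(JA_FLOOR(13UL, 8UL) != 8UL);
}
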
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */

static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

static
struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;
	int match_idx = -1, match_v;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (dir == JA_LEFT) {
			if ((int) v < n && (int) v > match_v) {
				match_v = v;
				match_idx = i;
			}
		} else {
			if ((int) v > n && (int) v < match_v) {
				match_v = v;
				match_idx = i;
			}
		}
	}

	if (match_idx < 0) {
		return NULL;
	}
	assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);

	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	unsigned int pool_nr;
	int match_v;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (dir == JA_LEFT) {
				if ((int) v < n && (int) v > match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			} else {
				if ((int) v > n && (int) v < match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			}
		}
	}
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		/* n - 1 is first value left of n */
		for (i = n - 1; i >= 0; i--) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
					child_node_flag);
				return child_node_flag;
			}
		}
	} else {
		/* n + 1 is first value right of n */
		for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
					child_node_flag);
				return child_node_flag;
			}
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
		int n,
		enum ja_direction dir)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_direction(type, node, n, dir);
	case RCU_JA_POOL:
		return ja_pool_node_get_direction(type, node, n, dir);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_direction(type, node, n, dir);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
		unsigned int n,
		enum ja_direction dir)
{
	return ja_node_get_direction(node_flag, n, dir);
}

static
struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
		enum ja_direction dir)
{
	switch (dir) {
	case JA_LEFTMOST:
		return ja_node_get_direction(node_flag,
				-1, JA_RIGHT);
	case JA_RIGHTMOST:
		return ja_node_get_direction(node_flag,
				JA_ENTRY_PER_NODE, JA_LEFT);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;	/* Re-use a NULL slot of same value. */
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

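/*
 * Note (added commentary, not in the original source): the store of the
 * new value, the write barrier, and the nr_child store above pair with
 * the cmm_smp_rmb() in ja_linear_node_get_nth(): a reader observing the
 * incremented nr_child is guaranteed to also observe the child value
 * and pointer published before it.
 */
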
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}

static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}

static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}

/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	default:
		assert(0);
		return -EINVAL;
	}
}

/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution in two sub-distributions containing as close to the
 * same number of elements as possible.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}

/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution in four sub-distributions
 * containing as close to the same number of elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (v & (1U << bit_i)) {
						if (v & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (v & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if (v & (1U << bit_i)) {
							if (v & (1U << bit_j)) {
								nr_2d_11[bit_i][bit_j]++;
							} else {
								nr_2d_10[bit_i][bit_j]++;
							}
						} else {
							if (v & (1U << bit_j)) {
								nr_2d_01[bit_i][bit_j]++;
							} else {
								nr_2d_00[bit_i][bit_j]++;
							}
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (i & (1U << bit_i)) {
						if (i & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (i & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if (n & (1U << bit_i)) {
					if (n & (1U << bit_j)) {
						nr_2d_11[bit_i][bit_j]++;
					} else {
						nr_2d_10[bit_i][bit_j]++;
					}
				} else {
					if (n & (1U << bit_j)) {
						nr_2d_01[bit_i][bit_j]++;
					} else {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worse distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worse distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}

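/*
 * Note (added commentary, not in the original source): taking the worst
 * of the four sub-distribution distances and minimizing it across all
 * (bit_i, bit_j) pairs is a minimax choice: it favors bit pairs whose
 * most populated quarter-pool is as small as possible, rather than
 * pairs that split well on average but overload a single quarter.
 */
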
static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	type = &ja_types[type_index];
	if (nr_nodes < type->min_child)
		return type_index - 1;
	else if (nr_nodes > type->max_child)
		return type_index + 1;
	else
		return type_index;
}

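/*
 * Worked example (added commentary, not in the original source): on
 * 64-bit, a type 4 linear node (min_child 10, max_child 28) that grows
 * to 29 children moves up to type 5, while it only shrinks back to
 * type 3 once it drops below 10 children, even though type 3 already
 * holds up to 14. The overlap is the hysteresis discussed above
 * ja_types[].
 */
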
/*
 * ja_node_recompact: recompact a node, adding a new child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		int level)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node = NULL;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
	int ret;
	int fallback = 0;

	old_node_flag = *old_node_flag_ptr;

	/*
	 * Need to find nearest type index even for ADD_SAME, because
	 * this recompaction, when applied to linear nodes, will garbage
	 * collect dummy (NULL) entries, and can therefore cause a few
	 * linear representations to be skipped.
	 */
	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child + 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child + 1);
		break;
	case JA_RECOMPACT_ADD_NEXT:
		if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
			new_type_index = 0;
			dbg_printf("Recompact for NULL\n");
		} else {
			new_type_index = find_nearest_type_index(old_type_index,
				shadow_node->nr_child + 1);
			dbg_printf("Recompact for node with %u children\n",
				shadow_node->nr_child + 1);
		}
		break;
	case JA_RECOMPACT_DEL:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child - 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child - 1);
		break;
	default:
		assert(0);
	}

retry:	/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
			old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	if (new_type_index != NODE_INDEX_NULL) {
		new_node = alloc_cds_ja_node(ja, new_type);
		if (!new_node)
			return -ENOMEM;

		if (new_type->type_class == RCU_JA_POOL) {
			switch (new_type->nr_pool_order) {
			case 1:
			{
				unsigned int node_distrib_bitsel;

				node_distrib_bitsel =
					ja_node_sum_distribution_1d(mode, ja,
						old_type_index, old_type,
						old_node, shadow_node,
						n, child_node_flag,
						nullify_node_flag_ptr);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				new_node_flag = ja_node_flag_pool_1d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			case 2:
			{
				unsigned int node_distrib_bitsel[2];

				ja_node_sum_distribution_2d(mode, ja,
					old_type_index, old_type,
					old_node, shadow_node,
					n, child_node_flag,
					nullify_node_flag_ptr,
					node_distrib_bitsel);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
				new_node_flag = ja_node_flag_pool_2d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			default:
				assert(0);
			}
		} else {
			new_node_flag = ja_node_flag(new_node, new_type_index);
		}

		dbg_printf("Recompact inherit lock from %p\n", shadow_node);
		new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
		if (!new_shadow_node) {
			free_cds_ja_node(ja, new_node);
			return -ENOMEM;
		}
		if (fallback)
			new_shadow_node->fallback_removal_count =
						JA_FALLBACK_REMOVAL_COUNT;
	} else {
		new_node = NULL;
		new_node_flag = NULL;
	}

	assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);

	if (new_type_index == NODE_INDEX_NULL)
		goto skip_copy;

	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		return -EINVAL;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* Add new child to new node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (shadow_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}

	return 0;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);
		break;
	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		}
		new_type_index++;
		dbg_printf("Add fallback to type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	}
	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node,
		int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	switch (ret) {
	case -ENOSPC:
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	case -ERANGE:
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	}
	return ret;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr,	/* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n, int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr, level);
	}

	return ret;
}

struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_ja_node *) node_flag;
}

static
struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
		enum ja_lookup_inequality mode)
{
	int tree_depth, level;
	struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
	enum ja_direction dir;

	switch (mode) {
	case JA_LOOKUP_BE:
		if (caa_unlikely(key > ja->key_max || key == 0))
			return NULL;
		break;
	case JA_LOOKUP_AE:
		if (caa_unlikely(key >= ja->key_max))
			return NULL;
		break;
	default:
		return NULL;
	}

	memset(cur_node_depth, 0, sizeof(cur_node_depth));
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);
	cur_node_depth[0] = node_flag;

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (level = 1; level < tree_depth; level++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		if (!ja_node_ptr(node_flag))
			break;
		cur_node_depth[level] = node_flag;
		dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got an equal match. */
		return (struct cds_ja_node *) node_flag;
	}

	/*
	 * Find highest value left/right of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left/right
	 * of ours, go one level up, seek highest value left/right of
	 * current (recursively), and when we find one, get the
	 * rightmost/leftmost child of its rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_LEFT;
		break;
	case JA_LOOKUP_AE:
		dir = JA_RIGHT;
		break;
	default:
		assert(0);
	}

	for (; level > 0; level--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
				iter_key, dir);
		/* If found left sibling, find rightmost/leftmost child. */
		if (ja_node_ptr(node_flag))
			break;
	}

	if (!level) {
		/* Reached the root and could not find a left/right sibling. */
		return NULL;
	}

	level++;

	/*
	 * From this point, we are guaranteed to be able to find a
	 * "below than"/"above than" match. ja_attach_node() and
	 * ja_detach_node() both guarantee that it is not possible for a
	 * lookup to reach a dead-end.
	 */

	/*
	 * Find rightmost/leftmost child of rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_RIGHTMOST;
		break;
	case JA_LOOKUP_AE:
		dir = JA_LEFTMOST;
		break;
	default:
		assert(0);
	}

	for (; level < tree_depth; level++) {
		node_flag = ja_node_get_minmax(node_flag, dir);
		if (!ja_node_ptr(node_flag))
			break;
	}

	assert(level == tree_depth);

	return (struct cds_ja_node *) node_flag;
}

struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja, uint64_t key)
{
	return cds_ja_lookup_inequality(ja, key, JA_LOOKUP_BE);
}

struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja, uint64_t key)
{
	return cds_ja_lookup_inequality(ja, key, JA_LOOKUP_AE);
}

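/*
 * Usage sketch (added commentary, not part of the original source),
 * assuming the usual urcu convention that lookups run within an RCU
 * read-side critical section:
 *
 *	rcu_read_lock();
 *	node = cds_ja_lookup(ja, 42);
 *	below = cds_ja_lookup_below_equal(ja, 42);
 *	above = cds_ja_lookup_above_equal(ja, 42);
 *	rcu_read_unlock();
 *
 * cds_ja_lookup() only returns exact matches, while the two variants
 * return the node with the closest key below (resp. above) or equal to
 * the lookup key.
 */
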
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 *
 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: before attaching a branch, the entire content of
 * the new branch is populated, thus creating a cluster, before
 * attaching the cluster to the rest of the tree, thus making it visible
 * to lookups.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag *parent_attach_node_flag,
		struct cds_ja_inode_flag **old_node_flag_ptr,
		struct cds_ja_inode_flag *old_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
		level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);

	assert(!old_node_flag);
	if (attach_node_flag) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
	}
	if (parent_attach_node_flag) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_attach_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/*
	 * Perform a lookup query to handle the case where
	 * old_node_flag_ptr is NULL. We cannot use it to check if the
	 * node has been populated between RCU lookup and mutex
	 * acquisition.
	 */
	if (!old_node_flag_ptr) {
		uint8_t iter_key;
		struct cds_ja_inode_flag *lookup_node_flag;
		struct cds_ja_inode_flag **lookup_node_flag_ptr;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		lookup_node_flag = ja_node_get_nth(attach_node_flag,
				&lookup_node_flag_ptr,
				iter_key);
		if (lookup_node_flag) {
			ret = -EEXIST;
			goto unlock_parent;
		}
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	iter_node_flag = (struct cds_ja_inode_flag *) child_node;

	for (i = ja->tree_depth - 1; i >= (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
		dbg_printf("branch creation level %d, key %u\n",
				i, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL, i);
		if (ret) {
			dbg_printf("branch creation error %d\n", ret);
			goto check_error;
		}
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish branch */
	if (level == 1) {
		/*
		 * Attaching to root node.
		 */
		rcu_assign_pointer(ja->root, iter_node_flag);
	} else {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		dbg_printf("publish branch at level %d, key %u\n",
				level - 1, (unsigned int) iter_key);
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = attach_node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node, level - 1);
		if (ret) {
			dbg_printf("branch publish error %d\n", ret);
			goto check_error;
		}
		/*
		 * Attach branch
		 */
		rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
	}

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}

/*
 * Lock the parent containing the pointer to list of duplicates, and add
 * node to this list. Failure can happen if concurrent update changes
 * the parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Add node to head of list. Safe against concurrent RCU read
	 * traversals.
	 */
	node->next = (struct cds_ja_node *) node_flag;
	rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}

static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, node);
	} else {
		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
			parent_attach_node_flag,
			attach_node_flag_ptr,
			attach_node_flag,
			node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;

	return ret;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	return _cds_ja_add(ja, key, node, NULL);
}

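/*
 * Usage sketch (added commentary, not part of the original source):
 * struct cds_ja_node is meant to be embedded within a user-defined
 * structure, following the usual cds_* convention. A hypothetical
 * caller, holding the RCU read-side lock:
 *
 *	struct my_item {
 *		struct cds_ja_node ja_node;
 *		uint64_t key;
 *	};
 *
 *	item->key = 42;
 *	rcu_read_lock();
 *	ret = cds_ja_add(ja, item->key, &item->ja_node);
 *	rcu_read_unlock();
 *
 * Duplicate keys are chained; use cds_ja_add_unique() below to reject
 * them instead.
 */
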
struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return node;
}
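/*
 * Usage sketch for the two insertion entry points (illustrative only,
 * not compiled here; "struct my_entry" is a hypothetical caller-side
 * type embedding the judy array node, and callers are assumed to be
 * registered RCU threads).
 */
#if 0
struct my_entry {
	uint64_t key;
	struct cds_ja_node node;	/* embedded linkage */
};

static int my_insert(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_add(ja, entry->key, &entry->node);	/* duplicates allowed */
	rcu_read_unlock();
	return ret;	/* 0 on success, negative error value otherwise */
}

static struct my_entry *my_insert_unique(struct cds_ja *ja,
		struct my_entry *entry)
{
	struct cds_ja_node *ret_node;

	rcu_read_lock();
	ret_node = cds_ja_add_unique(ja, entry->key, &entry->node);
	rcu_read_unlock();
	/* On key conflict, the pre-existing node is returned instead. */
	return caa_container_of(ret_node, struct my_entry, node);
}
#endif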
/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 *
 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: when removing a branch, it makes sure to perform
 * the "cut" at the highest node that has only one child, effectively
 * replacing it with a NULL pointer.
 */
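/*
 * Example of the "cut" rule above (hypothetical 16-bit keys, so
 * tree_depth == 3): if key 0x0102 is the only key below the root's
 * 0x01 child, the detach publishes NULL into the root's 0x01 slot
 * first, then frees the lower-level nodes through call_rcu, so a
 * concurrent lookup either sees the whole branch or none of it.
 */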
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old and new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}
static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Find the previous node's next pointer pointing to our node,
	 * so we can update it. Retry if another thread removed all but
	 * one of duplicates since check (this check was performed
	 * without lock). Ensure that the node we are about to remove is
	 * still in the list (while holding lock). No need for RCU
	 * traversal here since we hold the lock on the parent.
	 */
	iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
	iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
	cds_ja_for_each_duplicate(iter_node) {
		count++;
		if (iter_node == node) {
			prev_node_ptr = iter_node_ptr;
			found++;
		}
		iter_node_ptr = &iter_node->next;
	}

	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	CMM_STORE_SHARED(*prev_node_ptr, node->next);
	/*
	 * Validate that we indeed removed the node from linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
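/*
 * Reader-side counterpart (illustrative, not compiled here): the
 * duplicates chained under a key are visited with the RCU variant of
 * the duplicate iterator. Assumes the cds_ja_lookup() accessor
 * declared in rcuja.h.
 */
#if 0
static void my_visit_duplicates(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *iter;

	rcu_read_lock();
	iter = cds_ja_lookup(ja, key);	/* head of duplicate list, or NULL */
	cds_ja_for_each_duplicate_rcu(iter) {
		/* iter points in turn to each node stored under "key" */
	}
	rcu_read_unlock();
}
#endif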
/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
			node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			prev_node_flag_ptr);
	}

	/*
	 * We reached bottom of tree, try to find the node we are trying
	 * to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
			key);
		return -ENOENT;
	} else {
		struct cds_ja_node *iter_node, *match = NULL;
		int count = 0;

		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node) {
			dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
			if (iter_node == node)
				match = iter_node;
			count++;
		}
		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
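/*
 * Removal sketch (illustrative only, not compiled here; "struct
 * my_entry" is the same hypothetical caller-side type as above,
 * extended with a struct rcu_head member). cds_ja_del only unlinks
 * the node: its memory must outlive a grace period, e.g. via call_rcu.
 */
#if 0
static void my_free_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct my_entry, rcu_head));
}

static int my_remove(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_del(ja, entry->key, &entry->node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&entry->rcu_head, my_free_rcu);	/* deferred free */
	return ret;
}
#endif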
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja, 0);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}
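/*
 * Worked example of the depth computation above: with key_bits == 64
 * and JA_LOG2_BITS_PER_BYTE == 3, tree_depth == (64 >> 3) + 1 == 9:
 * eight byte-indexed internal levels, plus level 0 for the pointer to
 * the root node.
 */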
/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*rcu_free_node)(struct cds_ja_node *node))
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_ja_node *node_iter, *n;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
				node_iter = (struct cds_ja_node *) iter;
				cds_ja_for_each_duplicate_safe(node_iter, n) {
					rcu_free_node(node_iter);
				}
			}
		}
		break;
	}
	case RCU_JA_NULL:
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	default:
		assert(0);
	}
}
static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, "	%3u: %4lu\n",
			i, ja->node_fallback_count_distribution[i]);
	}
}
static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			uatomic_read(&ja->nr_fallback),
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);
	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) na - nf, na, nf);
		ret = -1;
	}
	return ret;
}
/*
 * There should be no more concurrent add, delete, nor look-up performed
 * on the Judy array while it is being destroyed (ensured by the
 * caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*free_node_cb)(struct cds_ja_node *node))
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
		free_node_cb);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}