/*
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"

#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the node size.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

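/*
 * Illustrative example of the hysteresis (values from the 64-bit table
 * below): a linear node of type 4 holds at most 28 children; adding a
 * 29th reallocates it as the type 5 pool, whose min_child is 22. The
 * node must therefore shrink below 22 children before being recompacted
 * back to a linear type, so a workload cycling around the 28-children
 * boundary does not reallocate on every add/removal.
 */
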
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};

#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};

#endif /* !(CAA_BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)								\
	struct {										\
		uint8_t nr_child;								\
		uint8_t child_value[ja_type_## index ##_max_linear_child];			\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child];	\
	}

#define DECLARE_POOL_NODE(index)								\
	struct {										\
		struct {									\
			uint8_t nr_child;							\
			uint8_t child_value[ja_type_## index ##_max_linear_child];		\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];				\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	unsigned long type_index = ja_node_type(node);
	const struct cds_ja_type *type;

	type = &ja_types[type_index];
	switch (type->type_class) {
	case RCU_JA_LINEAR:		/* fall-through */
	case RCU_JA_PIGEON:		/* fall-through */
	case RCU_JA_NULL:		/* fall-through */
	default:			/* fall-through */
		return _ja_node_mask_ptr(node);
	case RCU_JA_POOL:
		switch (type->nr_pool_order) {
		case 1:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
		case 2:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
		default:
			assert(0);
			return NULL;
		}
	}
}

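/*
 * Illustrative sketch (not used by the implementation): because nodes
 * are allocated on a (1 << order) boundary, the low bits of a node
 * address are always zero, which is what lets ja_node_type() and
 * ja_node_ptr() above decode a type index and a pointer from the same
 * word. The hypothetical helper below shows the corresponding encoding
 * direction; the real encoding helpers (ja_node_flag() and friends)
 * live in rcuja-internal.h.
 */
static inline __attribute__((unused))
struct cds_ja_inode_flag *example_ja_node_encode(struct cds_ja_inode *node,
		unsigned long type_index)
{
	/* Aligned allocation guarantees the type bits are free. */
	assert(!((unsigned long) node & JA_TYPE_MASK));
	return (struct cds_ja_inode_flag *) ((unsigned long) node | type_index);
}
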
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	free(node);
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

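/*
 * Worked example: on a target where sizeof(void *) == 8,
 * JA_ALIGN(13, 8) computes (13 + 7) & ~7 == 16. align_ptr_size() thus
 * rounds a byte pointer up to the next pointer-sized boundary, which
 * is how the child pointer array is located after the byte-sized
 * value array inside linear nodes.
 */
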
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

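/*
 * Resulting read-side layout of a linear node (sketch, assuming a
 * hypothetical type with max_linear_child == 6):
 *
 *	u.data[0]	nr_child count (read first, then cmm_smp_rmb())
 *	u.data[1..6]	child_value[] bytes, one key byte per child
 *	(padding)	up to the next pointer-sized boundary
 *	pointers[0..5]	struct cds_ja_inode_flag * child pointers
 */
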
static
struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i, match_idx;
	int match_v = -1;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (v < n && (int) v > match_v) {
			match_v = v;
			match_idx = i;
		}
	}
	if (match_v < 0) {
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

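/*
 * Worked example for the 2-dimensional case above: with
 * bitsel = { 5, 2 } and n = 0x24 (binary 00100100), bits 5 and 2 of n
 * are both set, so index[0] = 1 << 1 = 2 and index[1] = 1, giving
 * rindex = 3: the child is looked up in the fourth (last) linear pool.
 */
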
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	unsigned int pool_nr;
	int match_v = -1;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (v < n && (int) v > match_v) {
				match_v = v;
				match_node_flag = iter;
			}
		}
	}
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);

	/* n - 1 is first value left of n */
	for (i = n - 1; i >= 0; i--) {
		child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
		child_node_flag = rcu_dereference(*child_node_flag_ptr);
		if (child_node_flag) {
			dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
				child_node_flag);
			return child_node_flag;
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
		unsigned int n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_left(type, node, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_left(type, node, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_left(type, node, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
{
	return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
}

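/*
 * Since all key bytes are strictly below JA_ENTRY_PER_NODE (256),
 * asking ja_node_get_left() for the highest entry left of
 * JA_ENTRY_PER_NODE returns the rightmost populated child.
 */
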
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

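/*
 * Publication ordering sketch: the cmm_smp_wmb() above orders the
 * child pointer and value stores before the nr_child increment, and
 * pairs with the cmm_smp_rmb() following ja_linear_node_get_nr_child()
 * in the read paths, so a reader that observes the incremented
 * nr_child is guaranteed to also observe the newly stored value and
 * pointer.
 */
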
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}

static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}

static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}

/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}
}

/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions each containing as close to
 * half of the elements as possible.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}

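/*
 * Worked example: for a distribution of 10 children where 4 of them
 * have bit 3 set, the distance for bit 3 is abs_int((4 << 1) - 10)
 * == 2. A bit set in exactly 5 of the 10 children would give distance
 * 0, a perfect halving.
 */
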
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four
 * sub-distributions each containing as close to a quarter of the
 * elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_11[bit_i][bit_j]++;
						}
						if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_10[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_01[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_11[bit_i][bit_j]++;
				}
				if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_10[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_01[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_00[bit_i][bit_j]++;
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worse distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worse distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}

static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	for (;;) {
		type = &ja_types[type_index];
		if (nr_nodes < type->min_child)
			type_index--;
		else if (nr_nodes > type->max_child)
			type_index++;
		else
			break;
	}
	return type_index;
}

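/*
 * For example (64-bit tables above): a type 4 linear node
 * (min_child 10, max_child 28) overflowing to 29 children yields type
 * index 5 (22..54 range), while shrinking it to 9 children yields type
 * index 3, whose 5..14 range contains 9.
 */
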
/*
 * ja_node_recompact: recompact a node, adding or removing a child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		int level)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node = NULL;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
	int ret;
	int fallback = 0;

	old_node_flag = *old_node_flag_ptr;

	/*
	 * Need to find nearest type index even for ADD_SAME, because
	 * this recompaction, when applied to linear nodes, will garbage
	 * collect dummy (NULL) entries, and can therefore cause a few
	 * linear representations to be skipped.
	 */
	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child + 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child + 1);
		break;
	case JA_RECOMPACT_ADD_NEXT:
		if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
			new_type_index = 0;
			dbg_printf("Recompact for NULL\n");
		} else {
			new_type_index = find_nearest_type_index(old_type_index,
				shadow_node->nr_child + 1);
			dbg_printf("Recompact for node with %u children\n",
				shadow_node->nr_child + 1);
		}
		break;
	case JA_RECOMPACT_DEL:
		new_type_index = find_nearest_type_index(old_type_index,
			shadow_node->nr_child - 1);
		dbg_printf("Recompact for node with %u children\n",
			shadow_node->nr_child - 1);
		break;
	default:
		assert(0);
		return -EINVAL;
	}

retry:		/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
			old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	if (new_type_index != NODE_INDEX_NULL) {
		new_node = alloc_cds_ja_node(ja, new_type);
		if (!new_node)
			return -ENOMEM;

		if (new_type->type_class == RCU_JA_POOL) {
			switch (new_type->nr_pool_order) {
			case 1:
			{
				unsigned int node_distrib_bitsel;

				node_distrib_bitsel =
					ja_node_sum_distribution_1d(mode, ja,
						old_type_index, old_type,
						old_node, shadow_node,
						n, child_node_flag,
						nullify_node_flag_ptr);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				new_node_flag = ja_node_flag_pool_1d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			case 2:
			{
				unsigned int node_distrib_bitsel[2];

				ja_node_sum_distribution_2d(mode, ja,
					old_type_index, old_type,
					old_node, shadow_node,
					n, child_node_flag,
					nullify_node_flag_ptr,
					node_distrib_bitsel);
				assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
				assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
				new_node_flag = ja_node_flag_pool_2d(new_node,
					new_type_index, node_distrib_bitsel);
				break;
			}
			default:
				assert(0);
				return -EINVAL;
			}
		} else {
			new_node_flag = ja_node_flag(new_node, new_type_index);
		}

		dbg_printf("Recompact inherit lock from %p\n", shadow_node);
		new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
		if (!new_shadow_node) {
			free_cds_ja_node(ja, new_node);
			return -ENOMEM;
		}
		if (fallback)
			new_shadow_node->fallback_removal_count =
						JA_FALLBACK_REMOVAL_COUNT;
	} else {
		new_node = NULL;
		new_node_flag = NULL;
	}

	assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);

	if (new_type_index == NODE_INDEX_NULL)
		goto skip_copy;

	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		return -EINVAL;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* Add new child to new node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (old_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}

	return 0;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);

		/* Fall-through */
	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		} else {
			new_type_index++;
			dbg_printf("Add fallback to type %d\n", new_type_index);
			uatomic_inc(&ja->nr_fallback);
			fallback = 1;
			goto retry;
		}
	}
	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node,
		int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	switch (ret) {
	case -ENOSPC:
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	case -ERANGE:
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	}
	return ret;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n, int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr, level);
	}

	return ret;
}

struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_ja_node *) node_flag;
}

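/*
 * Usage sketch (hypothetical my_entry embedding struct; the caller
 * must hold the RCU read-side lock across the lookup and any use of
 * the result):
 *
 *	struct my_entry {
 *		struct cds_ja_node ja_node;
 *		int payload;
 *	};
 *
 *	rcu_read_lock();
 *	node = cds_ja_lookup(ja, key);
 *	if (node) {
 *		struct my_entry *entry = caa_container_of(node,
 *				struct my_entry, ja_node);
 *		... use entry->payload ...
 *	}
 *	rcu_read_unlock();
 */
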
struct cds_ja_node *cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
{
	int tree_depth, level;
	struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];

	if (caa_unlikely(key > ja->key_max || !key))
		return NULL;

	memset(cur_node_depth, 0, sizeof(cur_node_depth));
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);
	cur_node_depth[0] = node_flag;

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (level = 1; level < tree_depth; level++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		if (!ja_node_ptr(node_flag))
			break;
		cur_node_depth[level] = node_flag;
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got an equal match. */
		return (struct cds_ja_node *) node_flag;
	}

	/*
	 * Find highest value left of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left of
	 * ours, go one level up, seek highest value left of current
	 * (recursively), and when we find one, get the rightmost child
	 * of its rightmost child (recursively).
	 */
	for (; level > 0; level--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_left(cur_node_depth[level - 1],
				iter_key);
		/* If found left sibling, find rightmost child. */
		if (ja_node_ptr(node_flag))
			break;
	}

	if (!level) {
		/* Reached the root and could not find a left sibling. */
		return NULL;
	}

	level++;

	/*
	 * From this point, we are guaranteed to be able to find a
	 * "lower than" match. ja_attach_node() and ja_detach_node()
	 * both guarantee that it is not possible for a lookup to reach
	 * a dead-end.
	 */

	/* Find rightmost child of rightmost child (recursively). */
	for (; level < tree_depth; level++) {
		node_flag = ja_node_get_rightmost(node_flag);
		/* If found left sibling, find rightmost child. */
		if (!ja_node_ptr(node_flag))
			break;
	}

	assert(level == tree_depth);

	return (struct cds_ja_node *) node_flag;
}

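/*
 * Semantics example: in a judy array populated with keys 10 and 20,
 * cds_ja_lookup_lower_equal(ja, 15) walks down towards 15, fails the
 * equal match, then climbs back up taking the nearest left sibling and
 * its rightmost descendants, returning the node for key 10;
 * cds_ja_lookup_lower_equal(ja, 20) returns the node for key 20.
 */
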
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 *
 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: before attaching a branch, the entire content of
 * the new branch is populated, thus creating a cluster, before
 * attaching the cluster to the rest of the tree, thus making it visible
 * to lookups.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag *parent_attach_node_flag,
		struct cds_ja_inode_flag **old_node_flag_ptr,
		struct cds_ja_inode_flag *old_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
		level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);

	assert(!old_node_flag);
	if (attach_node_flag) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
	}
	if (parent_attach_node_flag) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_attach_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/*
	 * Perform a lookup query to handle the case where
	 * old_node_flag_ptr is NULL. We cannot use it to check if the
	 * node has been populated between RCU lookup and mutex
	 * acquisition.
	 */
	if (!old_node_flag_ptr) {
		uint8_t iter_key;
		struct cds_ja_inode_flag *lookup_node_flag;
		struct cds_ja_inode_flag **lookup_node_flag_ptr;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		lookup_node_flag = ja_node_get_nth(attach_node_flag,
			&lookup_node_flag_ptr,
			iter_key);
		if (lookup_node_flag) {
			ret = -EEXIST;
			goto unlock_parent;
		}
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	iter_node_flag = (struct cds_ja_inode_flag *) child_node;

	for (i = ja->tree_depth - 1; i >= (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
		dbg_printf("branch creation level %d, key %u\n",
				i, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL, i);
		if (ret) {
			dbg_printf("branch creation error %d\n", ret);
			goto check_error;
		}
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish branch */
	if (level == 1) {
		/*
		 * Attaching to root node.
		 */
		rcu_assign_pointer(ja->root, iter_node_flag);
	} else {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		dbg_printf("publish branch at level %d, key %u\n",
				level - 1, (unsigned int) iter_key);
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = attach_node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node, level - 1);
		if (ret) {
			dbg_printf("branch publish error %d\n", ret);
			goto check_error;
		}
		/* Attach branch */
		rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
	}

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}

/*
 * Lock the parent containing the pointer to list of duplicates, and add
 * node to this list. Failure can happen if concurrent update changes
 * the parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Add node to head of list. Safe against concurrent RCU read
	 * traversals.
	 */
	node->next = (struct cds_ja_node *) node_flag;
	rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}

static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, new_node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, new_node);
	} else {
		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
				parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
			parent_attach_node_flag,
			attach_node_flag_ptr,
			attach_node_flag,
			new_node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;

	return ret;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	return _cds_ja_add(ja, key, new_node, NULL);
}

struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, new_node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return new_node;
}

/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 *
 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: when removing a branch, it makes sure to perform
 * the "cut" at the highest node that has only one child, effectively
 * replacing it with a NULL pointer.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
		*parent_node_flag = NULL,
		**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,			/* Pointer to location to nullify */
		&iter_node_flag,		/* Old/new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}
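
/*
 * Illustration of the "cut" performed by ja_detach_node() (sketch
 * only, assuming a branch where the lower levels are populated solely
 * by the leaf being removed):
 *
 *	root -- A (2 children) -- B (1 child) -- C (1 child) -- leaf
 *
 * Removing the leaf locks C and B (the single-child chain), then A and
 * A's parent. The cut happens at B, the highest node with only one
 * child: the pointer from A to B is nullified. Concurrent RCU lookups
 * therefore see either the whole branch or a NULL pointer in A, never
 * a dead-end below B.
 */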
static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Find the previous node's next pointer pointing to our node,
	 * so we can update it. Retry if another thread removed all but
	 * one of the duplicates since the check (that check was
	 * performed without the lock). Ensure that the node we are
	 * about to remove is still in the list (while holding the
	 * lock). No need for RCU traversal here since we hold the lock
	 * on the parent.
	 */
	iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
	iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
	cds_ja_for_each_duplicate(iter_node) {
		count++;
		if (iter_node == node) {
			prev_node_ptr = iter_node_ptr;
			found++;
		}
		iter_node_ptr = &iter_node->next;
	}
	assert(found <= 1);
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	CMM_STORE_SHARED(*prev_node_ptr, node->next);
	/*
	 * Validate that we indeed removed the node from the linked
	 * list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
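
/*
 * Duplicate chain layout acted upon by ja_unchain_node() (sketch):
 * nodes sharing a key are linked through their "next" field, and the
 * pointer stored in the parent slot is the head of that list.
 *
 *	parent slot -> node0 -> node1 -> node2 -> NULL
 *
 * Unchaining node1 makes node0->next point to node2 with a single
 * CMM_STORE_SHARED(), so concurrent RCU readers traversing the list
 * see either the old or the new chain, both of which are consistent.
 */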
/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
			node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			prev_node_flag_ptr);
	}
	/*
	 * We reached the bottom of the tree, try to find the node we
	 * are trying to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
			key);
		return -ENOENT;
	} else {
		struct cds_ja_node *iter_node, *match = NULL;
		int count = 0;

		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node) {
			dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
			if (iter_node == node)
				match = iter_node;
			count++;
		}

		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		assert(count > 0);
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external
			 * leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
				node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
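
/*
 * Example usage sketch for cds_ja_del() (illustrative only; assumes
 * "struct my_item" embeds both the cds_ja_node and a struct rcu_head,
 * and that free_item_rcu() is a hypothetical reclamation callback):
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &item->ja_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&item->rcu_head, free_item_rcu);
 *	// -ENOENT: no such key, or this node not bound to it
 */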
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja, 0);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}
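
/*
 * Example usage sketch for _cds_ja_new() (illustrative only; public
 * wrappers are expected to pass the RCU flavor linked into the
 * application, e.g. the "rcu_flavor" symbol exported by each urcu
 * flavor library):
 *
 *	struct cds_ja *ja;
 *
 *	ja = _cds_ja_new(32, &rcu_flavor);	// keys in [0, 2^32 - 1]
 *	if (!ja)
 *		abort();
 */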
/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*rcu_free_node)(struct cds_ja_node *node))
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_ja_node *node_iter, *n;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
				node_iter = (struct cds_ja_node *) iter;
				cds_ja_for_each_duplicate_safe(node_iter, n) {
					rcu_free_node(node_iter);
				}
			}
		}
		break;
	}
	case RCU_JA_NULL:
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_ja_node *node_iter, *n;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			node_iter = (struct cds_ja_node *) iter;
			cds_ja_for_each_duplicate_safe(node_iter, n) {
				rcu_free_node(node_iter);
			}
		}
		break;
	}
	default:
		assert(0);
	}
}
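
/*
 * Example rcu_free_node callback shape (illustrative only; "struct
 * my_item" embedding the cds_ja_node is hypothetical):
 *
 *	static void my_free_node(struct cds_ja_node *node)
 *	{
 *		struct my_item *item =
 *			caa_container_of(node, struct my_item, ja_node);
 *
 *		free(item);	// reclaim the enclosing item
 *	}
 */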
static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	unsigned int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, " %3u: %4lu\n",
			i, ja->node_fallback_count_distribution[i]);
	}
}
static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			uatomic_read(&ja->nr_fallback),
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);

	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) (na - nf), na, nf);
		ret = -1;
	}
	return ret;
}
/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*rcu_free_node)(struct cds_ja_node *node))
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
		rcu_free_node);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}
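
/*
 * Example teardown sketch (illustrative only; my_free_node is the
 * hypothetical callback shown above): quiesce all writers first, then
 * destroy with a callback freeing each remaining node.
 *
 *	ret = cds_ja_destroy(ja, my_free_node);
 *	if (ret)
 *		fprintf(stderr, "cds_ja_destroy failed: %d\n", ret);
 */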