* Upon node removal below min_child, if child pool is filled
* beyond capacity, we roll back to pigeon.
*/
- { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
+ { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
* Upon node removal below min_child, if child pool is filled
* beyond capacity, we roll back to pigeon.
*/
- { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
+ { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
JA_RECOMPACT_DEL,
};
-static
-unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
-static
-unsigned long nr_nodes_allocated, nr_nodes_freed;
-
static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
}
}
-struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
+static
+struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
+ const struct cds_ja_type *ja_type)
{
size_t len = 1U << ja_type->order;
void *p;
return NULL;
}
memset(p, 0, len);
- uatomic_inc(&nr_nodes_allocated);
+ uatomic_inc(&ja->nr_nodes_allocated);
return p;
}
-void free_cds_ja_node(struct cds_ja_inode *node)
+void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
-	free(node);
-	if (node)
-		uatomic_inc(&nr_nodes_freed);
+	/* Account the free before the pointer value becomes indeterminate. */
+	if (node)
+		uatomic_inc(&ja->nr_nodes_freed);
+	free(node);
}
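For reference, a sketch of the per-array counters this patch assumes now live in struct cds_ja; the field names match their uses throughout the patch, but the grouping below is illustrative rather than the actual structure layout:

struct example_ja_counters {	/* hypothetical excerpt of struct cds_ja */
	unsigned long nr_fallback;	/* nodes allocated through fallback */
	unsigned long nr_nodes_allocated;	/* incremented in alloc_cds_ja_node() */
	unsigned long nr_nodes_freed;	/* incremented in free_cds_ja_node() */
	unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
};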
#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
old_type_index, new_type_index);
new_type = &ja_types[new_type_index];
if (new_type_index != NODE_INDEX_NULL) {
- new_node = alloc_cds_ja_node(new_type);
+ new_node = alloc_cds_ja_node(ja, new_type);
if (!new_node)
return -ENOMEM;
dbg_printf("Recompact inherit lock from %p\n", shadow_node);
new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
if (!new_shadow_node) {
- free_cds_ja_node(new_node);
+ free_cds_ja_node(ja, new_node);
return -ENOMEM;
}
if (fallback)
dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
- uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
+ uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
}
/* Return pointer to new recompacted node through old_node_flag_ptr */
goto unlock_parent;
}
+	/*
+	 * When old_node_flag_ptr is NULL, we cannot dereference it to
+	 * check whether the node has been populated between the RCU
+	 * lookup and the mutex acquisition, so perform a lookup query
+	 * instead.
+	 */
+ if (!old_node_flag_ptr) {
+ uint8_t iter_key;
+ struct cds_ja_inode_flag *lookup_node_flag;
+ struct cds_ja_inode_flag **lookup_node_flag_ptr;
+
+ iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
+ lookup_node_flag = ja_node_get_nth(attach_node_flag,
+ &lookup_node_flag_ptr,
+ iter_key);
+ if (lookup_node_flag) {
+ ret = -EEXIST;
+ goto unlock_parent;
+ }
+ }
+
if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
ja_node_ptr(attach_node_flag)) {
/*
iter_key,
iter_node_flag,
NULL, i);
- if (ret)
+ if (ret) {
+ dbg_printf("branch creation error %d\n", ret);
goto check_error;
+ }
created_nodes[nr_created_nodes++] = iter_dest_node_flag;
iter_node_flag = iter_dest_node_flag;
}
iter_key,
iter_node_flag,
shadow_node, level - 1);
- if (ret)
+ if (ret) {
+ dbg_printf("branch publish error %d\n", ret);
goto check_error;
+ }
/*
* Attach branch
*/
}
static
-void print_debug_fallback_distribution(void)
+void print_debug_fallback_distribution(struct cds_ja *ja)
{
int i;
fprintf(stderr, "Fallback node distribution:\n");
for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
- if (!node_fallback_count_distribution[i])
+ if (!ja->node_fallback_count_distribution[i])
continue;
fprintf(stderr, " %3u: %4lu\n",
- i, node_fallback_count_distribution[i]);
+ i, ja->node_fallback_count_distribution[i]);
}
}
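Given the " %3u: %4lu" format above, the histogram prints one line per child count that ever hit the fallback path, e.g. (counts made up for illustration):

Fallback node distribution:
  12:    3
  47:    1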
+static
+int ja_final_checks(struct cds_ja *ja)
+{
+ double fallback_ratio;
+ unsigned long na, nf, nr_fallback;
+ int ret = 0;
+
+	nr_fallback = uatomic_read(&ja->nr_fallback);
+	fallback_ratio = (double) nr_fallback;
+	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
+	if (nr_fallback)
+		fprintf(stderr,
+			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
+			nr_fallback,
+			fallback_ratio);
+
+ na = uatomic_read(&ja->nr_nodes_allocated);
+ nf = uatomic_read(&ja->nr_nodes_freed);
+ dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
+ if (nr_fallback)
+ print_debug_fallback_distribution(ja);
+
+ if (na != nf) {
+ fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
+			(long) (na - nf), na, nf);
+ ret = -1;
+ }
+ return ret;
+}
+
/*
* There should be no more concurrent add to the judy array while it is
* being destroyed (ensured by the caller).
ret = rcuja_delete_ht(ja->ht);
if (ret)
return ret;
+
+	/*
+	 * Wait for in-flight call_rcu free to complete, so the
+	 * allocated/freed accounting in ja_final_checks() below does
+	 * not report a spurious leak.
+	 */
+	flavor->barrier();
+
flavor->thread_online();
- if (uatomic_read(&ja->nr_fallback))
- fprintf(stderr,
- "[warning] RCU Judy Array used %lu fallback node(s)\n",
- uatomic_read(&ja->nr_fallback));
- fprintf(stderr, "Nodes allocated: %lu, Nodes freed: %lu. Fallback ratio: %g\n",
- uatomic_read(&nr_nodes_allocated),
- uatomic_read(&nr_nodes_freed),
- (double) uatomic_read(&ja->nr_fallback) / (double) uatomic_read(&nr_nodes_allocated));
- print_debug_fallback_distribution();
+ ret = ja_final_checks(ja);
free(ja);
- return 0;
+ return ret;
}
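A hypothetical caller sketch: with ja_final_checks() wired into the destroy path above, a node leak surfaces as a non-zero return value a test harness can assert on. The entry point name cds_ja_destroy() and its signature are assumptions here, not taken from this patch:

	/* Teardown in a test; assumes int cds_ja_destroy(struct cds_ja *ja). */
	ret = cds_ja_destroy(ja);
	if (ret)
		fprintf(stderr, "test: judy array leaked nodes\n");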