__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
struct cds_ja_inode_flag *node_flag,
- void (*rcu_free_node)(struct cds_ja_node *node));
+ void (*free_node_cb)(struct cds_ja_node *node));
__attribute__((visibility("protected")))
struct cds_ja_shadow_node *rcuja_shadow_lookup_lock(struct cds_lfht *ht,
__attribute__((visibility("protected")))
void rcuja_shadow_prune(struct cds_lfht *ht,
unsigned int flags,
- void (*rcu_free_node)(struct cds_ja_node *node));
+ void (*free_node_cb)(struct cds_ja_node *node));
__attribute__((visibility("protected")))
struct cds_lfht *rcuja_create_ht(const struct rcu_flavor_struct *flavor);
* don't let RCU JA use a node being removed.
*/
ret = cds_lfht_del(ht, lookup_node);
- if (!ret) {
- if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
- && shadow_node->level) {
- if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_node_and_lock);
- } else {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_node);
- }
+ if (ret)
+ goto unlock;
+ if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
+ && shadow_node->level) {
+ if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_node_and_lock);
} else {
- if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_lock);
- } else {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node);
- }
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_node);
+ }
+ } else {
+ if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_lock);
+ } else {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node);
}
}
+unlock:
if (lookup_shadow) {
lockret = pthread_mutex_unlock(shadow_node->lock);
assert(!lockret);
__attribute__((visibility("protected")))
void rcuja_shadow_prune(struct cds_lfht *ht,
unsigned int flags,
- void (*rcu_free_node)(struct cds_ja_node *node))
+ void (*free_node_cb)(struct cds_ja_node *node))
{
const struct rcu_flavor_struct *flavor;
struct cds_ja_shadow_node *shadow_node;
int ret, lockret;
flavor = cds_lfht_rcu_flavor(ht);
+ /*
+ * Read-side lock is needed to ensure hash table node existence
+ * vs concurrent resize.
+ */
flavor->read_lock();
cds_lfht_for_each_entry(ht, &iter, shadow_node, ht_node) {
lockret = pthread_mutex_lock(shadow_node->lock);
assert(!lockret);
ret = cds_lfht_del(ht, &shadow_node->ht_node);
- if (!ret) {
- if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
- && shadow_node->level) {
- if (shadow_node->level == shadow_node->ja->tree_depth - 1) {
- rcuja_free_all_children(shadow_node,
- shadow_node->node_flag,
- rcu_free_node);
- }
- if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_node_and_lock);
- } else {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_node);
- }
+ if (ret)
+ goto unlock;
+ if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
+ && shadow_node->level) {
+ if (shadow_node->level == shadow_node->ja->tree_depth - 1) {
+ rcuja_free_all_children(shadow_node,
+ shadow_node->node_flag,
+ free_node_cb);
+ }
+ if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_node_and_lock);
+ } else {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_node);
+ }
+ } else {
+ if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node_and_lock);
} else {
- if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node_and_lock);
- } else {
- flavor->update_call_rcu(&shadow_node->head,
- free_shadow_node);
- }
+ flavor->update_call_rcu(&shadow_node->head,
+ free_shadow_node);
}
}
+ unlock:
lockret = pthread_mutex_unlock(shadow_node->lock);
assert(!lockret);
}
}
/*
- * There should be no more concurrent add to the judy array while it is
- * being destroyed (ensured by the caller).
+ * There should be no more concurrent add, delete, nor look-up performed
+ * on the Judy array while it is being destroyed (ensured by the
+ * caller).
*/
int cds_ja_destroy(struct cds_ja *ja,
- void (*rcu_free_node)(struct cds_ja_node *node))
+ void (*free_node_cb)(struct cds_ja_node *node))
{
const struct rcu_flavor_struct *flavor;
int ret;
flavor = cds_lfht_rcu_flavor(ja->ht);
rcuja_shadow_prune(ja->ht,
RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
- rcu_free_node);
+ free_node_cb);
flavor->thread_offline();
ret = rcuja_delete_ht(ja->ht);
if (ret)
- * @rcu_free_node_cb: callback performing memory free of leftover nodes.
+ * @free_node_cb: callback performing memory free of leftover nodes.
*
* Returns 0 on success, negative error value on error.
- * The @rcu_free_node_cb callback should internally wait for a grace
- * period before freeing the node.
+ * There should be no more concurrent add, delete, nor look-up performed
+ * on the Judy array while it is being destroyed (ensured by the caller).
+ * There is no need for the @free_node_cb callback to wait for grace
+ * periods, since there are no more concurrent users of the Judy array.
*/
int cds_ja_destroy(struct cds_ja *ja,
- void (*rcu_free_node_cb)(struct cds_ja_node *node));
+ void (*free_node_cb)(struct cds_ja_node *node));
/*
* Iterate through duplicates returned by cds_ja_lookup*()