/*
 * TODO: keys are currently assumed <= sizeof(void *). Key target never freed.
 */

#include <urcu-defer.h>
#include <arch_atomic.h>
#include <urcu/jhash.h>

#define NODE_STOLEN (1 << 0)
/*
 * Node of a hash bucket chain.
 * NOTE(review): this definition was garbled in the extracted source; only the
 * "next" member is visible. The remaining members are reconstructed from
 * their uses in ht_lookup()/ht_add()/ht_steal() — confirm order and types
 * against the upstream file.
 */
struct rcu_ht_node {
	struct rcu_ht_node *next;	/* next node in bucket chain, NULL-terminated */
	void *key;			/* key encoded in the pointer (see file TODO) */
	void *data;			/* user payload */
	unsigned long flags;		/* NODE_STOLEN set by the winning stealer via cmpxchg */
};
33 struct rcu_ht_node
**tbl
;
35 void (*free_fct
)(void *data
); /* fct to free data */
44 struct rcu_ht
*ht_new(ht_hash_fct hash_fct
, void (*free_fct
)(void *data
),
45 unsigned long init_size
, uint32_t keylen
,
50 ht
= calloc(1, sizeof(struct rcu_ht
));
51 ht
->hash_fct
= hash_fct
;
52 ht
->free_fct
= free_fct
;
53 ht
->size
.add
= init_size
;
54 ht
->size
.lookup
= init_size
;
56 ht
->hashseed
= hashseed
;
57 ht
->tbl
= calloc(init_size
, sizeof(struct rcu_ht_node
*));
61 void *ht_lookup(struct rcu_ht
*ht
, void *key
)
64 struct rcu_ht_node
*node
;
67 hash
= ht
->hash_fct(key
, ht
->keylen
, ht
->hashseed
) % ht
->size
.lookup
;
70 node
= rcu_dereference(ht
->tbl
[hash
]);
76 if (node
->key
== key
) {
80 node
= rcu_dereference(node
->next
);
88 * Will re-try until either:
89 * - The key is already there (-EEXIST)
90 * - We successfully add the key at the head of a table bucket.
92 int ht_add(struct rcu_ht
*ht
, void *key
, void *data
)
94 struct rcu_ht_node
*node
, *old_head
, *new_head
;
98 new_head
= calloc(1, sizeof(struct rcu_ht_node
));
100 new_head
->data
= data
;
102 /* here comes the fun and tricky part.
103 * Add at the beginning with a cmpxchg.
104 * Hold a read lock between the moment the first element is read
105 * and the nodes traversal (to find duplicates). This ensures
106 * the head pointer has not been reclaimed when cmpxchg is done.
107 * Always adding at the head ensures that we would have to
108 * re-try if a new item has been added concurrently. So we ensure that
109 * we never add duplicates. */
113 hash
= ht
->hash_fct(key
, ht
->keylen
, ht
->hashseed
) % ht
->size
.add
;
115 old_head
= node
= rcu_dereference(ht
->tbl
[hash
]);
120 if (node
->key
== key
) {
124 node
= rcu_dereference(node
->next
);
126 new_head
->next
= old_head
;
127 if (rcu_cmpxchg_pointer(&ht
->tbl
[hash
], old_head
, new_head
) != old_head
)
134 /* restart loop, release and re-take the read lock to be kind to GP */
141 * Restart until we successfully remove the entry, or no entry is left
142 * ((void *)(unsigned long)-ENOENT).
143 * Deal with concurrent stealers by doing an extra verification pass to check
144 * that no element in the list are still pointing to the element stolen.
145 * This could happen if two concurrent steal for consecutive objects are
146 * executed. A pointer to an object being stolen could be saved by the
147 * concurrent stealer for the previous object.
148 * Also, given that in this precise scenario, another stealer can also want to
149 * delete the doubly-referenced object; use a "stolen" flag to let only one
150 * stealer delete the object.
152 void *ht_steal(struct rcu_ht
*ht
, void *key
)
154 struct rcu_ht_node
**prev
, *node
, *del_node
= NULL
;
161 hash
= ht
->hash_fct(key
, ht
->keylen
, ht
->hashseed
) % ht
->size
.lookup
;
163 prev
= &ht
->tbl
[hash
];
164 node
= rcu_dereference(*prev
);
173 if (node
->key
== key
) {
177 node
= rcu_dereference(*prev
);
182 * Another concurrent thread stole it ? If so, let it deal with
183 * this. Assume NODE_STOLEN is the only flag. If this changes,
184 * read flags before cmpxchg.
186 if (cmpxchg(&node
->flags
, 0, NODE_STOLEN
) != 0)
190 /* Found it ! pointer to object is in "prev" */
191 if (rcu_cmpxchg_pointer(prev
, node
, node
->next
) == node
)
197 * From that point, we own node. Note that there can still be concurrent
198 * RCU readers using it. We can free it outside of read lock after a GP.
202 data
= del_node
->data
;
203 call_rcu(free
, del_node
);
207 data
= (void *)(unsigned long)-ENOENT
;
211 /* restart loop, release and re-take the read lock to be kind to GP */
217 int ht_delete(struct rcu_ht
*ht
, void *key
)
221 data
= ht_steal(ht
, key
);
222 if (data
&& data
!= (void *)(unsigned long)-ENOENT
) {
224 call_rcu(ht
->free_fct
, data
);
231 /* Delete all old elements. Allow concurrent writer accesses. */
232 int ht_delete_all(struct rcu_ht
*ht
)
235 struct rcu_ht_node
**prev
, *node
, *inext
;
238 for (i
= 0; i
< ht
->size
.lookup
; i
++) {
242 * Cut the head. After that, we own the first element.
244 node
= rcu_xchg_pointer(prev
, NULL
);
250 * We manage a list shared with concurrent writers and readers.
251 * Note that a concurrent add may or may not be deleted by us,
252 * depending if it arrives before or after the head is cut.
253 * "node" points to our first node. Remove first elements
260 inext
= rcu_xchg_pointer(prev
, NULL
);
262 * "node" is the first element of the list we have cut.
263 * We therefore own it, no concurrent writer may delete
264 * it. There can only be concurrent lookups. Concurrent
265 * add can only be done on a bucket head, but we've cut
266 * it already. inext is also owned by us, because we
267 * have exchanged it for "NULL". It will therefore be
268 * safe to use it after a G.P.
272 call_rcu(ht
->free_fct
, node
->data
);
273 call_rcu(free
, node
);
285 * Should only be called when no more concurrent readers nor writers can
286 * possibly access the table.
288 int ht_destroy(struct rcu_ht
*ht
)
292 ret
= ht_delete_all(ht
);
/*
 * Expects keys <= than pointer size to be encoded in the pointer itself.
 * Default hash function: Jenkins hash over the key bytes. For small keys
 * the bytes of the pointer value itself are hashed (hence &key); larger
 * keys are hashed through the pointer target.
 */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
	uint32_t ret;
	void *vkey;

	if (length <= sizeof(void *))
		vkey = &key;	/* key is stored in the pointer value */
	else
		vkey = key;	/* key is stored behind the pointer */

	ret = jhash(vkey, length, initval);
	return ret;
}