/*
 * Userspace RCU library - RCU Judy Array Range Support
 *
 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>
#include <urcu/rcuja-range.h>
#include <urcu-flavor.h>

#include "rcuja-internal.h"

/*
 * Discussion about order of lookup/lock vs allocated node deletion.
 *
 * - If node deletion returns before call to
 *   cds_ja_range_lookup(), the node will not be found by lookup.
 * - If node deletion is called after cds_ja_range_lock() returns a
 *   non-NULL range, the deletion will wait until the lock is released
 *   before it takes place.
 * - If node deletion call/return overlaps with the call to
 *   cds_ja_range_lookup() and return from cds_ja_range_lock(), the node
 *   may or may not be found by each of cds_ja_range_lookup() and
 *   cds_ja_range_lock().
 */

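/*
 * Example reader-side usage following from the ordering above
 * (illustrative sketch only, not part of this file's API):
 * combine RCU read-side protection with the range lock when mutual
 * exclusion against removal is needed. rcu_read_lock(),
 * rcu_read_unlock() stand for the urcu flavor's read-side primitives,
 * and use_range() is a hypothetical user function.
 *
 *	struct cds_ja_range *range;
 *
 *	rcu_read_lock();
 *	range = cds_ja_range_lookup(ja, key);
 *	if (range)
 *		range = cds_ja_range_lock(range);
 *	rcu_read_unlock();
 *	if (range) {
 *		use_range(range);	(cannot be removed while locked)
 *		cds_ja_range_unlock(range);
 *	}
 */
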
55 * Discussion about order of lookup/lock vs allocated node add. Assuming
56 * no concurrent delete.
58 * - If node add returns before call to
59 * cds_ja_range_lookup(), the node will be found by lookup.
60 * - If node add is called after cds_ja_range_lookup returns, the node
61 * will not be found by lookup.
62 * - If node add call/return overlaps with the call to and return from
63 * cds_ja_range_lookup(), the node may or may not be found.
64 * - If node add call/return overlaps with call to cds_ja_range_lookup()
65 * and return from cds_ja_range_lock(), in the specific case where
66 * cds_ja_range_lookup() _does_ succeed, then cds_ja_range_lock() will
67 * succeed (still assuming no concurrent deletion).
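/*
 * Illustrative consequence of the ordering above (a sketch, assuming
 * no concurrent delete): once cds_ja_range_add() has returned, a
 * lookup on any key within the new range is guaranteed to find it.
 *
 *	struct cds_ja_range *r, *found;
 *
 *	r = cds_ja_range_add(ja, 100, 199);
 *	rcu_read_lock();
 *	found = cds_ja_range_lookup(ja, 150);	(found == r here)
 *	rcu_read_unlock();
 */
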
/*
 * Discussion: concurrent deletion of contiguous allocated ranges.
 *
 * To ensure that merging of contiguous free ranges is always
 * performed, we need to serialize concurrent removals of contiguous
 * allocated ranges with respect to one another. This is done by
 * locking the ranges prior to and after the range to remove, even if
 * those ranges are allocated. The only cases for which there is no
 * range to lock are when removing an allocated range starting at 0,
 * and/or ending at the end of the key space.
 */

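/*
 * Worked example of the neighbor locking above: with allocated ranges
 * [0,9] [10,19] [20,29], removing [10,19] locks all three ranges in
 * increasing key order, so a concurrent removal of [0,9] or [20,29]
 * must wait, and the merge of the resulting free ranges is never
 * missed. Removing [0,9] locks only [0,9] and [10,19], since no range
 * exists before key 0.
 */
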
/*
 * Discussion of the type state transitions.
 *
 * State transitions of "type" always go from either:
 *
 * CDS_JA_RANGE_FREE -> CDS_JA_RANGE_REMOVED
 * or
 * CDS_JA_RANGE_ALLOCATED -> CDS_JA_RANGE_REMOVED
 *
 * A range type never changes otherwise.
 */

#define CDS_JA_RANGE_KEY_BITS	64

struct cds_ja_range *cds_ja_range_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node, *last_node;
	struct cds_ja_range *range;

	node = cds_ja_lookup_below_equal(ja, key, NULL);
	assert(node);
	/*
	 * Get the last node of the duplicate chain: adding duplicates
	 * to a Judy array inserts them at the end of the chain.
	 */
	cds_ja_for_each_duplicate_rcu(node)
		last_node = node;
	range = caa_container_of(last_node, struct cds_ja_range, ja_node);
	/*
	 * If the last node in the duplicates is removed or free, we can
	 * consider that either a removal or add operation is in
	 * progress, or removal is the last completed operation to
	 * update this range. We can therefore consider that this area
	 * is not allocated.
	 */
	if (range->type != CDS_JA_RANGE_ALLOCATED)
		return NULL;
	/*
	 * We found an allocated range. We can return it for use with
	 * RCU read-side protection for existence. However, we have no
	 * mutual exclusion against removal at this point.
	 */
	return range;
}

/*
 * Provide mutual exclusion against removal.
 */
struct cds_ja_range *cds_ja_range_lock(struct cds_ja_range *range)
{
	pthread_mutex_lock(&range->lock);

	if (range->type == CDS_JA_RANGE_REMOVED)
		goto removed;
	return range;

removed:
	pthread_mutex_unlock(&range->lock);
	return NULL;
}

void cds_ja_range_unlock(struct cds_ja_range *range)
{
	pthread_mutex_unlock(&range->lock);
}

static
struct cds_ja_range *range_create(
		uint64_t start,		/* inclusive */
		uint64_t end,		/* inclusive */
		enum cds_ja_range_type type)
{
	struct cds_ja_range *range;

	range = calloc(sizeof(*range), 1);
	if (!range)
		return NULL;
	range->start = start;
	range->end = end;
	range->type = type;
	pthread_mutex_init(&range->lock, NULL);
	return range;
}

static
void free_range(struct cds_ja_range *range)
{
	free(range);
}

static
void free_range_cb(struct rcu_head *head)
{
	struct cds_ja_range *range =
		caa_container_of(head, struct cds_ja_range, head);

	free_range(range);
}

static
void rcu_free_range(struct cds_ja *ja, struct cds_ja_range *range)
{
	cds_lfht_rcu_flavor(ja->ht)->update_call_rcu(&range->head,
			free_range_cb);
}

struct cds_ja_range *cds_ja_range_add(struct cds_ja *ja,
		uint64_t start,		/* inclusive */
		uint64_t end)		/* inclusive */
{
	struct cds_ja_node *old_node, *old_node_end;
	struct cds_ja_range *old_range, *old_range_end, *new_range, *ranges[3];
	unsigned int nr_ranges, i;
	int ret;

retry:
	/*
	 * Find if requested range is entirely contained within a single
	 * free range.
	 */
	old_node = cds_ja_lookup_below_equal(ja, start, NULL);
	assert(old_node);

	old_range = caa_container_of(old_node, struct cds_ja_range, ja_node);
	switch (CMM_LOAD_SHARED(old_range->type)) {
	case CDS_JA_RANGE_ALLOCATED:
		return NULL;
	case CDS_JA_RANGE_FREE:
		break;
	case CDS_JA_RANGE_REMOVED:
		goto retry;
	}

	old_node_end = cds_ja_lookup_below_equal(ja, end, NULL);
	assert(old_node_end);
	old_range_end = caa_container_of(old_node_end,
			struct cds_ja_range, ja_node);
	if (old_range_end != old_range) {
		/* Request spans more than one range: cannot satisfy. */
		switch (CMM_LOAD_SHARED(old_range_end->type)) {
		case CDS_JA_RANGE_ALLOCATED:
		case CDS_JA_RANGE_FREE:		/* fall-through */
			return NULL;
		case CDS_JA_RANGE_REMOVED:
			goto retry;
		}
	}

	pthread_mutex_lock(&old_range->lock);

	if (old_range->type == CDS_JA_RANGE_REMOVED) {
		pthread_mutex_unlock(&old_range->lock);
		goto retry;
	}

	/* Create replacement ranges: at most 2 free and 1 allocated */
	if (start == old_range->start) {
		if (end == old_range->end) {
			/* Allocate the entire old free range */
			ranges[0] = new_range = range_create(start, end,
					CDS_JA_RANGE_ALLOCATED);
			nr_ranges = 1;
		} else {
			/* Allocate the beginning of the old free range */
			ranges[0] = new_range = range_create(start, end,
					CDS_JA_RANGE_ALLOCATED);
			ranges[1] = range_create(end + 1, old_range->end,
					CDS_JA_RANGE_FREE);
			nr_ranges = 2;
		}
	} else {
		if (end == old_range->end) {
			/* Allocate the end of the old free range */
			ranges[0] = range_create(old_range->start, start - 1,
					CDS_JA_RANGE_FREE);
			ranges[1] = new_range = range_create(start, end,
					CDS_JA_RANGE_ALLOCATED);
			nr_ranges = 2;
		} else {
			/* Allocate the middle of the old free range */
			ranges[0] = range_create(old_range->start, start - 1,
					CDS_JA_RANGE_FREE);
			ranges[1] = new_range = range_create(start, end,
					CDS_JA_RANGE_ALLOCATED);
			ranges[2] = range_create(end + 1, old_range->end,
					CDS_JA_RANGE_FREE);
			nr_ranges = 3;
		}
	}

	/* Add replacement ranges to Judy array */
	for (i = 0; i < nr_ranges; i++) {
		ret = cds_ja_add(ja, ranges[i]->start, &ranges[i]->ja_node);
		assert(!ret);
	}

	/*
	 * We add replacement ranges _before_ removing old ranges, so
	 * concurrent traversals will always see one or the other. This
	 * is OK because we temporarily have a duplicate key, and Judy
	 * arrays provide key existence guarantee for lookups performed
	 * concurrently with add followed by del of duplicate keys.
	 */

	/* Remove old free range */
	ret = cds_ja_del(ja, old_range->start, &old_range->ja_node);
	assert(!ret);
	old_range->type = CDS_JA_RANGE_REMOVED;
	pthread_mutex_unlock(&old_range->lock);

	rcu_free_range(ja, old_range);

	return new_range;
}

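/*
 * Usage sketch for cds_ja_range_add() (illustrative; the error
 * handling convention is an assumption, not mandated by this file):
 * a NULL return means the requested range is not entirely contained
 * within a single free range, i.e. part of it is already allocated.
 *
 *	struct cds_ja_range *r;
 *
 *	r = cds_ja_range_add(ja, 0x1000, 0x1fff);
 *	if (!r)
 *		return -EBUSY;	(hypothetical caller policy)
 */
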
int cds_ja_range_del(struct cds_ja *ja, struct cds_ja_range *range)
{
	struct cds_ja_node *prev_node, *next_node;
	struct cds_ja_range *new_range;
	struct cds_ja_range *merge_ranges[3], *lock_ranges[3];
	unsigned int nr_merge, nr_lock, i;
	uint64_t start, end;
	int ret;

retry:
	nr_merge = 0;
	nr_lock = 0;

	/* No prev range to lock when removing a range starting at key 0. */
	prev_node = NULL;
	if (range->start != 0)
		prev_node = cds_ja_lookup_below_equal(ja,
				range->start - 1, NULL);
	if (prev_node) {
		struct cds_ja_range *prev_range;

		prev_range = caa_container_of(prev_node,
				struct cds_ja_range, ja_node);
		lock_ranges[nr_lock++] = prev_range;
		if (prev_range->type != CDS_JA_RANGE_ALLOCATED)
			merge_ranges[nr_merge++] = prev_range;
	}

	lock_ranges[nr_lock++] = range;
	merge_ranges[nr_merge++] = range;

	/* No next range to lock when removing a range ending the key space. */
	next_node = NULL;
	if (range->end != UINT64_MAX)
		next_node = cds_ja_lookup_above_equal(ja,
				range->end + 1, NULL);
	if (next_node) {
		struct cds_ja_range *next_range;

		next_range = caa_container_of(next_node,
				struct cds_ja_range, ja_node);
		lock_ranges[nr_lock++] = next_range;
		if (next_range->type != CDS_JA_RANGE_ALLOCATED)
			merge_ranges[nr_merge++] = next_range;
	}

	/* Acquire locks in increasing key order for range merge */
	for (i = 0; i < nr_lock; i++)
		pthread_mutex_lock(&lock_ranges[i]->lock);
	/* Ensure they are valid */
	for (i = 0; i < nr_lock; i++) {
		if (lock_ranges[i]->type == CDS_JA_RANGE_REMOVED)
			goto unlock_retry;
	}

	/* Create new free range */
	start = merge_ranges[0]->start;
	end = merge_ranges[nr_merge - 1]->end;
	new_range = range_create(start, end, CDS_JA_RANGE_FREE);
	ret = cds_ja_add(ja, start, &new_range->ja_node);
	assert(!ret);

	/* Remove old ranges */
	for (i = 0; i < nr_merge; i++) {
		ret = cds_ja_del(ja, merge_ranges[i]->start,
				&merge_ranges[i]->ja_node);
		assert(!ret);
		merge_ranges[i]->type = CDS_JA_RANGE_REMOVED;
	}
	for (i = 0; i < nr_lock; i++)
		pthread_mutex_unlock(&lock_ranges[i]->lock);
	/* Free old merged ranges */
	for (i = 0; i < nr_merge; i++)
		rcu_free_range(ja, merge_ranges[i]);

	return 0;

	/* A locked range was concurrently removed: unlock all and retry. */
unlock_retry:
	for (i = 0; i < nr_lock; i++)
		pthread_mutex_unlock(&lock_ranges[i]->lock);
	goto retry;
}

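/*
 * Usage sketch for cds_ja_range_del() (illustrative): the caller
 * passes an allocated range obtained from cds_ja_range_add() or
 * cds_ja_range_lookup(); the range is merged back into the
 * surrounding free space and reclaimed after an RCU grace period.
 *
 *	ret = cds_ja_range_del(ja, r);
 *	assert(!ret);
 */
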
struct cds_ja *_cds_ja_range_new(const struct rcu_flavor_struct *flavor)
{
	struct cds_ja_range *range;
	struct cds_ja *ja;
	int ret;

	ja = _cds_ja_new(CDS_JA_RANGE_KEY_BITS, flavor);
	if (!ja)
		return NULL;
	range = range_create(0, UINT64_MAX, CDS_JA_RANGE_FREE);
	if (!range)
		goto free_ja;
	ret = cds_ja_add(ja, 0, &range->ja_node);
	if (ret)
		goto free_range;
	return ja;

free_range:
	free_range(range);
free_ja:
	ret = cds_ja_destroy(ja);
	assert(!ret);
	return NULL;
}

int cds_ja_range_destroy(struct cds_ja *ja)
{
	uint64_t key;
	struct cds_ja_node *ja_node;
	int ret;

	cds_ja_for_each_key_rcu(ja, key, ja_node) {
		struct cds_ja_node *tmp_node;

		cds_ja_for_each_duplicate_safe_rcu(ja_node, tmp_node) {
			struct cds_ja_range *range;

			range = caa_container_of(ja_node,
					struct cds_ja_range, ja_node);
			ret = cds_ja_del(ja, key, &range->ja_node);
			if (ret)
				return ret;
			/* We are alone using the Judy array: OK to free now. */
			free_range(range);
		}
	}
	return cds_ja_destroy(ja);
}

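/*
 * End-to-end lifecycle sketch (illustrative; "rcu_flavor" stands for
 * the flavor symbol of whichever urcu flavor the caller is linked
 * against):
 *
 *	struct cds_ja *ja;
 *	struct cds_ja_range *r;
 *
 *	ja = _cds_ja_range_new(&rcu_flavor);
 *	r = cds_ja_range_add(ja, 42, 100);
 *	(void) cds_ja_range_del(ja, r);
 *	(void) cds_ja_range_destroy(ja);
 */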