1 // SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3 // SPDX-License-Identifier: LGPL-2.1-or-later
5 #ifndef _URCU_STATIC_WFSTACK_H
6 #define _URCU_STATIC_WFSTACK_H
9 * Userspace RCU library - Stack with wait-free push, blocking traversal.
11 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
12 * linking dynamically with the userspace rcu library.
18 #include <urcu/assert.h>
19 #include <urcu/compiler.h>
20 #include <urcu/uatomic.h>
26 #define CDS_WFS_END ((struct cds_wfs_head *) 0x1UL)
27 #define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
28 #define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
31 * Stack with wait-free push, blocking traversal.
33 * Stack implementing push, pop, pop_all operations, as well as iterator
34 * on the stack head returned by pop_all.
36 * Wait-free operations: cds_wfs_push, __cds_wfs_pop_all, cds_wfs_empty,
38 * Blocking operations: cds_wfs_pop, cds_wfs_pop_all, cds_wfs_next,
39 * iteration on stack head returned by pop_all.
41 * Synchronization table:
43 * External synchronization techniques described in the API below are
44 * required between pairs marked with "X". No external synchronization
45 * required between pairs marked with "-".
47 * cds_wfs_push __cds_wfs_pop __cds_wfs_pop_all
50 * __cds_wfs_pop_all - X -
52 * cds_wfs_pop and cds_wfs_pop_all use an internal mutex to provide
57 * cds_wfs_node_init: initialize wait-free stack node.
60 void _cds_wfs_node_init(struct cds_wfs_node
*node
)
66 * __cds_wfs_init: initialize wait-free stack. Don't pair with
67 * any destroy function.
69 static inline void ___cds_wfs_init(struct __cds_wfs_stack
*s
)
71 s
->head
= CDS_WFS_END
;
75 * cds_wfs_init: initialize wait-free stack. Pair with
79 void _cds_wfs_init(struct cds_wfs_stack
*s
)
83 s
->head
= CDS_WFS_END
;
84 ret
= pthread_mutex_init(&s
->lock
, NULL
);
85 urcu_posix_assert(!ret
);
89 * cds_wfs_destroy: destroy wait-free stack. Pair with
93 void _cds_wfs_destroy(struct cds_wfs_stack
*s
)
95 int ret
= pthread_mutex_destroy(&s
->lock
);
96 urcu_posix_assert(!ret
);
99 static inline bool ___cds_wfs_end(void *node
)
101 return node
== CDS_WFS_END
;
105 * cds_wfs_empty: return whether wait-free stack is empty.
107 * No memory barrier is issued. No mutual exclusion is required.
109 static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack
)
111 struct __cds_wfs_stack
*s
= u_stack
._s
;
113 return ___cds_wfs_end(uatomic_load(&s
->head
, CMM_RELAXED
));
117 * cds_wfs_push: push a node into the stack.
119 * Issues a full memory barrier before push. No mutual exclusion is
122 * Operations before push are consistent when observed after associated pop.
124 * Returns 0 if the stack was empty prior to adding the node.
125 * Returns non-zero otherwise.
128 int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack
, struct cds_wfs_node
*node
)
130 struct __cds_wfs_stack
*s
= u_stack
._s
;
131 struct cds_wfs_head
*old_head
, *new_head
;
133 urcu_posix_assert(node
->next
== NULL
);
134 new_head
= caa_container_of(node
, struct cds_wfs_head
, node
);
136 * uatomic_xchg() implicit memory barrier orders earlier stores
137 * to node (setting it to NULL) before publication.
139 cmm_emit_legacy_smp_mb();
140 old_head
= uatomic_xchg_mo(&s
->head
, new_head
, CMM_SEQ_CST
);
142 * At this point, dequeuers see a NULL node->next, they should
143 * busy-wait until node->next is set to old_head.
145 uatomic_store(&node
->next
, &old_head
->node
, CMM_RELEASE
);
146 return !___cds_wfs_end(old_head
);
150 * Waiting for push to complete enqueue and return the next node.
152 static inline struct cds_wfs_node
*
153 ___cds_wfs_node_sync_next(struct cds_wfs_node
*node
, int blocking
)
155 struct cds_wfs_node
*next
;
159 * Adaptative busy-looping waiting for push to complete.
161 while ((next
= uatomic_load(&node
->next
, CMM_CONSUME
)) == NULL
) {
163 return CDS_WFS_WOULDBLOCK
;
164 if (++attempt
>= CDS_WFS_ADAPT_ATTEMPTS
) {
165 (void) poll(NULL
, 0, CDS_WFS_WAIT
); /* Wait for 10ms */
176 struct cds_wfs_node
*
177 ___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack
, int *state
, int blocking
)
179 struct cds_wfs_head
*head
, *new_head
;
180 struct cds_wfs_node
*next
;
181 struct __cds_wfs_stack
*s
= u_stack
._s
;
186 head
= uatomic_load(&s
->head
, CMM_CONSUME
);
187 if (___cds_wfs_end(head
)) {
190 next
= ___cds_wfs_node_sync_next(&head
->node
, blocking
);
191 if (!blocking
&& next
== CDS_WFS_WOULDBLOCK
) {
192 return CDS_WFS_WOULDBLOCK
;
194 new_head
= caa_container_of(next
, struct cds_wfs_head
, node
);
195 if (uatomic_cmpxchg_mo(&s
->head
, head
, new_head
,
196 CMM_SEQ_CST
, CMM_SEQ_CST
) == head
) {
197 if (state
&& ___cds_wfs_end(new_head
))
198 *state
|= CDS_WFS_STATE_LAST
;
199 cmm_emit_legacy_smp_mb();
203 return CDS_WFS_WOULDBLOCK
;
205 /* busy-loop if head changed under us */
210 * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
212 * Returns NULL if stack is empty.
214 * Operations after pop are consistent when observed before associated push.
216 * __cds_wfs_pop_blocking needs to be synchronized using one of the
217 * following techniques:
219 * 1) Calling __cds_wfs_pop_blocking under rcu read lock critical
220 * section. The caller must wait for a grace period to pass before
221 * freeing the returned node or modifying the cds_wfs_node structure.
222 * 2) Using mutual exclusion (e.g. mutexes) to protect
223 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
224 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
225 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
227 * "state" saves state flags atomically sampled with pop operation.
230 struct cds_wfs_node
*
231 ___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack
, int *state
)
233 return ___cds_wfs_pop(u_stack
, state
, 1);
237 struct cds_wfs_node
*
238 ___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack
)
240 return ___cds_wfs_pop_with_state_blocking(u_stack
, NULL
);
244 * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
246 * Same as __cds_wfs_pop_with_state_blocking, but returns
247 * CDS_WFS_WOULDBLOCK if it needs to block.
249 * "state" saves state flags atomically sampled with pop operation.
252 struct cds_wfs_node
*
253 ___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack
, int *state
)
255 return ___cds_wfs_pop(u_stack
, state
, 0);
259 * __cds_wfs_pop_nonblocking: pop a node from the stack.
261 * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
265 struct cds_wfs_node
*
266 ___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack
)
268 return ___cds_wfs_pop_with_state_nonblocking(u_stack
, NULL
);
272 * __cds_wfs_pop_all: pop all nodes from a stack.
274 * Operations after pop are consistent when observed before associated push.
276 * __cds_wfs_pop_all does not require any synchronization with other
277 * push, nor with other __cds_wfs_pop_all, but requires synchronization
278 * matching the technique used to synchronize __cds_wfs_pop_blocking:
280 * 1) If __cds_wfs_pop_blocking is called under rcu read lock critical
281 * section, both __cds_wfs_pop_blocking and cds_wfs_pop_all callers
282 * must wait for a grace period to pass before freeing the returned
283 * node or modifying the cds_wfs_node structure. However, no RCU
284 * read-side critical section is needed around __cds_wfs_pop_all.
285 * 2) Using mutual exclusion (e.g. mutexes) to protect
286 * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
287 * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
288 * and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
291 struct cds_wfs_head
*
292 ___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack
)
294 struct __cds_wfs_stack
*s
= u_stack
._s
;
295 struct cds_wfs_head
*head
;
298 * Implicit memory barrier after uatomic_xchg() matches implicit
299 * memory barrier before uatomic_xchg() in cds_wfs_push. It
300 * ensures that all nodes of the returned list are consistent.
301 * There is no need to issue memory barriers when iterating on
302 * the returned list, because the full memory barrier issued
303 * prior to each uatomic_cmpxchg, which each write to head, are
304 * taking care to order writes to each node prior to the full
305 * memory barrier after this uatomic_xchg().
307 head
= uatomic_xchg_mo(&s
->head
, CDS_WFS_END
, CMM_SEQ_CST
);
308 cmm_emit_legacy_smp_mb();
309 if (___cds_wfs_end(head
))
315 * cds_wfs_pop_lock: lock stack pop-protection mutex.
317 static inline void _cds_wfs_pop_lock(struct cds_wfs_stack
*s
)
321 ret
= pthread_mutex_lock(&s
->lock
);
322 urcu_posix_assert(!ret
);
326 * cds_wfs_pop_unlock: unlock stack pop-protection mutex.
328 static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack
*s
)
332 ret
= pthread_mutex_unlock(&s
->lock
);
333 urcu_posix_assert(!ret
);
337 * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
340 struct cds_wfs_node
*
341 _cds_wfs_pop_with_state_blocking(struct cds_wfs_stack
*s
, int *state
)
343 struct cds_wfs_node
*retnode
;
344 cds_wfs_stack_ptr_t stack
;
346 _cds_wfs_pop_lock(s
);
348 retnode
= ___cds_wfs_pop_with_state_blocking(stack
, state
);
349 _cds_wfs_pop_unlock(s
);
354 * Call _cds_wfs_pop_with_state_blocking without saving any state.
static inline struct cds_wfs_node *
_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
{
	return _cds_wfs_pop_with_state_blocking(s, NULL);
}
364 * Call __cds_wfs_pop_all with an internal pop mutex held.
367 struct cds_wfs_head
*
368 _cds_wfs_pop_all_blocking(struct cds_wfs_stack
*s
)
370 struct cds_wfs_head
*rethead
;
371 cds_wfs_stack_ptr_t stack
;
373 _cds_wfs_pop_lock(s
);
375 rethead
= ___cds_wfs_pop_all(stack
);
376 _cds_wfs_pop_unlock(s
);
381 * cds_wfs_first: get first node of a popped stack.
383 * Content written into the node before enqueue is guaranteed to be
384 * consistent, but no other memory ordering is ensured.
386 * Used by for-like iteration macros in urcu/wfstack.h:
387 * cds_wfs_for_each_blocking()
388 * cds_wfs_for_each_blocking_safe()
390 * Returns NULL if popped stack is empty, top stack node otherwise.
392 static inline struct cds_wfs_node
*
393 _cds_wfs_first(struct cds_wfs_head
*head
)
395 if (___cds_wfs_end(head
))
static inline struct cds_wfs_node *
___cds_wfs_next(struct cds_wfs_node *node, int blocking)
{
	struct cds_wfs_node *next;

	next = ___cds_wfs_node_sync_next(node, blocking);
	/*
	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
	 * and still return CDS_WFS_WOULDBLOCK.
	 */
	if (___cds_wfs_end(next))
		return NULL;
	return next;
}
417 * cds_wfs_next_blocking: get next node of a popped stack.
419 * Content written into the node before enqueue is guaranteed to be
420 * consistent, but no other memory ordering is ensured.
422 * Used by for-like iteration macros in urcu/wfstack.h:
423 * cds_wfs_for_each_blocking()
424 * cds_wfs_for_each_blocking_safe()
426 * Returns NULL if reached end of popped stack, non-NULL next stack
static inline struct cds_wfs_node *
_cds_wfs_next_blocking(struct cds_wfs_node *node)
{
	/* Blocking variant: waits for a concurrent push to complete. */
	return ___cds_wfs_next(node, 1);
}
437 * cds_wfs_next_nonblocking: get next node of a popped stack.
439 * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
static inline struct cds_wfs_node *
_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
{
	/* Non-blocking variant: may return CDS_WFS_WOULDBLOCK. */
	return ___cds_wfs_next(node, 0);
}
452 #endif /* _URCU_STATIC_WFSTACK_H */