// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_STATIC_LFSTACK_H
#define _URCU_STATIC_LFSTACK_H

/*
 * Userspace RCU library - Lock-Free Stack
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
 * linking dynamically with the userspace rcu library.
 */

#include <stdbool.h>
#include <pthread.h>
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu-pointer.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Stack implementing push, pop, and pop_all operations, as well as an
 * iterator on the stack head returned by pop_all.
 *
 * Synchronization table:
 *
 * External synchronization techniques described in the API below are
 * required between pairs marked with "X". No external synchronization
 * is required between pairs marked with "-".
 *
 *                      cds_lfs_push  __cds_lfs_pop  __cds_lfs_pop_all
 * cds_lfs_push               -              -                  -
 * __cds_lfs_pop              -              X                  X
 * __cds_lfs_pop_all          -              X                  -
 *
 * cds_lfs_pop_blocking and cds_lfs_pop_all_blocking use an internal
 * mutex to provide synchronization.
 */

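/*
 * Usage example (editor's sketch, not part of this header's API): a
 * producer pushing items onto a shared stack through the public
 * wrappers declared in urcu/lfstack.h. "struct myitem" and
 * "push_item" are hypothetical names used for illustration only.
 *
 *	#include <stdlib.h>
 *	#include <urcu/lfstack.h>
 *
 *	struct myitem {
 *		struct cds_lfs_node node;
 *		int value;
 *	};
 *
 *	static struct cds_lfs_stack stack;	// cds_lfs_init(&stack) before use
 *
 *	static void push_item(int value)
 *	{
 *		struct myitem *item = malloc(sizeof(*item));
 *
 *		item->value = value;
 *		cds_lfs_node_init(&item->node);
 *		cds_lfs_push(&stack, &item->node);	// no external locking needed
 *	}
 */
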
/*
 * cds_lfs_node_init: initialize lock-free stack node.
 */
static inline
void _cds_lfs_node_init(struct cds_lfs_node *node __attribute__((unused)))
{
}

/*
 * cds_lfs_init: initialize lock-free stack (with lock). Pair with
 * cds_lfs_destroy().
 */
static inline
void _cds_lfs_init(struct cds_lfs_stack *s)
{
	int ret;

	s->head = NULL;
	ret = pthread_mutex_init(&s->lock, NULL);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_destroy: destroy lock-free stack (with lock). Pair with
 * cds_lfs_init().
 */
static inline
void _cds_lfs_destroy(struct cds_lfs_stack *s)
{
	int ret = pthread_mutex_destroy(&s->lock);

	urcu_posix_assert(!ret);
}

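/*
 * Lifecycle sketch (editor's addition): cds_lfs_init() must complete
 * before the stack is shared, and cds_lfs_destroy() must only run
 * after all concurrent users are done.
 *
 *	struct cds_lfs_stack stack;
 *
 *	cds_lfs_init(&stack);
 *	// ... concurrent cds_lfs_push() / cds_lfs_pop_blocking() ...
 *	cds_lfs_destroy(&stack);
 */
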
/*
 * ___cds_lfs_init: initialize lock-free stack (without lock).
 * Don't pair with any destroy function.
 */
static inline
void ___cds_lfs_init(struct __cds_lfs_stack *s)
{
	s->head = NULL;
}

static inline
bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
{
	return head == NULL;
}

/*
 * cds_lfs_empty: return whether lock-free stack is empty.
 *
 * No memory barrier is issued. No mutual exclusion is required.
 */
static inline
bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
{
	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
}

/*
 * cds_lfs_push: push a node into the stack.
 *
 * Does not require any synchronization with other push or pop operations.
 *
 * Operations before push are consistent when observed after associated pop.
 *
 * Lock-free stack push is not subject to ABA problem, so no need to
 * take the RCU read-side lock. Even if "head" changes between two
 * uatomic_cmpxchg() invocations here (being popped, and then pushed
 * again by one or more concurrent threads), the second
 * uatomic_cmpxchg() invocation only cares about pushing a new entry at
 * the head of the stack, ensuring consistency by making sure the new
 * node->next is the same pointer value as the value replaced as head.
 * It does not care about the content of the actual next node, so it can
 * very well be reallocated between the two uatomic_cmpxchg().
 *
 * We take the approach of expecting the stack to be usually empty, so
 * we first try an initial uatomic_cmpxchg() on a NULL old_head, and
 * retry if the old head was non-NULL (the value read by the first
 * uatomic_cmpxchg() is used as old head for the following loop). The
 * upside of this scheme is to minimize the amount of cacheline traffic,
 * always performing an exclusive cacheline access, rather than doing
 * non-exclusive followed by exclusive cacheline access (which would be
 * required if we first read the old head value). This design decision
 * might be revisited after more thorough benchmarking on various
 * architectures.
 *
 * Returns 0 if the stack was empty prior to adding the node.
 * Returns non-zero otherwise.
 */
static inline
bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
		struct cds_lfs_node *node)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head = NULL;
	struct cds_lfs_head *new_head =
		caa_container_of(node, struct cds_lfs_head, node);

	for (;;) {
		struct cds_lfs_head *old_head = head;

		/*
		 * node->next is still private at this point, no need to
		 * perform a _CMM_STORE_SHARED().
		 */
		node->next = &head->node;
		/*
		 * uatomic_cmpxchg() implicit memory barrier orders earlier
		 * stores to node before publication.
		 */
		cmm_emit_legacy_smp_mb();
		head = uatomic_cmpxchg_mo(&s->head, old_head, new_head,
					CMM_SEQ_CST, CMM_SEQ_CST);
		if (old_head == head)
			break;
	}
	return !___cds_lfs_empty_head(head);
}

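/*
 * Example (editor's sketch): the return value of cds_lfs_push() can
 * drive a consumer wakeup on the empty -> non-empty transition.
 * "wake_consumer" is a hypothetical helper, not part of liburcu.
 *
 *	if (!cds_lfs_push(&stack, &item->node))
 *		wake_consumer();	// stack was empty before this push
 */
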
/*
 * __cds_lfs_pop: pop a node from the stack.
 *
 * Returns NULL if stack is empty.
 *
 * Operations after pop are consistent when observed before associated push.
 *
 * __cds_lfs_pop needs to be synchronized using one of the following
 * techniques:
 *
 * 1) Calling __cds_lfs_pop under rcu read lock critical section (see
 *    the usage sketch after this function). Both __cds_lfs_pop and
 *    __cds_lfs_pop_all callers must wait for a grace period to pass
 *    before freeing the returned node or pushing the node back into
 *    the stack. It is valid to overwrite the content of cds_lfs_node
 *    immediately after __cds_lfs_pop and __cds_lfs_pop_all. No RCU
 *    read-side critical section is needed around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop
 *    and __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;

	for (;;) {
		struct cds_lfs_head *head, *next_head;
		struct cds_lfs_node *next;

		head = uatomic_load(&s->head, CMM_CONSUME);
		if (___cds_lfs_empty_head(head))
			return NULL;	/* Empty stack */

		/*
		 * Read head before head->next. Matches the implicit
		 * memory barrier before uatomic_cmpxchg() in
		 * cds_lfs_push.
		 */
		next = uatomic_load(&head->node.next, CMM_RELAXED);
		next_head = caa_container_of(next,
				struct cds_lfs_head, node);
		if (uatomic_cmpxchg_mo(&s->head, head, next_head,
				CMM_SEQ_CST, CMM_SEQ_CST) == head) {
			cmm_emit_legacy_smp_mb();
			return &head->node;
		}
		/* busy-loop if head changed under us */
	}
}

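/*
 * Example (editor's sketch): technique 1) above, popping under an RCU
 * read-side critical section and deferring reclaim until a grace
 * period has elapsed. Assumes a urcu flavor is in use and the thread
 * is registered; "struct myitem" and "use" are hypothetical names
 * from the earlier push example.
 *
 *	struct cds_lfs_node *node;
 *
 *	rcu_read_lock();
 *	node = __cds_lfs_pop(&stack);
 *	rcu_read_unlock();
 *	if (node) {
 *		struct myitem *item =
 *			caa_container_of(node, struct myitem, node);
 *
 *		use(item->value);	// content is owned by the popper
 *		synchronize_rcu();	// or call_rcu() with a free callback
 *		free(item);
 *	}
 */
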
/*
 * __cds_lfs_pop_all: pop all nodes from a stack.
 *
 * __cds_lfs_pop_all does not require any synchronization with other
 * push, nor with other __cds_lfs_pop_all, but requires synchronization
 * matching the technique used to synchronize __cds_lfs_pop:
 *
 * 1) If __cds_lfs_pop is called under rcu read lock critical section,
 *    both __cds_lfs_pop and __cds_lfs_pop_all callers must wait for a
 *    grace period to pass before freeing the returned node or pushing
 *    the node back into the stack. It is valid to overwrite the content
 *    of cds_lfs_node immediately after __cds_lfs_pop and
 *    __cds_lfs_pop_all. No RCU read-side critical section is needed
 *    around __cds_lfs_pop_all.
 * 2) Using mutual exclusion (e.g. mutexes) to protect __cds_lfs_pop and
 *    __cds_lfs_pop_all callers.
 * 3) Ensuring that only ONE thread can call __cds_lfs_pop() and
 *    __cds_lfs_pop_all() (multi-provider/single-consumer scheme).
 */
static inline
struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
{
	struct __cds_lfs_stack *s = u_s._s;
	struct cds_lfs_head *head;

	/*
	 * Implicit memory barrier after uatomic_xchg() matches implicit
	 * memory barrier before uatomic_cmpxchg() in cds_lfs_push. It
	 * ensures that all nodes of the returned list are consistent.
	 * There is no need to issue memory barriers when iterating on
	 * the returned list, because the full memory barriers issued
	 * prior to each uatomic_cmpxchg(), which each write to head,
	 * take care to order writes to each node prior to the full
	 * memory barrier after this uatomic_xchg().
	 */
	head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST);
	cmm_emit_legacy_smp_mb();
	return head;
}

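/*
 * Example (editor's sketch): draining the stack in one operation and
 * walking the returned list with cds_lfs_for_each_safe() from
 * urcu/lfstack.h (the "safe" variant tolerates freeing the current
 * node). Immediate free() is shown assuming technique 2) or 3); under
 * technique 1), wait for a grace period first. "consume" is a
 * hypothetical helper.
 *
 *	struct cds_lfs_head *list;
 *	struct cds_lfs_node *node, *n;
 *
 *	list = __cds_lfs_pop_all(&stack);
 *	cds_lfs_for_each_safe(list, node, n) {
 *		struct myitem *item =
 *			caa_container_of(node, struct myitem, node);
 *
 *		consume(item->value);
 *		free(item);
 *	}
 */
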
/*
 * cds_lfs_pop_lock: lock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_lock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * cds_lfs_pop_unlock: unlock stack pop-protection mutex.
 */
static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	int ret;

	ret = pthread_mutex_unlock(&s->lock);
	urcu_posix_assert(!ret);
}

/*
 * Call __cds_lfs_pop with an internal pop mutex held.
 */
static inline
struct cds_lfs_node *
_cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_node *retnode;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	retnode = ___cds_lfs_pop(stack);
	_cds_lfs_pop_unlock(s);
	return retnode;
}

/*
 * Call __cds_lfs_pop_all with an internal pop mutex held.
 */
static inline
struct cds_lfs_head *
_cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	struct cds_lfs_head *rethead;
	cds_lfs_stack_ptr_t stack;

	_cds_lfs_pop_lock(s);
	stack.s = s;
	rethead = ___cds_lfs_pop_all(stack);
	_cds_lfs_pop_unlock(s);
	return rethead;
}

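/*
 * Example (editor's sketch): the _blocking variants serialize poppers
 * through the stack's internal mutex, so no RCU or external locking
 * is required; producers may keep calling cds_lfs_push() concurrently,
 * and the returned node can be reused or freed immediately. "handle"
 * is a hypothetical helper.
 *
 *	struct cds_lfs_node *node = cds_lfs_pop_blocking(&stack);
 *
 *	if (node)
 *		handle(caa_container_of(node, struct myitem, node));
 */
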
#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_LFSTACK_H */