// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_POINTER_STATIC_H
#define _URCU_POINTER_STATIC_H
/*
 * Userspace RCU header. Operations on pointers.
 *
 * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
 * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
/**
 * _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
 * into a RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * With C standards prior to C11/C++11, the compiler memory barrier in
 * CMM_LOAD_SHARED() ensures that value-speculative optimizations (e.g.
 * VSS: Value Speculation Scheduling) does not perform the data read
 * before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile
 * access. This acts as a global side-effect operation, which forbids
 * reordering of dependent memory operations.
 *
 * With C standards C11/C++11, concerns about dependency-breaking
 * optimizations are taken care of by the "memory_order_consume" atomic
 * value.
 *
 * Use the gcc __atomic_load() rather than C11/C++11 atomic load
 * explicit because the pointer used as input argument is a pointer,
 * not an _Atomic type as required by C11/C++11.
 *
 * By defining URCU_DEREFERENCE_USE_VOLATILE, the user requires use of
 * volatile access to implement rcu_dereference rather than
 * memory_order_consume load from the C11/C++11 standards.
 *
 * This may improve performance on weakly-ordered architectures where
 * the compiler implements memory_order_consume as a
 * memory_order_acquire, which is stricter than required by the
 * C11/C++11 standards.
 *
 * Note that using volatile accesses for rcu_dereference may cause
 * LTO to generate incorrectly ordered code starting from C11/C++11.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
/*
 * Select the memory_order_consume implementation of rcu_dereference when
 * building as C11/C++11 or newer, unless the user explicitly asked for the
 * legacy volatile-access implementation via URCU_DEREFERENCE_USE_VOLATILE.
 */
#if !defined (URCU_DEREFERENCE_USE_VOLATILE) &&		\
	((defined (__cplusplus) && __cplusplus >= 201103L) ||	\
	(defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
# define __URCU_DEREFERENCE_USE_ATOMIC_CONSUME
#endif
/*
 * If p is const (the pointer itself, not what it points to), using
 * __typeof__(p) would declare a const variable, leading to
 * -Wincompatible-pointer-types errors. Using the statement expression
 * makes it an rvalue and gets rid of the const-ness.
 */
# define _rcu_dereference(p)	\
		uatomic_load(&(p), CMM_CONSUME)
/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
 * is as expected by "old". If succeeds, returns the previous pointer to the
 * data structure, which can be safely freed after waiting for a quiescent state
 * using synchronize_rcu(). If fails (unexpected value), returns old (which
 * should not be freed !).
 *
 * uatomic_cmpxchg() acts as both release and acquire barriers on success.
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_cmpxchg_pointer(p, old, _new)	\
	__extension__	\
	({	\
		__typeof__(*p) _________pold = (old);	\
		__typeof__(*p) _________pnew = (_new);	\
		uatomic_cmpxchg_mo(p, _________pold, _________pnew,	\
					CMM_SEQ_CST, CMM_RELAXED);	\
	})
/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 *
 * uatomic_xchg() acts as both release and acquire barriers.
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_xchg_pointer(p, v)	\
	__extension__	\
	({	\
		__typeof__(*p) _________pv = (v);	\
		uatomic_xchg_mo(p, _________pv,	\
				CMM_SEQ_CST);	\
	})
/*
 * Store v into *p. The store is RELEASE-ordered so that prior initialization
 * of the pointed-to data is published before the pointer itself; storing a
 * compile-time NULL constant publishes no data and may be RELAXED.
 */
#define _rcu_set_pointer(p, v)	\
	do {	\
		__typeof__(*p) _________pv = (v);	\
		uatomic_store(p, _________pv,	\
			__builtin_constant_p(v) && (v) == NULL ?	\
				CMM_RELAXED : CMM_RELEASE);	\
	} while (0)
/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_assign_pointer(p, v)	_rcu_set_pointer(&(p), v)
#endif /* _URCU_POINTER_STATIC_H */