// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_POINTER_STATIC_H
#define _URCU_POINTER_STATIC_H

/*
 * Userspace RCU header. Operations on pointers.
 *
 * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
 * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
/*
 * _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
 * into a RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant thorough the whole critical
 * section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * With C standards prior to C11/C++11, the compiler memory barrier in
 * CMM_LOAD_SHARED() ensures that value-speculative optimizations (e.g.
 * VSS: Value Speculation Scheduling) does not perform the data read
 * before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile
 * access. This acts as a global side-effect operation, which forbids
 * reordering of dependent memory operations.
 *
 * With C standards C11/C++11, concerns about dependency-breaking
 * optimizations are taken care of by the "memory_order_consume" atomic
 * load.
 *
 * Use the gcc __atomic_load() rather than C11/C++11 atomic load
 * explicit because the pointer used as input argument is a pointer,
 * not an _Atomic type as required by C11/C++11.
 *
 * By defining URCU_DEREFERENCE_USE_VOLATILE, the user requires use of
 * volatile access to implement rcu_dereference rather than
 * memory_order_consume load from the C11/C++11 standards.
 *
 * This may improve performance on weakly-ordered architectures where
 * the compiler implements memory_order_consume as a
 * memory_order_acquire, which is stricter than required by the
 * standard.
 *
 * Note that using volatile accesses for rcu_dereference may cause
 * LTO to generate incorrectly ordered code starting from C11/C++11.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#if !defined (URCU_DEREFERENCE_USE_VOLATILE) &&		\
	((defined (__cplusplus) && __cplusplus >= 201103L) ||	\
	(defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
# define __URCU_DEREFERENCE_USE_ATOMIC_CONSUME
#endif

/*
 * If p is const (the pointer itself, not what it points to), using
 * __typeof__(p) would declare a const variable, leading to
 * -Wincompatible-pointer-types errors. Using the statement expression
 * makes it an rvalue and gets rid of the const-ness.
 */
#ifdef __URCU_DEREFERENCE_USE_ATOMIC_CONSUME
# define _rcu_dereference(p) __extension__ ({ \
				__typeof__(__extension__ ({ \
					__typeof__(p) __attribute__((unused)) _________p0 = { 0 }; \
					_________p0; \
				})) _________p1; \
				__atomic_load(&(p), &_________p1, __ATOMIC_CONSUME); \
				(_________p1); \
			})
#else
# define _rcu_dereference(p) __extension__ ({ \
				__typeof__(p) _________p1 = CMM_LOAD_SHARED(p); \
				cmm_smp_read_barrier_depends(); \
				(_________p1); \
			})
#endif
/*
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
 * is as expected by "old". If succeeds, returns the previous pointer to the
 * data structure, which can be safely freed after waiting for a quiescent state
 * using synchronize_rcu(). If fails (unexpected value), returns old (which
 * should not be freed !).
 *
 * uatomic_cmpxchg() acts as both release and acquire barriers.
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_cmpxchg_pointer(p, old, _new)	\
	__extension__				\
	({					\
		__typeof__(*p) _________pold = (old); \
		__typeof__(*p) _________pnew = (_new); \
		uatomic_cmpxchg(p, _________pold, _________pnew); \
	})
/*
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 *
 * uatomic_xchg() acts as both release and acquire barriers.
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_xchg_pointer(p, v)		\
	__extension__			\
	({				\
		__typeof__(*p) _________pv = (v); \
		uatomic_xchg(p, _________pv);	\
	})
/*
 * _rcu_set_pointer - store a new value into a RCU-protected pointer.
 *
 * Emits a write memory barrier (cmm_wmb()) before the store, so that prior
 * initialization of the pointed-to data is visible to readers before the
 * pointer is published. The barrier is skipped only when the compiler can
 * prove the stored value is the constant NULL (nothing to order against).
 */
#define _rcu_set_pointer(p, v)		\
	do {				\
		__typeof__(*p) _________pv = (v); \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL))		\
			cmm_wmb();		\
		uatomic_set(p, _________pv);	\
	} while (0)
/*
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_assign_pointer(p, v)	_rcu_set_pointer(&(p), v)
#endif /* _URCU_POINTER_STATIC_H */