#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif
/*
 * RCU_MEMBARRIER is only possibly available on Linux. Fall back to RCU_MB
 * otherwise.
 */
#if !defined(__linux__) && defined(RCU_MEMBARRIER)
#undef RCU_MEMBARRIER
#define RCU_MB
#endif
#ifdef RCU_MEMBARRIER
#include <syscall.h>

/* If the headers do not support SYS_membarrier, statically use RCU_MB */
#ifdef SYS_membarrier
# define MEMBARRIER_EXPEDITED	(1 << 0)
# define MEMBARRIER_DELAYED	(1 << 1)
# define MEMBARRIER_QUERY	(1 << 16)
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# undef RCU_MEMBARRIER
# define RCU_MB
#endif
#endif
/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */
/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif
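/*
 * Example (illustrative, not part of the original header): building the
 * library with "-DSIGRCU=SIGUSR2" in CFLAGS makes the RCU_SIGNAL flavor
 * deliver its reader-side signal as SIGUSR2 instead of the default
 * SIGUSR1 shown above.
 */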
#ifdef DEBUG_RCU
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account for this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif
extern unsigned int yield_active;
extern DECLARE_URCU_TLS(unsigned int, rand_yield);

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&URCU_TLS(rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rand_yield)) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
{
	URCU_TLS(rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif
/*
 * RCU memory barrier broadcast group. Currently, only broadcast to all process
 * threads is supported (group 0).
 *
 * Slave barriers are only guaranteed to be ordered wrt master barriers.
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *			slave	master
 *	slave		X	O
 *	master		O	O
 */

#define MB_GROUP_ALL		0
#define RCU_MB_GROUP		MB_GROUP_ALL
#ifdef RCU_MEMBARRIER
extern int has_sys_membarrier;

static inline void smp_mb_slave(int group)
{
	if (caa_likely(has_sys_membarrier))
		cmm_barrier();
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
	cmm_barrier();
}
#endif
/*
 * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can
 * use a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
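/*
 * Worked example (illustrative): on an LP64 architecture,
 * sizeof(unsigned long) == 8, so:
 *	RCU_GP_COUNT		== 0x1
 *	RCU_GP_CTR_PHASE	== 1UL << 32
 *	RCU_GP_CTR_NEST_MASK	== 0xffffffff
 * The reader nesting count occupies the low 32 bits and the grace-period
 * phase bit sits just above them, so a single load reads both.
 */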
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern unsigned long rcu_gp_ctr;
struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	unsigned long ctr;
	char need_mb;
	/* Data used for registry */
	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);
extern int32_t gp_futex;
/*
 * Wake up waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex_async(&gp_futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}
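/*
 * Sketch of the waiter side (the real code lives in urcu.c; treat this as
 * an assumption about its shape): when synchronize_rcu() must block until
 * all readers exit their critical sections, it decrements gp_futex to -1
 * and calls futex_wait() on it. That -1 is exactly what the
 * caa_unlikely() test above detects before issuing the wake-up.
 */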
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
	unsigned long v;

	/*
	 * Make sure both tests below are done on the same version of *ctr
	 * to ensure consistency.
	 */
	v = CMM_LOAD_SHARED(*ctr);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
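/*
 * Worked example (illustrative): suppose rcu_gp_ctr currently has
 * RCU_GP_CTR_PHASE set. A reader that entered its critical section during
 * the previous phase holds ctr == RCU_GP_COUNT with the phase bit clear:
 * the first test sees a non-zero nesting count, and (v ^ rcu_gp_ctr) keeps
 * the phase bit set, so rcu_gp_ongoing() returns non-zero and
 * synchronize_rcu() keeps waiting. A reader with zero nesting, or one that
 * snapshotted the current phase, yields zero.
 */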
static inline void _rcu_read_lock(void)
{
	unsigned long tmp;

	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
	tmp = URCU_TLS(rcu_reader).ctr;
	/*
	 * rcu_gp_ctr is
	 *	RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
	 */
	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
				_CMM_LOAD_SHARED(rcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See smp_mb_master().
		 */
		smp_mb_slave(RCU_MB_GROUP);
	} else {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
	}
}
static inline void _rcu_read_unlock(void)
{
	unsigned long tmp;

	tmp = URCU_TLS(rcu_reader).ctr;
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See smp_mb_master().
	 */
	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		smp_mb_slave(RCU_MB_GROUP);
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
				URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
		/* write URCU_TLS(rcu_reader).ctr before read futex */
		smp_mb_slave(RCU_MB_GROUP);
		wake_up_gp();
	} else {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
				URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
	}
	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
}
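/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * read-side critical section built on the fast paths above. Assumes a
 * pointer published elsewhere with rcu_assign_pointer() and read with
 * rcu_dereference() from urcu-pointer.h; gp_ptr and do_something_with()
 * are hypothetical.
 *
 *	_rcu_read_lock();
 *	p = rcu_dereference(gp_ptr);
 *	if (p)
 *		do_something_with(p);
 *	_rcu_read_unlock();
 *
 * Nesting is legal: an inner _rcu_read_lock() only increments the
 * per-thread count and issues no memory barrier.
 */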
#endif /* _URCU_STATIC_H */