*
* Userspace RCU header.
*
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
- * dynamically with the userspace rcu library.
+ * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
+ * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
*
* Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
+/*
+ * Helper for _rcu_read_lock(). The format of rcu_gp_ctr (as well as
+ * the per-thread rcu_reader.ctr) has the lower-order bits containing a count
+ * of _rcu_read_lock() nesting, and a single higher-order bit that contains
+ * either zero or RCU_GP_CTR_PHASE. The cmm_smp_mb() ensures that the accesses
+ * in _rcu_read_lock() happen before the subsequent read-side critical section.
+ */
+static inline void _rcu_read_lock_update(unsigned long tmp)
+{
+ if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
+ } else
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
+}
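+
+/*
+ * Illustration (not a definition from this header): assuming the usual
+ * liburcu encoding, where RCU_GP_COUNT is the smallest counter increment
+ * and RCU_GP_CTR_NEST_MASK covers the bits below RCU_GP_CTR_PHASE, a
+ * reader's ctr evolves roughly as:
+ *
+ *	ctr == 0			outside any read-side critical section
+ *	ctr == snapshot of rcu_gp_ctr	after the outermost _rcu_read_lock()
+ *	ctr == snapshot + RCU_GP_COUNT	after one nested _rcu_read_lock()
+ *
+ * so the nesting depth and the grace-period phase snapshot share a single
+ * word.
+ */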
+
+/*
+ * Enter an RCU read-side critical section.
+ *
+ * The first cmm_barrier() call ensures that the compiler does not reorder
+ * the body of _rcu_read_lock() with a mutex.
+ *
+ * This function and its helper are both less than 10 lines long. The
+ * intent is that this function meets the 10-line criterion in LGPL,
+ * allowing this function to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_read_lock(void)
{
long tmp;
- /* Check if registered */
if (caa_unlikely(!URCU_TLS(rcu_reader)))
- rcu_bp_register();
-
+ rcu_bp_register(); /* If not yet registered. */
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
tmp = URCU_TLS(rcu_reader)->ctr;
- /*
- * rcu_gp_ctr is
- * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
- */
- if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- /*
- * Set active readers count for outermost nesting level before
- * accessing the pointer.
- */
- cmm_smp_mb();
- } else {
- _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
- }
+ _rcu_read_lock_update(tmp);
}
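+
+/*
+ * Example (illustration only, not part of the liburcu API): a minimal
+ * reader built on the public wrappers, assuming a hypothetical
+ * RCU-protected pointer gp_foo published elsewhere with
+ * rcu_assign_pointer(). When _LGPL_SOURCE is defined before including
+ * urcu-bp.h, rcu_read_lock() and rcu_read_unlock() map to the inlines
+ * in this file.
+ *
+ *	struct foo *p;
+ *	int v = -1;
+ *
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp_foo);
+ *	if (p)
+ *		v = p->a;
+ *	rcu_read_unlock();
+ */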
+/*
+ * Exit an RCU read-side critical section. This function is less than
+ * 10 lines of code, and is intended to be usable by non-LGPL code, as
+ * called out in LGPL.
+ */
static inline void _rcu_read_unlock(void)
{
/*
*
* Userspace RCU header. Operations on pointers.
*
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-pointer.h for
- * linking dynamically with the userspace rcu library.
+ * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
+ * RELEASE. See urcu-pointer.h for linking dynamically with the userspace rcu
+ * library.
*
* Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
* addition to forthcoming C++ standard.
*
* Should match rcu_assign_pointer() or rcu_xchg_pointer().
+ *
+ * This macro is less than 10 lines long. The intent is that this macro
+ * meets the 10-line criterion in LGPL, allowing this macro to be
+ * expanded directly in non-LGPL code.
*/
-
#define _rcu_dereference(p) ({ \
__typeof__(p) _________p1 = CMM_LOAD_SHARED(p); \
cmm_smp_read_barrier_depends(); \
* data structure, which can be safely freed after waiting for a quiescent state
* using synchronize_rcu(). If fails (unexpected value), returns old (which
* should not be freed !).
+ *
+ * This macro is less than 10 lines long. The intent is that this macro
+ * meets the 10-line criterion in LGPL, allowing this macro to be
+ * expanded directly in non-LGPL code.
*/
-
#define _rcu_cmpxchg_pointer(p, old, _new) \
({ \
__typeof__(*p) _________pold = (old); \
* _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
* pointer to the data structure, which can be safely freed after waiting for a
* quiescent state using synchronize_rcu().
+ *
+ * This macro is less than 10 lines long. The intent is that this macro
+ * meets the 10-line criterion in LGPL, allowing this macro to be
+ * expanded directly in non-LGPL code.
*/
-
#define _rcu_xchg_pointer(p, v) \
({ \
__typeof__(*p) _________pv = (v); \
* data structure before its publication.
*
* Should match rcu_dereference_pointer().
+ *
+ * This macro is less than 10 lines long. The intent is that this macro
+ * meets the 10-line criterion in LGPL, allowing this macro to be
+ * expanded directly in non-LGPL code.
*/
-
#define _rcu_assign_pointer(p, v) _rcu_set_pointer(&(p), v)
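+
+/*
+ * Example (illustration only): initial publication and later replacement
+ * of a hypothetical RCU-protected pointer gp_foo (first_foo, new_foo and
+ * old_foo are placeholders). rcu_assign_pointer() publishes the first
+ * version; rcu_xchg_pointer() installs a replacement and returns the old
+ * pointer, which may be freed once synchronize_rcu() guarantees that no
+ * reader still references it.
+ *
+ *	rcu_assign_pointer(gp_foo, first_foo);
+ *	...
+ *	old_foo = rcu_xchg_pointer(&gp_foo, new_foo);
+ *	synchronize_rcu();
+ *	free(old_foo);
+ */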
#ifdef __cplusplus
*
* Userspace RCU QSBR header.
*
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-qsbr.h for linking
- * dynamically with the userspace rcu QSBR library.
+ * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
+ * RELEASE. See urcu-qsbr.h for linking dynamically with the userspace rcu
+ * QSBR library.
*
* Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
return v && (v != rcu_gp_ctr);
}
+/*
+ * Enter an RCU read-side critical section.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_read_lock(void)
{
rcu_assert(URCU_TLS(rcu_reader).ctr);
}
+/*
+ * Exit an RCU read-side critical section.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_read_unlock(void)
{
}
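+
+/*
+ * Example (illustration only): in the QSBR flavour the lock/unlock pair
+ * generates no code, but it still delimits the region in which an
+ * rcu_dereference()d pointer may be used. The thread must be online and
+ * must not announce a quiescent state inside that region. gp_foo and
+ * use_foo() are hypothetical.
+ *
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp_foo);
+ *	if (p)
+ *		use_foo(p);
+ *	rcu_read_unlock();
+ */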
+/*
+ * Inform RCU of a quiescent state.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_quiescent_state(void)
{
cmm_smp_mb();
cmm_smp_mb();
}
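+
+/*
+ * Example (illustration only): a QSBR reader thread announces a quiescent
+ * state from its main loop, outside any read-side critical section, so
+ * that grace periods can complete. process_one_item() stands in for
+ * application work.
+ *
+ *	for (;;) {
+ *		rcu_read_lock();
+ *		process_one_item();
+ *		rcu_read_unlock();
+ *		rcu_quiescent_state();
+ *	}
+ */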
+/*
+ * Take a thread offline, prohibiting it from entering further RCU
+ * read-side critical sections.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_thread_offline(void)
{
cmm_smp_mb();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}
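+
+/*
+ * Example (illustration only): a QSBR reader goes offline around a
+ * blocking call so that it does not hold up grace periods, then comes
+ * back online before resuming RCU reads. poll() stands in for any call
+ * that may block for a long time.
+ *
+ *	rcu_thread_offline();
+ *	poll(fds, nfds, -1);
+ *	rcu_thread_online();
+ */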
+/*
+ * Bring a thread online, allowing it to once again enter RCU
+ * read-side critical sections.
+ *
+ * This function is less than 10 lines long. The intent is that this
+ * function meets the 10-line criterion for LGPL, allowing this function
+ * to be invoked directly from non-LGPL code.
+ */
static inline void _rcu_thread_online(void)
{
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
*
* Userspace RCU header.
*
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
- * dynamically with the userspace rcu library.
+ * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
+ * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
*
* Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
-static inline void _rcu_read_lock(void)
+/*
+ * Helper for _rcu_read_lock(). The format of rcu_gp_ctr (as well as
+ * the per-thread rcu_reader.ctr) has the lower-order bits containing a count
+ * of _rcu_read_lock() nesting, and a single higher-order bit that contains
+ * either zero or RCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the
+ * accesses in _rcu_read_lock() happen before the subsequent read-side
+ * critical section.
+ */
+static inline void _rcu_read_lock_update(unsigned long tmp)
{
- unsigned long tmp;
-
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = URCU_TLS(rcu_reader).ctr;
- /*
- * rcu_gp_ctr is
- * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
- */
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- /*
- * Set active readers count for outermost nesting level before
- * accessing the pointer. See smp_mb_master().
- */
smp_mb_slave(RCU_MB_GROUP);
- } else {
+ } else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
- }
}
-static inline void _rcu_read_unlock(void)
+/*
+ * Enter an RCU read-side critical section.
+ *
+ * The first cmm_barrier() call ensures that the compiler does not reorder
+ * the body of _rcu_read_lock() with a mutex.
+ *
+ * This function and its helper are both less than 10 lines long. The
+ * intent is that this function meets the 10-line criterion in LGPL,
+ * allowing this function to be invoked directly from non-LGPL code.
+ */
+static inline void _rcu_read_lock(void)
{
unsigned long tmp;
+ cmm_barrier();
tmp = URCU_TLS(rcu_reader).ctr;
- /*
- * Finish using rcu before decrementing the pointer.
- * See smp_mb_master().
- */
+ _rcu_read_lock_update(tmp);
+}
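+
+/*
+ * Example (illustration only): unlike urcu-bp, this flavour requires
+ * explicit registration, so each reader thread brackets its lifetime
+ * with rcu_register_thread()/rcu_unregister_thread(). gp_foo is a
+ * hypothetical RCU-protected pointer.
+ *
+ *	rcu_register_thread();
+ *	...
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp_foo);
+ *	rcu_read_unlock();
+ *	...
+ *	rcu_unregister_thread();
+ */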
+
+/*
+ * This is a helper function for _rcu_read_unlock().
+ *
+ * The first smp_mb_slave() call ensures that the critical section is
+ * seen to precede the store to rcu_reader.ctr.
+ * The second smp_mb_slave() call ensures that we write to rcu_reader.ctr
+ * before reading the update-side futex.
+ */
+static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
+{
if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
smp_mb_slave(RCU_MB_GROUP);
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
- /* write URCU_TLS(rcu_reader).ctr before read futex */
smp_mb_slave(RCU_MB_GROUP);
wake_up_gp();
- } else {
+ } else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
- }
+}
+
+/*
+ * Exit an RCU read-side critical section. Both this function and its
+ * helper are less than 10 lines of code, and are intended to be
+ * usable by non-LGPL code, as called out in LGPL.
+ */
+static inline void _rcu_read_unlock(void)
+{
+ unsigned long tmp;
+
+ tmp = URCU_TLS(rcu_reader).ctr;
+ _rcu_read_unlock_update_and_wakeup(tmp);
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
}