DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif
-/*
- * Global grace period counter.
- * Contains the current RCU_GP_CTR_PHASE.
- * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
- * Written to only by writer with mutex taken. Read by both writer and readers.
- */
-unsigned long rcu_gp_ctr = RCU_GP_COUNT;
+struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
/*
* Wait for each thread URCU_TLS(rcu_reader).ctr to either
* indicate quiescence (not nested), or observe the current
- * rcu_gp_ctr value.
+ * rcu_gp.ctr value.
*/
for (;;) {
wait_loops++;
cmm_smp_mb();
/* Switch parity: 0 -> 1, 1 -> 0 */
- CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+ CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);
/*
 * Must commit qparity update to memory before waiting for other parity
 * quiescent state. Failure to do so could result in the writer waiting
 * forever while new readers are always accessing data (no progress).
 */
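The parity flip above is the heart of the two-phase grace period: a reader that snapshots rcu_gp.ctr after the flip carries the new phase bit and is ignored by the current wait pass, and synchronize_rcu() runs the flip-and-wait sequence twice so readers spanning a single flip are still caught. Below is a minimal standalone sketch of the idea, with C11 seq_cst atomics standing in for the CMM_* accessors and cmm_smp_mb() barriers, and a hypothetical registry list; it is an illustration of the scheme, not liburcu's actual code.

#include <sched.h>
#include <stdatomic.h>

#define GP_COUNT     (1UL << 0)
#define GP_PHASE     (1UL << 32)           /* assumed 64-bit layout */
#define GP_NEST_MASK (GP_PHASE - 1)

struct reader { _Atomic unsigned long ctr; struct reader *next; };

static _Atomic unsigned long gp_ctr = GP_COUNT;
static struct reader *registry;            /* hypothetical reader list */

/* Wait until every registered reader is quiescent or in the new phase. */
static void wait_for_readers(void)
{
	struct reader *r;

	for (r = registry; r; r = r->next) {
		for (;;) {
			unsigned long v = atomic_load(&r->ctr);

			if (!(v & GP_NEST_MASK))
				break;  /* not in a read-side critical section */
			if (!((v ^ atomic_load(&gp_ctr)) & GP_PHASE))
				break;  /* already observed the new phase */
			sched_yield();  /* pre-existing reader: keep waiting */
		}
	}
}

static void synchronize_sketch(void)
{
	/* Switch parity: readers arriving from here on carry the new phase. */
	atomic_fetch_xor(&gp_ctr, GP_PHASE);
	wait_for_readers();
	/* Flip and wait a second time, as the real synchronize_rcu() does. */
	atomic_fetch_xor(&gp_ctr, GP_PHASE);
	wait_for_readers();
}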
#define rcu_exit rcu_exit_bp
#define synchronize_rcu synchronize_rcu_bp
#define rcu_reader rcu_reader_bp
-#define rcu_gp_ctr rcu_gp_ctr_bp
-#define rcu_gp_futex rcu_gp_futex_bp /* unused */
+#define rcu_gp rcu_gp_bp
#define get_cpu_call_rcu_data get_cpu_call_rcu_data_bp
#define get_call_rcu_thread get_call_rcu_thread_bp
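This block of defines is how liburcu compiles one set of sources into several flavors: each public identifier is remapped to a _bp-suffixed symbol before compilation, so urcu-bp objects can coexist in a process with other flavors. A toy illustration of the technique, using hypothetical names rather than urcu's:

/* flavor_bp.h: remap generic names to _bp-suffixed symbols. */
#define my_gp        my_gp_bp
#define my_read_lock my_read_lock_bp

/* impl.c, compiled once per flavor with the matching header included. */
struct my_gp_state { unsigned long ctr; };
struct my_gp_state my_gp;   /* emitted in the object file as my_gp_bp */

void my_read_lock(void)     /* emitted as my_read_lock_bp */
{
	/* ... */
}

Note that the rcu_gp_futex mapping, already marked unused in the bp flavor, is dropped outright rather than left aliasing a symbol that no longer exists.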
extern void rcu_bp_register(void);
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern unsigned long rcu_gp_ctr;
+struct rcu_gp {
+ /*
+ * Global grace period counter.
+ * Contains the current RCU_GP_CTR_PHASE.
+ * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
+ * Written to only by writer with mutex taken.
+ * Read by both writer and readers.
+ */
+ unsigned long ctr;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+extern struct rcu_gp rcu_gp;
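The struct wrapper buys two things. First, grace-period state now travels under one name, leaving room for related fields (such as the grace-period futex in flavors that use one) without adding new globals. Second, the CAA_CACHE_LINE_SIZE alignment gives the writer-updated counter its own cache line, avoiding false sharing with whatever global the linker would otherwise place next to it. A small sketch of the alignment effect, assuming a 64-byte line (CAA_CACHE_LINE_SIZE is really derived per architecture):

#include <assert.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64   /* assumed; urcu defines this per-arch */

struct gp {
	unsigned long ctr;
} __attribute__((aligned(CACHE_LINE_SIZE)));

struct gp gp_state;
unsigned long unrelated_global;   /* cannot share gp_state's cache line */

int main(void)
{
	/* The aligned attribute also pads sizeof out to a full line. */
	static_assert(sizeof(struct gp) == CACHE_LINE_SIZE, "one full line");
	printf("sizeof=%zu alignof=%zu\n",
	       sizeof(struct gp), _Alignof(struct gp));
	return 0;
}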
struct rcu_reader {
/* Data used by both reader and synchronize_rcu() */
v = CMM_LOAD_SHARED(*ctr);
if (!(v & RCU_GP_CTR_NEST_MASK))
return RCU_READER_INACTIVE;
- if (!((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE))
+ if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
return RCU_READER_ACTIVE_CURRENT;
return RCU_READER_ACTIVE_OLD;
}
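The two tests above depend on the counter layout: the low half of the word holds the rcu_read_lock() nesting count, and the single bit above it holds the phase. Mirroring the RCU_GP_* constants from urcu's static headers (the phase-bit expression below matches them; values shown in the comments assume a 64-bit long), here is a standalone illustration of the classification:

#include <stdio.h>

#define RCU_GP_COUNT         (1UL << 0)
#define RCU_GP_CTR_PHASE     (1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)

enum rcu_state {
	RCU_READER_ACTIVE_CURRENT,
	RCU_READER_ACTIVE_OLD,
	RCU_READER_INACTIVE,
};

static enum rcu_state classify(unsigned long reader_ctr, unsigned long gp_ctr)
{
	if (!(reader_ctr & RCU_GP_CTR_NEST_MASK))
		return RCU_READER_INACTIVE;        /* nesting count is zero */
	if (!((reader_ctr ^ gp_ctr) & RCU_GP_CTR_PHASE))
		return RCU_READER_ACTIVE_CURRENT;  /* same phase as the writer */
	return RCU_READER_ACTIVE_OLD;              /* stuck in the old phase */
}

int main(void)
{
	unsigned long gp = RCU_GP_COUNT | RCU_GP_CTR_PHASE;  /* after a flip */

	printf("%d\n", classify(0UL, gp));            /* RCU_READER_INACTIVE */
	printf("%d\n", classify(RCU_GP_COUNT, gp));   /* RCU_READER_ACTIVE_OLD */
	printf("%d\n", classify(gp, gp));             /* RCU_READER_ACTIVE_CURRENT */
	return 0;
}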
/*
- * Helper for _rcu_read_lock(). The format of rcu_gp_ctr (as well as
+ * Helper for _rcu_read_lock(). The format of rcu_gp.ctr (as well as
* the per-thread rcu_reader.ctr) has the upper bits containing a count of
* _rcu_read_lock() nesting, and a lower-order bit that contains either zero
* or RCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the accesses in
* _rcu_read_lock() happen before the subsequent read-side critical section.
*/
static inline void _rcu_read_lock_update(unsigned long tmp)
{
if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ _CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
cmm_smp_mb();
} else
_CMM_STORE_SHARED(URCU_TLS(rcu_reader)->ctr, tmp + RCU_GP_COUNT);
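For context, this helper slots into the read-lock fast path as follows: the outermost rcu_read_lock() snapshots the global counter, inheriting the current phase plus a nest count of one (which is what the RCU_GP_COUNT offset buys), and only then issues the barrier; nested calls merely bump the nesting count. A hedged sketch with C11 atomics and a thread-local counter standing in for urcu's primitives:

#include <stdatomic.h>

#define GP_COUNT     (1UL << 0)
#define GP_NEST_MASK ((1UL << 32) - 1)     /* assumed 64-bit layout */

static _Atomic unsigned long gp_ctr = GP_COUNT;
static _Thread_local _Atomic unsigned long reader_ctr;

static inline void read_lock_sketch(void)
{
	unsigned long tmp = atomic_load_explicit(&reader_ctr,
						 memory_order_relaxed);

	if (!(tmp & GP_NEST_MASK)) {
		/* Outermost lock: snapshot global phase + count of one. */
		unsigned long gp = atomic_load_explicit(&gp_ctr,
							memory_order_relaxed);

		atomic_store_explicit(&reader_ctr, gp, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst); /* smp_mb() stand-in */
	} else {
		/* Nested lock: only bump the nesting count. */
		atomic_store_explicit(&reader_ctr, tmp + GP_COUNT,
				      memory_order_relaxed);
	}
}

static inline void read_unlock_sketch(void)
{
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_sub_explicit(&reader_ctr, GP_COUNT, memory_order_relaxed);
}

Only the outermost lock pays for the snapshot and barrier; the nested branch is the common case the caa_likely() hint in _rcu_read_lock_update() optimizes for.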