* Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
* Written to only by writer with mutex taken. Read by both writer and readers.
*/
-long rcu_gp_ctr = RCU_GP_COUNT;
+unsigned long rcu_gp_ctr = RCU_GP_COUNT;
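Moving the counter from long to unsigned long also makes wrap-around of the nesting count well-defined: overflow of a signed long is undefined behaviour in C, while unsigned arithmetic wraps modulo a power of two. As a reading aid (not part of the patch), here is a minimal standalone sketch of the counter layout, with the RCU_GP_* constants mirroring their definitions in urcu/static/urcu-bp.h: the phase bit sits in the upper half of the word, everything below it is the nesting count, and the RCU_GP_COUNT of 1 pre-added to rcu_gp_ctr means an outermost reader gets a nonzero nest field just by copying the global counter.

	#include <stdio.h>

	/* Mirrors the definitions in urcu/static/urcu-bp.h. */
	#define RCU_GP_COUNT		(1UL << 0)
	#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
	#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)

	int main(void)
	{
		unsigned long gp = RCU_GP_COUNT;	/* global counter, count of 1 pre-added */
		unsigned long ctr;

		ctr = gp;		/* outermost rcu_read_lock(): snapshot gp */
		ctr += RCU_GP_COUNT;	/* nested rcu_read_lock(): plain increment */

		printf("nesting=%lu phase=%d\n",
			ctr & RCU_GP_CTR_NEST_MASK,	/* 2 */
			!!(ctr & RCU_GP_CTR_PHASE));	/* 0 */
		return 0;
	}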
/*
* Pointer to registry elements. Written to only by each individual reader. Read
urcu_die(ret);
}
-static void wait_for_readers(void)
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
{
- CDS_LIST_HEAD(qsreaders);
int wait_loops = 0;
struct rcu_reader *index, *tmp;
*/
for (;;) {
wait_loops++;
- cds_list_for_each_entry_safe(index, tmp, &registry, node) {
- if (!rcu_old_gp_ongoing(&index->ctr))
- cds_list_move(&index->node, &qsreaders);
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (rcu_reader_state(&index->ctr)) {
+ case RCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case RCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case RCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
}
- if (cds_list_empty(&registry)) {
+ if (cds_list_empty(input_readers)) {
break;
} else {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
	usleep(RCU_SLEEP_DELAY);
else
	caa_cpu_relax();
}
}
- /* put back the reader list in the registry */
- cds_list_splice(&qsreaders, &registry);
}
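Passing the three lists in from the caller replaces the old behaviour where wait_for_readers() kept a private qsreaders list and spliced it back itself: the caller now decides what happens to readers seen with a current snapshot, so the second phase only has to re-scan those. A toy sketch of the partitioning a single pass performs, assuming only liburcu's <urcu/list.h> (struct toy_reader and its ctr encoding are hypothetical stand-ins for struct rcu_reader):

	#include <stdio.h>
	#include <urcu/list.h>

	struct toy_reader {
		unsigned long ctr;		/* 0 here means quiescent */
		struct cds_list_head node;
	};

	int main(void)
	{
		CDS_LIST_HEAD(input_readers);
		CDS_LIST_HEAD(cur_snap_readers);
		CDS_LIST_HEAD(qsreaders);
		struct toy_reader idle = { .ctr = 0 };	/* outside any read-side section */
		struct toy_reader busy = { .ctr = 1 };	/* active, current parity */
		struct toy_reader *p, *tmp;

		cds_list_add(&idle.node, &input_readers);
		cds_list_add(&busy.node, &input_readers);

		/* One phase-1 pass: partition readers by state. */
		cds_list_for_each_entry_safe(p, tmp, &input_readers, node) {
			if (!p->ctr)
				cds_list_move(&p->node, &qsreaders);
			else
				cds_list_move(&p->node, &cur_snap_readers);
		}
		printf("input empty=%d\n", cds_list_empty(&input_readers));
		/* Phase 2 re-scans cur_snap_readers after the parity flip;
		 * the caller finally splices qsreaders back into the registry. */
		return 0;
	}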
void synchronize_rcu(void)
{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
sigset_t newmask, oldmask;
int ret;
/*
* Wait for readers to observe original parity or be quiescent.
*/
- wait_for_readers();
+ wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
/*
* Adding a cmm_smp_mb() which is _not_ formally required, but makes the
/*
* Wait for readers to observe new parity or be quiescent.
*/
- wait_for_readers();
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, &registry);
/*
* Finish waiting for reader threads before letting the old ptr being
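Handing cur_snap_readers to the second wait is sound because the parity flip in between inverts the meaning of those snapshots: a reader observed as RCU_READER_ACTIVE_CURRENT before the flip is, by definition, RCU_READER_ACTIVE_OLD after it, so it must either advance to the new parity or go quiescent before the grace period can end. A self-contained illustration of that inversion (the constant mirrors urcu/static/urcu-bp.h):

	#include <stdio.h>

	#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))

	int main(void)
	{
		unsigned long rcu_gp_ctr = 1;		/* count of 1 pre-added */
		unsigned long snap = rcu_gp_ctr;	/* seen ACTIVE_CURRENT in phase 1 */

		printf("old before flip? %d\n",
			!!((snap ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE));	/* 0 */
		rcu_gp_ctr ^= RCU_GP_CTR_PHASE;		/* writer switches parity */
		printf("old after flip?  %d\n",
			!!((snap ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE));	/* 1 */
		return 0;
	}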
#define rcu_assert(args...)
#endif
+enum rcu_state {
+ RCU_READER_ACTIVE_CURRENT,
+ RCU_READER_ACTIVE_OLD,
+ RCU_READER_INACTIVE,
+};
+
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
* Using an int rather than a char to eliminate false register dependencies
* causing stalls on some architectures.
*/
-extern long rcu_gp_ctr;
+extern unsigned long rcu_gp_ctr;
struct rcu_reader {
/* Data used by both reader and synchronize_rcu() */
- long ctr;
+ unsigned long ctr;
/* Data used for registry */
struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
pthread_t tid;
*/
extern DECLARE_URCU_TLS(struct rcu_reader *, rcu_reader);
-static inline int rcu_old_gp_ongoing(long *value)
+static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
{
- long v;
+ unsigned long v;
- if (value == NULL)
- return 0;
+ if (ctr == NULL)
+ return RCU_READER_INACTIVE;
/*
- * Make sure both tests below are done on the same version of *value
- * to insure consistency.
+ * Make sure both tests below are done on the same version of *ctr
+ * to ensure consistency.
*/
- v = CMM_LOAD_SHARED(*value);
- return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+ v = CMM_LOAD_SHARED(*ctr);
+ if (!(v & RCU_GP_CTR_NEST_MASK))
+ return RCU_READER_INACTIVE;
+ if (!((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE))
+ return RCU_READER_ACTIVE_CURRENT;
+ return RCU_READER_ACTIVE_OLD;
}
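For reference, a hypothetical standalone re-implementation of the classifier, exercising all three states; gp stands in for the global rcu_gp_ctr, and the CMM_LOAD_SHARED() volatile access is replaced by a plain load since there is no concurrency in this toy:

	#include <stdio.h>

	enum rcu_state {
		RCU_READER_ACTIVE_CURRENT,
		RCU_READER_ACTIVE_OLD,
		RCU_READER_INACTIVE,
	};

	#define PHASE	(1UL << (sizeof(unsigned long) << 2))
	#define NEST	(PHASE - 1)

	static enum rcu_state reader_state(unsigned long gp, const unsigned long *ctr)
	{
		unsigned long v;

		if (ctr == NULL)
			return RCU_READER_INACTIVE;
		v = *ctr;			/* real code: CMM_LOAD_SHARED(*ctr) */
		if (!(v & NEST))
			return RCU_READER_INACTIVE;	/* not in a read-side section */
		if (!((v ^ gp) & PHASE))
			return RCU_READER_ACTIVE_CURRENT; /* snapshot matches parity */
		return RCU_READER_ACTIVE_OLD;		/* must be waited for */
	}

	int main(void)
	{
		unsigned long gp = 1 | PHASE;	/* writer has flipped parity once */
		unsigned long idle = 0;		/* nest count 0 */
		unsigned long cur = gp;		/* snapshot of the current gp */
		unsigned long old = 1;		/* snapshot taken before the flip */

		printf("idle=%d cur=%d old=%d\n",
			reader_state(gp, &idle),	/* 2: INACTIVE */
			reader_state(gp, &cur),		/* 0: ACTIVE_CURRENT */
			reader_state(gp, &old));	/* 1: ACTIVE_OLD */
		return 0;
	}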
/*
*/
static inline void _rcu_read_lock(void)
{
- long tmp;
+ unsigned long tmp;
if (caa_unlikely(!URCU_TLS(rcu_reader)))
rcu_bp_register(); /* If not yet registered. */