static
void __attribute__((destructor)) rcu_bp_exit(void);
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held for the whole duration of the grace-period
+ * wait: it is periodically released between iterations over the
+ * registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;
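For reference, the writer-side nesting of the two locks ends up looking roughly like the sketch below. This is a standalone illustration of the locking discipline introduced by this patch (using plain pthread calls rather than the file-local mutex_lock() helper), not part of the patch itself:

#include <pthread.h>

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch of synchronize_rcu()'s lock nesting: rcu_gp_lock is taken
 * first, rcu_registry_lock nests inside it and is the only one
 * dropped while waiting for readers. */
static void writer_locking_sketch(void)
{
	pthread_mutex_lock(&rcu_gp_lock);	/* serialize grace periods */
	pthread_mutex_lock(&rcu_registry_lock);	/* protect registry iteration */

	/* ... snapshot registry, flip parity; wait_for_readers()
	 * transiently drops and re-takes rcu_registry_lock between
	 * iterations so readers can (un)register meanwhile ... */

	pthread_mutex_unlock(&rcu_registry_lock);
	pthread_mutex_unlock(&rcu_gp_lock);	/* release in reverse order */
}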
urcu_die(ret);
}
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and re-acquires it. Holds the lock when it returns.
+ */
static void wait_for_readers(struct cds_list_head *input_readers,
struct cds_list_head *cur_snap_readers,
struct cds_list_head *qsreaders)
if (cds_list_empty(input_readers)) {
break;
} else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
else
caa_cpu_relax();
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
}
}
}
mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+
if (cds_list_empty(&registry))
goto out;
/*
* Wait for readers to observe original parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
/*
* Wait for readers to observe new parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
*/
cmm_smp_mb();
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
*/
rcu_bp_init();
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
add_thread();
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
end:
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
if (ret)
if (ret)
abort();
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
remove_thread(rcu_reader_reg);
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
if (ret)
abort();
}
/*
- * Holding the rcu_gp_lock across fork will make sure we fork() don't race with
- * a concurrent thread executing with this same lock held. This ensures that the
- * registry is in a coherent state in the child.
+ * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
+ * sure fork() does not race with a concurrent thread executing with
+ * either of those locks held. This ensures that the registry and the
+ * data protected by rcu_gp_lock are in a coherent state in the child.
*/
void rcu_bp_before_fork(void)
{
ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
assert(!ret);
mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
saved_fork_signal_mask = oldmask;
}
int ret;
oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
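Applications that fork() without exec() can wire these handlers up once with pthread_atfork(); their void (*)(void) signatures match what pthread_atfork() expects. A minimal application-side sketch, assuming the rcu_bp_before_fork() / rcu_bp_after_fork_parent() / rcu_bp_after_fork_child() prototypes exposed by urcu-bp.h:

#include <pthread.h>
#include <stdlib.h>
#include <urcu-bp.h>

/* Call once, e.g. early in main(), before any fork(). */
static void install_rcu_bp_fork_handlers(void)
{
	if (pthread_atfork(rcu_bp_before_fork,
			rcu_bp_after_fork_parent,
			rcu_bp_after_fork_child))
		abort();
}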
/*
* Prune all entries from registry except our own thread. Fits the Linux
- * fork behavior. Called with rcu_gp_lock held.
+ * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
*/
static
void urcu_bp_prune_registry(void)
urcu_bp_prune_registry();
oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
assert(!ret);
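To illustrate the class of hang this lock split addresses, consider a hypothetical urcu-bp program where a thread is joined from inside a read-side critical section while another thread runs synchronize_rcu(). With a single rcu_gp_lock held across the whole grace period, the exiting thread's unregistration would block behind the writer, which in turn cannot finish while the joiner sits in its read-side critical section; with rcu_registry_lock dropped between wait iterations, unregistration can proceed and the join returns. A sketch (error handling elided):

#include <pthread.h>
#include <urcu-bp.h>

static void *short_lived(void *arg)
{
	rcu_read_lock();	/* urcu-bp registers this thread lazily */
	rcu_read_unlock();
	return NULL;		/* unregistration happens at thread exit */
}

static void *joiner(void *arg)
{
	pthread_t t;

	rcu_read_lock();	/* join issued inside a read-side C.S. */
	pthread_create(&t, NULL, short_lived, NULL);
	pthread_join(t, NULL);
	rcu_read_unlock();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, joiner, NULL);
	synchronize_rcu();	/* must make progress despite the join */
	pthread_join(t, NULL);
	return 0;
}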
void __attribute__((destructor)) rcu_exit(void);
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held for the whole duration of the grace-period
+ * wait: it is periodically released between iterations over the
+ * registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct rcu_gp rcu_gp = { .ctr = RCU_GP_ONLINE };
/*
NULL, NULL, 0);
}
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and re-acquires it. Holds the lock when it returns.
+ */
static void wait_for_readers(struct cds_list_head *input_readers,
struct cds_list_head *cur_snap_readers,
struct cds_list_head *qsreaders)
}
break;
} else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
wait_gp();
} else {
cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
}
}
}
*/
urcu_move_waiters(&waiters, &gp_waiters);
+ mutex_lock(&rcu_registry_lock);
+
if (cds_list_empty(&registry))
goto out;
/*
* Wait for readers to observe original parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
/*
* Wait for readers to observe new parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
*/
cds_list_splice(&qsreaders, &registry);
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
urcu_wake_all_waiters(&waiters);
gp_end:
*/
urcu_move_waiters(&waiters, &gp_waiters);
+ mutex_lock(&rcu_registry_lock);
+
if (cds_list_empty(&registry))
goto out;
/*
* Wait for readers to observe new count or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&registry, NULL, &qsreaders);
*/
cds_list_splice(&qsreaders, &registry);
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
urcu_wake_all_waiters(&waiters);
gp_end:
URCU_TLS(rcu_reader).tid = pthread_self();
assert(URCU_TLS(rcu_reader).ctr == 0);
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
_rcu_thread_online();
}
* with a waiting writer.
*/
_rcu_thread_offline();
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
cds_list_del(&URCU_TLS(rcu_reader).node);
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
}
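For context, these registration paths frame the QSBR reader lifecycle: a thread registers once (briefly taking rcu_registry_lock), periodically announces quiescent states, and unregisters on exit. An application-side sketch:

#include <urcu-qsbr.h>

static void *qsbr_reader(void *arg)
{
	rcu_register_thread();	/* briefly takes rcu_registry_lock */

	rcu_read_lock();	/* a no-op marker in the QSBR flavor */
	/* ... read RCU-protected data ... */
	rcu_read_unlock();

	rcu_quiescent_state();	/* report a quiescent state to writers */

	rcu_unregister_thread();	/* briefly takes rcu_registry_lock */
	return NULL;
}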
void rcu_exit(void)
void __attribute__((destructor)) rcu_exit(void);
#endif
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held for the whole duration of the grace-period
+ * wait: it is periodically released between iterations over the
+ * registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
/*
NULL, NULL, 0);
}
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and re-acquires it. Holds the lock when it returns.
+ */
static void wait_for_readers(struct cds_list_head *input_readers,
struct cds_list_head *cur_snap_readers,
struct cds_list_head *qsreaders)
}
break;
} else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
wait_gp();
else
caa_cpu_relax();
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
}
#else /* #ifndef HAS_INCOHERENT_CACHES */
/*
smp_mb_master(RCU_MB_GROUP);
wait_gp_loops = 0;
}
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
wait_gp();
wait_gp_loops++;
} else {
caa_cpu_relax();
}
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
}
*/
urcu_move_waiters(&waiters, &gp_waiters);
+ mutex_lock(&rcu_registry_lock);
+
if (cds_list_empty(&registry))
goto out;
- /* All threads should read qparity before accessing data structure
- * where new ptr points to. Must be done within rcu_gp_lock because it
- * iterates on reader threads.*/
+ /*
+ * All threads should read qparity before accessing the data structure
+ * pointed to by the new ptr. Must be done within rcu_registry_lock
+ * because it iterates on reader threads.
+ */
/* Write new ptr before changing the qparity */
smp_mb_master(RCU_MB_GROUP);
/*
* Wait for readers to observe original parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
/*
* Wait for readers to observe new parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
*/
wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
*/
cds_list_splice(&qsreaders, &registry);
- /* Finish waiting for reader threads before letting the old ptr being
- * freed. Must be done within rcu_gp_lock because it iterates on reader
- * threads. */
+ /*
+ * Finish waiting for reader threads before letting the old ptr
+ * be freed. Must be done within rcu_registry_lock because it
+ * iterates on reader threads.
+ */
smp_mb_master(RCU_MB_GROUP);
out:
+ mutex_unlock(&rcu_registry_lock);
mutex_unlock(&rcu_gp_lock);
/*
assert(URCU_TLS(rcu_reader).need_mb == 0);
assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
rcu_init(); /* In case gcc does not support constructor attribute */
cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
- mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
cds_list_del(&URCU_TLS(rcu_reader).node);
- mutex_unlock(&rcu_gp_lock);
+ mutex_unlock(&rcu_registry_lock);
}
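With this change, a reader thread only contends on rcu_registry_lock when entering and leaving the registry, so registration is no longer delayed by a writer waiting out a grace period. A reader lifecycle sketch for this flavor (struct cfg and current_cfg are hypothetical):

#include <urcu.h>

struct cfg { int value; };
static struct cfg *current_cfg;	/* hypothetical RCU-protected pointer */

static void *reader(void *arg)
{
	struct cfg *c;

	rcu_register_thread();	/* serialized by rcu_registry_lock only */

	rcu_read_lock();
	c = rcu_dereference(current_cfg);
	if (c)
		(void) c->value;	/* use the consistent snapshot */
	rcu_read_unlock();

	rcu_unregister_thread();	/* likewise rcu_registry_lock only */
	return NULL;
}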
#ifdef RCU_MEMBARRIER
* rcu_init constructor. Called when the library is linked, but also when
* reader threads are calling rcu_register_thread().
* Should only be called by a single thread at a given time. This is ensured by
- * holing the rcu_gp_lock from rcu_register_thread() or by running at library
- * load time, which should not be executed by multiple threads nor concurrently
- * with rcu_register_thread() anyway.
+ * holding the rcu_registry_lock from rcu_register_thread() or by running
+ * at library load time, which should not be executed by multiple
+ * threads nor concurrently with rcu_register_thread() anyway.
*/
void rcu_init(void)
{