static __thread struct call_rcu_data *thread_call_rcu_data;
-/* Guard call_rcu thread creation. */
-
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is default. */
cmm_smp_mb();
}
for (;;) {
+ if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+ /*
+ * Pause requested. Become quiescent: remove
+ * ourselves from all global lists and don't
+ * process any callbacks. The callback lists may
+ * still be non-empty though.
+ */
+ rcu_unregister_thread();
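+ /*
+ * Order the unregistration above before setting
+ * URCU_CALL_RCU_PAUSED, so the forking thread only
+ * observes PAUSED once this thread has quiesced.
+ */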
+ cmm_smp_mb__before_uatomic_or();
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
+ poll(NULL, 0, 1);
+ rcu_register_thread();
+ }
+
if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
poll(NULL, 0, 1);
/*
* Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state. Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
* Suitable for pthread_atfork() and friends.
*/
void call_rcu_before_fork(void)
{
+ struct call_rcu_data *crdp;
+
call_rcu_lock(&call_rcu_mutex);
+
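+ /*
+ * Ask every call_rcu thread to pause first, then wait for
+ * each of them to acknowledge with URCU_CALL_RCU_PAUSED, so
+ * they all quiesce concurrently rather than one at a time.
+ */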
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+ cmm_smp_mb__after_uatomic_or();
+ wake_call_rcu_thread(crdp);
+ }
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+ poll(NULL, 0, 1);
+ }
}
/*
*/
void call_rcu_after_fork_parent(void)
{
+ struct call_rcu_data *crdp;
+
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+ uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
call_rcu_unlock(&call_rcu_mutex);
}
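/*
 * Usage sketch, for illustration only: an application that forks
 * without exec'ing in the child can register these handlers with
 * pthread_atfork(). The registration function name below is
 * hypothetical, and the include assumes the default urcu flavor.
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

static void example_register_call_rcu_atfork(void)
{
	/* prepare / parent / child handlers, in pthread_atfork() order */
	if (pthread_atfork(call_rcu_before_fork,
			call_rcu_after_fork_parent,
			call_rcu_after_fork_child) != 0)
		abort();
}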
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
thread_call_rcu_data = NULL;
- /* Dispose of all of the rest of the call_rcu_data structures. */
+ /*
+ * Dispose of all of the rest of the call_rcu_data structures.
+ * Leftover call_rcu callbacks will be merged into the new
+ * default call_rcu thread queue.
+ */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
if (crdp == default_call_rcu_data)
continue;
/* Flag values. */
-#define URCU_CALL_RCU_RT 0x1
-#define URCU_CALL_RCU_RUNNING 0x2
-#define URCU_CALL_RCU_STOP 0x4
-#define URCU_CALL_RCU_STOPPED 0x8
+#define URCU_CALL_RCU_RT (1U << 0)
+#define URCU_CALL_RCU_RUNNING (1U << 1)
+#define URCU_CALL_RCU_STOP (1U << 2)
+#define URCU_CALL_RCU_STOPPED (1U << 3)
+#define URCU_CALL_RCU_PAUSE (1U << 4)
+#define URCU_CALL_RCU_PAUSED (1U << 5)
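+/*
+ * URCU_CALL_RCU_PAUSE is set by call_rcu_before_fork() to request that a
+ * call_rcu thread quiesce; URCU_CALL_RCU_PAUSED is set by the call_rcu
+ * thread to acknowledge that it has done so.
+ */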
/*
* The rcu_head data structure is placed in the structure to be freed