+ wake_call_rcu_thread(crdp);
+ while ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0)
+ poll(NULL, 0, 1);
+ }
+ if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
+ while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
+ poll(NULL, 0, 1);
+ _CMM_STORE_SHARED(crdp->cbs.head, NULL);
+ cbs_tail = (struct cds_wfq_node **)
+ uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+ cbs_endprev = (struct cds_wfq_node **)
+ uatomic_xchg(&default_call_rcu_data, cbs_tail);
+ *cbs_endprev = cbs;
+ uatomic_add(&default_call_rcu_data->qlen,
+ uatomic_read(&crdp->qlen));
+ cds_list_del(&crdp->list);
+ free(crdp);
+ }
+}
+
+/*
+ * Clean up all the per-CPU call_rcu threads.
+ */
+void free_all_cpu_call_rcu_data(void)
+{
+ int cpu;
+ struct call_rcu_data *crdp;
+
+ /*
+ * maxcpus <= 0 means the per-CPU call_rcu machinery was never
+ * initialized, so there is nothing to tear down.
+ */
+ if (maxcpus <= 0)
+ return;
+ for (cpu = 0; cpu < maxcpus; cpu++) {
+ /* Skip CPUs that never had a call_rcu_data assigned. */
+ crdp = get_cpu_call_rcu_data(cpu);
+ if (crdp == NULL)
+ continue;
+ /*
+ * Clear the per-CPU slot first so no new callbacks get
+ * queued onto this structure, then stop its thread and
+ * release it via call_rcu_data_free().
+ */
+ set_cpu_call_rcu_data(cpu, NULL);
+ call_rcu_data_free(crdp);
+ }
+}
+
+/*
+ * Clean up call_rcu data structures in the child of a successful fork()
+ * that is not followed by exec().
+ */
+void call_rcu_after_fork_child(void)
+{
+ struct call_rcu_data *crdp;
+
+ /*
+ * Allocate a new default call_rcu_data structure in order
+ * to get a working call_rcu thread to go with it.
+ */
+ default_call_rcu_data = NULL;
+ (void)get_default_call_rcu_data();
+
+ /* Dispose of all of the rest of the call_rcu_data structures. */
+ while (call_rcu_data_list.next != call_rcu_data_list.prev) {
+ crdp = cds_list_entry(call_rcu_data_list.prev,
+ struct call_rcu_data, list);
+ if (crdp == default_call_rcu_data)
+ crdp = cds_list_entry(crdp->list.prev,
+ struct call_rcu_data, list);
+ crdp->flags = URCU_CALL_RCU_STOPPED;
+ call_rcu_data_free(crdp);