*/
for (;;) {
wait_loops++;
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_dec(&gp_futex);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_set(&gp_futex, -1);
+ /*
+ * Write futex before write waiting (the other side
+ * reads them in the opposite order).
+ */
+ cmm_smp_wmb();
+ cds_list_for_each_entry(index, &registry, node) {
+ _CMM_STORE_SHARED(index->waiting, 1);
+ }
/* Write futex before read reader_gp */
cmm_smp_mb();
}
-
cds_list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_gp_ongoing(&index->ctr))
cds_list_move(&index->node, &qsreaders);
}
if (cds_list_empty(&registry)) {
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
cmm_smp_mb();
uatomic_set(&gp_futex, 0);
}
break;
} else {
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
wait_gp();
} else {
#ifndef HAS_INCOHERENT_CACHES
unsigned long ctr;
/* Data used for registry */
struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+ int waiting;
pthread_t tid;
};
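
For context (not part of the patch): the handshake that the new per-reader waiting field enables can be modelled in a few dozen lines. The sketch below is illustrative only; it uses raw Linux futexes and GCC __atomic builtins in place of liburcu's uatomic/_CMM_* accessors, the SEQ_CST fences stand in for cmm_smp_wmb()/cmm_smp_mb(), and the names reader_waiting, reader_quiescent, waiter() and reader() are invented for this example. The waiter arms gp_futex, then the reader's waiting flag, then blocks; the reader, on reaching a quiescent state, clears its flag and issues at most one FUTEX_WAKE, and only if the waiter is still armed.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

static int gp_futex;		/* -1 while the waiter intends to block */
static int reader_waiting;	/* per-reader flag: the waiter wants a wakeup */
static int reader_quiescent;	/* stands in for the reader's counter catching up */

static void futex_wait(int *uaddr, int val)
{
	syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}

static void futex_wake_one(int *uaddr)
{
	syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
}

/* Waiter side: arm the futex, then the waiting flag, then block. */
static void *waiter(void *unused)
{
	__atomic_store_n(&gp_futex, -1, __ATOMIC_RELAXED);
	/* Write futex before write waiting (reader reads them the other way). */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	__atomic_store_n(&reader_waiting, 1, __ATOMIC_RELAXED);
	/* Write waiting before read reader state (Dekker-style pairing). */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	while (!__atomic_load_n(&reader_quiescent, __ATOMIC_RELAXED))
		futex_wait(&gp_futex, -1);	/* kernel rechecks gp_futex == -1 */

	__atomic_store_n(&gp_futex, 0, __ATOMIC_RELAXED);
	printf("grace period over\n");
	return NULL;
}

/* Reader side: mirrors the new wake_up_gp(): wake only if asked, only once. */
static void *reader(void *unused)
{
	usleep(1000);	/* pretend to run for a while */
	__atomic_store_n(&reader_quiescent, 1, __ATOMIC_RELAXED);
	/* Write reader state before read waiting (pairs with the waiter). */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (__atomic_load_n(&reader_waiting, __ATOMIC_RELAXED)) {
		__atomic_store_n(&reader_waiting, 0, __ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		if (__atomic_load_n(&gp_futex, __ATOMIC_RELAXED) == -1) {
			__atomic_store_n(&gp_futex, 0, __ATOMIC_RELAXED);
			futex_wake_one(&gp_futex);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

In the patch itself, the same Dekker-style pairing is what the ordering comments refer to: "Write futex before write waiting" on the updater side, and the cmm_smp_mb() between clearing waiting and re-reading gp_futex in wake_up_gp() on the reader side.
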
*/
static inline void wake_up_gp(void)
{
- if (unlikely(uatomic_read(&gp_futex) == -1)) {
+ if (unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
+ _CMM_STORE_SHARED(rcu_reader.waiting, 0);
+ cmm_smp_mb();
+ if (uatomic_read(&gp_futex) != -1)
+ return;
uatomic_set(&gp_futex, 0);
futex_noasync(&gp_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);