int compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
- int ret, gret = 0;
+ int ret, unlockret;
/*
* Check if NULL. Don't let users expect that they are taken into
* account.
*/
cmm_smp_mb();
ret = pthread_mutex_lock(&__urcu_compat_futex_lock);
- assert(!ret);
+ if (ret) {
+ errno = ret;
+ ret = -1;
+ goto end;
+ }
switch (op) {
case FUTEX_WAIT:
/*
pthread_cond_broadcast(&__urcu_compat_futex_cond);
break;
default:
- gret = -EINVAL;
+ errno = EINVAL;
+ ret = -1;
}
- ret = pthread_mutex_unlock(&__urcu_compat_futex_lock);
- assert(!ret);
- return gret;
+ /*
+ * Use a separate variable so a successful unlock does not clobber an
+ * error already recorded in "ret" (e.g. EINVAL for an unknown op).
+ */
+ unlockret = pthread_mutex_unlock(&__urcu_compat_futex_lock);
+ if (unlockret) {
+ errno = unlockret;
+ ret = -1;
+ }
+end:
+ return ret;
}
/*
int compat_futex_async(int32_t *uaddr, int op, int32_t val,
const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
+ int ret = 0;
+
/*
* Check if NULL. Don't let users expect that they are taken into
* account.
*/
switch (op) {
case FUTEX_WAIT:
- while (CMM_LOAD_SHARED(*uaddr) == val)
- poll(NULL, 0, 10);
+ while (CMM_LOAD_SHARED(*uaddr) == val) {
+ if (poll(NULL, 0, 10) < 0) {
+ ret = -1;
+ /* Keep poll errno. Caller handles EINTR. */
+ goto end;
+ }
+ }
break;
case FUTEX_WAKE:
break;
default:
- return -EINVAL;
+ errno = EINVAL;
+ ret = -1;
}
- return 0;
+end:
+ return ret;
}
{
/* Read call_rcu list before read futex */
cmm_smp_mb();
- if (uatomic_read(&crdp->futex) == -1)
- futex_async(&crdp->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&crdp->futex) != -1)
+ return;
+ while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
cmm_smp_mb();
if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
uatomic_set(&crdp->futex, 0);
- futex_async(&crdp->futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0) < 0)
+ urcu_die(errno);
}
}
{
/* Read completion barrier count before read futex */
cmm_smp_mb();
- if (uatomic_read(&completion->futex) == -1)
- futex_async(&completion->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&completion->futex) != -1)
+ return;
+ while (futex_async(&completion->futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
cmm_smp_mb();
if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
uatomic_set(&completion->futex, 0);
- futex_async(&completion->futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ if (futex_async(&completion->futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0) < 0)
+ urcu_die(errno);
}
}
{
if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
uatomic_set(&defer_thread_futex, 0);
- futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ if (futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0) < 0)
+ urcu_die(errno);
}
}
uatomic_set(&defer_thread_futex, 0);
} else {
cmm_smp_rmb(); /* Read queue before read futex */
- if (uatomic_read(&defer_thread_futex) == -1)
- futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&defer_thread_futex) != -1)
+ return;
+ while (futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
}
{
/* Read reader_gp before read futex */
cmm_smp_rmb();
- if (uatomic_read(&rcu_gp.futex) == -1)
- futex_noasync(&rcu_gp.futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&rcu_gp.futex) != -1)
+ return;
+ while (futex_noasync(&rcu_gp.futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
/*
#include <urcu/uatomic.h>
#include <urcu/wfstack.h>
+#include "urcu-die.h"
/*
* Number of busy-loop attempts before waiting on futex for grace period
cmm_smp_mb();
assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
- if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING))
- futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
+ if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
+ if (futex_noasync(&wait->state, FUTEX_WAKE, 1,
+ NULL, NULL, 0) < 0)
+ urcu_die(errno);
+ }
/* Allow teardown of struct urcu_wait memory. */
uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
}
goto skip_futex_wait;
caa_cpu_relax();
}
- futex_noasync(&wait->state, FUTEX_WAIT,
- URCU_WAIT_WAITING, NULL, NULL, 0);
+ while (futex_noasync(&wait->state, FUTEX_WAIT, URCU_WAIT_WAITING,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ goto skip_futex_wait;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
skip_futex_wait:
/* Tell waker thread that we are running. */
{
/* Read reader_gp before read futex */
smp_mb_master(RCU_MB_GROUP);
- if (uatomic_read(&rcu_gp.futex) == -1)
- futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&rcu_gp.futex) != -1)
+ return;
+ while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
/*
*
* futex_async is signal-handler safe for the wakeup. It uses polling
* on the wait-side in compatibility mode.
+ *
+ * BEWARE: sys_futex() FUTEX_WAIT may return early if interrupted by a
+ * signal (it fails with errno set to EINTR).
*/
#ifdef CONFIG_RCU_HAVE_FUTEX
if (uatomic_read(&rcu_gp.futex) != -1)
return;
uatomic_set(&rcu_gp.futex, 0);
- futex_noasync(&rcu_gp.futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ /*
+ * Ignoring return value until we can make this function
+ * return something (because urcu_die() is not publicly
+ * exposed).
+ */
+ (void) futex_noasync(&rcu_gp.futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
}
}
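For reference, a minimal standalone sketch of the wait-side retry pattern the hunks above apply at each call site. It calls the raw futex(2) syscall directly; the helper name futex_wait_retry() and the use of syscall(2) are illustrative only and are not part of this patch, whose futex_noasync()/futex_async() wrappers are assumed to report failure as -1 with errno set.

#include <errno.h>
#include <stdint.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative helper, not part of the patch. */
static int futex_wait_retry(int32_t *uaddr, int32_t val)
{
	for (;;) {
		/* Block until *uaddr is no longer equal to val. */
		if (!syscall(SYS_futex, uaddr, FUTEX_WAIT, val,
				NULL, NULL, 0))
			return 0;	/* Woken up. */
		switch (errno) {
		case EWOULDBLOCK:
			return 0;	/* *uaddr already differs from val. */
		case EINTR:
			break;		/* Interrupted by signal: retry. */
		default:
			return -1;	/* Unexpected error: caller decides. */
		}
	}
}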
{
if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
uatomic_set(&rcu_gp.futex, 0);
- futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ /*
+ * Ignoring return value until we can make this function
+ * return something (because urcu_die() is not publicly
+ * exposed).
+ */
+ (void) futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
}
}