* throughout its execution. In this scheme, the waiter owns the node
* memory, and we only allow it to free this memory when it receives the
* URCU_WAIT_TEARDOWN flag.
+ * Return true if a wakeup was performed, false if the thread was
+ * already running.
*/
static inline
-void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
+bool urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
+ bool wakeup_performed = false;
+
cmm_smp_mb();
/*
* "or" of WAKEUP flag rather than "set" is useful for multiple
* "value" should then be handled by the caller.
*/
uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
- if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING))
+ if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
+ wakeup_performed = true;
+ }
/* Allow teardown of struct urcu_wait memory. */
uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
+ return wakeup_performed;
}
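
For reference, the WAKEUP/RUNNING/TEARDOWN bits tested above come from the urcu wait-state enum. A sketch of those definitions, reproduced from urcu/wait.h for illustration (exact values may differ across liburcu versions):

        enum urcu_wait_state {
                /* URCU_WAIT_WAITING is compared directly (futex compares it). */
                URCU_WAIT_WAITING       = 0,
                /* non-zero are used as masks. */
                URCU_WAIT_WAKEUP        = (1 << 0),
                URCU_WAIT_RUNNING       = (1 << 1),
                URCU_WAIT_TEARDOWN      = (1 << 2),
        };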
/*
{
struct cds_wfs_node *node;
struct urcu_wait_node *wait_node;
- int wakeup_done = 0;
+ int ret = 0;
node = __cds_wfs_pop_blocking(&queue->stack);
if (!node)
wait_node = caa_container_of(node, struct urcu_wait_node, node);
CMM_STORE_SHARED(wait_node->node.next, NULL);
/* Don't wake already running threads */
- if (!(wait_node->state & URCU_WAIT_RUNNING)) {
- urcu_adaptative_wake_up(wait_node);
- wakeup_done = 1;
- }
- return wakeup_done;
+ if (!(wait_node->state & URCU_WAIT_RUNNING))
+ ret = urcu_adaptative_wake_up(wait_node);
+ return ret;
}
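
With urcu_adaptative_wake_up() now reporting whether a FUTEX_WAKE was actually issued, a caller of the single-dequeue path can count real wakeups. A minimal sketch, assuming urcu_dequeue_wake_single() returns 1 for a performed wakeup and 0 or a negative error otherwise (the empty-queue return is elided in this excerpt); wake_up_to_n() is a hypothetical helper, not part of this patch:

        /* Hypothetical helper: issue up to @n real wakeups, return how many. */
        static int wake_up_to_n(struct urcu_wait_queue *queue, int n)
        {
                int i, nr_wake = 0;

                for (i = 0; i < n; i++) {
                        /* Counts only dequeues that actually woke a futex waiter. */
                        if (urcu_dequeue_wake_single(queue) > 0)
                                nr_wake++;
                }
                return nr_wake;
        }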
/*
/* Don't wake already running threads */
if (wait_node->state & URCU_WAIT_RUNNING)
continue;
- urcu_adaptative_wake_up(wait_node);
- nr_wakeup++;
+ if (urcu_adaptative_wake_up(wait_node))
+ nr_wakeup++;
}
return nr_wakeup;
}
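
The loop above previously counted every waiter it dispatched to urcu_adaptative_wake_up(), even if the RUNNING bit was set between the early check and the actual wakeup; nr_wakeup now reflects only futex wakeups that were really performed. A caller can rely on that stricter count, for example (the broadcast helper's name is illustrative, since the enclosing function's signature is elided in this excerpt):

        /*
         * Hypothetical broadcast site: a zero count now means every
         * dequeued waiter was already RUNNING, not merely that some
         * wait nodes were dequeued.
         */
        int nr_wake = urcu_dequeue_wake_all(queue);     /* name illustrative */

        if (!nr_wake)
                caa_cpu_relax();        /* nothing was sleeping; back off briefly */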
}
static inline
-void ___urcu_wakeup_sibling(struct urcu_worker *sibling)
+bool ___urcu_wakeup_sibling(struct urcu_worker *sibling)
{
- urcu_adaptative_wake_up(&sibling->wait_node);
+ return urcu_adaptative_wake_up(&sibling->wait_node);
}
static inline
-void __urcu_wakeup_siblings(struct urcu_workqueue *queue,
+bool __urcu_wakeup_siblings(struct urcu_workqueue *queue,
struct urcu_worker *worker)
{
struct urcu_worker *sibling_prev, *sibling_next;
struct cds_list_head *sibling_node;
+ bool wakeup_performed = false;
if (!(worker->flags & URCU_WORKER_STEAL))
- return;
+ return false;
sibling_next = caa_container_of(sibling_node, struct urcu_worker,
sibling_node);
if (sibling_next != worker)
- ___urcu_wakeup_sibling(sibling_next);
+ wakeup_performed = ___urcu_wakeup_sibling(sibling_next);
+ if (wakeup_performed)
+ goto end;
sibling_node = rcu_dereference(worker->sibling_node.prev);
if (sibling_node == &queue->sibling_head)
sibling_prev = caa_container_of(sibling_node, struct urcu_worker,
sibling_node);
if (sibling_prev != worker && sibling_prev != sibling_next)
- ___urcu_wakeup_sibling(sibling_prev);
-
+ wakeup_performed = ___urcu_wakeup_sibling(sibling_prev);
+end:
rcu_read_unlock();
+
+ return wakeup_performed;
}
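
Stopping after the first successful sibling wakeup (the goto end short-circuit) presumably relies on wakeup chaining: an awakened sibling runs this same function once it grabs work, propagating wakeups around the ring instead of one worker waking everyone. A simplified sketch of that chain in a worker loop; grab_work_batch() and process_batch() are illustrative placeholders, not part of this patch:

        for (;;) {
                grab_work_batch(queue, worker); /* placeholder: dequeue a batch */
                /*
                 * Busy with our batch: wake at most one sibling so it
                 * can steal; that sibling wakes the next one if needed.
                 */
                (void) __urcu_wakeup_siblings(queue, worker);
                process_batch(worker);          /* placeholder: run the work items */
        }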
static inline
* We will be busy handling the work batch, awaken siblings so
* they can steal from us.
*/
- __urcu_wakeup_siblings(queue, worker);
+ (void) __urcu_wakeup_siblings(queue, worker);
}
static inline