Rename likely/unlikely to caa_likely/caa_unlikely throughout the tree. This fixes namespace conflicts: the unprefixed macros exported by liburcu's public headers collide with the likely()/unlikely() macros that applications and other libraries frequently define for themselves.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
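For context, a minimal sketch of the kind of clash the rename avoids. Only the caa_likely()/caa_unlikely() definitions come from this commit; the surrounding application code is hypothetical, standing in for any program that defines its own likely()/unlikely() branch hints and also includes liburcu headers.

/*
 * Hypothetical application translation unit, for illustration only.
 * Many projects carry their own branch-prediction hint macros:
 */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Before this change, liburcu's public headers also defined the
 * unprefixed likely()/unlikely(), so any differing definition (or any
 * use of those names as ordinary identifiers) clashed with the
 * library.  After the rename, the library exports only the prefixed
 * forms, defined exactly as in the diff below:
 */
#define caa_likely(x)	__builtin_expect(!!(x), 1)
#define caa_unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	int err = 0;

	if (unlikely(err))		/* application's own macro */
		fprintf(stderr, "application error path\n");
	if (caa_unlikely(err))		/* library-provided macro */
		fprintf(stderr, "library error path\n");
	printf("no namespace clash\n");
	return 0;
}

With distinct prefixes the two macro families coexist in one translation unit; previously, include order and the textual identity of the definitions had to line up exactly to avoid redefinition warnings.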
22 files changed:
for (;;) {
pthread_mutex_lock(&lock);
assert(test_array.a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&lock);
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
pthread_mutex_lock(&lock);
test_array.a = 0;
test_array.a = 8;
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_mutex_unlock(&lock);
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
for (;;) {
pthread_mutex_lock(&per_thread_lock[tidx].lock);
assert(test_array.a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
}
test_array.a = 0;
test_array.a = 8;
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
for (tidx = (long)nr_readers - 1; tidx >= 0; tidx--) {
pthread_mutex_unlock(&per_thread_lock[tidx].lock);
}
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
for (;;) {
pthread_rwlock_rdlock(&lock);
assert(test_array.a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
pthread_rwlock_unlock(&lock);
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
pthread_rwlock_wrlock(&lock);
test_array.a = 0;
test_array.a = 8;
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
pthread_rwlock_unlock(&lock);
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
synchronize_rcu();
if (old)
test_array_free(old);
rcu_copy_mutex_unlock();
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
new->a = 8;
old = test_rcu_pointer;
rcu_assign_pointer(test_rcu_pointer, new);
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
synchronize_rcu();
if (old)
test_array_free(old);
rcu_copy_mutex_unlock();
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
synchronize_rcu();
if (old)
test_array_free(old);
rcu_copy_mutex_unlock();
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
new = malloc(sizeof(*new));
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
defer_rcu(free, old);
defer_rcu(test_cb1, old);
defer_rcu(test_cb2, (void *)-4L);
defer_rcu(test_cb2, (void *)-2L);
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
*pending_reclaims[wtidx].head = old;
pending_reclaims[wtidx].head++;
- if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
+ if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
< reclaim_batch))
return;
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
#endif
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
rcu_read_unlock();
nr_successful_enqueues++;
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
nr_enqueues++;
- if (unlikely(!test_duration_enqueue()))
+ if (caa_unlikely(!test_duration_enqueue()))
- if (unlikely(!test_duration_dequeue()))
+ if (caa_unlikely(!test_duration_dequeue()))
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
cds_lfs_push_rcu(&s, &node->list);
nr_successful_enqueues++;
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
nr_enqueues++;
- if (unlikely(!test_duration_enqueue()))
+ if (caa_unlikely(!test_duration_enqueue()))
nr_successful_dequeues++;
}
nr_dequeues++;
- if (unlikely(!test_duration_dequeue()))
+ if (caa_unlikely(!test_duration_dequeue()))
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
nr_reads++;
/* QS each 1024 reads */
- if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
synchronize_rcu();
/* can be done after unlock */
test_array_free(old);
rcu_copy_mutex_unlock();
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
_rcu_read_unlock();
nr_reads++;
/* QS each 1024 reads */
- if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+ if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
- if (unlikely(!test_duration_read()))
+ if (caa_unlikely(!test_duration_read()))
*pending_reclaims[wtidx].head = old;
pending_reclaims[wtidx].head++;
- if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
+ if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
< reclaim_batch))
return;
new->a = 8;
old = _rcu_xchg_pointer(&test_rcu_pointer, new);
#endif
- if (unlikely(wduration))
+ if (caa_unlikely(wduration))
loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
nr_writes++;
- if (unlikely(!test_duration_write()))
+ if (caa_unlikely(!test_duration_write()))
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
cds_wfq_enqueue(&q, node);
nr_successful_enqueues++;
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
nr_enqueues++;
- if (unlikely(!test_duration_enqueue()))
+ if (caa_unlikely(!test_duration_enqueue()))
- if (unlikely(!test_duration_dequeue()))
+ if (caa_unlikely(!test_duration_dequeue()))
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
cds_wfs_push(&s, node);
nr_successful_enqueues++;
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
loop_sleep(wdelay);
fail:
nr_enqueues++;
- if (unlikely(!test_duration_enqueue()))
+ if (caa_unlikely(!test_duration_enqueue()))
- if (unlikely(!test_duration_dequeue()))
+ if (caa_unlikely(!test_duration_dequeue()))
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
{
/* Write to call_rcu list before reading/writing futex */
cmm_smp_mb();
- if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+ if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
uatomic_set(&crdp->futex, 0);
futex_async(&crdp->futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
*/
static void wake_up_defer(void)
{
- if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
+ if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
uatomic_set(&defer_thread_futex, 0);
futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
for (i = queue->tail; i != head;) {
cmm_smp_rmb(); /* read head before q[]. */
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
- if (unlikely(DQ_IS_FCT_BIT(p))) {
+ if (caa_unlikely(DQ_IS_FCT_BIT(p))) {
DQ_CLEAR_FCT_BIT(p);
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
- } else if (unlikely(p == DQ_FCT_MARK)) {
+ } else if (caa_unlikely(p == DQ_FCT_MARK)) {
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
head = defer_queue.head;
num_items = head - defer_queue.tail;
- if (unlikely(!num_items))
+ if (caa_unlikely(!num_items))
return;
synchronize_rcu();
rcu_defer_barrier_queue(&defer_queue, head);
index->last_head = CMM_LOAD_SHARED(index->head);
num_items += index->last_head - index->tail;
}
- if (likely(!num_items)) {
+ if (caa_likely(!num_items)) {
/*
* We skip the grace period because there are no queued
* callbacks to execute.
* If queue is full, or reached threshold. Empty queue ourself.
* Worse-case: must allow 2 supplementary entries for fct pointer.
*/
- if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
+ if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
* Decode: see the comments before 'struct defer_queue'
* or the code in rcu_defer_barrier_queue().
*/
- if (unlikely(defer_queue.last_fct_in != fct
+ if (caa_unlikely(defer_queue.last_fct_in != fct
|| DQ_IS_FCT_BIT(p)
|| p == DQ_FCT_MARK)) {
defer_queue.last_fct_in = fct;
- if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
+ if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
- if (likely(has_sys_membarrier))
+ if (caa_likely(has_sys_membarrier))
membarrier(MEMBARRIER_EXPEDITED);
else
cmm_smp_mb();
#include <stddef.h> /* for offsetof */
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+#define caa_likely(x) __builtin_expect(!!(x), 1)
+#define caa_unlikely(x) __builtin_expect(!!(x), 0)
#define cmm_barrier() asm volatile("" : : : "memory")
long tmp;
/* Check if registered */
- if (unlikely(!rcu_reader))
+ if (caa_unlikely(!rcu_reader))
rcu_bp_register();
cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
- if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+ if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
*/
static inline void wake_up_gp(void)
{
- if (unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
+ if (caa_unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
_CMM_STORE_SHARED(rcu_reader.waiting, 0);
cmm_smp_mb();
if (uatomic_read(&gp_futex) != -1)
static inline void smp_mb_slave(int group)
{
- if (likely(has_sys_membarrier))
+ if (caa_likely(has_sys_membarrier))
cmm_barrier();
else
cmm_smp_mb();
*/
static inline void wake_up_gp(void)
{
- if (unlikely(uatomic_read(&gp_futex) == -1)) {
+ if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
uatomic_set(&gp_futex, 0);
futex_async(&gp_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
* rcu_gp_ctr is
* RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
*/
- if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+ if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* Finish using rcu before decrementing the pointer.
* See smp_mb_master().
*/
- if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+ if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
smp_mb_slave(RCU_MB_GROUP);
_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
/* write rcu_reader.ctr before read futex */
extern int __rcu_cas_init(void);
#define UATOMIC_COMPAT(insn) \
- ((likely(__rcu_cas_avail > 0)) \
+ ((caa_likely(__rcu_cas_avail > 0)) \
- : ((unlikely(__rcu_cas_avail < 0) \
+ : ((caa_unlikely(__rcu_cas_avail < 0) \
? ((__rcu_cas_init() > 0) \
? (_uatomic_##insn) \
: (compat_uatomic_##insn)) \