From: Mathieu Desnoyers
Date: Tue, 1 Nov 2011 23:58:52 +0000 (-0400)
Subject: Rename likely/unlikely to caa_likely/caa_unlikely
X-Git-Tag: v0.6.6~2
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=a0b7f7ea3fc3339a1c42caffd53ce9f056e5b901;p=urcu.git

Rename likely/unlikely to caa_likely/caa_unlikely

This fixes namespace conflicts.
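The rename keeps the branch-hint semantics (__builtin_expect()) and only
changes the macro namespace. The sketch below is illustrative only and is
not part of this commit: it shows the kind of clash this avoids, since
urcu/compiler.h is a public header and the unprefixed likely()/unlikely()
names are an idiom many applications define for themselves (redefinition
errors or warnings follow whenever the two definitions differ).
process_fd() and main() are hypothetical application code.

/* Hypothetical application code, for illustration only. */
#include <stdio.h>

/* What urcu/compiler.h exports after this change (prefixed, no clash): */
#define caa_likely(x)   __builtin_expect(!!(x), 1)
#define caa_unlikely(x) __builtin_expect(!!(x), 0)

/* The application's own pre-existing hints, a very common idiom. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

static int process_fd(int fd)
{
        if (caa_unlikely(fd < 0))       /* library-namespaced hint */
                return -1;
        if (likely(fd >= 0))            /* application's own hint */
                printf("fd %d ok\n", fd);
        return 0;
}

int main(void)
{
        return process_fd(0);
}

With the caa_ prefix, urcu headers and application-local branch hints can
coexist in the same translation unit.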
Signed-off-by: Mathieu Desnoyers
---

diff --git a/tests/test_mutex.c b/tests/test_mutex.c
index 1194b02..3f84bbf 100644
--- a/tests/test_mutex.c
+++ b/tests/test_mutex.c
@@ -205,11 +205,11 @@ void *thr_reader(void *data)
 	for (;;) {
 		pthread_mutex_lock(&lock);
 		assert(test_array.a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		pthread_mutex_unlock(&lock);
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -238,13 +238,13 @@ void *thr_writer(void *data)
 		pthread_mutex_lock(&lock);
 		test_array.a = 0;
 		test_array.a = 8;
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		pthread_mutex_unlock(&lock);
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_perthreadlock.c b/tests/test_perthreadlock.c
index 6c7114c..fa9c89a 100644
--- a/tests/test_perthreadlock.c
+++ b/tests/test_perthreadlock.c
@@ -209,11 +209,11 @@ void *thr_reader(void *data)
 	for (;;) {
 		pthread_mutex_lock(&per_thread_lock[tidx].lock);
 		assert(test_array.a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		pthread_mutex_unlock(&per_thread_lock[tidx].lock);
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -245,15 +245,15 @@ void *thr_writer(void *data)
 		}
 		test_array.a = 0;
 		test_array.a = 8;
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		for (tidx = (long)nr_readers - 1; tidx >= 0; tidx--) {
 			pthread_mutex_unlock(&per_thread_lock[tidx].lock);
 		}
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_rwlock.c b/tests/test_rwlock.c
index 9c8f97e..34d8c07 100644
--- a/tests/test_rwlock.c
+++ b/tests/test_rwlock.c
@@ -201,11 +201,11 @@ void *thr_reader(void *_count)
 	for (;;) {
 		pthread_rwlock_rdlock(&lock);
 		assert(test_array.a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		pthread_rwlock_unlock(&lock);
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -234,13 +234,13 @@ void *thr_writer(void *_count)
 		pthread_rwlock_wrlock(&lock);
 		test_array.a = 0;
 		test_array.a = 8;
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		pthread_rwlock_unlock(&lock);
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu.c b/tests/test_urcu.c
index 884d77c..870f133 100644
--- a/tests/test_urcu.c
+++ b/tests/test_urcu.c
@@ -239,11 +239,11 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -280,7 +280,7 @@ void *thr_writer(void *_count)
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		synchronize_rcu();
 		if (old)
@@ -288,9 +288,9 @@ void *thr_writer(void *_count)
 		test_array_free(old);
 		rcu_copy_mutex_unlock();
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
index 0d9ef85..42d70c2 100644
--- a/tests/test_urcu_assign.c
+++ b/tests/test_urcu_assign.c
@@ -239,11 +239,11 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -277,7 +277,7 @@ void *thr_writer(void *_count)
 		new->a = 8;
 		old = test_rcu_pointer;
 		rcu_assign_pointer(test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		synchronize_rcu();
 		if (old)
@@ -285,9 +285,9 @@ void *thr_writer(void *_count)
 		test_array_free(old);
 		rcu_copy_mutex_unlock();
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
index ba80ae6..857913f 100644
--- a/tests/test_urcu_bp.c
+++ b/tests/test_urcu_bp.c
@@ -239,11 +239,11 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -276,7 +276,7 @@ void *thr_writer(void *_count)
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		synchronize_rcu();
 		if (old)
@@ -284,9 +284,9 @@ void *thr_writer(void *_count)
 		test_array_free(old);
 		rcu_copy_mutex_unlock();
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_defer.c b/tests/test_urcu_defer.c
index 7d71f10..1575e9c 100644
--- a/tests/test_urcu_defer.c
+++ b/tests/test_urcu_defer.c
@@ -210,11 +210,11 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -261,7 +261,7 @@ void *thr_writer(void *data)
 		new = malloc(sizeof(*new));
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		defer_rcu(free, old);
 		defer_rcu(test_cb1, old);
@@ -272,9 +272,9 @@ void *thr_writer(void *data)
 		defer_rcu(test_cb2, (void *)-4L);
 		defer_rcu(test_cb2, (void *)-2L);
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_gc.c b/tests/test_urcu_gc.c
index 4eaa61b..21c5d56 100644
--- a/tests/test_urcu_gc.c
+++ b/tests/test_urcu_gc.c
@@ -218,11 +218,11 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -259,7 +259,7 @@ static void rcu_gc_reclaim(unsigned long wtidx, void *old)
 	*pending_reclaims[wtidx].head = old;
 	pending_reclaims[wtidx].head++;

-	if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
+	if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
 			< reclaim_batch))
 		return;

@@ -291,13 +291,13 @@ void *thr_writer(void *data)
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
 #endif
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		rcu_gc_reclaim(wtidx, old);
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_lfq.c b/tests/test_urcu_lfq.c
index 5292ebd..11e7eb3 100644
--- a/tests/test_urcu_lfq.c
+++ b/tests/test_urcu_lfq.c
@@ -190,11 +190,11 @@ void *thr_enqueuer(void *_count)
 		rcu_read_unlock();

 		nr_successful_enqueues++;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 fail:
 		nr_enqueues++;
-		if (unlikely(!test_duration_enqueue()))
+		if (caa_unlikely(!test_duration_enqueue()))
 			break;
 	}

@@ -255,9 +255,9 @@ void *thr_dequeuer(void *_count)
 		}

 		nr_dequeues++;
-		if (unlikely(!test_duration_dequeue()))
+		if (caa_unlikely(!test_duration_dequeue()))
 			break;
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 	}

diff --git a/tests/test_urcu_lfs.c b/tests/test_urcu_lfs.c
index c85fa44..883fd0c 100644
--- a/tests/test_urcu_lfs.c
+++ b/tests/test_urcu_lfs.c
@@ -189,11 +189,11 @@ void *thr_enqueuer(void *_count)
 		cds_lfs_push_rcu(&s, &node->list);

 		nr_successful_enqueues++;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 fail:
 		nr_enqueues++;
-		if (unlikely(!test_duration_enqueue()))
+		if (caa_unlikely(!test_duration_enqueue()))
 			break;
 	}

@@ -252,9 +252,9 @@ void *thr_dequeuer(void *_count)
 			nr_successful_dequeues++;
 		}
 		nr_dequeues++;
-		if (unlikely(!test_duration_dequeue()))
+		if (caa_unlikely(!test_duration_dequeue()))
 			break;
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 	}

diff --git a/tests/test_urcu_qsbr.c b/tests/test_urcu_qsbr.c
index 1c5a696..b986fd8 100644
--- a/tests/test_urcu_qsbr.c
+++ b/tests/test_urcu_qsbr.c
@@ -238,14 +238,14 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		rcu_read_unlock();
 		nr_reads++;
 		/* QS each 1024 reads */
-		if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+		if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
 			rcu_quiescent_state();
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -282,7 +282,7 @@ void *thr_writer(void *_count)
 		new = test_array_alloc();
 		new->a = 8;
 		old = rcu_xchg_pointer(&test_rcu_pointer, new);
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		synchronize_rcu();
 		/* can be done after unlock */
@@ -291,9 +291,9 @@ void *thr_writer(void *_count)
 		test_array_free(old);
 		rcu_copy_mutex_unlock();
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_qsbr_gc.c b/tests/test_urcu_qsbr_gc.c
index 0c1c124..9deb0aa 100644
--- a/tests/test_urcu_qsbr_gc.c
+++ b/tests/test_urcu_qsbr_gc.c
@@ -214,14 +214,14 @@ void *thr_reader(void *_count)
 		debug_yield_read();
 		if (local_ptr)
 			assert(local_ptr->a == 8);
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 		_rcu_read_unlock();
 		nr_reads++;
 		/* QS each 1024 reads */
-		if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
+		if (caa_unlikely((nr_reads & ((1 << 10) - 1)) == 0))
 			_rcu_quiescent_state();
-		if (unlikely(!test_duration_read()))
+		if (caa_unlikely(!test_duration_read()))
 			break;
 	}

@@ -258,7 +258,7 @@ static void rcu_gc_reclaim(unsigned long wtidx, void *old)
 	*pending_reclaims[wtidx].head = old;
 	pending_reclaims[wtidx].head++;

-	if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
+	if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
 			< reclaim_batch))
 		return;

@@ -290,13 +290,13 @@ void *thr_writer(void *data)
 		new->a = 8;
 		old = _rcu_xchg_pointer(&test_rcu_pointer, new);
 #endif
-		if (unlikely(wduration))
+		if (caa_unlikely(wduration))
 			loop_sleep(wduration);
 		rcu_gc_reclaim(wtidx, old);
 		nr_writes++;
-		if (unlikely(!test_duration_write()))
+		if (caa_unlikely(!test_duration_write()))
 			break;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 	}

diff --git a/tests/test_urcu_wfq.c b/tests/test_urcu_wfq.c
index e042f5e..83ec635 100644
--- a/tests/test_urcu_wfq.c
+++ b/tests/test_urcu_wfq.c
@@ -180,11 +180,11 @@ void *thr_enqueuer(void *_count)
 		cds_wfq_enqueue(&q, node);

 		nr_successful_enqueues++;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 fail:
 		nr_enqueues++;
-		if (unlikely(!test_duration_enqueue()))
+		if (caa_unlikely(!test_duration_enqueue()))
 			break;
 	}

@@ -221,9 +221,9 @@ void *thr_dequeuer(void *_count)
 		}

 		nr_dequeues++;
-		if (unlikely(!test_duration_dequeue()))
+		if (caa_unlikely(!test_duration_dequeue()))
 			break;
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 	}

diff --git a/tests/test_urcu_wfs.c b/tests/test_urcu_wfs.c
index 6d419ca..7746a1d 100644
--- a/tests/test_urcu_wfs.c
+++ b/tests/test_urcu_wfs.c
@@ -180,11 +180,11 @@ void *thr_enqueuer(void *_count)
 		cds_wfs_push(&s, node);

 		nr_successful_enqueues++;
-		if (unlikely(wdelay))
+		if (caa_unlikely(wdelay))
 			loop_sleep(wdelay);
 fail:
 		nr_enqueues++;
-		if (unlikely(!test_duration_enqueue()))
+		if (caa_unlikely(!test_duration_enqueue()))
 			break;
 	}

@@ -221,9 +221,9 @@ void *thr_dequeuer(void *_count)
 		}

 		nr_dequeues++;
-		if (unlikely(!test_duration_dequeue()))
+		if (caa_unlikely(!test_duration_dequeue()))
 			break;
-		if (unlikely(rduration))
+		if (caa_unlikely(rduration))
 			loop_sleep(rduration);
 	}

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index 182e9b1..36e3cf4 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -204,7 +204,7 @@ static void call_rcu_wake_up(struct call_rcu_data *crdp)
 {
 	/* Write to call_rcu list before reading/writing futex */
 	cmm_smp_mb();
-	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
 		uatomic_set(&crdp->futex, 0);
 		futex_async(&crdp->futex, FUTEX_WAKE, 1,
 			NULL, NULL, 0);

diff --git a/urcu-defer-impl.h b/urcu-defer-impl.h
index 34d99c9..4d1ca5e 100644
--- a/urcu-defer-impl.h
+++ b/urcu-defer-impl.h
@@ -161,7 +161,7 @@ static void mutex_lock_defer(pthread_mutex_t *mutex)
  */
 static void wake_up_defer(void)
 {
-	if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
 		uatomic_set(&defer_thread_futex, 0);
 		futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
 			NULL, NULL, 0);
@@ -225,11 +225,11 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 	for (i = queue->tail; i != head;) {
 		cmm_smp_rmb();	/* read head before q[]. */
 		p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
-		if (unlikely(DQ_IS_FCT_BIT(p))) {
+		if (caa_unlikely(DQ_IS_FCT_BIT(p))) {
 			DQ_CLEAR_FCT_BIT(p);
 			queue->last_fct_out = p;
 			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
-		} else if (unlikely(p == DQ_FCT_MARK)) {
+		} else if (caa_unlikely(p == DQ_FCT_MARK)) {
 			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 			queue->last_fct_out = p;
 			p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
@@ -247,7 +247,7 @@ static void _rcu_defer_barrier_thread(void)

 	head = defer_queue.head;
 	num_items = head - defer_queue.tail;
-	if (unlikely(!num_items))
+	if (caa_unlikely(!num_items))
 		return;
 	synchronize_rcu();
 	rcu_defer_barrier_queue(&defer_queue, head);
@@ -286,7 +286,7 @@ void rcu_defer_barrier(void)
 		index->last_head = CMM_LOAD_SHARED(index->head);
 		num_items += index->last_head - index->tail;
 	}
-	if (likely(!num_items)) {
+	if (caa_likely(!num_items)) {
 		/*
 		 * We skip the grace period because there are no queued
 		 * callbacks to execute.
@@ -318,7 +318,7 @@ void _defer_rcu(void (*fct)(void *p), void *p)
 	 * If queue is full, or reached threshold. Empty queue ourself.
 	 * Worse-case: must allow 2 supplementary entries for fct pointer.
 	 */
-	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
+	if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
 		assert(head - tail <= DEFER_QUEUE_SIZE);
 		rcu_defer_barrier_thread();
 		assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
@@ -340,11 +340,11 @@ void _defer_rcu(void (*fct)(void *p), void *p)
 	 * Decode: see the comments before 'struct defer_queue'
 	 * or the code in rcu_defer_barrier_queue().
 	 */
-	if (unlikely(defer_queue.last_fct_in != fct
+	if (caa_unlikely(defer_queue.last_fct_in != fct
 			|| DQ_IS_FCT_BIT(p)
 			|| p == DQ_FCT_MARK)) {
 		defer_queue.last_fct_in = fct;
-		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
+		if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
 			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
 				DQ_FCT_MARK);
 			_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],

diff --git a/urcu.c b/urcu.c
index 77f6888..ba013d9 100644
--- a/urcu.c
+++ b/urcu.c
@@ -144,7 +144,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 #ifdef RCU_MEMBARRIER
 static void smp_mb_master(int group)
 {
-	if (likely(has_sys_membarrier))
+	if (caa_likely(has_sys_membarrier))
 		membarrier(MEMBARRIER_EXPEDITED);
 	else
 		cmm_smp_mb();

diff --git a/urcu/compiler.h b/urcu/compiler.h
index 6db803e..f977b95 100644
--- a/urcu/compiler.h
+++ b/urcu/compiler.h
@@ -20,8 +20,8 @@

 #include <stddef.h>	/* for offsetof */

-#define likely(x)	__builtin_expect(!!(x), 1)
-#define unlikely(x)	__builtin_expect(!!(x), 0)
+#define caa_likely(x)	__builtin_expect(!!(x), 1)
+#define caa_unlikely(x)	__builtin_expect(!!(x), 0)

 #define	cmm_barrier()	asm volatile("" : : : "memory")

diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h
index 832ba0f..8d22163 100644
--- a/urcu/static/urcu-bp.h
+++ b/urcu/static/urcu-bp.h
@@ -166,7 +166,7 @@ static inline void _rcu_read_lock(void)
 	long tmp;

 	/* Check if registered */
-	if (unlikely(!rcu_reader))
+	if (caa_unlikely(!rcu_reader))
 		rcu_bp_register();

 	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
@@ -175,7 +175,7 @@ static inline void _rcu_read_lock(void)
 	 * rcu_gp_ctr is
 	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
 	 */
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before

diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h
index 489abb0..68bfc31 100644
--- a/urcu/static/urcu-qsbr.h
+++ b/urcu/static/urcu-qsbr.h
@@ -137,7 +137,7 @@ extern int32_t gp_futex;
  */
 static inline void wake_up_gp(void)
 {
-	if (unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
+	if (caa_unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
 		_CMM_STORE_SHARED(rcu_reader.waiting, 0);
 		cmm_smp_mb();
 		if (uatomic_read(&gp_futex) != -1)

diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h
index b993375..7ae0185 100644
--- a/urcu/static/urcu.h
+++ b/urcu/static/urcu.h
@@ -176,7 +176,7 @@ extern int has_sys_membarrier;

 static inline void smp_mb_slave(int group)
 {
-	if (likely(has_sys_membarrier))
+	if (caa_likely(has_sys_membarrier))
 		cmm_barrier();
 	else
 		cmm_smp_mb();
@@ -231,7 +231,7 @@ extern int32_t gp_futex;
  */
 static inline void wake_up_gp(void)
 {
-	if (unlikely(uatomic_read(&gp_futex) == -1)) {
+	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
 		uatomic_set(&gp_futex, 0);
 		futex_async(&gp_futex, FUTEX_WAKE, 1,
 			NULL, NULL, 0);
@@ -261,7 +261,7 @@ static inline void _rcu_read_lock(void)
 	 * rcu_gp_ctr is
 	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
 	 */
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
 		/*
 		 * Set active readers count for outermost nesting level before
@@ -282,7 +282,7 @@ static inline void _rcu_read_unlock(void)
 	 * Finish using rcu before decrementing the pointer.
 	 * See smp_mb_master().
 	 */
-	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
 		smp_mb_slave(RCU_MB_GROUP);
 		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
 		/* write rcu_reader.ctr before read futex */

diff --git a/urcu/uatomic/x86.h b/urcu/uatomic/x86.h
index e064b9e..9b67f16 100644
--- a/urcu/uatomic/x86.h
+++ b/urcu/uatomic/x86.h
@@ -505,9 +505,9 @@ extern int __rcu_cas_avail;
 extern int __rcu_cas_init(void);

 #define UATOMIC_COMPAT(insn)					\
-	((likely(__rcu_cas_avail > 0))				\
+	((caa_likely(__rcu_cas_avail > 0))			\
 	 ? (_uatomic_##insn)					\
-	 : ((unlikely(__rcu_cas_avail < 0)			\
+	 : ((caa_unlikely(__rcu_cas_avail < 0)			\
 	    ? ((__rcu_cas_init() > 0)				\
 	       ? (_uatomic_##insn)				\
 	       : (compat_uatomic_##insn))			\