From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Date: Thu, 3 Nov 2011 15:37:30 +0000 (-0400)
Subject: Update to use caa_likely/caa_unlikely from urcu 0.6.6
X-Git-Tag: v1.9.1~149
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=b5a3dfa5923801ddeea22ce70758d1e61200eac2;p=lttng-ust.git

Update to use caa_likely/caa_unlikely from urcu 0.6.6

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---

diff --git a/README b/README
index 901e1751..2d9c5fc2 100644
--- a/README
+++ b/README
@@ -16,7 +16,7 @@ PREREQUISITES:
   - liburcu
     Userspace RCU library, by Mathieu Desnoyers and Paul E. McKenney
-    -> This release depends on liburcu v0.6
+    -> This release depends on liburcu v0.6.6
 
     * Debian/Ubuntu package: liburcu-dev
     * Website: http://lttng.org/urcu
diff --git a/configure.ac b/configure.ac
index 10e1af44..856894b2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -59,6 +59,27 @@ CFLAGS="-Wall $CFLAGS"
 AC_CHECK_HEADERS([urcu-bp.h], [], [AC_MSG_ERROR([Cannot find [URCU] headers (urcu-bp.h). Use [CFLAGS]=-Idir to specify their location.
 This error can also occur when the liburcu package's configure script has not been run.])])
 
+AC_MSG_CHECKING([caa_likely()])
+AC_TRY_COMPILE(
+[
+#include <urcu/compiler.h>
+],
+[
+void fct(void)
+{
+	if (caa_likely(1)) {
+	}
+}
+],
+[
+	AC_MSG_RESULT([yes])
+],
+[
+	AC_MSG_RESULT([no])
+	AC_MSG_ERROR([Please upgrade your version of liburcu to 0.6.6 or better])
+]
+)
+
 # urcu - check that URCU lib is available to compilation
 AC_CHECK_LIB([urcu-bp], [synchronize_rcu_bp], [], [AC_MSG_ERROR([Cannot find liburcu-bp lib. Use [LDFLAGS]=-Ldir to specify its location.])])
diff --git a/include/ust/core.h b/include/ust/core.h
index 10e864f1..4e75c515 100644
--- a/include/ust/core.h
+++ b/include/ust/core.h
@@ -23,9 +23,7 @@
 #include
 #include
 #include
-
-#define likely(x)	__builtin_expect(!!(x), 1)
-#define unlikely(x)	__builtin_expect(!!(x), 0)
+#include <urcu/compiler.h>
 
 /* ARRAYS */
@@ -45,7 +43,7 @@
 /* ERROR OPS */
 #define MAX_ERRNO	4095
 
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) caa_unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
 static inline void *ERR_PTR(long error)
 {
@@ -146,7 +144,7 @@ static __inline__ int ust_get_cpu(void)
 	int cpu;
 
 	cpu = sched_getcpu();
-	if (likely(cpu >= 0))
+	if (caa_likely(cpu >= 0))
 		return cpu;
 	/*
 	 * If getcpu(2) is not implemented in the Kernel use CPU 0 as fallback.
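Note: caa_likely() and caa_unlikely() come from liburcu's urcu/compiler.h and are drop-in replacements for the likely()/unlikely() macros removed from ust/core.h above. A minimal sketch of their definitions, mirroring the GCC-builtin form of the macros deleted in this commit (consult urcu/compiler.h in liburcu >= 0.6.6 for the authoritative definitions):

	/* Sketch of the liburcu <urcu/compiler.h> branch-prediction hints. */
	#define caa_likely(x)	__builtin_expect(!!(x), 1)
	#define caa_unlikely(x)	__builtin_expect(!!(x), 0)
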
diff --git a/include/ust/lttng-tracepoint-event.h b/include/ust/lttng-tracepoint-event.h
index 10fcbb90..46d8b933 100644
--- a/include/ust/lttng-tracepoint-event.h
+++ b/include/ust/lttng-tracepoint-event.h
@@ -449,11 +449,11 @@ static void __event_probe__##_name(void *__data, _proto) \
 	 \
 	if (0) \
 		(void) __dynamic_len_idx;	/* don't warn if unused */ \
-	if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
 		return; \
-	if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
 		return; \
-	if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
 		return; \
 	__event_len = __event_get_size__##_name(__dynamic_len, _args); \
 	__event_align = __event_get_align__##_name(_args); \
@@ -476,11 +476,11 @@ static void __event_probe__##_name(void *__data) \
 	size_t __event_len, __event_align; \
 	int __ret; \
 	 \
-	if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
 		return; \
-	if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
 		return; \
-	if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+	if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
 		return; \
 	__event_len = 0; \
 	__event_align = 1; \
diff --git a/include/ust/tracepoint.h b/include/ust/tracepoint.h
index 9feb9d51..e39a3849 100644
--- a/include/ust/tracepoint.h
+++ b/include/ust/tracepoint.h
@@ -77,7 +77,7 @@ struct tracepoint {
 
 #define __CHECK_TRACE(name, proto, args) \
 	do { \
-		if (unlikely(__tracepoint_##name.state)) \
+		if (caa_unlikely(__tracepoint_##name.state)) \
 			__DO_TRACE(&__tracepoint_##name, \
 				TP_PROTO(proto), TP_ARGS(args)); \
 	} while (0)
diff --git a/include/ust/usterr-signal-safe.h b/include/ust/usterr-signal-safe.h
index 71e7437a..998d8d89 100644
--- a/include/ust/usterr-signal-safe.h
+++ b/include/ust/usterr-signal-safe.h
@@ -147,12 +147,12 @@ static inline void __attribute__ ((format (printf, 1, 2)))
 
 #define BUG_ON(condition) \
 	do { \
-		if (unlikely(condition)) \
+		if (caa_unlikely(condition)) \
 			ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
 	} while(0)
 #define WARN_ON(condition) \
 	do { \
-		if (unlikely(condition)) \
+		if (caa_unlikely(condition)) \
 			WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
 	} while(0)
 #define WARN_ON_ONCE(condition) WARN_ON(condition)
diff --git a/include/usterr.h b/include/usterr.h
index cab65407..5f839cb6 100644
--- a/include/usterr.h
+++ b/include/usterr.h
@@ -107,12 +107,12 @@ static inline int ust_debug(void)
 
 #define BUG_ON(condition) \
 	do { \
-		if (unlikely(condition)) \
+		if (caa_unlikely(condition)) \
 			ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
 	} while(0)
 #define WARN_ON(condition) \
 	do { \
-		if (unlikely(condition)) \
+		if (caa_unlikely(condition)) \
 			WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
 	} while(0)
 #define WARN_ON_ONCE(condition) WARN_ON(condition)
diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index 16d85985..c0721d15 100644
--- a/libringbuffer/backend_internal.h
+++ b/libringbuffer/backend_internal.h
@@ -318,7 +318,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
 	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
 	for (;;) {
 		/* This check is called on the fast path for each record.
 		 */
-		if (likely(!subbuffer_id_is_noref(config, id))) {
+		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
 			/*
 			 * Store after load dependency ordering the writes to
 			 * the subbuffer after load and test of the noref flag
@@ -330,7 +330,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
 		new_id = id;
 		subbuffer_id_clear_noref(config, &new_id);
 		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
-		if (likely(new_id == id))
+		if (caa_likely(new_id == id))
 			break;
 		id = new_id;
 	}
@@ -391,13 +391,13 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
 		 * following cmpxchg().
 		 */
 		old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
-		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
+		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
 			return -EAGAIN;
 		/*
 		 * Make sure the offset count we are expecting matches the one
 		 * indicated by the writer.
 		 */
-		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
+		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
 							  consumed_count)))
 			return -EAGAIN;
 		CHAN_WARN_ON(shmp(handle, bufb->chan),
@@ -406,7 +406,7 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
 			     consumed_count);
 		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
 					 bufb->buf_rsb.id);
-		if (unlikely(old_id != new_id))
+		if (caa_unlikely(old_id != new_id))
 			return -EAGAIN;
 		bufb->buf_rsb.id = new_id;
 	} else {
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 31072b6b..0868a4bb 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -46,7 +46,7 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 	nesting = ++lib_ring_buffer_nesting;	/* TLS */
 	cmm_barrier();
 
-	if (unlikely(nesting > 4)) {
+	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		lib_ring_buffer_nesting--;	/* TLS */
 		rcu_read_unlock();
@@ -97,7 +97,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 	if (last_tsc_overflow(config, buf, ctx->tsc))
 		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
 		return 1;
 
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
@@ -105,7 +105,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 	ctx->slot_size += lib_ring_buffer_align(*o_begin + ctx->slot_size,
 						ctx->largest_align) + ctx->data_size;
-	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
 		     > chan->backend.subbuf_size))
 		return 1;
 
@@ -115,7 +115,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 	 */
 	*o_end = *o_begin + ctx->slot_size;
 
-	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
 		/*
 		 * The offset_end will fall at the very beginning of the next
 		 * subbuffer.
@@ -165,11 +165,11 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	/*
 	 * Perform retryable operations.
 	 */
-	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
 						 &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
-	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+	if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
 		     != o_old))
 		goto slow_path;
 
@@ -317,7 +317,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_c
 	 */
 	save_last_tsc(config, buf, 0ULL);
 
-	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
 		   != end_offset))
 		return -EPERM;
 	else
diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 1b55cd5d..4b4135bb 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -107,7 +107,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
 		return 0;
 
 	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-	if (unlikely(tsc_shifted
+	if (caa_unlikely(tsc_shifted
 		     - (unsigned long)v_read(config, &buf->last_tsc)))
 		return 1;
 	else
@@ -131,7 +131,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
 	if (config->tsc_bits == 0 || config->tsc_bits == 64)
 		return 0;
 
-	if (unlikely((tsc - v_read(config, &buf->last_tsc))
+	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
 		     >> config->tsc_bits))
 		return 1;
 	else
@@ -167,13 +167,13 @@ void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
 		 * write position sub-buffer index in the buffer being the one
 		 * which will win this loop.
 		 */
-		if (unlikely(subbuf_trunc(offset, chan)
+		if (caa_unlikely(subbuf_trunc(offset, chan)
 			      - subbuf_trunc(consumed_old, chan)
 			     >= chan->backend.buf_size))
 			consumed_new = subbuf_align(consumed_old, chan);
 		else
 			return;
-	} while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
 					  consumed_new) != consumed_old));
 }
 
@@ -296,7 +296,7 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 	u64 tsc;
 
 	/* Check if all commits have been done */
-	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
 		     - (old_commit_count & chan->commit_count_mask) == 0)) {
 		/*
 		 * If we succeeded at updating cc_sb below, we are the subbuffer
@@ -324,7 +324,7 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 		 * The subbuffer size is least 2 bytes (minimum size: 1 page).
 		 * This guarantees that old_commit_count + 1 != commit_count.
 		 */
-		if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+		if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
 				     old_commit_count, old_commit_count + 1)
 			   == old_commit_count)) {
 			/*
@@ -443,7 +443,7 @@ void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer
 	 * buffer full/empty mismatch because offset is never zero here
 	 * (subbuffer header and record headers have non-zero length).
 	 */
-	if (unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
 		return;
 
 	commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
diff --git a/libringbuffer/frontend_types.h b/libringbuffer/frontend_types.h
index 120e1f4b..84593260 100644
--- a/libringbuffer/frontend_types.h
+++ b/libringbuffer/frontend_types.h
@@ -125,7 +125,7 @@ void *channel_get_private(struct channel *chan)
 #define CHAN_WARN_ON(c, cond) \
 	({ \
 		struct channel *__chan; \
-		int _____ret = unlikely(cond); \
+		int _____ret = caa_unlikely(cond); \
 		if (_____ret) { \
 			if (__same_type(*(c), struct channel_backend)) \
 				__chan = caa_container_of((void *) (c), \
diff --git a/libringbuffer/ring_buffer_backend.c b/libringbuffer/ring_buffer_backend.c
index 466552de..50cb1938 100644
--- a/libringbuffer/ring_buffer_backend.c
+++ b/libringbuffer/ring_buffer_backend.c
@@ -46,7 +46,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
 	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
 	set_shmp(bufb->array, zalloc_shm(shmobj,
 			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
-	if (unlikely(!shmp(handle, bufb->array)))
+	if (caa_unlikely(!shmp(handle, bufb->array)))
 		goto array_error;
 
 	/*
@@ -56,7 +56,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
 	align_shm(shmobj, PAGE_SIZE);
 	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
 			subbuf_size * num_subbuf_alloc));
-	if (unlikely(!shmp(handle, bufb->memory_map)))
+	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
 		goto memory_map_error;
 
 	/* Allocate backend pages array elements */
@@ -74,7 +74,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
 	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
 			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
 			* num_subbuf));
-	if (unlikely(!shmp(handle, bufb->buf_wsb)))
+	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
 		goto free_array;
 
 	for (i = 0; i < num_subbuf; i++)
@@ -378,7 +378,7 @@ size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size
 	orig_len = len;
 	offset &= chanb->buf_size - 1;
 
-	if (unlikely(!len))
+	if (caa_unlikely(!len))
 		return 0;
 	id = bufb->buf_rsb.id;
 	sb_bindex = subbuffer_id_get_index(config, id);
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index a344d415..82194bf8 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -1212,7 +1212,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 	 * quiescence guarantees for the fusion merge.
 	 */
 	if (mode == SWITCH_FLUSH || off > 0) {
-		if (unlikely(off == 0)) {
+		if (caa_unlikely(off == 0)) {
 			/*
 			 * The client does not save any header information.
 			 * Don't switch empty subbuffer on finalize, because it
@@ -1323,7 +1323,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 	if (last_tsc_overflow(config, buf, ctx->tsc))
 		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+	if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
 		offsets->switch_new_start = 1;	/* For offsets->begin */
 	} else {
 		offsets->size = config->cb.record_header_size(config, chan,
@@ -1334,19 +1334,19 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 			lib_ring_buffer_align(offsets->begin + offsets->size,
 					      ctx->largest_align)
 			+ ctx->data_size;
-		if (unlikely(subbuf_offset(offsets->begin, chan) +
+		if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
 			     offsets->size > chan->backend.subbuf_size)) {
 			offsets->switch_old_end = 1;	/* For offsets->old */
 			offsets->switch_new_start = 1;	/* For offsets->begin */
 		}
 	}
-	if (unlikely(offsets->switch_new_start)) {
+	if (caa_unlikely(offsets->switch_new_start)) {
 		unsigned long sb_index;
 
 		/*
 		 * We are typically not filling the previous buffer completely.
 		 */
-		if (likely(offsets->switch_old_end))
+		if (caa_likely(offsets->switch_old_end))
 			offsets->begin = subbuf_align(offsets->begin, chan);
 		offsets->begin = offsets->begin
 				 + config->cb.subbuffer_header_size();
@@ -1358,9 +1358,9 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 				  - ((unsigned long) v_read(config,
 							    &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
 				     & chan->commit_count_mask);
-		if (likely(reserve_commit_diff == 0)) {
+		if (caa_likely(reserve_commit_diff == 0)) {
 			/* Next subbuffer not being written to. */
-			if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+			if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
 				     subbuf_trunc(offsets->begin, chan)
 				      - subbuf_trunc((unsigned long)
 						     uatomic_read(&buf->consumed), chan)
@@ -1398,7 +1398,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 			lib_ring_buffer_align(offsets->begin + offsets->size,
 					      ctx->largest_align)
 			+ ctx->data_size;
-		if (unlikely(subbuf_offset(offsets->begin, chan)
+		if (caa_unlikely(subbuf_offset(offsets->begin, chan)
 			     + offsets->size > chan->backend.subbuf_size)) {
 			/*
 			 * Record too big for subbuffers, report error, don't
@@ -1420,7 +1420,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
 	}
 	offsets->end = offsets->begin + offsets->size;
 
-	if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+	if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
 		/*
 		 * The offset_end will fall at the very beginning of the next
 		 * subbuffer.
@@ -1458,9 +1458,9 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 	do {
 		ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
 						       ctx);
-		if (unlikely(ret))
+		if (caa_unlikely(ret))
 			return ret;
-	} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+	} while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
 				    offsets.end)
 			  != offsets.old));
 
@@ -1487,7 +1487,7 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 	/*
 	 * Switch old subbuffer if needed.
 	 */
-	if (unlikely(offsets.switch_old_end)) {
+	if (caa_unlikely(offsets.switch_old_end)) {
 		lib_ring_buffer_clear_noref(config, &buf->backend,
 					    subbuf_index(offsets.old - 1, chan),
 					    handle);
@@ -1497,10 +1497,10 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 	/*
 	 * Populate new subbuffer.
 	 */
-	if (unlikely(offsets.switch_new_start))
+	if (caa_unlikely(offsets.switch_new_start))
 		lib_ring_buffer_switch_new_start(buf, chan, &offsets,
 						 ctx->tsc, handle);
 
-	if (unlikely(offsets.switch_new_end))
+	if (caa_unlikely(offsets.switch_new_end))
 		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc,
 					       handle);
 
 	ctx->slot_size = offsets.size;
diff --git a/libringbuffer/shm.h b/libringbuffer/shm.h
index 22d3accc..6cac7295 100644
--- a/libringbuffer/shm.h
+++ b/libringbuffer/shm.h
@@ -31,13 +31,13 @@ char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
 	size_t objindex, ref_offset;
 
 	objindex = (size_t) ref->index;
-	if (unlikely(objindex >= table->allocated_len))
+	if (caa_unlikely(objindex >= table->allocated_len))
 		return NULL;
 	obj = &table->objects[objindex];
 	ref_offset = (size_t) ref->offset;
 	ref_offset += idx * elem_size;
 	/* Check if part of the element returned would exceed the limits. */
-	if (unlikely(ref_offset + elem_size > obj->memory_map_size))
+	if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
 		return NULL;
 	return &obj->memory_map[ref_offset];
 }
@@ -84,7 +84,7 @@ int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
 	size_t index;
 
 	index = (size_t) ref->index;
-	if (unlikely(index >= table->allocated_len))
+	if (caa_unlikely(index >= table->allocated_len))
 		return -EPERM;
 	obj = &table->objects[index];
 	return obj->wait_fd[1];
@@ -99,7 +99,7 @@ int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
 	size_t index;
 
 	index = (size_t) ref->index;
-	if (unlikely(index >= table->allocated_len))
+	if (caa_unlikely(index >= table->allocated_len))
 		return -EPERM;
 	obj = &table->objects[index];
 	return obj->wait_fd[0];
@@ -114,7 +114,7 @@ int shm_get_object_data(struct lttng_ust_shm_handle *handle, struct shm_ref *ref
 	size_t index;
 
 	index = (size_t) ref->index;
-	if (unlikely(index >= table->allocated_len))
+	if (caa_unlikely(index >= table->allocated_len))
 		return -EPERM;
 	obj = &table->objects[index];
 	*shm_fd = obj->shm_fd;
diff --git a/libringbuffer/smp.h b/libringbuffer/smp.h
index 3d138a9a..dd326d83 100644
--- a/libringbuffer/smp.h
+++ b/libringbuffer/smp.h
@@ -39,7 +39,7 @@ int get_cpu(void)
 	int cpu;
 
 	cpu = sched_getcpu();
-	if (likely(cpu >= 0))
+	if (caa_likely(cpu >= 0))
 		return cpu;
 	/*
 	 * If getcpu(2) is not implemented in the Kernel use CPU 0 as fallback.
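Several of the hunks above annotate compare-and-swap retry loops, where the cmpxchg failure path is the cold branch. A minimal, self-contained sketch of that pattern built on liburcu's uatomic primitives (the counter variable and add_sample() function are hypothetical, for illustration only):

	#include <urcu/compiler.h>	/* caa_unlikely() */
	#include <urcu/uatomic.h>	/* uatomic_read(), uatomic_cmpxchg() */

	static unsigned long counter;	/* hypothetical shared counter */

	static void add_sample(unsigned long n)
	{
		unsigned long old, new;

		do {
			old = uatomic_read(&counter);
			new = old + n;
			/* Lost the race to another writer: retry (cold path). */
		} while (caa_unlikely(uatomic_cmpxchg(&counter, old, new) != old));
	}
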
diff --git a/libust/ltt-ring-buffer-client.h b/libust/ltt-ring-buffer-client.h
index 6361b9ed..6c6433cd 100644
--- a/libust/ltt-ring-buffer-client.h
+++ b/libust/ltt-ring-buffer-client.h
@@ -59,7 +59,7 @@ size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
 	int i;
 	size_t orig_offset = offset;
 
-	if (likely(!ctx))
+	if (caa_likely(!ctx))
 		return 0;
 	for (i = 0; i < ctx->nr_fields; i++)
 		offset += ctx->fields[i].get_size(offset);
@@ -73,7 +73,7 @@ void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
 {
 	int i;
 
-	if (likely(!ctx))
+	if (caa_likely(!ctx))
 		return;
 	for (i = 0; i < ctx->nr_fields; i++)
 		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
@@ -169,7 +169,7 @@ void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *confi
 	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
 	struct ltt_event *event = ctx->priv;
 
-	if (unlikely(ctx->rflags))
+	if (caa_unlikely(ctx->rflags))
 		goto slow_path;
 
 	switch (ltt_chan->header_type) {
diff --git a/libust/lttng-context-procname.c b/libust/lttng-context-procname.c
index ecb001d9..c1f1ced9 100644
--- a/libust/lttng-context-procname.c
+++ b/libust/lttng-context-procname.c
@@ -28,7 +28,7 @@ char *wrapper_getprocname(void)
 {
 	int ret;
 
-	if (unlikely(!cached_procname[0])) {
+	if (caa_unlikely(!cached_procname[0])) {
 		ret = prctl(PR_GET_NAME, (unsigned long) cached_procname,
 			    0, 0, 0);
 		assert(!ret);
diff --git a/libust/lttng-context-vpid.c b/libust/lttng-context-vpid.c
index 4c73e174..1fc8aadb 100644
--- a/libust/lttng-context-vpid.c
+++ b/libust/lttng-context-vpid.c
@@ -33,7 +33,7 @@ static pid_t cached_vpid;
 static inline
 pid_t wrapper_getpid(void)
 {
-	if (unlikely(!cached_vpid))
+	if (caa_unlikely(!cached_vpid))
 		cached_vpid = getpid();
 	return cached_vpid;
 }
diff --git a/libust/lttng-context-vtid.c b/libust/lttng-context-vtid.c
index e03b0a0f..25e2febf 100644
--- a/libust/lttng-context-vtid.c
+++ b/libust/lttng-context-vtid.c
@@ -63,7 +63,7 @@ void vtid_record(struct lttng_ctx_field *field,
 		 struct lttng_ust_lib_ring_buffer_ctx *ctx,
 		 struct ltt_channel *chan)
 {
-	if (unlikely(!cached_vtid))
+	if (caa_unlikely(!cached_vtid))
 		cached_vtid = gettid();
 	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cached_vtid));
 	chan->ops->event_write(ctx, &cached_vtid, sizeof(cached_vtid));
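With liburcu >= 0.6.6 installed, code built against these headers uses the renamed macros directly. A minimal usage sketch (check_fd() is a hypothetical helper, not part of this tree):

	#include <stdio.h>
	#include <urcu/compiler.h>

	/* Annotate the rarely-taken error path as the cold branch. */
	static int check_fd(int fd)
	{
		if (caa_unlikely(fd < 0)) {
			fprintf(stderr, "invalid file descriptor\n");
			return -1;
		}
		return 0;
	}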