Update to use caa_likely/caa_unlikely from urcu 0.6.6

author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Thu, 3 Nov 2011 15:37:30 +0000 (11:37 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Thu, 3 Nov 2011 15:37:30 +0000 (11:37 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
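
liburcu 0.6 moved its branch-prediction hints behind the namespace-clean
caa_ ("concurrent architecture abstraction") prefix, so that liburcu headers
no longer collide with applications defining their own likely()/unlikely().
This commit switches every UST fast path over to the prefixed names. As a
minimal sketch, the new macros in urcu/compiler.h amount to the same
__builtin_expect() wrappers that include/ust/core.h used to define locally
(see the core.h hunk below):

#define caa_likely(x)	__builtin_expect(!!(x), 1)
#define caa_unlikely(x)	__builtin_expect(!!(x), 0)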
19 files changed:
README
configure.ac
include/ust/core.h
include/ust/lttng-tracepoint-event.h
include/ust/tracepoint.h
include/ust/usterr-signal-safe.h
include/usterr.h
libringbuffer/backend_internal.h
libringbuffer/frontend_api.h
libringbuffer/frontend_internal.h
libringbuffer/frontend_types.h
libringbuffer/ring_buffer_backend.c
libringbuffer/ring_buffer_frontend.c
libringbuffer/shm.h
libringbuffer/smp.h
libust/ltt-ring-buffer-client.h
libust/lttng-context-procname.c
libust/lttng-context-vpid.c
libust/lttng-context-vtid.c

diff --git a/README b/README
index 901e17514c85d06fa81b4e4280fefa905a2e28b1..2d9c5fc27659fd43d8cc8d95586e0b1103126cfe 100644
--- a/README
+++ b/README
@@ -16,7 +16,7 @@ PREREQUISITES:
   - liburcu
     Userspace RCU library, by Mathieu Desnoyers and Paul E. McKenney
 
-    -> This release depends on liburcu v0.6
+    -> This release depends on liburcu v0.6.6
 
       * Debian/Ubuntu package: liburcu-dev
       * Website:  http://lttng.org/urcu
diff --git a/configure.ac b/configure.ac
index 10e1af443a4b61bdbf2786d9e458bbe366aecdd2..856894b2ca166c6f1e591179acc0d3258b82646d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -59,6 +59,27 @@ CFLAGS="-Wall $CFLAGS"
 AC_CHECK_HEADERS([urcu-bp.h], [], [AC_MSG_ERROR([Cannot find [URCU] headers (urcu-bp.h). Use [CFLAGS]=-Idir to specify their location.
 This error can also occur when the liburcu package's configure script has not been run.])])
 
+AC_MSG_CHECKING([caa_likely()])
+AC_TRY_COMPILE(
+[
+#include <urcu/compiler.h>
+],
+[
+void fct(void)
+{
+       if (caa_likely(1)) {
+       }
+}
+],
+[
+       AC_MSG_RESULT([yes])
+],
+[
+       AC_MSG_RESULT([no])
+       AC_MSG_ERROR([Please upgrade your version of liburcu to 0.6.6 or better])
+]
+)
+
 # urcu - check that URCU lib is available to compilation
 AC_CHECK_LIB([urcu-bp], [synchronize_rcu_bp], [], [AC_MSG_ERROR([Cannot find liburcu-bp lib. Use [LDFLAGS]=-Ldir to specify its location.])])
 
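
A note on the check above: AC_TRY_COMPILE (long since superseded by
AC_COMPILE_IFELSE in current Autoconf) splices its two arguments into a
throwaway test program, roughly the flattened sketch below; if
urcu/compiler.h does not provide caa_likely(), compilation fails and
configure aborts with the upgrade message.

/* Approximation of the generated test program (the macro actually nests
 * the body inside main(); flattened here for readability). */
#include <urcu/compiler.h>

void fct(void)
{
	if (caa_likely(1)) {
	}
}

int main(void)
{
	return 0;
}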
diff --git a/include/ust/core.h b/include/ust/core.h
index 10e864f18b22a25493b5aa13f6faa3590768724c..4e75c515e708893db275adb7bbf699046d9dd429 100644
--- a/include/ust/core.h
+++ b/include/ust/core.h
@@ -23,9 +23,7 @@
 #include <sys/types.h>
 #include <ust/config.h>
 #include <urcu/arch.h>
-
-#define likely(x)      __builtin_expect(!!(x), 1)
-#define unlikely(x)    __builtin_expect(!!(x), 0)
+#include <urcu/compiler.h>
 
 /* ARRAYS */
 
@@ -45,7 +43,7 @@
 /* ERROR OPS */
 #define MAX_ERRNO      4095
 
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) caa_unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
 static inline void *ERR_PTR(long error)
 {
@@ -146,7 +144,7 @@ static __inline__ int ust_get_cpu(void)
        int cpu;
 
        cpu = sched_getcpu();
-       if (likely(cpu >= 0))
+       if (caa_likely(cpu >= 0))
                return cpu;
        /*
         * If getcpu(2) is not implemented in the Kernel use CPU 0 as fallback.
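
For context, IS_ERR_VALUE() above implements the Linux-kernel error-pointer
convention: the top MAX_ERRNO values of the address space encode negative
errno codes, so a single pointer return can carry either a valid address or
an error. A minimal sketch of the surrounding helpers (ERR_PTR() is in the
hunk; PTR_ERR()/IS_ERR() are the usual companions and are assumptions here):

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	caa_unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
	return (void *) error;			/* e.g. ERR_PTR(-ENOMEM) */
}

static inline long PTR_ERR(const void *ptr)	/* assumed companion */
{
	return (long) ptr;
}

static inline int IS_ERR(const void *ptr)	/* assumed companion */
{
	return IS_ERR_VALUE((unsigned long) ptr);
}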
diff --git a/include/ust/lttng-tracepoint-event.h b/include/ust/lttng-tracepoint-event.h
index 10fcbb905f0a3e38d463d0db9bb4c901c82d30b1..46d8b93374304e451f893a8c76a318cf55713bbe 100644
--- a/include/ust/lttng-tracepoint-event.h
+++ b/include/ust/lttng-tracepoint-event.h
@@ -449,11 +449,11 @@ static void __event_probe__##_name(void *__data, _proto)                \
                                                                              \
        if (0)                                                                \
                (void) __dynamic_len_idx;       /* don't warn if unused */    \
-       if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active)))              \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active)))          \
                return;                                                       \
-       if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled)))                      \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled)))                  \
                return;                                                       \
-       if (unlikely(!CMM_ACCESS_ONCE(__event->enabled)))                     \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled)))                 \
                return;                                                       \
        __event_len = __event_get_size__##_name(__dynamic_len, _args);        \
        __event_align = __event_get_align__##_name(_args);                    \
@@ -476,11 +476,11 @@ static void __event_probe__##_name(void *__data)                        \
        size_t __event_len, __event_align;                                    \
        int __ret;                                                            \
                                                                              \
-       if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active)))              \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active)))          \
                return;                                                       \
-       if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled)))                      \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled)))                  \
                return;                                                       \
-       if (unlikely(!CMM_ACCESS_ONCE(__event->enabled)))                     \
+       if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled)))                 \
                return;                                                       \
        __event_len = 0;                                                      \
        __event_align = 1;                                                    \
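
The three guards above pair caa_unlikely() with CMM_ACCESS_ONCE(), liburcu's
forced-single-access wrapper (the analogue of the kernel's ACCESS_ONCE()),
which is essentially:

#define CMM_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))

A hypothetical polling loop shows why the volatile access matters on a flag
that another thread may change:

extern int enabled;

void wait_until_disabled(void)
{
	/* Without the volatile access, the compiler could hoist the load
	 * out of the loop and never observe the update. */
	while (CMM_ACCESS_ONCE(enabled))
		;
}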
diff --git a/include/ust/tracepoint.h b/include/ust/tracepoint.h
index 9feb9d51d55a4bc74aee85d281966db126c78286..e39a3849bc8b928bc7aaff2e5d4f5192eb300a5b 100644
--- a/include/ust/tracepoint.h
+++ b/include/ust/tracepoint.h
@@ -77,7 +77,7 @@ struct tracepoint {
 
 #define __CHECK_TRACE(name, proto, args)                               \
        do {                                                            \
-               if (unlikely(__tracepoint_##name.state))                \
+               if (caa_unlikely(__tracepoint_##name.state))            \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TP_PROTO(proto), TP_ARGS(args));        \
        } while (0)
diff --git a/include/ust/usterr-signal-safe.h b/include/ust/usterr-signal-safe.h
index 71e7437a9c58dbb6a9d84bcfa4acb21598ac6cbb..998d8d8960d78ae8f039ce21d87ea2cb0827823e 100644
--- a/include/ust/usterr-signal-safe.h
+++ b/include/ust/usterr-signal-safe.h
@@ -147,12 +147,12 @@ static inline void __attribute__ ((format (printf, 1, 2)))
 
 #define BUG_ON(condition)                                      \
        do {                                                    \
-               if (unlikely(condition))                        \
+               if (caa_unlikely(condition))                    \
                        ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
        } while(0)
 #define WARN_ON(condition)                                     \
        do {                                                    \
-               if (unlikely(condition))                        \
+               if (caa_unlikely(condition))                    \
                        WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
        } while(0)
 #define WARN_ON_ONCE(condition) WARN_ON(condition)
diff --git a/include/usterr.h b/include/usterr.h
index cab65407dca9c0c5740e038008b884d9480ac3d3..5f839cb66206c8e5366f7d0996eaab5a6ceb17e3 100644
--- a/include/usterr.h
+++ b/include/usterr.h
@@ -107,12 +107,12 @@ static inline int ust_debug(void)
 
 #define BUG_ON(condition)                                      \
        do {                                                    \
-               if (unlikely(condition))                        \
+               if (caa_unlikely(condition))                    \
                        ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
        } while(0)
 #define WARN_ON(condition)                                     \
        do {                                                    \
-               if (unlikely(condition))                        \
+               if (caa_unlikely(condition))                    \
                        WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
        } while(0)
 #define WARN_ON_ONCE(condition) WARN_ON(condition)
diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index 16d8598525172188aa505627a06932e578b3fe52..c0721d15f2840a5c7b1d2319307c9b309c4acab2 100644
--- a/libringbuffer/backend_internal.h
+++ b/libringbuffer/backend_internal.h
@@ -318,7 +318,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
        id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
        for (;;) {
                /* This check is called on the fast path for each record. */
-               if (likely(!subbuffer_id_is_noref(config, id))) {
+               if (caa_likely(!subbuffer_id_is_noref(config, id))) {
                        /*
                         * Store after load dependency ordering the writes to
                         * the subbuffer after load and test of the noref flag
@@ -330,7 +330,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *
                new_id = id;
                subbuffer_id_clear_noref(config, &new_id);
                new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
-               if (likely(new_id == id))
+               if (caa_likely(new_id == id))
                        break;
                id = new_id;
        }
@@ -391,13 +391,13 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                 * following cmpxchg().
                 */
                old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
-               if (unlikely(!subbuffer_id_is_noref(config, old_id)))
+               if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
                        return -EAGAIN;
                /*
                 * Make sure the offset count we are expecting matches the one
                 * indicated by the writer.
                 */
-               if (unlikely(!subbuffer_id_compare_offset(config, old_id,
+               if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
                                                          consumed_count)))
                        return -EAGAIN;
                CHAN_WARN_ON(shmp(handle, bufb->chan),
@@ -406,7 +406,7 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
                                              consumed_count);
                new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
                                 bufb->buf_rsb.id);
-               if (unlikely(old_id != new_id))
+               if (caa_unlikely(old_id != new_id))
                        return -EAGAIN;
                bufb->buf_rsb.id = new_id;
        } else {
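
Both hunks above use the classic uatomic_cmpxchg() retry idiom: read the
current value, compute the desired one, and loop until the compare-and-swap
confirms no concurrent writer raced in between. A standalone sketch of the
pattern, with a hypothetical flag word standing in for the subbuffer id:

#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>

static void clear_flag(unsigned long *word, unsigned long flag)
{
	unsigned long old, seen;

	old = CMM_LOAD_SHARED(*word);
	for (;;) {
		unsigned long new = old & ~flag;

		seen = uatomic_cmpxchg(word, old, new);
		if (caa_likely(seen == old))
			break;		/* our update won the race */
		old = seen;		/* lost: retry with the fresh value */
	}
}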
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 31072b6b3895edae8d61bb86a2affc3f3caa1b92..0868a4bb6dc6a3bdcd5191c7b24ea2cf2476fc2c 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -46,7 +46,7 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
        nesting = ++lib_ring_buffer_nesting;    /* TLS */
        cmm_barrier();
 
-       if (unlikely(nesting > 4)) {
+       if (caa_unlikely(nesting > 4)) {
                WARN_ON_ONCE(1);
                lib_ring_buffer_nesting--;      /* TLS */
                rcu_read_unlock();
@@ -97,7 +97,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+       if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
        ctx->slot_size = record_header_size(config, chan, *o_begin,
@@ -105,7 +105,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
        ctx->slot_size +=
                lib_ring_buffer_align(*o_begin + ctx->slot_size,
                                      ctx->largest_align) + ctx->data_size;
-       if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+       if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
                     > chan->backend.subbuf_size))
                return 1;
 
@@ -115,7 +115,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
         */
        *o_end = *o_begin + ctx->slot_size;
 
-       if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+       if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -165,11 +165,11 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
        /*
         * Perform retryable operations.
         */
-       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+       if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
-       if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+       if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
                     != o_old))
                goto slow_path;
 
@@ -317,7 +317,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_c
         */
        save_last_tsc(config, buf, 0ULL);
 
-       if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+       if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
                   != end_offset))
                return -EPERM;
        else
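
The nesting check in lib_ring_buffer_get_cpu() above is a reentrancy guard:
a tracepoint can fire from a signal handler that interrupted another
tracepoint in the same thread. A condensed sketch of the scheme (counter
declaration assumed; the hunk only shows its use):

#include <urcu/compiler.h>

static __thread int lib_ring_buffer_nesting;	/* per-thread (TLS) */

static inline int nesting_enter(void)		/* hypothetical helper */
{
	int nesting;

	nesting = ++lib_ring_buffer_nesting;
	cmm_barrier();
	if (caa_unlikely(nesting > 4)) {
		/* Deeper than any plausible signal stack: back out. */
		lib_ring_buffer_nesting--;
		return -1;
	}
	return 0;
}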
diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 1b55cd5dc97365bfc774dc8cd27633cbaae5b3af..4b4135bb47339b3eca170736832d68d72ce4135b 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -107,7 +107,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
                return 0;
 
        tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-       if (unlikely(tsc_shifted
+       if (caa_unlikely(tsc_shifted
                     - (unsigned long)v_read(config, &buf->last_tsc)))
                return 1;
        else
@@ -131,7 +131,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
        if (config->tsc_bits == 0 || config->tsc_bits == 64)
                return 0;
 
-       if (unlikely((tsc - v_read(config, &buf->last_tsc))
+       if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
                     >> config->tsc_bits))
                return 1;
        else
@@ -167,13 +167,13 @@ void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
                 * write position sub-buffer index in the buffer being the one
                 * which will win this loop.
                 */
-               if (unlikely(subbuf_trunc(offset, chan)
+               if (caa_unlikely(subbuf_trunc(offset, chan)
                              - subbuf_trunc(consumed_old, chan)
                             >= chan->backend.buf_size))
                        consumed_new = subbuf_align(consumed_old, chan);
                else
                        return;
-       } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+       } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
                                              consumed_new) != consumed_old));
 }
 
@@ -296,7 +296,7 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
        u64 tsc;
 
        /* Check if all commits have been done */
-       if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+       if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
                     - (old_commit_count & chan->commit_count_mask) == 0)) {
                /*
                 * If we succeeded at updating cc_sb below, we are the subbuffer
@@ -324,7 +324,7 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
                 * The subbuffer size is least 2 bytes (minimum size: 1 page).
                 * This guarantees that old_commit_count + 1 != commit_count.
                 */
-               if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+               if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
                                         old_commit_count, old_commit_count + 1)
                           == old_commit_count)) {
                        /*
@@ -443,7 +443,7 @@ void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer
         * buffer full/empty mismatch because offset is never zero here
         * (subbuffer header and record headers have non-zero length).
         */
-       if (unlikely(subbuf_offset(offset - commit_count, chan)))
+       if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
                return;
 
        commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
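
The last_tsc_overflow() variants above decide whether a record may use a
compact timestamp: headers normally carry only the low tsc_bits of the
timestamp, so when the delta since the last recorded value does not fit in
that many bits, RING_BUFFER_RFLAG_FULL_TSC forces a full 64-bit timestamp.
A worked sketch with a hypothetical 27-bit field:

#include <stdint.h>

#define TSC_BITS	27	/* hypothetical compact-header width */

static int needs_full_tsc(uint64_t tsc, uint64_t last_tsc)
{
	/* Nonzero iff the delta overflows TSC_BITS, mirroring the
	 * (tsc - last_tsc) >> tsc_bits test above. */
	return !!((tsc - last_tsc) >> TSC_BITS);
}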
diff --git a/libringbuffer/frontend_types.h b/libringbuffer/frontend_types.h
index 120e1f4bbbe06b94cea844fb5b654b60c81e04a6..84593260c4f7b7ef941708d9fd0347881165efe1 100644
--- a/libringbuffer/frontend_types.h
+++ b/libringbuffer/frontend_types.h
@@ -125,7 +125,7 @@ void *channel_get_private(struct channel *chan)
 #define CHAN_WARN_ON(c, cond)                                          \
        ({                                                              \
                struct channel *__chan;                                 \
-               int _____ret = unlikely(cond);                          \
+               int _____ret = caa_unlikely(cond);                              \
                if (_____ret) {                                         \
                        if (__same_type(*(c), struct channel_backend))  \
                                __chan = caa_container_of((void *) (c), \
diff --git a/libringbuffer/ring_buffer_backend.c b/libringbuffer/ring_buffer_backend.c
index 466552debaf89c997a27a9a5679aceb52ce8f691..50cb1938a63ca9bf017887c43b72873c38e81354 100644
--- a/libringbuffer/ring_buffer_backend.c
+++ b/libringbuffer/ring_buffer_backend.c
@@ -46,7 +46,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
        align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
        set_shmp(bufb->array, zalloc_shm(shmobj,
                        sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
-       if (unlikely(!shmp(handle, bufb->array)))
+       if (caa_unlikely(!shmp(handle, bufb->array)))
                goto array_error;
 
        /*
@@ -56,7 +56,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
        align_shm(shmobj, PAGE_SIZE);
        set_shmp(bufb->memory_map, zalloc_shm(shmobj,
                        subbuf_size * num_subbuf_alloc));
-       if (unlikely(!shmp(handle, bufb->memory_map)))
+       if (caa_unlikely(!shmp(handle, bufb->memory_map)))
                goto memory_map_error;
 
        /* Allocate backend pages array elements */
@@ -74,7 +74,7 @@ int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_conf
        set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
                                sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
                                * num_subbuf));
-       if (unlikely(!shmp(handle, bufb->buf_wsb)))
+       if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
                goto free_array;
 
        for (i = 0; i < num_subbuf; i++)
@@ -378,7 +378,7 @@ size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size
        orig_len = len;
        offset &= chanb->buf_size - 1;
 
-       if (unlikely(!len))
+       if (caa_unlikely(!len))
                return 0;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index a344d415f1d1f459e933ebc7733cf5ebfc2d488a..82194bf82b9fd45af17dbcdb529455e20f5d183e 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -1212,7 +1212,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
         * quiescence guarantees for the fusion merge.
         */
        if (mode == SWITCH_FLUSH || off > 0) {
-               if (unlikely(off == 0)) {
+               if (caa_unlikely(off == 0)) {
                        /*
                         * The client does not save any header information.
                         * Don't switch empty subbuffer on finalize, because it
@@ -1323,7 +1323,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -1334,19 +1334,19 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan) +
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
                             offsets->size > chan->backend.subbuf_size)) {
                        offsets->switch_old_end = 1;    /* For offsets->old */
                        offsets->switch_new_start = 1;  /* For offsets->begin */
                }
        }
-       if (unlikely(offsets->switch_new_start)) {
+       if (caa_unlikely(offsets->switch_new_start)) {
                unsigned long sb_index;
 
                /*
                 * We are typically not filling the previous buffer completely.
                 */
-               if (likely(offsets->switch_old_end))
+               if (caa_likely(offsets->switch_old_end))
                        offsets->begin = subbuf_align(offsets->begin, chan);
                offsets->begin = offsets->begin
                                 + config->cb.subbuffer_header_size();
@@ -1358,9 +1358,9 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                  - ((unsigned long) v_read(config,
                                            &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
                     & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
+               if (caa_likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                       if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
                                subbuf_trunc(offsets->begin, chan)
                                 - subbuf_trunc((unsigned long)
                                     uatomic_read(&buf->consumed), chan)
@@ -1398,7 +1398,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan)
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan)
                             + offsets->size > chan->backend.subbuf_size)) {
                        /*
                         * Record too big for subbuffers, report error, don't
@@ -1420,7 +1420,7 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
        }
        offsets->end = offsets->begin + offsets->size;
 
-       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
@@ -1458,9 +1458,9 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        do {
                ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
                                                       ctx);
-               if (unlikely(ret))
+               if (caa_unlikely(ret))
                        return ret;
-       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+       } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
                                    offsets.end)
                          != offsets.old));
 
@@ -1487,7 +1487,7 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        /*
         * Switch old subbuffer if needed.
         */
-       if (unlikely(offsets.switch_old_end)) {
+       if (caa_unlikely(offsets.switch_old_end)) {
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan),
                                            handle);
@@ -1497,10 +1497,10 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
        /*
         * Populate new subbuffer.
         */
-       if (unlikely(offsets.switch_new_start))
+       if (caa_unlikely(offsets.switch_new_start))
                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-       if (unlikely(offsets.switch_new_end))
+       if (caa_unlikely(offsets.switch_new_end))
                lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
 
        ctx->slot_size = offsets.size;
diff --git a/libringbuffer/shm.h b/libringbuffer/shm.h
index 22d3accc1b5779e10c5bbfb969acffd5196809bb..6cac7295f2189012033dd084f03958e4743bffc3 100644
--- a/libringbuffer/shm.h
+++ b/libringbuffer/shm.h
@@ -31,13 +31,13 @@ char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
        size_t objindex, ref_offset;
 
        objindex = (size_t) ref->index;
-       if (unlikely(objindex >= table->allocated_len))
+       if (caa_unlikely(objindex >= table->allocated_len))
                return NULL;
        obj = &table->objects[objindex];
        ref_offset = (size_t) ref->offset;
        ref_offset += idx * elem_size;
        /* Check if part of the element returned would exceed the limits. */
-       if (unlikely(ref_offset + elem_size > obj->memory_map_size))
+       if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
                return NULL;
        return &obj->memory_map[ref_offset];
 }
@@ -84,7 +84,7 @@ int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
        size_t index;
 
        index = (size_t) ref->index;
-       if (unlikely(index >= table->allocated_len))
+       if (caa_unlikely(index >= table->allocated_len))
                return -EPERM;
        obj = &table->objects[index];
        return obj->wait_fd[1];
@@ -99,7 +99,7 @@ int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
        size_t index;
 
        index = (size_t) ref->index;
-       if (unlikely(index >= table->allocated_len))
+       if (caa_unlikely(index >= table->allocated_len))
                return -EPERM;
        obj = &table->objects[index];
        return obj->wait_fd[0];
@@ -114,7 +114,7 @@ int shm_get_object_data(struct lttng_ust_shm_handle *handle, struct shm_ref *ref
        size_t index;
 
        index = (size_t) ref->index;
-       if (unlikely(index >= table->allocated_len))
+       if (caa_unlikely(index >= table->allocated_len))
                return -EPERM;
        obj = &table->objects[index];
        *shm_fd = obj->shm_fd;
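
The repeated bounds checks above exist because a shm_ref is not a pointer
but an (object index, byte offset) pair: the ring buffer is mapped at
different addresses in the tracer and the consumer, and offsets arriving
from the other process are untrusted. A condensed sketch of the translation
(structure layout simplified; field names follow the hunks):

static char *deref(struct shm_object_table *table, struct shm_ref *ref,
		   size_t idx, size_t elem_size)
{
	struct shm_object *obj;
	size_t objindex = (size_t) ref->index, ref_offset;

	if (caa_unlikely(objindex >= table->allocated_len))
		return NULL;	/* stale or hostile object index */
	obj = &table->objects[objindex];
	ref_offset = (size_t) ref->offset + idx * elem_size;
	if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
		return NULL;	/* element would overrun the mapping */
	return &obj->memory_map[ref_offset];
}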
diff --git a/libringbuffer/smp.h b/libringbuffer/smp.h
index 3d138a9a890ce69afede6b5975a334556823be39..dd326d83f5a443b98e202269c7e8da90e361749a 100644
--- a/libringbuffer/smp.h
+++ b/libringbuffer/smp.h
@@ -39,7 +39,7 @@ int get_cpu(void)
        int cpu;
 
        cpu = sched_getcpu();
-       if (likely(cpu >= 0))
+       if (caa_likely(cpu >= 0))
                return cpu;
        /*
         * If getcpu(2) is not implemented in the Kernel use CPU 0 as fallback.
diff --git a/libust/ltt-ring-buffer-client.h b/libust/ltt-ring-buffer-client.h
index 6361b9edeb912e0a8f543483e6c67e30d527328c..6c6433cd1770725b330e781ab1aadee224a884c4 100644
--- a/libust/ltt-ring-buffer-client.h
+++ b/libust/ltt-ring-buffer-client.h
@@ -59,7 +59,7 @@ size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
        int i;
        size_t orig_offset = offset;
 
-       if (likely(!ctx))
+       if (caa_likely(!ctx))
                return 0;
        for (i = 0; i < ctx->nr_fields; i++)
                offset += ctx->fields[i].get_size(offset);
@@ -73,7 +73,7 @@ void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
 {
        int i;
 
-       if (likely(!ctx))
+       if (caa_likely(!ctx))
                return;
        for (i = 0; i < ctx->nr_fields; i++)
                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
@@ -169,7 +169,7 @@ void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *confi
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        struct ltt_event *event = ctx->priv;
 
-       if (unlikely(ctx->rflags))
+       if (caa_unlikely(ctx->rflags))
                goto slow_path;
 
        switch (ltt_chan->header_type) {
diff --git a/libust/lttng-context-procname.c b/libust/lttng-context-procname.c
index ecb001d9825d3ba85971d7dd869245409db7d08a..c1f1ced95e3ba795579eb7d0b51b7ec8b470fb9e 100644
--- a/libust/lttng-context-procname.c
+++ b/libust/lttng-context-procname.c
@@ -28,7 +28,7 @@ char *wrapper_getprocname(void)
 {
        int ret;
 
-       if (unlikely(!cached_procname[0])) {
+       if (caa_unlikely(!cached_procname[0])) {
                ret = prctl(PR_GET_NAME, (unsigned long) cached_procname,
                        0, 0, 0);
                assert(!ret);
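
wrapper_getprocname() pays for one prctl(2) call on first use and serves
every later event from the cache, so the fast path reduces to the
caa_unlikely() branch. A self-contained sketch (buffer size is an
assumption; PR_GET_NAME copies at most 16 bytes, NUL included):

#include <assert.h>
#include <sys/prctl.h>
#include <urcu/compiler.h>

static char cached_procname[17];	/* assumed size; >= 16 needed */

static char *get_procname(void)		/* hypothetical standalone form */
{
	if (caa_unlikely(!cached_procname[0])) {
		int ret = prctl(PR_GET_NAME,
				(unsigned long) cached_procname, 0, 0, 0);
		assert(!ret);
	}
	return cached_procname;
}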
diff --git a/libust/lttng-context-vpid.c b/libust/lttng-context-vpid.c
index 4c73e174e7aa2bf48af60c09b18002372e004584..1fc8aadb036976a3bb20cb438bb9972390f01940 100644
--- a/libust/lttng-context-vpid.c
+++ b/libust/lttng-context-vpid.c
@@ -33,7 +33,7 @@ static pid_t cached_vpid;
 static inline
 pid_t wrapper_getpid(void)
 {
-       if (unlikely(!cached_vpid))
+       if (caa_unlikely(!cached_vpid))
                cached_vpid = getpid();
        return cached_vpid;
 }
diff --git a/libust/lttng-context-vtid.c b/libust/lttng-context-vtid.c
index e03b0a0f1ad42ae71b58aacdf73cd72c1352356c..25e2febf15bf5352adf0508069ac3d2fdb41435b 100644
--- a/libust/lttng-context-vtid.c
+++ b/libust/lttng-context-vtid.c
@@ -63,7 +63,7 @@ void vtid_record(struct lttng_ctx_field *field,
                 struct lttng_ust_lib_ring_buffer_ctx *ctx,
                 struct ltt_channel *chan)
 {
-       if (unlikely(!cached_vtid))
+       if (caa_unlikely(!cached_vtid))
                cached_vtid = gettid();
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(cached_vtid));
        chan->ops->event_write(ctx, &cached_vtid, sizeof(cached_vtid));
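
The vpid and vtid contexts use the same cache-on-first-use pattern as
procname: fetch the id once, then serve every later event with a predictable
branch instead of a system call. Such caches do need to be reset in the
child after fork() (and after clone() for the tid), or they would report the
parent's ids; the reset helper below is a hypothetical illustration of that:

#include <unistd.h>
#include <urcu/compiler.h>

static pid_t cached_vpid;

static inline pid_t wrapper_getpid(void)	/* as in the vpid hunk */
{
	if (caa_unlikely(!cached_vpid))
		cached_vpid = getpid();
	return cached_vpid;
}

/* Hypothetical helper: run from a pthread_atfork() child handler. */
static void reset_cached_vpid(void)
{
	cached_vpid = 0;
}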