- liburcu
Userspace RCU library, by Mathieu Desnoyers and Paul E. McKenney
- -> This release depends on liburcu v0.6
+ -> This release depends on liburcu v0.6.6
* Debian/Ubuntu package: liburcu-dev
* Website: http://lttng.org/urcu
AC_CHECK_HEADERS([urcu-bp.h], [], [AC_MSG_ERROR([Cannot find [URCU] headers (urcu-bp.h). Use [CFLAGS]=-Idir to specify their location.
This error can also occur when the liburcu package's configure script has not been run.])])
+AC_MSG_CHECKING([caa_likely()])
+AC_TRY_COMPILE(
+[
+#include <urcu/compiler.h>
+],
+[
+void fct(void)
+{
+ if (caa_likely(1)) {
+ }
+}
+],
+[
+ AC_MSG_RESULT([yes])
+],
+[
+ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([Please upgrade your version of liburcu to 0.6.6 or better])
+]
+)
+
# urcu - check that the URCU library is available for compilation
AC_CHECK_LIB([urcu-bp], [synchronize_rcu_bp], [], [AC_MSG_ERROR([Cannot find liburcu-bp lib. Use [LDFLAGS]=-Ldir to specify its location.])])
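The -Idir / -Ldir hints in the error messages above cover liburcu installs outside the default prefix. The caa_likely() probe added to configure.ac amounts to compiling a tiny program such as the following against the installed headers; this stand-alone version is only an illustrative sketch (not part of the tree), but if it does not compile, the installed liburcu predates 0.6.6 and lacks the caa_ branch hints.

/* Illustrative stand-alone equivalent of the configure probe above;
 * not part of the tree. Fails to compile against liburcu < 0.6.6,
 * which does not provide caa_likely() in <urcu/compiler.h>. */
#include <urcu/compiler.h>

int main(void)
{
	if (caa_likely(1))
		return 0;
	return 1;
}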
#include <sys/types.h>
#include <ust/config.h>
#include <urcu/arch.h>
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+#include <urcu/compiler.h>
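The local likely()/unlikely() wrappers dropped above are superseded by the caa_-prefixed hints exported by <urcu/compiler.h>. Judging from the removed definitions, the liburcu versions are the same __builtin_expect() wrappers under new names (sketch below; see the liburcu headers for the authoritative definitions), which is why the remainder of this patch is a mechanical rename of every call site.

/* Sketch of the replacement hints, inferred from the local definitions
 * removed above; the real ones live in liburcu's <urcu/compiler.h>. */
#define caa_likely(x)	__builtin_expect(!!(x), 1)
#define caa_unlikely(x)	__builtin_expect(!!(x), 0)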
/* ARRAYS */
/* ERROR OPS */
#define MAX_ERRNO 4095
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) caa_unlikely((x) >= (unsigned long)-MAX_ERRNO)
static inline void *ERR_PTR(long error)
{
int cpu;
cpu = sched_getcpu();
- if (likely(cpu >= 0))
+ if (caa_likely(cpu >= 0))
return cpu;
/*
* If getcpu(2) is not implemented in the kernel, use CPU 0 as a fallback.
\
if (0) \
(void) __dynamic_len_idx; /* don't warn if unused */ \
- if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
return; \
- if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
return; \
__event_len = __event_get_size__##_name(__dynamic_len, _args); \
__event_align = __event_get_align__##_name(_args); \
size_t __event_len, __event_align; \
int __ret; \
\
- if (unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
return; \
- if (unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
return; \
__event_len = 0; \
__event_align = 1; \
#define __CHECK_TRACE(name, proto, args) \
do { \
- if (unlikely(__tracepoint_##name.state)) \
+ if (caa_unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(proto), TP_ARGS(args)); \
} while (0)
#define BUG_ON(condition) \
do { \
- if (unlikely(condition)) \
+ if (caa_unlikely(condition)) \
ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
} while(0)
#define WARN_ON(condition) \
do { \
- if (unlikely(condition)) \
+ if (caa_unlikely(condition)) \
WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
} while(0)
#define WARN_ON_ONCE(condition) WARN_ON(condition)
#define BUG_ON(condition) \
do { \
- if (unlikely(condition)) \
+ if (caa_unlikely(condition)) \
ERR("condition not respected (BUG) on line %s:%d", __FILE__, __LINE__); \
} while(0)
#define WARN_ON(condition) \
do { \
- if (unlikely(condition)) \
+ if (caa_unlikely(condition)) \
WARN("condition not respected on line %s:%d", __FILE__, __LINE__); \
} while(0)
#define WARN_ON_ONCE(condition) WARN_ON(condition)
id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
for (;;) {
/* This check is called on the fast path for each record. */
- if (likely(!subbuffer_id_is_noref(config, id))) {
+ if (caa_likely(!subbuffer_id_is_noref(config, id))) {
/*
* Store after load dependency ordering the writes to
* the subbuffer after load and test of the noref flag
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
- if (likely(new_id == id))
+ if (caa_likely(new_id == id))
break;
id = new_id;
}
* following cmpxchg().
*/
old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
- if (unlikely(!subbuffer_id_is_noref(config, old_id)))
+ if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
* Make sure the offset count we are expecting matches the one
* indicated by the writer.
*/
- if (unlikely(!subbuffer_id_compare_offset(config, old_id,
+ if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
CHAN_WARN_ON(shmp(handle, bufb->chan),
consumed_count);
new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
bufb->buf_rsb.id);
- if (unlikely(old_id != new_id))
+ if (caa_unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
nesting = ++lib_ring_buffer_nesting; /* TLS */
cmm_barrier();
- if (unlikely(nesting > 4)) {
+ if (caa_unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
lib_ring_buffer_nesting--; /* TLS */
rcu_read_unlock();
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+ if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
- if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
> chan->backend.subbuf_size))
return 1;
*/
*o_end = *o_begin + ctx->slot_size;
- if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+ if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
/*
* Perform retryable operations.
*/
- if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+ if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
*/
save_last_tsc(config, buf, 0ULL);
- if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
!= end_offset))
return -EPERM;
else
return 0;
tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (unlikely(tsc_shifted
+ if (caa_unlikely(tsc_shifted
- (unsigned long)v_read(config, &buf->last_tsc)))
return 1;
else
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
- if (unlikely((tsc - v_read(config, &buf->last_tsc))
+ if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
>> config->tsc_bits))
return 1;
else
* write position sub-buffer index in the buffer being the one
* which will win this loop.
*/
- if (unlikely(subbuf_trunc(offset, chan)
+ if (caa_unlikely(subbuf_trunc(offset, chan)
- subbuf_trunc(consumed_old, chan)
>= chan->backend.buf_size))
consumed_new = subbuf_align(consumed_old, chan);
else
return;
- } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+ } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
consumed_new) != consumed_old));
}
u64 tsc;
/* Check if all commits have been done */
- if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+ if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- (old_commit_count & chan->commit_count_mask) == 0)) {
/*
* If we succeeded at updating cc_sb below, we are the subbuffer
* The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
- if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+ if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
* buffer full/empty mismatch because offset is never zero here
* (subbuffer header and record headers have non-zero length).
*/
- if (unlikely(subbuf_offset(offset - commit_count, chan)))
+ if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
return;
commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
#define CHAN_WARN_ON(c, cond) \
({ \
struct channel *__chan; \
- int _____ret = unlikely(cond); \
+ int _____ret = caa_unlikely(cond); \
if (_____ret) { \
if (__same_type(*(c), struct channel_backend)) \
__chan = caa_container_of((void *) (c), \
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
set_shmp(bufb->array, zalloc_shm(shmobj,
sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
- if (unlikely(!shmp(handle, bufb->array)))
+ if (caa_unlikely(!shmp(handle, bufb->array)))
goto array_error;
/*
align_shm(shmobj, PAGE_SIZE);
set_shmp(bufb->memory_map, zalloc_shm(shmobj,
subbuf_size * num_subbuf_alloc));
- if (unlikely(!shmp(handle, bufb->memory_map)))
+ if (caa_unlikely(!shmp(handle, bufb->memory_map)))
goto memory_map_error;
/* Allocate backend pages array elements */
set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
* num_subbuf));
- if (unlikely(!shmp(handle, bufb->buf_wsb)))
+ if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
goto free_array;
for (i = 0; i < num_subbuf; i++)
orig_len = len;
offset &= chanb->buf_size - 1;
- if (unlikely(!len))
+ if (caa_unlikely(!len))
return 0;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
* quiescence guarantees for the fusion merge.
*/
if (mode == SWITCH_FLUSH || off > 0) {
- if (unlikely(off == 0)) {
+ if (caa_unlikely(off == 0)) {
/*
* The client does not save any header information.
* Don't switch empty subbuffer on finalize, because it
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan) +
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
offsets->size > chan->backend.subbuf_size)) {
offsets->switch_old_end = 1; /* For offsets->old */
offsets->switch_new_start = 1; /* For offsets->begin */
}
}
- if (unlikely(offsets->switch_new_start)) {
+ if (caa_unlikely(offsets->switch_new_start)) {
unsigned long sb_index;
/*
* We are typically not filling the previous buffer completely.
*/
- if (likely(offsets->switch_old_end))
+ if (caa_likely(offsets->switch_old_end))
offsets->begin = subbuf_align(offsets->begin, chan);
offsets->begin = offsets->begin
+ config->cb.subbuffer_header_size();
- ((unsigned long) v_read(config,
&shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
- if (likely(reserve_commit_diff == 0)) {
+ if (caa_likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
- if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
subbuf_trunc(offsets->begin, chan)
- subbuf_trunc((unsigned long)
uatomic_read(&buf->consumed), chan)
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan)
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan)
+ offsets->size > chan->backend.subbuf_size)) {
/*
* Record too big for subbuffers, report error, don't
}
offsets->end = offsets->begin + offsets->size;
- if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
ctx);
- if (unlikely(ret))
+ if (caa_unlikely(ret))
return ret;
- } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+ } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
offsets.end)
!= offsets.old));
/*
* Switch old subbuffer if needed.
*/
- if (unlikely(offsets.switch_old_end)) {
+ if (caa_unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan),
handle);
/*
* Populate new subbuffer.
*/
- if (unlikely(offsets.switch_new_start))
+ if (caa_unlikely(offsets.switch_new_start))
lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
- if (unlikely(offsets.switch_new_end))
+ if (caa_unlikely(offsets.switch_new_end))
lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
ctx->slot_size = offsets.size;
size_t objindex, ref_offset;
objindex = (size_t) ref->index;
- if (unlikely(objindex >= table->allocated_len))
+ if (caa_unlikely(objindex >= table->allocated_len))
return NULL;
obj = &table->objects[objindex];
ref_offset = (size_t) ref->offset;
ref_offset += idx * elem_size;
/* Check if part of the element returned would exceed the limits. */
- if (unlikely(ref_offset + elem_size > obj->memory_map_size))
+ if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
return NULL;
return &obj->memory_map[ref_offset];
}
size_t index;
index = (size_t) ref->index;
- if (unlikely(index >= table->allocated_len))
+ if (caa_unlikely(index >= table->allocated_len))
return -EPERM;
obj = &table->objects[index];
return obj->wait_fd[1];
size_t index;
index = (size_t) ref->index;
- if (unlikely(index >= table->allocated_len))
+ if (caa_unlikely(index >= table->allocated_len))
return -EPERM;
obj = &table->objects[index];
return obj->wait_fd[0];
size_t index;
index = (size_t) ref->index;
- if (unlikely(index >= table->allocated_len))
+ if (caa_unlikely(index >= table->allocated_len))
return -EPERM;
obj = &table->objects[index];
*shm_fd = obj->shm_fd;
int cpu;
cpu = sched_getcpu();
- if (likely(cpu >= 0))
+ if (caa_likely(cpu >= 0))
return cpu;
/*
* If getcpu(2) is not implemented in the kernel, use CPU 0 as a fallback.
int i;
size_t orig_offset = offset;
- if (likely(!ctx))
+ if (caa_likely(!ctx))
return 0;
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
{
int i;
- if (likely(!ctx))
+ if (caa_likely(!ctx))
return;
for (i = 0; i < ctx->nr_fields; i++)
ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
struct ltt_event *event = ctx->priv;
- if (unlikely(ctx->rflags))
+ if (caa_unlikely(ctx->rflags))
goto slow_path;
switch (ltt_chan->header_type) {
{
int ret;
- if (unlikely(!cached_procname[0])) {
+ if (caa_unlikely(!cached_procname[0])) {
ret = prctl(PR_GET_NAME, (unsigned long) cached_procname,
0, 0, 0);
assert(!ret);
static inline
pid_t wrapper_getpid(void)
{
- if (unlikely(!cached_vpid))
+ if (caa_unlikely(!cached_vpid))
cached_vpid = getpid();
return cached_vpid;
}
struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
- if (unlikely(!cached_vtid))
+ if (caa_unlikely(!cached_vtid))
cached_vtid = gettid();
lib_ring_buffer_align_ctx(ctx, lttng_alignof(cached_vtid));
chan->ops->event_write(ctx, &cached_vtid, sizeof(cached_vtid));