X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=libringbuffer%2Ffrontend_internal.h;h=6a1d3a6cb523fd36cf9e69c7d28fb1c4852872e9;hb=b58d53c07101121581b372c763f36972bd3bf79a;hp=37e046bc2d40d557cd3830fa46b171c57a0dc4c2;hpb=4931a13e87ddbbabe4911e622e78c85b2197ecbf;p=lttng-ust.git

diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 37e046bc..6a1d3a6c 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -16,10 +16,12 @@
  * Dual LGPL v2.1/GPL v2 license.
  */
 
+#include <urcu/compiler.h>
+
 #include "config.h"
 #include "backend_types.h"
 #include "frontend_types.h"
-#include "../lib_prio_heap/lttng_prio_heap.h"	/* For per-CPU read-side iterator */
+#include "shm.h"
 
 /* Buffer offset macros */
 
@@ -81,7 +83,7 @@ unsigned long subbuf_index(unsigned long offset, struct channel *chan)
  * last_tsc atomically.
  */
 
-#if (BITS_PER_LONG == 32)
+#if (CAA_BITS_PER_LONG == 32)
 static inline
 void save_last_tsc(const struct lib_ring_buffer_config *config,
 		   struct lib_ring_buffer *buf, u64 tsc)
@@ -154,7 +156,7 @@ void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
 	unsigned long consumed_old, consumed_new;
 
 	do {
-		consumed_old = atomic_long_read(&buf->consumed);
+		consumed_old = uatomic_read(&buf->consumed);
 		/*
 		 * If buffer is in overwrite mode, push the reader consumed
 		 * count if the write position has reached it and we are not
@@ -170,7 +172,7 @@ void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
 			consumed_new = subbuf_align(consumed_old, chan);
 		else
 			return;
-	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
+	} while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
 					      consumed_new) != consumed_old));
 }
 
@@ -181,7 +183,7 @@ void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *c
 			     unsigned long idx)
 {
 	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-		v_set(config, &buf->commit_hot[idx].seq, commit_count);
+		v_set(config, &shmp(buf->commit_hot)[idx].seq, commit_count);
 }
 
 static inline
@@ -191,9 +193,9 @@ int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
 {
 	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
 
-	consumed_old = atomic_long_read(&buf->consumed);
+	consumed_old = uatomic_read(&buf->consumed);
 	consumed_idx = subbuf_index(consumed_old, chan);
-	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+	commit_count = v_read(config, &shmp(buf->commit_cold)[consumed_idx].cc_sb);
 	/*
 	 * No memory barrier here, since we are only interested
 	 * in a statistically correct polling result. The next poll will
@@ -268,7 +270,7 @@ int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *confi
 	do {
 		offset = v_read(config, &buf->offset);
 		idx = subbuf_index(offset, chan);
-		commit_count = v_read(config, &buf->commit_hot[idx].cc);
+		commit_count = v_read(config, &shmp(buf->commit_hot)[idx].cc);
 	} while (offset != v_read(config, &buf->offset));
 
 	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
@@ -316,7 +318,7 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 	 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
 	 * This guarantees that old_commit_count + 1 != commit_count.
 	 */
-	if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
+	if (likely(v_cmpxchg(config, &shmp(buf->commit_cold)[idx].cc_sb,
 			     old_commit_count, old_commit_count + 1)
 		   == old_commit_count)) {
 		/*
@@ -354,9 +356,9 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 		 * respect to writers coming into the subbuffer after
 		 * wrap around, and also order wrt concurrent readers.
 		 */
-		smp_mb();
+		cmm_smp_mb();
 		/* End of exclusive subbuffer access */
-		v_set(config, &buf->commit_cold[idx].cc_sb,
+		v_set(config, &shmp(buf->commit_cold)[idx].cc_sb,
 		      commit_count);
 		lib_ring_buffer_vmcore_check_deliver(config, buf,
 						     commit_count, idx);
@@ -365,10 +367,10 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
 		 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
 		 */
 		if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-		    && atomic_long_read(&buf->active_readers)
+		    && uatomic_read(&buf->active_readers)
 		    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-			wake_up_interruptible(&buf->read_wait);
-			wake_up_interruptible(&chan->read_wait);
+			//wake_up_interruptible(&buf->read_wait);
+			//wake_up_interruptible(&chan->read_wait);
 		}
 
 	}
@@ -408,17 +410,18 @@ void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *c
 	if (unlikely(subbuf_offset(offset - commit_count, chan)))
 		return;
 
-	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
+	commit_seq_old = v_read(config, &shmp(buf->commit_hot)[idx].seq);
 	while ((long) (commit_seq_old - commit_count) < 0)
-		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
+		commit_seq_old = v_cmpxchg(config, &shmp(buf->commit_hot)[idx].seq,
 					   commit_seq_old, commit_count);
 }
 
 extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-				  struct channel_backend *chanb, int cpu);
+				  struct channel_backend *chanb, int cpu,
+				  struct shm_header *shm_header);
 extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
 
 /* Keep track of trap nesting inside ring buffer code */
-DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
+extern __thread unsigned int lib_ring_buffer_nesting;
 
 #endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */