X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=lib%2Fringbuffer%2Ffrontend_internal.h;h=76bf20e79b1ae21ad217bc073c9e9f02a1709ee4;hb=152fe7fccd64295a24df5bef0c5281e5ea9cfa90;hp=dbebdeec86f3362c30d59642ef9ab6910ca94a28;hpb=0fdec6861bdb4095e7537667f10131577eb7b279;p=lttng-modules.git

diff --git a/lib/ringbuffer/frontend_internal.h b/lib/ringbuffer/frontend_internal.h
index dbebdeec..76bf20e7 100644
--- a/lib/ringbuffer/frontend_internal.h
+++ b/lib/ringbuffer/frontend_internal.h
@@ -28,10 +28,10 @@
  * See ring_buffer_frontend.c for more information on wait-free algorithms.
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
-#include "../../lib/prio_heap/lttng_prio_heap.h"	/* For per-CPU read-side iterator */
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
 
 /* Buffer offset macros */
 
@@ -159,6 +159,9 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
 extern
 void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
 
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+
 /* Buffer write helpers */
 
 static inline
@@ -290,17 +293,24 @@ int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *confi
 		     - (commit_count & chan->commit_count_mask) == 0);
 }
 
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= begin
+ * timestamp of the following subbuffers.
+ */
 static inline
 void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
-				   unsigned long idx)
+				   unsigned long idx,
+				   u64 tsc)
 {
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;
-	u64 tsc;
 
	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
@@ -346,7 +356,6 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
-			tsc = config->cb.ring_buffer_clock_read(chan);
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend, idx),
@@ -361,6 +370,12 @@ void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
							 buf,
							 idx));
 
+			/*
+			 * Increment the packet counter while we have exclusive
+			 * access.
+			 */
+			subbuffer_inc_packet_count(config, &buf->backend, idx);
+
			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
@@ -415,23 +430,20 @@ void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *c
					      struct channel *chan,
					      unsigned long idx,
					      unsigned long buf_offset,
-					      unsigned long commit_count,
-					      size_t slot_size)
+					      unsigned long commit_count)
 {
-	unsigned long offset, commit_seq_old;
+	unsigned long commit_seq_old;
 
	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;
 
-	offset = buf_offset + slot_size;
-
	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
-	if (unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;
 
	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
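
Note on the new tsc parameter (hunk @@ -290,17 +293,24 @@): the timestamp is no longer read at delivery time but inside the reserve/switch space-reservation loop, so it is ordered consistently with the buffer offsets it describes. The fragment below is a hypothetical caller-side sketch, not the upstream reserve path; v_read(), v_cmpxchg(), config->cb.ring_buffer_clock_read() and lib_ring_buffer_check_deliver() are the real lttng-modules helpers, while slot_size, offset_end, endidx and commit_count stand in for values the real code computes.

	u64 tsc;
	unsigned long offset, offset_end;

	do {
		offset = v_read(config, &buf->offset);
		/*
		 * Clock read inside the reservation retry loop: a
		 * reservation that wins the cmpxchg below carries a
		 * timestamp consistent with its position in the buffer.
		 */
		tsc = config->cb.ring_buffer_clock_read(chan);
		offset_end = offset + slot_size; /* sub-buffer switch handling elided */
	} while (v_cmpxchg(config, &buf->offset, offset, offset_end) != offset);

	/* At commit time, the same tsc is handed down unchanged: */
	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, tsc);

Because every record of sub-buffer N reserves space (and reads its timestamp) before any record of sub-buffer N+1 can win the cmpxchg, the end timestamp of N cannot exceed the begin timestamp of N+1, which is the invariant the added comment states.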
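
Note on the lib_ring_buffer_write_commit_counter change (hunk @@ -415,23 +430,20 @@): callers now pass the offset at the end of the committed slot directly as buf_offset, so the helper no longer adds slot_size itself and the consistency test becomes subbuf_offset(buf_offset - commit_count, chan). The standalone userspace toy below (hypothetical, not lttng code; build with gcc toy.c && ./a.out) only works through the arithmetic: when every byte up to buf_offset has been committed, buf_offset and commit_count agree modulo the sub-buffer size, so their difference has a zero sub-buffer offset and the commit_seq counter may safely advance.

	#include <stdio.h>

	#define SUBBUF_SIZE 4096UL	/* power of two, as the ring buffer requires */

	/* Offset within a sub-buffer, mirroring the real subbuf_offset(). */
	static unsigned long subbuf_offset(unsigned long offset)
	{
		return offset & (SUBBUF_SIZE - 1);
	}

	int main(void)
	{
		/* Two sub-buffers: this record ends 640 bytes into the
		 * second pass over slot 0 of the buffer. */
		unsigned long buf_offset = 2 * SUBBUF_SIZE + 640;
		/* commit_count for the slot: one completed pass plus the
		 * bytes committed so far in the current pass. */
		unsigned long commit_count = SUBBUF_SIZE + 640;

		/* All prior records committed: equal modulo SUBBUF_SIZE. */
		if (subbuf_offset(buf_offset - commit_count) == 0)
			printf("consistent, commit_seq can advance to %lu\n",
			       commit_count);

		/* A 64-byte commit still pending: the test rejects the update. */
		commit_count -= 64;
		if (subbuf_offset(buf_offset - commit_count) != 0)
			printf("%lu bytes uncommitted, skip the seq update\n",
			       subbuf_offset(buf_offset - commit_count));
		return 0;
	}

This is why the "buffer full/empty mismatch" mentioned in the comment does not matter: the comparison is taken modulo the sub-buffer size, and a fully uncommitted sub-buffer can never be confused with a fully committed one because the offset compared is never zero.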