* See ring_buffer_frontend.c for more information on wait-free algorithms.
*/
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
-#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include <lib/prio_heap/lttng_prio_heap.h> /* For per-CPU read-side iterator */
/* Buffer offset macros */
extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+
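A rough usage sketch for the two remote-switch entry points (an assumption about intended callers, not part of this patch): lib_ring_buffer_switch_remote() is taken here to switch only when the current subbuffer holds data, while the new _empty variant is assumed to force the switch even for an empty subbuffer, e.g. to emit an empty packet carrying fresh timestamps. lttng_example_flush and its flush_empty flag are hypothetical names, and the wrapper include path is assumed from the include changes above.

#include <linux/types.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Hypothetical flush helper, illustrative only. */
static inline
void lttng_example_flush(struct lib_ring_buffer *buf, bool flush_empty)
{
	if (flush_empty)
		/* Assumed: also switches a subbuffer containing no event data. */
		lib_ring_buffer_switch_remote_empty(buf);
	else
		lib_ring_buffer_switch_remote(buf);
}
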
/* Buffer write helpers */
static inline
- (commit_count & chan->commit_count_mask) == 0);
}
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= the begin
+ * timestamp of the following subbuffers.
+ */
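A simplified sketch of the calling pattern this comment describes (hypothetical helper and struct names; the real reserve and commit paths are not shown here): the clock is read once via config->cb.ring_buffer_clock_read() inside the space reservation loop, kept with the reservation, and the same value is later passed to lib_ring_buffer_check_deliver(). The wrapper include path below is an assumption.

#include <linux/types.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Hypothetical per-reservation state, illustrative only. */
struct example_reservation {
	unsigned long offset;	/* buffer offset used for the delivery check */
	unsigned long idx;	/* index of the subbuffer holding the slot */
	u64 tsc;		/* timestamp read during space reservation */
};

/* Read the clock once, inside the space reservation loop. */
static inline
void example_reserve_tsc(const struct lib_ring_buffer_config *config,
			 struct channel *chan,
			 struct example_reservation *res)
{
	res->tsc = config->cb.ring_buffer_clock_read(chan);
}

/* At commit time, reuse the reservation-time timestamp for delivery. */
static inline
void example_commit(const struct lib_ring_buffer_config *config,
		    struct lib_ring_buffer *buf,
		    struct channel *chan,
		    unsigned long commit_count,
		    struct example_reservation *res)
{
	lib_ring_buffer_check_deliver(config, buf, chan, res->offset,
				      commit_count, res->idx, res->tsc);
}
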
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
- unsigned long idx)
+ unsigned long idx,
+ u64 tsc)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
- u64 tsc;
/* Check if all commits have been done */
if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
* and any other writer trying to access this subbuffer
* in this state is required to drop records.
*/
- tsc = config->cb.ring_buffer_clock_read(chan);
v_add(config,
subbuffer_get_records_count(config,
&buf->backend, idx),
buf,
idx));
+ /*
+ * Increment the packet counter while we have exclusive
+ * access.
+ */
+ subbuffer_inc_packet_count(config, &buf->backend, idx);
+
/*
* Set noref flag and offset for this subbuffer id.
* Contains a memory barrier that ensures counter stores
struct channel *chan,
unsigned long idx,
unsigned long buf_offset,
- unsigned long commit_count,
- size_t slot_size)
+ unsigned long commit_count)
{
- unsigned long offset, commit_seq_old;
+ unsigned long commit_seq_old;
if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
return;
- offset = buf_offset + slot_size;
-
/*
* subbuf_offset includes commit_count_mask. We can simply
* compare the offsets within the subbuffer without caring about
* buffer full/empty mismatch because offset is never zero here
* (subbuffer header and record headers have non-zero length).
*/
- if (unlikely(subbuf_offset(offset - commit_count, chan)))
+ if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
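
Removing slot_size implies that callers now pass the end-of-record offset directly (what the removed "offset = buf_offset + slot_size" used to compute), keeping the comparison against commit_count unchanged. A hedged caller-side sketch follows: example_update_commit_seq, record_start and slot_size are hypothetical names, the leading config and buf parameters are assumed by analogy with lib_ring_buffer_check_deliver(), and the wrapper include path is an assumption.

#include <linux/types.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Hypothetical commit-side helper, illustrative only. */
static inline
void example_update_commit_seq(const struct lib_ring_buffer_config *config,
			       struct lib_ring_buffer *buf,
			       struct channel *chan,
			       unsigned long idx,
			       unsigned long record_start,
			       size_t slot_size,
			       unsigned long commit_count)
{
	/* End-of-record offset, now computed by the caller. */
	unsigned long offset_end = record_start + slot_size;

	lib_ring_buffer_write_commit_counter(config, buf, chan, idx,
					     offset_end, commit_count);
}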