#include "shm_internal.h"
#include "vatomic.h"
+#define RB_BACKEND_PAGES_PADDING 16
struct lttng_ust_lib_ring_buffer_backend_pages {
unsigned long mmap_offset; /* offset of the subbuffer in mmap */
union v_atomic records_commit; /* current records committed count */
union v_atomic records_unread; /* records to read */
unsigned long data_size; /* Amount of data to read from subbuf */
DECLARE_SHMP(char, p); /* Backing memory map */
+ char padding[RB_BACKEND_PAGES_PADDING];
};
/*
 * Writer-side indirection: one shared-memory pointer to the backend pages
 * currently attached to this sub-buffer slot.
 */
struct lttng_ust_lib_ring_buffer_backend_subbuffer {
DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages, shmp);
};
+#define RB_BACKEND_RING_BUFFER_PADDING 64
struct lttng_ust_lib_ring_buffer_backend {
/* Array of ring_buffer_backend_subbuffer for writer */
DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
int cpu; /* This buffer's cpu. -1 if global. */
union v_atomic records_read; /* Number of records read */
unsigned int allocated:1; /* is buffer allocated ? */
+ char padding[RB_BACKEND_RING_BUFFER_PADDING];
};
/* Shared-memory reference to one per-cpu ring buffer of a channel. */
struct lttng_ust_lib_ring_buffer_shmp {
DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, shmp); /* Channel per-cpu buffers */
};
+#define RB_BACKEND_CHANNEL_PADDING 64
struct channel_backend {
unsigned long buf_size; /* Size of the buffer */
unsigned long subbuf_size; /* Sub-buffer size */
DECLARE_SHMP(void *, priv_data);/* Client-specific information */
struct lttng_ust_lib_ring_buffer_config config; /* Ring buffer configuration */
char name[NAME_MAX]; /* Channel name */
+ char padding[RB_BACKEND_CHANNEL_PADDING];
struct lttng_ust_lib_ring_buffer_shmp buf[];
};
/*
 * Sub-buffer switch modes. NOTE(review): semantics not visible here —
 * presumably SWITCH_ACTIVE switches a sub-buffer still being written and
 * SWITCH_FLUSH forces out the current one; confirm against callers.
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
/* channel: collection of per-cpu ring buffers. */
+#define RB_CHANNEL_PADDING 64
struct channel {
int record_disabled;
unsigned long commit_count_mask; /*
* be last member.
*/
struct channel_backend backend; /* Associated backend */
+ char padding[RB_CHANNEL_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* Per-subbuffer commit counters used on the hot path */
+#define RB_COMMIT_COUNT_HOT_PADDING 16
struct commit_counters_hot {
union v_atomic cc; /* Commit counter */
union v_atomic seq; /* Consecutive commits */
+ char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* Per-subbuffer commit counters used only on cold paths */
+#define RB_COMMIT_COUNT_COLD_PADDING 24
struct commit_counters_cold {
union v_atomic cc_sb; /* Incremented _once_ at sb switch */
+ char padding[RB_COMMIT_COUNT_COLD_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* ring buffer state */
+#define RB_RING_BUFFER_PADDING 64
struct lttng_ust_lib_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
/* shmp pointer to self */
DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
+ char padding[RB_RING_BUFFER_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
static inline