#include <unistd.h>
#include <limits.h>
-#ifndef PAGE_SIZE /* Cygwin limits.h defines its own PAGE_SIZE */
-#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
+#ifdef __FreeBSD__
+#include <machine/param.h>
#endif
-#ifndef PAGE_MASK /* FreeBSD and macOS defines their own PAGE_MASK. */
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#ifdef _SC_PAGE_SIZE
+#define LTTNG_UST_PAGE_SIZE sysconf(_SC_PAGE_SIZE)
+#elif defined(PAGE_SIZE)
+#define LTTNG_UST_PAGE_SIZE PAGE_SIZE
+#else
+#error "Please add page size detection for your OS."
#endif
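+
+/*
+ * Note that on POSIX systems LTTNG_UST_PAGE_SIZE expands to a runtime
+ * sysconf(3) call rather than a compile-time constant; code that reads the
+ * value repeatedly should cache it in a local variable.
+ */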
-#define __ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
-#ifndef ALIGN /* FreeBSD and macOS defines their own ALIGN. */
-#define ALIGN(v, align) __ALIGN_MASK(v, (__typeof__(v)) (align) - 1)
-#endif
-#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+#define LTTNG_UST_PAGE_MASK (~(LTTNG_UST_PAGE_SIZE - 1))
+
+#define __LTTNG_UST_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
+#define LTTNG_UST_ALIGN(v, align) __LTTNG_UST_ALIGN_MASK(v, (__typeof__(v)) (align) - 1)
+#define LTTNG_UST_PAGE_ALIGN(addr) LTTNG_UST_ALIGN(addr, LTTNG_UST_PAGE_SIZE)
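+
+/*
+ * Illustrative values (assuming a 4 KiB page size):
+ *   LTTNG_UST_ALIGN(1000, 256) == 1024   (round up to a multiple of 256)
+ *   LTTNG_UST_PAGE_ALIGN(5000) == 8192   (round up to the next page boundary)
+ */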
/**
- * offset_align - Calculate the offset needed to align an object on its natural
- * alignment towards higher addresses.
+ * lttng_ust_offset_align - Calculate the offset needed to align an object on
+ * its natural alignment towards higher addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
*
* Returns the offset that must be added to align towards higher
* addresses.
*/
-#define offset_align(align_drift, alignment) \
+#define lttng_ust_offset_align(align_drift, alignment) \
({ \
LTTNG_BUILD_RUNTIME_BUG_ON((alignment) == 0 \
|| ((alignment) & ((alignment) - 1))); \
(((alignment) - (align_drift)) & ((alignment) - 1)); \
})
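+/*
+ * Example: lttng_ust_offset_align(13, 8) == 3, the padding to add so that
+ * 13 + 3 == 16 lands on the next 8-byte boundary.
+ */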
/**
- * offset_align_floor - Calculate the offset needed to align an object
- * on its natural alignment towards lower addresses.
+ * lttng_ust_offset_align_floor - Calculate the offset needed to align an
+ * object on its natural alignment towards lower addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
*
* Returns the offset that must be subtracted to align towards lower addresses.
*/
-#define offset_align_floor(align_drift, alignment) \
+#define lttng_ust_offset_align_floor(align_drift, alignment) \
({ \
LTTNG_BUILD_RUNTIME_BUG_ON((alignment) == 0 \
|| ((alignment) & ((alignment) - 1))); \
(((align_drift) - (alignment)) & ((alignment) - 1)); \
})
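+/*
+ * Example: lttng_ust_offset_align_floor(13, 8) == 5, the amount to subtract
+ * so that 13 - 5 == 8 lands on the previous 8-byte boundary.
+ */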
+/*
+ * Non-namespaced defines kept for backwards compatibility; introduced in
+ * 2.13, they should be removed in a future release.
+ */
+
+/* Cygwin limits.h defines its own PAGE_SIZE */
+#ifndef PAGE_SIZE
+#define PAGE_SIZE LTTNG_UST_PAGE_SIZE
+#endif
+
+/* FreeBSD and macOS define their own PAGE_MASK. */
+#ifndef PAGE_MASK
+#define PAGE_MASK LTTNG_UST_PAGE_MASK
+#endif
+
+/* FreeBSD machine/param.h defines its own ALIGN */
+#ifndef ALIGN
+#define ALIGN LTTNG_UST_ALIGN
+#endif
+
+#ifndef PAGE_ALIGN
+#define PAGE_ALIGN LTTNG_UST_PAGE_ALIGN
+#endif
+
+#ifndef offset_align
+#define offset_align lttng_ust_offset_align
+#endif
+
+#ifndef offset_align_floor
+#define offset_align_floor lttng_ust_offset_align_floor
+#endif
+
#endif /* _UST_ALIGN_H */
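+/*
+ * Illustrative: lib_ring_buffer_align(6, 4) returns 2, the padding needed
+ * to bring a 6-byte drift up to the next 4-byte boundary.
+ */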
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
- return offset_align(align_drift, size_of_type);
+ return lttng_ust_offset_align(align_drift, size_of_type);
}
#else
chan = consumer_chan->chan->chan;
*len = lib_ring_buffer_get_read_data_size(&chan->backend.config, buf,
consumer_chan->chan->handle);
- *len = PAGE_ALIGN(*len);
+ *len = LTTNG_UST_PAGE_ALIGN(*len);
return 0;
}
res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
do {
prev_offset = res_offset;
- aligned_offset = ALIGN(prev_offset + sizeof(size_t), alignment);
+ aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
new_offset = aligned_offset + nmemb * size;
if (new_offset > sizeof(static_calloc_buf)) {
abort();
size_t align, size_t len)
{
ssize_t ret;
- size_t padding = offset_align(runtime->data_len, align);
+ size_t padding = lttng_ust_offset_align(runtime->data_len, align);
size_t new_len = runtime->data_len + padding + len;
size_t new_alloc_len = new_len;
size_t old_alloc_len = runtime->data_alloc_len;
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
- (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+ (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
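+/*
+ * Illustrative, assuming a 4 KiB page size: data_size == 5000 yields
+ * content_size == 5000 * CHAR_BIT and packet_size == 8192 * CHAR_BIT;
+ * the difference is sub-buffer padding.
+ */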
records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
if (!header)
return;
header->content_size = data_size * CHAR_BIT; /* in bits */
- header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+ header->packet_size = LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
/*
* We do not care about the records lost count, because the metadata
* channel waits and retry.
#include <urcu/futex.h>
#include <urcu/compiler.h>
+#include <lttng/align.h>
#include <lttng/ust-events.h>
#include <lttng/ust-abi.h>
#include <lttng/ust.h>
if (sock_info->wait_shm_mmap) {
long page_size;
- page_size = sysconf(_SC_PAGE_SIZE);
+ page_size = LTTNG_UST_PAGE_SIZE;
if (page_size <= 0) {
if (!page_size) {
errno = EINVAL;
size_t read_len;
/* Align start of note entry */
- offset += offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
if (offset >= segment_end) {
break;
}
offset += sizeof(nhdr) + nhdr.n_namesz;
/* Align start of desc entry */
- offset += offset_align(offset, ELF_NOTE_DESC_ALIGN);
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_DESC_ALIGN);
if (nhdr.n_type != NT_GNU_BUILD_ID) {
/*
#include <urcu/arch.h>
#include <limits.h>
+#include <lttng/align.h>
#include <lttng/ringbuffer-config.h>
#include "vatomic.h"
#include "backend.h"
if (extra_reader_sb)
num_subbuf_alloc++;
- page_size = sysconf(_SC_PAGE_SIZE);
+ page_size = LTTNG_UST_PAGE_SIZE;
if (page_size <= 0) {
goto page_size_error;
}
if (!name)
return -EPERM;
- page_size = sysconf(_SC_PAGE_SIZE);
+ page_size = LTTNG_UST_PAGE_SIZE;
if (page_size <= 0) {
return -ENOMEM;
}
memcpy(&chanb->config, config, sizeof(*config));
/* Per-cpu buffer size: control (prior to backend) */
- shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+ shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
- shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
- shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
/* Sampled timestamp end */
- shmsize += offset_align(shmsize, __alignof__(uint64_t));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
shmsize += sizeof(uint64_t) * num_subbuf;
/* Per-cpu buffer size: backend */
/* num_subbuf + 1 is the worse case */
num_subbuf_alloc = num_subbuf + 1;
- shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
- shmsize += offset_align(shmsize, page_size);
+ shmsize += lttng_ust_offset_align(shmsize, page_size);
shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
- shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
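+/*
+ * Illustrative step (hypothetical values): if shmsize == 100 and the next
+ * structure needs 8-byte alignment, lttng_ust_offset_align() returns 4,
+ * padding shmsize to 104 before the structure's space is reserved.
+ */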
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
/* Calculate the shm allocation layout */
shmsize = sizeof(struct channel);
- shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
chansize = shmsize;
if (priv_data_align)
- shmsize += offset_align(shmsize, priv_data_align);
+ shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
shmsize += priv_data_size;
/* Allocate normal memory for channel (not shared) */
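+/*
+ * Typical use (a sketch; struct foo and the pairing with zalloc_shm() are
+ * assumptions): calling align_shm(obj, __alignof__(struct foo)) before an
+ * allocation makes the next chunk start on a struct foo boundary.
+ */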
void align_shm(struct shm_object *obj, size_t align)
{
- size_t offset_len = offset_align(obj->allocated_len, align);
+ size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
obj->allocated_len += offset_len;
}
}
read_size = lib_ring_buffer_get_read_data_size(
&chan->backend.config, buf, handle);
- read_size = PAGE_ALIGN(read_size);
+ read_size = LTTNG_UST_PAGE_ALIGN(read_size);
ptr = lib_ring_buffer_read_offset_address(
&buf->backend, 0, handle);
printf("WRITE: copy %lu bytes\n", read_size);
}
read_size = lib_ring_buffer_get_read_data_size(
&chan->backend.config, buf, handle);
- read_size = PAGE_ALIGN(read_size);
+ read_size = LTTNG_UST_PAGE_ALIGN(read_size);
ptr = lib_ring_buffer_read_offset_address(
&buf->backend, 0, handle);
printf("WRITE: copy %lu bytes\n", read_size);