#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
#endif
-#ifndef PAGE_MASK /* macOS defines its own PAGE_MASK. */
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#endif
-
-#define __ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
-#ifndef ALIGN /* macOS defines its own ALIGN. */
-#define ALIGN(v, align) __ALIGN_MASK(v, (__typeof__(v)) (align) - 1)
-#endif
+/*
+ * Align value to the next multiple of align. Returns v if it is already a
+ * multiple of align. Align must be a power of two.
+ */
+#define __lttng_align_ceil_mask(v, mask) (((v) + (mask)) & ~(mask))
-#define __ALIGN_FLOOR_MASK(v, mask) ((v) & ~(mask))
+#define lttng_align_ceil(v, align) \
+ __lttng_align_ceil_mask(v, (__typeof__(v)) (align) - 1)
-#ifndef ALIGN_FLOOR
-#define ALIGN_FLOOR(v, align) __ALIGN_FLOOR_MASK(v, (__typeof__(v)) (align) - 1)
-#endif
+/*
+ * Align value to the previous multiple of align. Returns v if it is already a
+ * multiple of align. Align must be a power of two.
+ */
+#define __lttng_align_floor_mask(v, mask) ((v) & ~(mask))
-#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+#define lttng_align_floor(v, align) \
+ __lttng_align_floor_mask(v, (__typeof__(v)) (align) - 1)
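/*
 * For example, assuming a 4096-byte page size (hypothetical values):
 * lttng_align_ceil(4097, 4096) evaluates to 8192, lttng_align_ceil(8192, 4096)
 * stays at 8192, and lttng_align_floor(4097, 4096) evaluates to 4096.
 */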
/**
- * offset_align - Calculate the offset needed to align an object on its natural
+ * lttng_offset_align - Calculate the offset needed to align an object on its natural
* alignment towards higher addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
* Returns the offset that must be added to align towards higher
* addresses.
*/
-#define offset_align(align_drift, alignment) \
+#define lttng_offset_align(align_drift, alignment) \
({ \
LTTNG_BUILD_RUNTIME_BUG_ON((alignment) == 0 \
|| ((alignment) & ((alignment) - 1))); \
 (((alignment) - (align_drift)) & ((alignment) - 1)); \
 })
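/*
 * For example, lttng_offset_align(13, 8) evaluates to 3: adding 3 bytes to an
 * object that sits 13 bytes past an 8-byte-aligned address moves it to the
 * next 8-byte boundary (16).
 */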
/**
- * offset_align_floor - Calculate the offset needed to align an object
+ * lttng_offset_align_floor - Calculate the offset needed to align an object
* on its natural alignment towards lower addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
*
 * Returns the offset that must be subtracted to align towards lower addresses.
*/
-#define offset_align_floor(align_drift, alignment) \
+#define lttng_offset_align_floor(align_drift, alignment) \
({ \
LTTNG_BUILD_RUNTIME_BUG_ON((alignment) == 0 \
 || ((alignment) & ((alignment) - 1))); \
 (((align_drift) - (alignment)) & ((alignment) - 1)); \
 })
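/*
 * For example, lttng_offset_align_floor(13, 8) evaluates to 5: subtracting 5
 * bytes from offset 13 lands on the previous 8-byte boundary (8).
 */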
int32_t bytecode_reserve(struct lttng_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
int32_t ret;
- uint32_t padding = offset_align((*fb)->b.len, align);
+ uint32_t padding = lttng_offset_align((*fb)->b.len, align);
uint32_t new_len = (*fb)->b.len + padding + len;
uint32_t new_alloc_len = sizeof(struct lttng_bytecode_alloc) + new_len;
uint32_t old_alloc_len = (*fb)->alloc_len;
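/*
 * With hypothetical values (*fb)->b.len == 13, align == 8 and len == 4, the
 * padding is 3 bytes, new_len is 20 and the reserved region starts on an
 * 8-byte boundary at offset 16.
 */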
if (!nb_packets_per_stream) {
return consumed_pos; /* Grab everything */
}
- start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
+ start_pos = produced_pos - lttng_offset_align_floor(produced_pos, max_sb_size);
start_pos -= max_sb_size * nb_packets_per_stream;
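/*
 * For example, with produced_pos == 10000, max_sb_size == 4096 and
 * nb_packets_per_stream == 1 (hypothetical values), the floor offset is 1808,
 * so start_pos becomes 8192 - 4096 == 4096: one complete packet ending on a
 * sub-buffer boundary.
 */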
if ((long) (start_pos - consumed_pos) < 0) {
return consumed_pos; /* Grab everything */
* Align produced position on the start-of-packet boundary of the first
* packet going into the next trace chunk.
*/
- produced_pos = ALIGN_FLOOR(produced_pos, stream->max_sb_size);
+ produced_pos = lttng_align_floor(produced_pos, stream->max_sb_size);
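/*
 * For example, with max_sb_size == 4096 and produced_pos == 10240
 * (hypothetical values), produced_pos is rounded down to 8192, the
 * start-of-packet boundary of the first packet of the next chunk.
 */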
if (consumed_pos == produced_pos) {
DBG("Set rotate ready for stream %" PRIu64 " produced = %lu consumed = %lu",
stream->key, produced_pos, consumed_pos);
goto end;
}
- array_alloc_len = ALIGN(kmask_len.len, 8) >> 3;
+ array_alloc_len = lttng_align_ceil(kmask_len.len, 8) >> 3;
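/*
 * The mask length is given in bits; rounding it up to a multiple of 8 and
 * shifting right by 3 yields the byte count. For example, a 70-bit mask gives
 * lttng_align_ceil(70, 8) == 72, hence a 9-byte allocation.
 */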
kmask = zmalloc(sizeof(*kmask) + array_alloc_len);
if (!kmask) {
#define is_signed(type) (((type) -1) < (type) 1)
-/*
- * Align value to the next multiple of align. Returns val if it already is a
- * multiple of align. Align must be a power of two.
- */
-#define ALIGN_TO(value, align) ((value + (align - 1)) & ~(align - 1))
-
#define member_sizeof(type, field) sizeof(((type *) 0)->field)
#define ASSERT_LOCKED(lock) LTTNG_ASSERT(pthread_mutex_trylock(&lock))
#include "lttng/lttng-error.h"
#include <common/compat/string.h>
+#include <common/align.h>
#include <common/error.h>
#include <common/hashtable/hashtable.h>
#include <common/hashtable/utils.h>
* the next structure in the buffer probably needs to be
* aligned too (depending on the arch).
*/
- padding_needed = ALIGN_TO(storage_needed, sizeof(uint64_t)) - storage_needed;
+ padding_needed = lttng_align_ceil(storage_needed, sizeof(uint64_t)) - storage_needed;
storage_needed += padding_needed;
if (location->lookup_method) {
* the next structure in the buffer probably needs to be
* aligned too (depending on the arch).
*/
- padding_needed = ALIGN_TO(storage_needed, sizeof(uint64_t)) - storage_needed;
+ padding_needed = lttng_align_ceil(storage_needed, sizeof(uint64_t)) - storage_needed;
storage_needed += padding_needed;
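/*
 * For instance, if storage_needed is 37 bytes at this point (hypothetical
 * value), 3 bytes of padding are added so that the following structure starts
 * on a 64-bit boundary at offset 40.
 */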
if (location->lookup_method) {
#include <unistd.h>
#include <common/bytecode/bytecode.h>
+#include <common/align.h>
#include <common/common.h>
#include <common/compat/errno.h>
#include <common/compat/string.h>
storage_req += ext_comm->nb_exclusions *
LTTNG_SYMBOL_NAME_LEN;
/* Padding to ensure the flat probe is aligned. */
- storage_req = ALIGN_TO(storage_req, sizeof(uint64_t));
+ storage_req = lttng_align_ceil(storage_req, sizeof(uint64_t));
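/*
 * Rounding storage_req up in place is equivalent to adding the padding
 * separately: a 21-byte requirement, for instance, becomes 24 bytes so that
 * the flat probe starts on an 8-byte boundary.
 */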
storage_req += probe_storage_req;
}
}
/* Insert padding to align to 64-bits. */
ret = lttng_dynamic_buffer_set_size(&listing,
- ALIGN_TO(listing.size,
+ lttng_align_ceil(listing.size,
sizeof(uint64_t)));
if (ret) {
ret = -LTTNG_ERR_NOMEM;