From: Mathieu Desnoyers Date: Wed, 6 May 2020 13:36:45 +0000 (-0400) Subject: Cleanup: Move headers from lib/ to include/lttng/ X-Git-Tag: v2.13.0-rc1~232 X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=a071f25d89c4ef76ffe7bdfcaeb783bc9cfc1211;p=lttng-modules.git Cleanup: Move headers from lib/ to include/lttng/ Adapt includes accordingly. Signed-off-by: Mathieu Desnoyers --- diff --git a/LICENSE b/LICENSE index 9b8e8b31..caf91c66 100644 --- a/LICENSE +++ b/LICENSE @@ -23,9 +23,9 @@ wrapper/list.h These files are licensed under an MIT-style license. See LICENSES/MIT for details. -lib/prio_heap/lttng_prio_heap.h +include/lttng/prio_heap.h lib/prio_heap/lttng_prio_heap.c -lib/bitfield.h +include/lttng/bitfield.h filter-bytecode.h lttng-filter-interpreter.c lttng-filter-specialize.c diff --git a/include/lttng/align.h b/include/lttng/align.h new file mode 100644 index 00000000..964ed1a8 --- /dev/null +++ b/include/lttng/align.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng/align.h + * + * Copyright (C) 2010-2012 Mathieu Desnoyers + */ + +#ifndef _LTTNG_ALIGN_H +#define _LTTNG_ALIGN_H + +#ifdef __KERNEL__ + +#include +#include + +#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1) +#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask)) +#define PTR_ALIGN_FLOOR(p, a) \ + ((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a)) + +/* + * Align pointer on natural object alignment. + */ +#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj))) +#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj))) + +/** + * offset_align - Calculate the offset needed to align an object on its natural + * alignment towards higher addresses. + * @align_drift: object offset from an "alignment"-aligned address. + * @alignment: natural object alignment. Must be non-zero, power of 2. + * + * Returns the offset that must be added to align towards higher + * addresses. + */ +#define offset_align(align_drift, alignment) \ + ({ \ + BUILD_RUNTIME_BUG_ON((alignment) == 0 \ + || ((alignment) & ((alignment) - 1))); \ + (((alignment) - (align_drift)) & ((alignment) - 1)); \ + }) + +/** + * offset_align_floor - Calculate the offset needed to align an object + * on its natural alignment towards lower addresses. + * @align_drift: object offset from an "alignment"-aligned address. + * @alignment: natural object alignment. Must be non-zero, power of 2. + * + * Returns the offset that must be substracted to align towards lower addresses. + */ +#define offset_align_floor(align_drift, alignment) \ + ({ \ + BUILD_RUNTIME_BUG_ON((alignment) == 0 \ + || ((alignment) & ((alignment) - 1))); \ + (((align_drift) - (alignment)) & ((alignment) - 1)); \ + }) + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/lttng/bitfield.h b/include/lttng/bitfield.h new file mode 100644 index 00000000..7d5eaeaa --- /dev/null +++ b/include/lttng/bitfield.h @@ -0,0 +1,543 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright 2010-2019 - Mathieu Desnoyers + */ + +#ifndef _BABELTRACE_BITFIELD_H +#define _BABELTRACE_BITFIELD_H + +#include +#include + +#ifndef CHAR_BIT +#define CHAR_BIT 8 +#endif + +/* + * This header strictly follows the C99 standard, except for use of the + * compiler-specific __typeof__. + */ + +/* + * This bitfield header requires the compiler representation of signed + * integers to be two's complement. + */ +#if (-1 != ~0) +#error "bitfield.h requires the compiler representation of signed integers to be two's complement." 
+#endif + +/* + * _bt_is_signed_type() willingly generates comparison of unsigned + * expression < 0, which is always false. Silence compiler warnings. + * GCC versions lower than 4.6.0 do not accept diagnostic pragma inside + * functions. + */ +#if defined(__GNUC__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40600 +# define _BT_DIAG_PUSH _Pragma("GCC diagnostic push") +# define _BT_DIAG_POP _Pragma("GCC diagnostic pop") + +# define _BT_DIAG_STRINGIFY_1(x) #x +# define _BT_DIAG_STRINGIFY(x) _BT_DIAG_STRINGIFY_1(x) + +# define _BT_DIAG_IGNORE(option) \ + _Pragma(_BT_DIAG_STRINGIFY(GCC diagnostic ignored option)) +# define _BT_DIAG_IGNORE_TYPE_LIMITS _BT_DIAG_IGNORE("-Wtype-limits") +#else +# define _BT_DIAG_PUSH +# define _BT_DIAG_POP +# define _BT_DIAG_IGNORE +# define _BT_DIAG_IGNORE_TYPE_LIMITS +#endif + +#define _bt_is_signed_type(type) ((type) -1 < (type) 0) + +/* + * Produce a build-time error if the condition `cond` is non-zero. + * Evaluates as a size_t expression. + */ +#ifdef __cplusplus +#define _BT_BUILD_ASSERT(cond) ([]{static_assert((cond), "");}, 0) +#else +#define _BT_BUILD_ASSERT(cond) \ + sizeof(struct { int f:(2 * !!(cond) - 1); }) +#endif + +/* + * Cast value `v` to an unsigned integer of the same size as `v`. + */ +#define _bt_cast_value_to_unsigned(v) \ + (sizeof(v) == sizeof(uint8_t) ? (uint8_t) (v) : \ + sizeof(v) == sizeof(uint16_t) ? (uint16_t) (v) : \ + sizeof(v) == sizeof(uint32_t) ? (uint32_t) (v) : \ + sizeof(v) == sizeof(uint64_t) ? (uint64_t) (v) : \ + _BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t))) + +/* + * Cast value `v` to an unsigned integer type of the size of type `type` + * *without* sign-extension. + * + * The unsigned cast ensures that we're not shifting a negative value, + * which is undefined in C. However, this limits the maximum type size + * of `type` to 64-bit. Generate a compile-time error if the size of + * `type` is larger than 64-bit. + */ +#define _bt_cast_value_to_unsigned_type(type, v) \ + (sizeof(type) == sizeof(uint8_t) ? \ + (uint8_t) _bt_cast_value_to_unsigned(v) : \ + sizeof(type) == sizeof(uint16_t) ? \ + (uint16_t) _bt_cast_value_to_unsigned(v) : \ + sizeof(type) == sizeof(uint32_t) ? \ + (uint32_t) _bt_cast_value_to_unsigned(v) : \ + sizeof(type) == sizeof(uint64_t) ? \ + (uint64_t) _bt_cast_value_to_unsigned(v) : \ + _BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t))) + +/* + * _bt_fill_mask evaluates to a "type" integer with all bits set. + */ +#define _bt_fill_mask(type) ((type) ~(type) 0) + +/* + * Left shift a value `v` of `shift` bits. + * + * The type of `v` can be signed or unsigned integer. + * The value of `shift` must be less than the size of `v` (in bits), + * otherwise the behavior is undefined. + * Evaluates to the result of the shift operation. + * + * According to the C99 standard, left shift of a left hand-side signed + * type is undefined if it has a negative value or if the result cannot + * be represented in the result type. This bitfield header discards the + * bits that are left-shifted beyond the result type representation, + * which is the behavior of an unsigned type left shift operation. + * Therefore, always perform left shift on an unsigned type. + * + * This macro should not be used if `shift` can be greater or equal than + * the bitwidth of `v`. See `_bt_safe_lshift`. 
+ */ +#define _bt_lshift(v, shift) \ + ((__typeof__(v)) (_bt_cast_value_to_unsigned(v) << (shift))) + +/* + * Generate a mask of type `type` with the `length` least significant bits + * cleared, and the most significant bits set. + */ +#define _bt_make_mask_complement(type, length) \ + _bt_lshift(_bt_fill_mask(type), length) + +/* + * Generate a mask of type `type` with the `length` least significant bits + * set, and the most significant bits cleared. + */ +#define _bt_make_mask(type, length) \ + ((type) ~_bt_make_mask_complement(type, length)) + +/* + * Right shift a value `v` of `shift` bits. + * + * The type of `v` can be signed or unsigned integer. + * The value of `shift` must be less than the size of `v` (in bits), + * otherwise the behavior is undefined. + * Evaluates to the result of the shift operation. + * + * According to the C99 standard, right shift of a left hand-side signed + * type which has a negative value is implementation defined. This + * bitfield header relies on the right shift implementation carrying the + * sign bit. If the compiler implementation has a different behavior, + * emulate carrying the sign bit. + * + * This macro should not be used if `shift` can be greater or equal than + * the bitwidth of `v`. See `_bt_safe_rshift`. + */ +#if ((-1 >> 1) == -1) +#define _bt_rshift(v, shift) ((v) >> (shift)) +#else +#define _bt_rshift(v, shift) \ + ((__typeof__(v)) ((_bt_cast_value_to_unsigned(v) >> (shift)) | \ + ((v) < 0 ? _bt_make_mask_complement(__typeof__(v), \ + sizeof(v) * CHAR_BIT - (shift)) : 0))) +#endif + +/* + * Right shift a signed or unsigned integer with `shift` value being an + * arbitrary number of bits. `v` is modified by this macro. The shift + * is transformed into a sequence of `_nr_partial_shifts` consecutive + * shift operations, each of a number of bits smaller than the bitwidth + * of `v`, ending with a shift of the number of left over bits. + */ +#define _bt_safe_rshift(v, shift) \ +do { \ + unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \ + unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \ + \ + for (; _nr_partial_shifts; _nr_partial_shifts--) \ + (v) = _bt_rshift(v, sizeof(v) * CHAR_BIT - 1); \ + (v) = _bt_rshift(v, _leftover_bits); \ +} while (0) + +/* + * Left shift a signed or unsigned integer with `shift` value being an + * arbitrary number of bits. `v` is modified by this macro. The shift + * is transformed into a sequence of `_nr_partial_shifts` consecutive + * shift operations, each of a number of bits smaller than the bitwidth + * of `v`, ending with a shift of the number of left over bits. + */ +#define _bt_safe_lshift(v, shift) \ +do { \ + unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \ + unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \ + \ + for (; _nr_partial_shifts; _nr_partial_shifts--) \ + (v) = _bt_lshift(v, sizeof(v) * CHAR_BIT - 1); \ + (v) = _bt_lshift(v, _leftover_bits); \ +} while (0) + +/* + * bt_bitfield_write - write integer to a bitfield in native endianness + * + * Save integer to the bitfield, which starts at the "start" bit, has "len" + * bits. + * The inside of a bitfield is from high bits to low bits. + * Uses native endianness. + * For unsigned "v", pad MSB with 0 if bitfield is larger than v. + * For signed "v", sign-extend v if bitfield is larger than v. + * + * On little endian, bytes are placed from the less significant to the most + * significant. 
Also, consecutive bitfields are placed from lower bits to higher + * bits. + * + * On big endian, bytes are places from most significant to less significant. + * Also, consecutive bitfields are placed from higher to lower bits. + */ + +#define _bt_bitfield_write_le(ptr, type, start, length, v) \ +do { \ + __typeof__(v) _v = (v); \ + type *_ptr = (void *) (ptr); \ + unsigned long _start = (start), _length = (length); \ + type _mask, _cmask; \ + unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ + unsigned long _start_unit, _end_unit, _this_unit; \ + unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ + \ + if (!_length) \ + break; \ + \ + _end = _start + _length; \ + _start_unit = _start / _ts; \ + _end_unit = (_end + (_ts - 1)) / _ts; \ + \ + /* Trim v high bits */ \ + if (_length < sizeof(_v) * CHAR_BIT) \ + _v &= _bt_make_mask(__typeof__(_v), _length); \ + \ + /* We can now append v with a simple "or", shift it piece-wise */ \ + _this_unit = _start_unit; \ + if (_start_unit == _end_unit - 1) { \ + _mask = _bt_make_mask(type, _start % _ts); \ + if (_end % _ts) \ + _mask |= _bt_make_mask_complement(type, _end % _ts); \ + _cmask = _bt_lshift((type) (_v), _start % _ts); \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + break; \ + } \ + if (_start % _ts) { \ + _cshift = _start % _ts; \ + _mask = _bt_make_mask(type, _cshift); \ + _cmask = _bt_lshift((type) (_v), _cshift); \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + _bt_safe_rshift(_v, _ts - _cshift); \ + _start += _ts - _cshift; \ + _this_unit++; \ + } \ + for (; _this_unit < _end_unit - 1; _this_unit++) { \ + _ptr[_this_unit] = (type) _v; \ + _bt_safe_rshift(_v, _ts); \ + _start += _ts; \ + } \ + if (_end % _ts) { \ + _mask = _bt_make_mask_complement(type, _end % _ts); \ + _cmask = (type) _v; \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + } else \ + _ptr[_this_unit] = (type) _v; \ +} while (0) + +#define _bt_bitfield_write_be(ptr, type, start, length, v) \ +do { \ + __typeof__(v) _v = (v); \ + type *_ptr = (void *) (ptr); \ + unsigned long _start = (start), _length = (length); \ + type _mask, _cmask; \ + unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ + unsigned long _start_unit, _end_unit, _this_unit; \ + unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ + \ + if (!_length) \ + break; \ + \ + _end = _start + _length; \ + _start_unit = _start / _ts; \ + _end_unit = (_end + (_ts - 1)) / _ts; \ + \ + /* Trim v high bits */ \ + if (_length < sizeof(_v) * CHAR_BIT) \ + _v &= _bt_make_mask(__typeof__(_v), _length); \ + \ + /* We can now append v with a simple "or", shift it piece-wise */ \ + _this_unit = _end_unit - 1; \ + if (_start_unit == _end_unit - 1) { \ + _mask = _bt_make_mask(type, (_ts - (_end % _ts)) % _ts); \ + if (_start % _ts) \ + _mask |= _bt_make_mask_complement(type, _ts - (_start % _ts)); \ + _cmask = _bt_lshift((type) (_v), (_ts - (_end % _ts)) % _ts); \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + break; \ + } \ + if (_end % _ts) { \ + _cshift = _end % _ts; \ + _mask = _bt_make_mask(type, _ts - _cshift); \ + _cmask = _bt_lshift((type) (_v), _ts - _cshift); \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + _bt_safe_rshift(_v, _cshift); \ + _end -= _cshift; \ + _this_unit--; \ + } \ + for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \ + 
_ptr[_this_unit] = (type) _v; \ + _bt_safe_rshift(_v, _ts); \ + _end -= _ts; \ + } \ + if (_start % _ts) { \ + _mask = _bt_make_mask_complement(type, _ts - (_start % _ts)); \ + _cmask = (type) _v; \ + _cmask &= ~_mask; \ + _ptr[_this_unit] &= _mask; \ + _ptr[_this_unit] |= _cmask; \ + } else \ + _ptr[_this_unit] = (type) _v; \ +} while (0) + +/* + * bt_bitfield_write - write integer to a bitfield in native endianness + * bt_bitfield_write_le - write integer to a bitfield in little endian + * bt_bitfield_write_be - write integer to a bitfield in big endian + */ + +#if (__BYTE_ORDER == __LITTLE_ENDIAN) + +#define bt_bitfield_write(ptr, type, start, length, v) \ + _bt_bitfield_write_le(ptr, type, start, length, v) + +#define bt_bitfield_write_le(ptr, type, start, length, v) \ + _bt_bitfield_write_le(ptr, type, start, length, v) + +#define bt_bitfield_write_be(ptr, type, start, length, v) \ + _bt_bitfield_write_be(ptr, unsigned char, start, length, v) + +#elif (__BYTE_ORDER == __BIG_ENDIAN) + +#define bt_bitfield_write(ptr, type, start, length, v) \ + _bt_bitfield_write_be(ptr, type, start, length, v) + +#define bt_bitfield_write_le(ptr, type, start, length, v) \ + _bt_bitfield_write_le(ptr, unsigned char, start, length, v) + +#define bt_bitfield_write_be(ptr, type, start, length, v) \ + _bt_bitfield_write_be(ptr, type, start, length, v) + +#else /* (__BYTE_ORDER == __PDP_ENDIAN) */ + +#error "Byte order not supported" + +#endif + +#define _bt_bitfield_read_le(ptr, type, start, length, vptr) \ +do { \ + __typeof__(*(vptr)) *_vptr = (vptr); \ + __typeof__(*_vptr) _v; \ + type *_ptr = (type *) (ptr); \ + unsigned long _start = (start), _length = (length); \ + type _mask, _cmask; \ + unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ + unsigned long _start_unit, _end_unit, _this_unit; \ + unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ + bool _is_signed_type; \ + \ + if (!_length) { \ + *_vptr = 0; \ + break; \ + } \ + \ + _end = _start + _length; \ + _start_unit = _start / _ts; \ + _end_unit = (_end + (_ts - 1)) / _ts; \ + \ + _this_unit = _end_unit - 1; \ + _BT_DIAG_PUSH \ + _BT_DIAG_IGNORE_TYPE_LIMITS \ + _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \ + _BT_DIAG_POP \ + if (_is_signed_type \ + && (_ptr[_this_unit] & _bt_lshift((type) 1, (_end % _ts ? 
_end % _ts : _ts) - 1))) \ + _v = ~(__typeof__(_v)) 0; \ + else \ + _v = 0; \ + if (_start_unit == _end_unit - 1) { \ + _cmask = _ptr[_this_unit]; \ + _cmask = _bt_rshift(_cmask, _start % _ts); \ + if ((_end - _start) % _ts) { \ + _mask = _bt_make_mask(type, _end - _start); \ + _cmask &= _mask; \ + } \ + _bt_safe_lshift(_v, _end - _start); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + *_vptr = _v; \ + break; \ + } \ + if (_end % _ts) { \ + _cshift = _end % _ts; \ + _mask = _bt_make_mask(type, _cshift); \ + _cmask = _ptr[_this_unit]; \ + _cmask &= _mask; \ + _bt_safe_lshift(_v, _cshift); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + _end -= _cshift; \ + _this_unit--; \ + } \ + for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \ + _bt_safe_lshift(_v, _ts); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ + _end -= _ts; \ + } \ + if (_start % _ts) { \ + _mask = _bt_make_mask(type, _ts - (_start % _ts)); \ + _cmask = _ptr[_this_unit]; \ + _cmask = _bt_rshift(_cmask, _start % _ts); \ + _cmask &= _mask; \ + _bt_safe_lshift(_v, _ts - (_start % _ts)); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + } else { \ + _bt_safe_lshift(_v, _ts); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ + } \ + *_vptr = _v; \ +} while (0) + +#define _bt_bitfield_read_be(ptr, type, start, length, vptr) \ +do { \ + __typeof__(*(vptr)) *_vptr = (vptr); \ + __typeof__(*_vptr) _v; \ + type *_ptr = (void *) (ptr); \ + unsigned long _start = (start), _length = (length); \ + type _mask, _cmask; \ + unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ + unsigned long _start_unit, _end_unit, _this_unit; \ + unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ + bool _is_signed_type; \ + \ + if (!_length) { \ + *_vptr = 0; \ + break; \ + } \ + \ + _end = _start + _length; \ + _start_unit = _start / _ts; \ + _end_unit = (_end + (_ts - 1)) / _ts; \ + \ + _this_unit = _start_unit; \ + _BT_DIAG_PUSH \ + _BT_DIAG_IGNORE_TYPE_LIMITS \ + _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \ + _BT_DIAG_POP \ + if (_is_signed_type \ + && (_ptr[_this_unit] & _bt_lshift((type) 1, _ts - (_start % _ts) - 1))) \ + _v = ~(__typeof__(_v)) 0; \ + else \ + _v = 0; \ + if (_start_unit == _end_unit - 1) { \ + _cmask = _ptr[_this_unit]; \ + _cmask = _bt_rshift(_cmask, (_ts - (_end % _ts)) % _ts); \ + if ((_end - _start) % _ts) { \ + _mask = _bt_make_mask(type, _end - _start); \ + _cmask &= _mask; \ + } \ + _bt_safe_lshift(_v, _end - _start); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + *_vptr = _v; \ + break; \ + } \ + if (_start % _ts) { \ + _cshift = _start % _ts; \ + _mask = _bt_make_mask(type, _ts - _cshift); \ + _cmask = _ptr[_this_unit]; \ + _cmask &= _mask; \ + _bt_safe_lshift(_v, _ts - _cshift); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + _start += _ts - _cshift; \ + _this_unit++; \ + } \ + for (; _this_unit < _end_unit - 1; _this_unit++) { \ + _bt_safe_lshift(_v, _ts); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ + _start += _ts; \ + } \ + if (_end % _ts) { \ + _mask = _bt_make_mask(type, _end % _ts); \ + _cmask = _ptr[_this_unit]; \ + _cmask = _bt_rshift(_cmask, _ts - (_end % _ts)); \ + _cmask &= _mask; \ + _bt_safe_lshift(_v, _end % _ts); \ + _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ + } else { \ + _bt_safe_lshift(_v, _ts); \ + _v |= 
_bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ + } \ + *_vptr = _v; \ +} while (0) + +/* + * bt_bitfield_read - read integer from a bitfield in native endianness + * bt_bitfield_read_le - read integer from a bitfield in little endian + * bt_bitfield_read_be - read integer from a bitfield in big endian + */ + +#if (__BYTE_ORDER == __LITTLE_ENDIAN) + +#define bt_bitfield_read(ptr, type, start, length, vptr) \ + _bt_bitfield_read_le(ptr, type, start, length, vptr) + +#define bt_bitfield_read_le(ptr, type, start, length, vptr) \ + _bt_bitfield_read_le(ptr, type, start, length, vptr) + +#define bt_bitfield_read_be(ptr, type, start, length, vptr) \ + _bt_bitfield_read_be(ptr, unsigned char, start, length, vptr) + +#elif (__BYTE_ORDER == __BIG_ENDIAN) + +#define bt_bitfield_read(ptr, type, start, length, vptr) \ + _bt_bitfield_read_be(ptr, type, start, length, vptr) + +#define bt_bitfield_read_le(ptr, type, start, length, vptr) \ + _bt_bitfield_read_le(ptr, unsigned char, start, length, vptr) + +#define bt_bitfield_read_be(ptr, type, start, length, vptr) \ + _bt_bitfield_read_be(ptr, type, start, length, vptr) + +#else /* (__BYTE_ORDER == __PDP_ENDIAN) */ + +#error "Byte order not supported" + +#endif + +#endif /* _BABELTRACE_BITFIELD_H */ diff --git a/include/lttng/bug.h b/include/lttng/bug.h new file mode 100644 index 00000000..0a2c5e4b --- /dev/null +++ b/include/lttng/bug.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng/bug.h + * + * Copyright (C) 2010-2012 Mathieu Desnoyers + */ + +#ifndef _LTTNG_BUG_H +#define _LTTNG_BUG_H + +/** + * BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime + * @condition: the condition which should be false. + * + * If the condition is a constant and true, the compiler will generate a build + * error. If the condition is not constant, a BUG will be triggered at runtime + * if the condition is ever true. If the condition is constant and false, no + * code is emitted. + */ +#define BUILD_RUNTIME_BUG_ON(condition) \ + do { \ + if (__builtin_constant_p(condition)) \ + BUILD_BUG_ON(condition); \ + else \ + BUG_ON(condition); \ + } while (0) + +#endif diff --git a/include/lttng/prio_heap.h b/include/lttng/prio_heap.h new file mode 100644 index 00000000..d01cbe61 --- /dev/null +++ b/include/lttng/prio_heap.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: MIT + * + * lttng/prio_heap.h + * + * Priority heap containing pointers. Based on CLRS, chapter 6. + * + * Copyright 2011 - Mathieu Desnoyers + */ + +#ifndef _LTTNG_PRIO_HEAP_H +#define _LTTNG_PRIO_HEAP_H + +#include + +struct lttng_ptr_heap { + size_t len, alloc_len; + void **ptrs; + int (*gt)(void *a, void *b); + gfp_t gfpmask; +}; + +#ifdef DEBUG_HEAP +void lttng_check_heap(const struct lttng_ptr_heap *heap); +#else +static inline +void lttng_check_heap(const struct lttng_ptr_heap *heap) +{ +} +#endif + +/** + * lttng_heap_maximum - return the largest element in the heap + * @heap: the heap to be operated on + * + * Returns the largest element in the heap, without performing any modification + * to the heap structure. Returns NULL if the heap is empty. + */ +static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap) +{ + lttng_check_heap(heap); + return heap->len ? 
heap->ptrs[0] : NULL; +} + +/** + * lttng_heap_init - initialize the heap + * @heap: the heap to initialize + * @alloc_len: number of elements initially allocated + * @gfp: allocation flags + * @gt: function to compare the elements + * + * Returns -ENOMEM if out of memory. + */ +extern int lttng_heap_init(struct lttng_ptr_heap *heap, + size_t alloc_len, gfp_t gfpmask, + int gt(void *a, void *b)); + +/** + * lttng_heap_free - free the heap + * @heap: the heap to free + */ +extern void lttng_heap_free(struct lttng_ptr_heap *heap); + +/** + * lttng_heap_insert - insert an element into the heap + * @heap: the heap to be operated on + * @p: the element to add + * + * Insert an element into the heap. + * + * Returns -ENOMEM if out of memory. + */ +extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p); + +/** + * lttng_heap_remove - remove the largest element from the heap + * @heap: the heap to be operated on + * + * Returns the largest element in the heap. It removes this element from the + * heap. Returns NULL if the heap is empty. + */ +extern void *lttng_heap_remove(struct lttng_ptr_heap *heap); + +/** + * lttng_heap_cherrypick - remove a given element from the heap + * @heap: the heap to be operated on + * @p: the element + * + * Remove the given element from the heap. Return the element if present, else + * return NULL. This algorithm has a complexity of O(n), which is higher than + * O(log(n)) provided by the rest of this API. + */ +extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p); + +/** + * lttng_heap_replace_max - replace the the largest element from the heap + * @heap: the heap to be operated on + * @p: the pointer to be inserted as topmost element replacement + * + * Returns the largest element in the heap. It removes this element from the + * heap. The heap is rebalanced only once after the insertion. Returns NULL if + * the heap is empty. + * + * This is the equivalent of calling heap_remove() and then heap_insert(), but + * it only rebalances the heap once. It never allocates memory. 
+ */ +extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p); + +#endif /* _LTTNG_PRIO_HEAP_H */ diff --git a/include/ringbuffer/config.h b/include/ringbuffer/config.h index 1b01935b..1549c108 100644 --- a/include/ringbuffer/config.h +++ b/include/ringbuffer/config.h @@ -13,7 +13,7 @@ #include #include -#include +#include #include struct lib_ring_buffer; diff --git a/include/ringbuffer/frontend_internal.h b/include/ringbuffer/frontend_internal.h index 723656bc..680c65bb 100644 --- a/include/ringbuffer/frontend_internal.h +++ b/include/ringbuffer/frontend_internal.h @@ -15,7 +15,7 @@ #include #include #include -#include /* For per-CPU read-side iterator */ +#include /* For per-CPU read-side iterator */ /* Buffer offset macros */ diff --git a/include/ringbuffer/frontend_types.h b/include/ringbuffer/frontend_types.h index b04c085b..d5c4916b 100644 --- a/include/ringbuffer/frontend_types.h +++ b/include/ringbuffer/frontend_types.h @@ -15,7 +15,7 @@ #include #include #include -#include /* For per-CPU read-side iterator */ +#include /* For per-CPU read-side iterator */ #include /* diff --git a/lib/align.h b/lib/align.h deleted file mode 100644 index c0695494..00000000 --- a/lib/align.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) - * - * lib/align.h - * - * Copyright (C) 2010-2012 Mathieu Desnoyers - */ - -#ifndef _LTTNG_ALIGN_H -#define _LTTNG_ALIGN_H - -#ifdef __KERNEL__ - -#include -#include - -#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1) -#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask)) -#define PTR_ALIGN_FLOOR(p, a) \ - ((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a)) - -/* - * Align pointer on natural object alignment. - */ -#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj))) -#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj))) - -/** - * offset_align - Calculate the offset needed to align an object on its natural - * alignment towards higher addresses. - * @align_drift: object offset from an "alignment"-aligned address. - * @alignment: natural object alignment. Must be non-zero, power of 2. - * - * Returns the offset that must be added to align towards higher - * addresses. - */ -#define offset_align(align_drift, alignment) \ - ({ \ - BUILD_RUNTIME_BUG_ON((alignment) == 0 \ - || ((alignment) & ((alignment) - 1))); \ - (((alignment) - (align_drift)) & ((alignment) - 1)); \ - }) - -/** - * offset_align_floor - Calculate the offset needed to align an object - * on its natural alignment towards lower addresses. - * @align_drift: object offset from an "alignment"-aligned address. - * @alignment: natural object alignment. Must be non-zero, power of 2. - * - * Returns the offset that must be substracted to align towards lower addresses. - */ -#define offset_align_floor(align_drift, alignment) \ - ({ \ - BUILD_RUNTIME_BUG_ON((alignment) == 0 \ - || ((alignment) & ((alignment) - 1))); \ - (((align_drift) - (alignment)) & ((alignment) - 1)); \ - }) - -#endif /* __KERNEL__ */ - -#endif diff --git a/lib/bitfield.h b/lib/bitfield.h deleted file mode 100644 index 7d5eaeaa..00000000 --- a/lib/bitfield.h +++ /dev/null @@ -1,543 +0,0 @@ -/* SPDX-License-Identifier: MIT - * - * Copyright 2010-2019 - Mathieu Desnoyers - */ - -#ifndef _BABELTRACE_BITFIELD_H -#define _BABELTRACE_BITFIELD_H - -#include -#include - -#ifndef CHAR_BIT -#define CHAR_BIT 8 -#endif - -/* - * This header strictly follows the C99 standard, except for use of the - * compiler-specific __typeof__. 
- */ - -/* - * This bitfield header requires the compiler representation of signed - * integers to be two's complement. - */ -#if (-1 != ~0) -#error "bitfield.h requires the compiler representation of signed integers to be two's complement." -#endif - -/* - * _bt_is_signed_type() willingly generates comparison of unsigned - * expression < 0, which is always false. Silence compiler warnings. - * GCC versions lower than 4.6.0 do not accept diagnostic pragma inside - * functions. - */ -#if defined(__GNUC__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40600 -# define _BT_DIAG_PUSH _Pragma("GCC diagnostic push") -# define _BT_DIAG_POP _Pragma("GCC diagnostic pop") - -# define _BT_DIAG_STRINGIFY_1(x) #x -# define _BT_DIAG_STRINGIFY(x) _BT_DIAG_STRINGIFY_1(x) - -# define _BT_DIAG_IGNORE(option) \ - _Pragma(_BT_DIAG_STRINGIFY(GCC diagnostic ignored option)) -# define _BT_DIAG_IGNORE_TYPE_LIMITS _BT_DIAG_IGNORE("-Wtype-limits") -#else -# define _BT_DIAG_PUSH -# define _BT_DIAG_POP -# define _BT_DIAG_IGNORE -# define _BT_DIAG_IGNORE_TYPE_LIMITS -#endif - -#define _bt_is_signed_type(type) ((type) -1 < (type) 0) - -/* - * Produce a build-time error if the condition `cond` is non-zero. - * Evaluates as a size_t expression. - */ -#ifdef __cplusplus -#define _BT_BUILD_ASSERT(cond) ([]{static_assert((cond), "");}, 0) -#else -#define _BT_BUILD_ASSERT(cond) \ - sizeof(struct { int f:(2 * !!(cond) - 1); }) -#endif - -/* - * Cast value `v` to an unsigned integer of the same size as `v`. - */ -#define _bt_cast_value_to_unsigned(v) \ - (sizeof(v) == sizeof(uint8_t) ? (uint8_t) (v) : \ - sizeof(v) == sizeof(uint16_t) ? (uint16_t) (v) : \ - sizeof(v) == sizeof(uint32_t) ? (uint32_t) (v) : \ - sizeof(v) == sizeof(uint64_t) ? (uint64_t) (v) : \ - _BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t))) - -/* - * Cast value `v` to an unsigned integer type of the size of type `type` - * *without* sign-extension. - * - * The unsigned cast ensures that we're not shifting a negative value, - * which is undefined in C. However, this limits the maximum type size - * of `type` to 64-bit. Generate a compile-time error if the size of - * `type` is larger than 64-bit. - */ -#define _bt_cast_value_to_unsigned_type(type, v) \ - (sizeof(type) == sizeof(uint8_t) ? \ - (uint8_t) _bt_cast_value_to_unsigned(v) : \ - sizeof(type) == sizeof(uint16_t) ? \ - (uint16_t) _bt_cast_value_to_unsigned(v) : \ - sizeof(type) == sizeof(uint32_t) ? \ - (uint32_t) _bt_cast_value_to_unsigned(v) : \ - sizeof(type) == sizeof(uint64_t) ? \ - (uint64_t) _bt_cast_value_to_unsigned(v) : \ - _BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t))) - -/* - * _bt_fill_mask evaluates to a "type" integer with all bits set. - */ -#define _bt_fill_mask(type) ((type) ~(type) 0) - -/* - * Left shift a value `v` of `shift` bits. - * - * The type of `v` can be signed or unsigned integer. - * The value of `shift` must be less than the size of `v` (in bits), - * otherwise the behavior is undefined. - * Evaluates to the result of the shift operation. - * - * According to the C99 standard, left shift of a left hand-side signed - * type is undefined if it has a negative value or if the result cannot - * be represented in the result type. This bitfield header discards the - * bits that are left-shifted beyond the result type representation, - * which is the behavior of an unsigned type left shift operation. - * Therefore, always perform left shift on an unsigned type. 
- * - * This macro should not be used if `shift` can be greater or equal than - * the bitwidth of `v`. See `_bt_safe_lshift`. - */ -#define _bt_lshift(v, shift) \ - ((__typeof__(v)) (_bt_cast_value_to_unsigned(v) << (shift))) - -/* - * Generate a mask of type `type` with the `length` least significant bits - * cleared, and the most significant bits set. - */ -#define _bt_make_mask_complement(type, length) \ - _bt_lshift(_bt_fill_mask(type), length) - -/* - * Generate a mask of type `type` with the `length` least significant bits - * set, and the most significant bits cleared. - */ -#define _bt_make_mask(type, length) \ - ((type) ~_bt_make_mask_complement(type, length)) - -/* - * Right shift a value `v` of `shift` bits. - * - * The type of `v` can be signed or unsigned integer. - * The value of `shift` must be less than the size of `v` (in bits), - * otherwise the behavior is undefined. - * Evaluates to the result of the shift operation. - * - * According to the C99 standard, right shift of a left hand-side signed - * type which has a negative value is implementation defined. This - * bitfield header relies on the right shift implementation carrying the - * sign bit. If the compiler implementation has a different behavior, - * emulate carrying the sign bit. - * - * This macro should not be used if `shift` can be greater or equal than - * the bitwidth of `v`. See `_bt_safe_rshift`. - */ -#if ((-1 >> 1) == -1) -#define _bt_rshift(v, shift) ((v) >> (shift)) -#else -#define _bt_rshift(v, shift) \ - ((__typeof__(v)) ((_bt_cast_value_to_unsigned(v) >> (shift)) | \ - ((v) < 0 ? _bt_make_mask_complement(__typeof__(v), \ - sizeof(v) * CHAR_BIT - (shift)) : 0))) -#endif - -/* - * Right shift a signed or unsigned integer with `shift` value being an - * arbitrary number of bits. `v` is modified by this macro. The shift - * is transformed into a sequence of `_nr_partial_shifts` consecutive - * shift operations, each of a number of bits smaller than the bitwidth - * of `v`, ending with a shift of the number of left over bits. - */ -#define _bt_safe_rshift(v, shift) \ -do { \ - unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \ - unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \ - \ - for (; _nr_partial_shifts; _nr_partial_shifts--) \ - (v) = _bt_rshift(v, sizeof(v) * CHAR_BIT - 1); \ - (v) = _bt_rshift(v, _leftover_bits); \ -} while (0) - -/* - * Left shift a signed or unsigned integer with `shift` value being an - * arbitrary number of bits. `v` is modified by this macro. The shift - * is transformed into a sequence of `_nr_partial_shifts` consecutive - * shift operations, each of a number of bits smaller than the bitwidth - * of `v`, ending with a shift of the number of left over bits. - */ -#define _bt_safe_lshift(v, shift) \ -do { \ - unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \ - unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \ - \ - for (; _nr_partial_shifts; _nr_partial_shifts--) \ - (v) = _bt_lshift(v, sizeof(v) * CHAR_BIT - 1); \ - (v) = _bt_lshift(v, _leftover_bits); \ -} while (0) - -/* - * bt_bitfield_write - write integer to a bitfield in native endianness - * - * Save integer to the bitfield, which starts at the "start" bit, has "len" - * bits. - * The inside of a bitfield is from high bits to low bits. - * Uses native endianness. - * For unsigned "v", pad MSB with 0 if bitfield is larger than v. - * For signed "v", sign-extend v if bitfield is larger than v. 
- * - * On little endian, bytes are placed from the less significant to the most - * significant. Also, consecutive bitfields are placed from lower bits to higher - * bits. - * - * On big endian, bytes are places from most significant to less significant. - * Also, consecutive bitfields are placed from higher to lower bits. - */ - -#define _bt_bitfield_write_le(ptr, type, start, length, v) \ -do { \ - __typeof__(v) _v = (v); \ - type *_ptr = (void *) (ptr); \ - unsigned long _start = (start), _length = (length); \ - type _mask, _cmask; \ - unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ - unsigned long _start_unit, _end_unit, _this_unit; \ - unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ - \ - if (!_length) \ - break; \ - \ - _end = _start + _length; \ - _start_unit = _start / _ts; \ - _end_unit = (_end + (_ts - 1)) / _ts; \ - \ - /* Trim v high bits */ \ - if (_length < sizeof(_v) * CHAR_BIT) \ - _v &= _bt_make_mask(__typeof__(_v), _length); \ - \ - /* We can now append v with a simple "or", shift it piece-wise */ \ - _this_unit = _start_unit; \ - if (_start_unit == _end_unit - 1) { \ - _mask = _bt_make_mask(type, _start % _ts); \ - if (_end % _ts) \ - _mask |= _bt_make_mask_complement(type, _end % _ts); \ - _cmask = _bt_lshift((type) (_v), _start % _ts); \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - break; \ - } \ - if (_start % _ts) { \ - _cshift = _start % _ts; \ - _mask = _bt_make_mask(type, _cshift); \ - _cmask = _bt_lshift((type) (_v), _cshift); \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - _bt_safe_rshift(_v, _ts - _cshift); \ - _start += _ts - _cshift; \ - _this_unit++; \ - } \ - for (; _this_unit < _end_unit - 1; _this_unit++) { \ - _ptr[_this_unit] = (type) _v; \ - _bt_safe_rshift(_v, _ts); \ - _start += _ts; \ - } \ - if (_end % _ts) { \ - _mask = _bt_make_mask_complement(type, _end % _ts); \ - _cmask = (type) _v; \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - } else \ - _ptr[_this_unit] = (type) _v; \ -} while (0) - -#define _bt_bitfield_write_be(ptr, type, start, length, v) \ -do { \ - __typeof__(v) _v = (v); \ - type *_ptr = (void *) (ptr); \ - unsigned long _start = (start), _length = (length); \ - type _mask, _cmask; \ - unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ - unsigned long _start_unit, _end_unit, _this_unit; \ - unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ - \ - if (!_length) \ - break; \ - \ - _end = _start + _length; \ - _start_unit = _start / _ts; \ - _end_unit = (_end + (_ts - 1)) / _ts; \ - \ - /* Trim v high bits */ \ - if (_length < sizeof(_v) * CHAR_BIT) \ - _v &= _bt_make_mask(__typeof__(_v), _length); \ - \ - /* We can now append v with a simple "or", shift it piece-wise */ \ - _this_unit = _end_unit - 1; \ - if (_start_unit == _end_unit - 1) { \ - _mask = _bt_make_mask(type, (_ts - (_end % _ts)) % _ts); \ - if (_start % _ts) \ - _mask |= _bt_make_mask_complement(type, _ts - (_start % _ts)); \ - _cmask = _bt_lshift((type) (_v), (_ts - (_end % _ts)) % _ts); \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - break; \ - } \ - if (_end % _ts) { \ - _cshift = _end % _ts; \ - _mask = _bt_make_mask(type, _ts - _cshift); \ - _cmask = _bt_lshift((type) (_v), _ts - _cshift); \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - _bt_safe_rshift(_v, _cshift); \ - _end -= _cshift; \ - 
_this_unit--; \ - } \ - for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \ - _ptr[_this_unit] = (type) _v; \ - _bt_safe_rshift(_v, _ts); \ - _end -= _ts; \ - } \ - if (_start % _ts) { \ - _mask = _bt_make_mask_complement(type, _ts - (_start % _ts)); \ - _cmask = (type) _v; \ - _cmask &= ~_mask; \ - _ptr[_this_unit] &= _mask; \ - _ptr[_this_unit] |= _cmask; \ - } else \ - _ptr[_this_unit] = (type) _v; \ -} while (0) - -/* - * bt_bitfield_write - write integer to a bitfield in native endianness - * bt_bitfield_write_le - write integer to a bitfield in little endian - * bt_bitfield_write_be - write integer to a bitfield in big endian - */ - -#if (__BYTE_ORDER == __LITTLE_ENDIAN) - -#define bt_bitfield_write(ptr, type, start, length, v) \ - _bt_bitfield_write_le(ptr, type, start, length, v) - -#define bt_bitfield_write_le(ptr, type, start, length, v) \ - _bt_bitfield_write_le(ptr, type, start, length, v) - -#define bt_bitfield_write_be(ptr, type, start, length, v) \ - _bt_bitfield_write_be(ptr, unsigned char, start, length, v) - -#elif (__BYTE_ORDER == __BIG_ENDIAN) - -#define bt_bitfield_write(ptr, type, start, length, v) \ - _bt_bitfield_write_be(ptr, type, start, length, v) - -#define bt_bitfield_write_le(ptr, type, start, length, v) \ - _bt_bitfield_write_le(ptr, unsigned char, start, length, v) - -#define bt_bitfield_write_be(ptr, type, start, length, v) \ - _bt_bitfield_write_be(ptr, type, start, length, v) - -#else /* (__BYTE_ORDER == __PDP_ENDIAN) */ - -#error "Byte order not supported" - -#endif - -#define _bt_bitfield_read_le(ptr, type, start, length, vptr) \ -do { \ - __typeof__(*(vptr)) *_vptr = (vptr); \ - __typeof__(*_vptr) _v; \ - type *_ptr = (type *) (ptr); \ - unsigned long _start = (start), _length = (length); \ - type _mask, _cmask; \ - unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ - unsigned long _start_unit, _end_unit, _this_unit; \ - unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ - bool _is_signed_type; \ - \ - if (!_length) { \ - *_vptr = 0; \ - break; \ - } \ - \ - _end = _start + _length; \ - _start_unit = _start / _ts; \ - _end_unit = (_end + (_ts - 1)) / _ts; \ - \ - _this_unit = _end_unit - 1; \ - _BT_DIAG_PUSH \ - _BT_DIAG_IGNORE_TYPE_LIMITS \ - _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \ - _BT_DIAG_POP \ - if (_is_signed_type \ - && (_ptr[_this_unit] & _bt_lshift((type) 1, (_end % _ts ? 
_end % _ts : _ts) - 1))) \ - _v = ~(__typeof__(_v)) 0; \ - else \ - _v = 0; \ - if (_start_unit == _end_unit - 1) { \ - _cmask = _ptr[_this_unit]; \ - _cmask = _bt_rshift(_cmask, _start % _ts); \ - if ((_end - _start) % _ts) { \ - _mask = _bt_make_mask(type, _end - _start); \ - _cmask &= _mask; \ - } \ - _bt_safe_lshift(_v, _end - _start); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - *_vptr = _v; \ - break; \ - } \ - if (_end % _ts) { \ - _cshift = _end % _ts; \ - _mask = _bt_make_mask(type, _cshift); \ - _cmask = _ptr[_this_unit]; \ - _cmask &= _mask; \ - _bt_safe_lshift(_v, _cshift); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - _end -= _cshift; \ - _this_unit--; \ - } \ - for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \ - _bt_safe_lshift(_v, _ts); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ - _end -= _ts; \ - } \ - if (_start % _ts) { \ - _mask = _bt_make_mask(type, _ts - (_start % _ts)); \ - _cmask = _ptr[_this_unit]; \ - _cmask = _bt_rshift(_cmask, _start % _ts); \ - _cmask &= _mask; \ - _bt_safe_lshift(_v, _ts - (_start % _ts)); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - } else { \ - _bt_safe_lshift(_v, _ts); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ - } \ - *_vptr = _v; \ -} while (0) - -#define _bt_bitfield_read_be(ptr, type, start, length, vptr) \ -do { \ - __typeof__(*(vptr)) *_vptr = (vptr); \ - __typeof__(*_vptr) _v; \ - type *_ptr = (void *) (ptr); \ - unsigned long _start = (start), _length = (length); \ - type _mask, _cmask; \ - unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \ - unsigned long _start_unit, _end_unit, _this_unit; \ - unsigned long _end, _cshift; /* _cshift is "complement shift" */ \ - bool _is_signed_type; \ - \ - if (!_length) { \ - *_vptr = 0; \ - break; \ - } \ - \ - _end = _start + _length; \ - _start_unit = _start / _ts; \ - _end_unit = (_end + (_ts - 1)) / _ts; \ - \ - _this_unit = _start_unit; \ - _BT_DIAG_PUSH \ - _BT_DIAG_IGNORE_TYPE_LIMITS \ - _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \ - _BT_DIAG_POP \ - if (_is_signed_type \ - && (_ptr[_this_unit] & _bt_lshift((type) 1, _ts - (_start % _ts) - 1))) \ - _v = ~(__typeof__(_v)) 0; \ - else \ - _v = 0; \ - if (_start_unit == _end_unit - 1) { \ - _cmask = _ptr[_this_unit]; \ - _cmask = _bt_rshift(_cmask, (_ts - (_end % _ts)) % _ts); \ - if ((_end - _start) % _ts) { \ - _mask = _bt_make_mask(type, _end - _start); \ - _cmask &= _mask; \ - } \ - _bt_safe_lshift(_v, _end - _start); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - *_vptr = _v; \ - break; \ - } \ - if (_start % _ts) { \ - _cshift = _start % _ts; \ - _mask = _bt_make_mask(type, _ts - _cshift); \ - _cmask = _ptr[_this_unit]; \ - _cmask &= _mask; \ - _bt_safe_lshift(_v, _ts - _cshift); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - _start += _ts - _cshift; \ - _this_unit++; \ - } \ - for (; _this_unit < _end_unit - 1; _this_unit++) { \ - _bt_safe_lshift(_v, _ts); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ - _start += _ts; \ - } \ - if (_end % _ts) { \ - _mask = _bt_make_mask(type, _end % _ts); \ - _cmask = _ptr[_this_unit]; \ - _cmask = _bt_rshift(_cmask, _ts - (_end % _ts)); \ - _cmask &= _mask; \ - _bt_safe_lshift(_v, _end % _ts); \ - _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \ - } else { \ - _bt_safe_lshift(_v, _ts); \ - _v |= 
_bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \ - } \ - *_vptr = _v; \ -} while (0) - -/* - * bt_bitfield_read - read integer from a bitfield in native endianness - * bt_bitfield_read_le - read integer from a bitfield in little endian - * bt_bitfield_read_be - read integer from a bitfield in big endian - */ - -#if (__BYTE_ORDER == __LITTLE_ENDIAN) - -#define bt_bitfield_read(ptr, type, start, length, vptr) \ - _bt_bitfield_read_le(ptr, type, start, length, vptr) - -#define bt_bitfield_read_le(ptr, type, start, length, vptr) \ - _bt_bitfield_read_le(ptr, type, start, length, vptr) - -#define bt_bitfield_read_be(ptr, type, start, length, vptr) \ - _bt_bitfield_read_be(ptr, unsigned char, start, length, vptr) - -#elif (__BYTE_ORDER == __BIG_ENDIAN) - -#define bt_bitfield_read(ptr, type, start, length, vptr) \ - _bt_bitfield_read_be(ptr, type, start, length, vptr) - -#define bt_bitfield_read_le(ptr, type, start, length, vptr) \ - _bt_bitfield_read_le(ptr, unsigned char, start, length, vptr) - -#define bt_bitfield_read_be(ptr, type, start, length, vptr) \ - _bt_bitfield_read_be(ptr, type, start, length, vptr) - -#else /* (__BYTE_ORDER == __PDP_ENDIAN) */ - -#error "Byte order not supported" - -#endif - -#endif /* _BABELTRACE_BITFIELD_H */ diff --git a/lib/bug.h b/lib/bug.h deleted file mode 100644 index ea73f158..00000000 --- a/lib/bug.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) - * - * lib/bug.h - * - * Copyright (C) 2010-2012 Mathieu Desnoyers - */ - -#ifndef _LTTNG_BUG_H -#define _LTTNG_BUG_H - -/** - * BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime - * @condition: the condition which should be false. - * - * If the condition is a constant and true, the compiler will generate a build - * error. If the condition is not constant, a BUG will be triggered at runtime - * if the condition is ever true. If the condition is constant and false, no - * code is emitted. - */ -#define BUILD_RUNTIME_BUG_ON(condition) \ - do { \ - if (__builtin_constant_p(condition)) \ - BUILD_BUG_ON(condition); \ - else \ - BUG_ON(condition); \ - } while (0) - -#endif diff --git a/lib/prio_heap/lttng_prio_heap.c b/lib/prio_heap/lttng_prio_heap.c index 04438bbf..0b85ddbe 100644 --- a/lib/prio_heap/lttng_prio_heap.c +++ b/lib/prio_heap/lttng_prio_heap.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #ifdef DEBUG_HEAP diff --git a/lib/prio_heap/lttng_prio_heap.h b/lib/prio_heap/lttng_prio_heap.h deleted file mode 100644 index f97f33f3..00000000 --- a/lib/prio_heap/lttng_prio_heap.h +++ /dev/null @@ -1,108 +0,0 @@ -/* SPDX-License-Identifier: MIT - * - * lttng_prio_heap.h - * - * Priority heap containing pointers. Based on CLRS, chapter 6. - * - * Copyright 2011 - Mathieu Desnoyers - */ - -#ifndef _LTTNG_PRIO_HEAP_H -#define _LTTNG_PRIO_HEAP_H - -#include - -struct lttng_ptr_heap { - size_t len, alloc_len; - void **ptrs; - int (*gt)(void *a, void *b); - gfp_t gfpmask; -}; - -#ifdef DEBUG_HEAP -void lttng_check_heap(const struct lttng_ptr_heap *heap); -#else -static inline -void lttng_check_heap(const struct lttng_ptr_heap *heap) -{ -} -#endif - -/** - * lttng_heap_maximum - return the largest element in the heap - * @heap: the heap to be operated on - * - * Returns the largest element in the heap, without performing any modification - * to the heap structure. Returns NULL if the heap is empty. 
- */ -static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap) -{ - lttng_check_heap(heap); - return heap->len ? heap->ptrs[0] : NULL; -} - -/** - * lttng_heap_init - initialize the heap - * @heap: the heap to initialize - * @alloc_len: number of elements initially allocated - * @gfp: allocation flags - * @gt: function to compare the elements - * - * Returns -ENOMEM if out of memory. - */ -extern int lttng_heap_init(struct lttng_ptr_heap *heap, - size_t alloc_len, gfp_t gfpmask, - int gt(void *a, void *b)); - -/** - * lttng_heap_free - free the heap - * @heap: the heap to free - */ -extern void lttng_heap_free(struct lttng_ptr_heap *heap); - -/** - * lttng_heap_insert - insert an element into the heap - * @heap: the heap to be operated on - * @p: the element to add - * - * Insert an element into the heap. - * - * Returns -ENOMEM if out of memory. - */ -extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p); - -/** - * lttng_heap_remove - remove the largest element from the heap - * @heap: the heap to be operated on - * - * Returns the largest element in the heap. It removes this element from the - * heap. Returns NULL if the heap is empty. - */ -extern void *lttng_heap_remove(struct lttng_ptr_heap *heap); - -/** - * lttng_heap_cherrypick - remove a given element from the heap - * @heap: the heap to be operated on - * @p: the element - * - * Remove the given element from the heap. Return the element if present, else - * return NULL. This algorithm has a complexity of O(n), which is higher than - * O(log(n)) provided by the rest of this API. - */ -extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p); - -/** - * lttng_heap_replace_max - replace the the largest element from the heap - * @heap: the heap to be operated on - * @p: the pointer to be inserted as topmost element replacement - * - * Returns the largest element in the heap. It removes this element from the - * heap. The heap is rebalanced only once after the insertion. Returns NULL if - * the heap is empty. - * - * This is the equivalent of calling heap_remove() and then heap_insert(), but - * it only rebalances the heap once. It never allocates memory. - */ -extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p); - -#endif /* _LTTNG_PRIO_HEAP_H */ diff --git a/lttng-filter-specialize.c b/lttng-filter-specialize.c index 3dc82d84..f0da47ed 100644 --- a/lttng-filter-specialize.c +++ b/lttng-filter-specialize.c @@ -9,7 +9,7 @@ #include #include -#include "lib/align.h" +#include static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime, size_t align, size_t len) diff --git a/lttng-ring-buffer-client.h b/lttng-ring-buffer-client.h index 9e204caa..67804d3a 100644 --- a/lttng-ring-buffer-client.h +++ b/lttng-ring-buffer-client.h @@ -9,7 +9,7 @@ #include #include -#include +#include #include /* for wrapper_vmalloc_sync_mappings() */ #include #include diff --git a/lttng-syscalls.c b/lttng-syscalls.c index 7720f4ea..a376bcbe 100644 --- a/lttng-syscalls.c +++ b/lttng-syscalls.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include #include #include
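
The bitfield macros moved to include/lttng/bitfield.h above pack and unpack integers at arbitrary bit offsets and widths, in a chosen endianness, with sign extension on read when the destination type is signed. Below is a minimal usage sketch, not part of this commit, assuming the header is reachable as <lttng/bitfield.h> after this move; the function and buffer names are illustrative only.

#include <lttng/bitfield.h>

/* Illustrative only: round-trip a signed 5-bit field at bit offset 3. */
static void bitfield_roundtrip_sketch(void)
{
	unsigned char buf[4] = { 0 };
	int64_t out;

	/* Write the value -5 into the 5-bit field starting at bit 3 (native endianness). */
	bt_bitfield_write(buf, unsigned char, 3, 5, (int64_t) -5);

	/* Read the same field back; `out` is a signed type, so the field is sign-extended to -5. */
	bt_bitfield_read(buf, unsigned char, 3, 5, &out);
}

Reading into an unsigned destination would instead zero-extend the field, as documented in the header's comments.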