From: Michael Jeanson
Date: Thu, 18 Mar 2021 18:16:40 +0000 (-0400)
Subject: Centralize arch detection in a public header
X-Git-Tag: v2.13.0-rc1~229
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=2eba8e397d36e8cecaf4731b4f892dc57f32a382;p=lttng-ust.git

Centralize arch detection in a public header

Add a public header with centralized compiler arch detection and
arch-specific parameters.

Namespace everything under 'LTTNG_UST_ARCH_'.

Move "LTTNG_UST_HAVE_EFFICIENT_UNALIGNED_ACCESS" from a configure-time
define to a static one based on compiler arch detection. This will
simplify the build system and remove the possibility of accidentally
mixing aligned and unaligned access in a multiarch environment with
shared header files.

Change-Id: Ic977d13f031ad070bea4ff5d7b2b8079843e0c26
Signed-off-by: Michael Jeanson
Signed-off-by: Mathieu Desnoyers
---
diff --git a/configure.ac b/configure.ac
index 4d82488d..d089cfaa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -196,18 +196,16 @@ AS_CASE([$host_cpu],
 	[ppc64le], [],
 	[powerpc64], [],
 	[powerpc64le], [],
-	[s390], [NO_UNALIGNED_ACCESS=1],
-	[s390x], [NO_UNALIGNED_ACCESS=1],
+	[s390], [],
+	[s390x], [],
 	[arm*], [
-		NO_UNALIGNED_ACCESS=1
 		NO_NUMA=1
 	],
-	[aarch64*], [NO_UNALIGNED_ACCESS=1],
-	[mips*], [NO_UNALIGNED_ACCESS=1],
-	[tile*], [NO_UNALIGNED_ACCESS=1],
+	[aarch64*], [],
+	[mips*], [],
+	[tile*], [],
 	[
 		UNSUPPORTED_ARCH=1
-		NO_UNALIGNED_ACCESS=1
 	])
 
 # Set os specific options
@@ -216,7 +214,6 @@ AS_CASE([$host_os],
 )
 
 # Configuration options, which will be installed in the config.h
-AH_TEMPLATE([LTTNG_UST_HAVE_EFFICIENT_UNALIGNED_ACCESS], [Use efficient unaligned access.])
 AH_TEMPLATE([LTTNG_UST_HAVE_SDT_INTEGRATION], [SystemTap integration via sdt.h])
 
 # Checks for libraries.
@@ -288,10 +285,6 @@ AS_IF([test "x$have_perf_event" = "xyes"], [
 	AC_DEFINE([HAVE_PERF_EVENT], [1])
 ])
 
-AS_IF([test "x$NO_UNALIGNED_ACCESS" = "x"], [
-	AC_DEFINE([LTTNG_UST_HAVE_EFFICIENT_UNALIGNED_ACCESS], [1])
-])
-
 # Check for JNI header files if requested
 AC_ARG_ENABLE([jni-interface], [
 AS_HELP_STRING([--enable-jni-interface], [build JNI interface between C and Java. Needs Java include files [default=no]])
@@ -567,9 +560,6 @@ PPRINT_SUBTITLE([System])
 
 PPRINT_PROP_STRING([Target architecture], $host_cpu)
 
-test "x$NO_UNALIGNED_ACCESS" != "x1" && value=1 || value=0
-PPRINT_PROP_BOOL([Efficient unaligned memory access], $value)
-
 AS_IF([test "x$UNSUPPORTED_ARCH" = "x1"],[
 	PPRINT_WARN([Your architecture ($host_cpu) is unsupported, using safe default of no unaligned access.])
 ])
diff --git a/include/Makefile.am b/include/Makefile.am
index 0020ca10..9583c8b6 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -13,6 +13,7 @@ nobase_include_HEADERS = \
 	lttng/ust-tracepoint-event-reset.h \
 	lttng/ust-tracepoint-event-write.h \
 	lttng/ust-tracepoint-event-nowrite.h \
+	lttng/ust-arch.h \
 	lttng/ust-events.h \
 	lttng/ust-ctl.h \
 	lttng/ust-abi.h \
diff --git a/include/lttng/ust-arch.h b/include/lttng/ust-arch.h
new file mode 100644
index 00000000..b2c7fe92
--- /dev/null
+++ b/include/lttng/ust-arch.h
@@ -0,0 +1,148 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2021 Michael Jeanson
+ *
+ */
+
+#ifndef _LTTNG_UST_ARCH_H
+#define _LTTNG_UST_ARCH_H
+
+/*
+ * Architecture detection using compiler defines.
+ *
+ * The following defines are used internally for architecture specific code.
+ *
+ * LTTNG_UST_ARCH_X86 : All x86 variants 32 and 64 bits
+ * LTTNG_UST_ARCH_I386 : Specific to the i386
+ * LTTNG_UST_ARCH_AMD64 : All 64 bits x86 variants
+ * LTTNG_UST_ARCH_K1OM : Specific to the Xeon Phi / MIC
+ *
+ * LTTNG_UST_ARCH_PPC : All PowerPC variants 32 and 64 bits
+ * LTTNG_UST_ARCH_PPC64 : Specific to 64 bits variants
+ *
+ * LTTNG_UST_ARCH_S390 : All IBM s390 / s390x variants
+ *
+ * LTTNG_UST_ARCH_SPARC64 : All Sun SPARC variants
+ *
+ * LTTNG_UST_ARCH_ALPHA : All DEC Alpha variants
+ * LTTNG_UST_ARCH_IA64 : All Intel Itanium variants
+ * LTTNG_UST_ARCH_ARM : All ARM 32 bits variants
+ * LTTNG_UST_ARCH_ARMV7 : All ARMv7 ISA variants
+ * LTTNG_UST_ARCH_AARCH64 : All ARM 64 bits variants
+ * LTTNG_UST_ARCH_MIPS : All MIPS variants
+ * LTTNG_UST_ARCH_NIOS2 : All Intel / Altera NIOS II variants
+ * LTTNG_UST_ARCH_TILE : All Tilera TILE variants
+ * LTTNG_UST_ARCH_HPPA : All HP PA-RISC variants
+ * LTTNG_UST_ARCH_M68K : All Motorola 68000 variants
+ * LTTNG_UST_ARCH_RISCV : All RISC-V variants
+ */
+
+#if (defined(__INTEL_OFFLOAD) || defined(__TARGET_ARCH_MIC) || defined(__MIC__))
+
+#define LTTNG_UST_ARCH_X86 1
+#define LTTNG_UST_ARCH_AMD64 1
+#define LTTNG_UST_ARCH_K1OM 1
+
+#elif (defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64))
+
+#define LTTNG_UST_ARCH_X86 1
+#define LTTNG_UST_ARCH_AMD64 1
+
+#elif (defined(__i486__) || defined(__i586__) || defined(__i686__))
+
+#define LTTNG_UST_ARCH_X86 1
+
+#elif (defined(__i386__) || defined(__i386))
+
+#define LTTNG_UST_ARCH_X86 1
+#define LTTNG_UST_ARCH_I386 1
+
+#elif (defined(__powerpc64__) || defined(__ppc64__))
+
+#define LTTNG_UST_ARCH_PPC 1
+#define LTTNG_UST_ARCH_PPC64 1
+
+#elif (defined(__powerpc__) || defined(__powerpc) || defined(__ppc__))
+
+#define LTTNG_UST_ARCH_PPC 1
+
+#elif (defined(__s390__) || defined(__s390x__) || defined(__zarch__))
+
+#define LTTNG_UST_ARCH_S390 1
+
+#elif (defined(__sparc__) || defined(__sparc) || defined(__sparc64__))
+
+#define LTTNG_UST_ARCH_SPARC64 1
+
+#elif (defined(__alpha__) || defined(__alpha))
+
+#define LTTNG_UST_ARCH_ALPHA 1
+
+#elif (defined(__ia64__) || defined(__ia64))
+
+#define LTTNG_UST_ARCH_IA64 1
+
+#elif (defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__))
+
+#define LTTNG_UST_ARCH_ARMV7 1
+#define LTTNG_UST_ARCH_ARM 1
+
+#elif (defined(__arm__) || defined(__arm))
+
+#define LTTNG_UST_ARCH_ARM 1
+
+#elif defined(__aarch64__)
+
+#define LTTNG_UST_ARCH_AARCH64 1
+
+#elif (defined(__mips__) || defined(__mips))
+
+#define LTTNG_UST_ARCH_MIPS 1
+
+#elif (defined(__nios2__) || defined(__nios2))
+
+#define LTTNG_UST_ARCH_NIOS2 1
+
+#elif (defined(__tile__) || defined(__tilegx__))
+
+#define LTTNG_UST_ARCH_TILE 1
+
+#elif (defined(__hppa__) || defined(__HPPA__) || defined(__hppa))
+
+#define LTTNG_UST_ARCH_HPPA 1
+
+#elif defined(__m68k__)
+
+#define LTTNG_UST_ARCH_M68K 1
+
+#elif defined(__riscv)
+
+#define LTTNG_UST_ARCH_RISCV 1
+
+#else
+
+/* Unrecognised architecture, use safe defaults */
+#define LTTNG_UST_ARCH_UNKNOWN 1
+
+#endif
+
+
+/*
+ * Per architecture global settings.
+ *
+ * LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS:
+ *   The architecture has working and efficient unaligned memory access, the
+ *   content of the ringbuffers will be packed instead of following the natural
+ *   alignment of the architecture.
+ */
+
+#if defined(LTTNG_UST_ARCH_X86)
+#define LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS 1
+#endif
+
+#if defined(LTTNG_UST_ARCH_PPC)
+#define LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS 1
+#endif
+
+#endif /* _LTTNG_UST_ARCH_H */
diff --git a/include/lttng/ust-config.h.in b/include/lttng/ust-config.h.in
index 92cdf2dd..fb6e371c 100644
--- a/include/lttng/ust-config.h.in
+++ b/include/lttng/ust-config.h.in
@@ -2,10 +2,12 @@
  * SPDX-License-Identifier: MIT
  */
 
-/* ust/config.h.in. Manually generated for control over the contained defs. */
+#ifndef _LTTNG_UST_CONFIG_H
+#define _LTTNG_UST_CONFIG_H
 
-/* Use efficient unaligned access. */
-#undef LTTNG_UST_HAVE_EFFICIENT_UNALIGNED_ACCESS
+/* lttng/ust-config.h.in. Manually generated for control over the contained defs. */
 
 /* DTrace/GDB/SystemTap integration via sdt.h */
 #undef LTTNG_UST_HAVE_SDT_INTEGRATION
+
+#endif
diff --git a/include/lttng/ust-tracepoint-event.h b/include/lttng/ust-tracepoint-event.h
index fc327a68..9066e495 100644
--- a/include/lttng/ust-tracepoint-event.h
+++ b/include/lttng/ust-tracepoint-event.h
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <lttng/ust-arch.h>
 #include
 #include
 #include
@@ -766,11 +767,11 @@ size_t __event_get_align__##_provider##___##_name(_TP_ARGS_PROTO(_args)) \
 #define _TP_IP_PARAM(x) (x)
 #else /* TP_IP_PARAM */
 
-#if defined(__PPC__) && !defined(__PPC64__)
+#if defined(LTTNG_UST_ARCH_PPC) && !defined(LTTNG_UST_ARCH_PPC64)
 #define _TP_IP_PARAM(x) NULL
-#else /* #if defined(__PPC__) && !defined(__PPC64__) */
+#else
 #define _TP_IP_PARAM(x) __builtin_return_address(0)
-#endif /* #else #if defined(__PPC__) && !defined(__PPC64__) */
+#endif
 
 #endif /* TP_IP_PARAM */
diff --git a/include/lttng/ust-tracer.h b/include/lttng/ust-tracer.h
index 9593609d..0fd5a707 100644
--- a/include/lttng/ust-tracer.h
+++ b/include/lttng/ust-tracer.h
@@ -15,11 +15,12 @@
 #include
 #endif
 
+#include <lttng/ust-arch.h>
 #include
 #include
 #include
 
-#ifndef LTTNG_UST_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifndef LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS
 /* Align data on its natural alignment */
 #define RING_BUFFER_ALIGN
 #endif
diff --git a/include/ust-helper.h b/include/ust-helper.h
index 609a1028..e8b393a8 100644
--- a/include/ust-helper.h
+++ b/include/ust-helper.h
@@ -9,6 +9,8 @@
 
 #include
 
+#include <lttng/ust-arch.h>
+
 static inline __attribute__((always_inline))
 void *zmalloc(size_t len)
 {
@@ -37,10 +39,10 @@ void *zmalloc(size_t len)
  * architecture for now by always using the NULL value for the ip
  * context.
 */
-#if defined(__PPC__) && !defined(__PPC64__)
+#if defined(LTTNG_UST_ARCH_PPC) && !defined(LTTNG_UST_ARCH_PPC64)
 #define LTTNG_UST_CALLER_IP() NULL
-#else /* #if defined(__PPC__) && !defined(__PPC64__) */
+#else
 #define LTTNG_UST_CALLER_IP() __builtin_return_address(0)
-#endif /* #else #if defined(__PPC__) && !defined(__PPC64__) */
+#endif
 
 #endif /* _LTTNG_UST_HELPER_H */
diff --git a/liblttng-ust/lttng-context-perf-counters.c b/liblttng-ust/lttng-context-perf-counters.c
index 59393084..69791439 100644
--- a/liblttng-ust/lttng-context-perf-counters.c
+++ b/liblttng-ust/lttng-context-perf-counters.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <lttng/ust-arch.h>
 #include
 #include
 #include
@@ -181,7 +182,7 @@ uint64_t read_perf_counter_syscall(
 	return count;
 }
 
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(LTTNG_UST_ARCH_X86)
 
 static
 uint64_t rdpmc(unsigned int counter)
@@ -504,7 +505,7 @@ void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
 	free(perf_field);
 }
 
-#ifdef __ARM_ARCH_7A__
+#ifdef LTTNG_UST_ARCH_ARMV7
 
 static
 int perf_get_exclude_kernel(void)
@@ -512,7 +513,7 @@ int perf_get_exclude_kernel(void)
 	return 0;
 }
 
-#else /* __ARM_ARCH_7A__ */
+#else /* LTTNG_UST_ARCH_ARMV7 */
 
 static
 int perf_get_exclude_kernel(void)
@@ -520,7 +521,7 @@ int perf_get_exclude_kernel(void)
 	return 1;
 }
 
-#endif /* __ARM_ARCH_7A__ */
+#endif /* LTTNG_UST_ARCH_ARMV7 */
 
 /* Called with UST lock held */
 int lttng_add_perf_counter_to_ctx(uint32_t type,
diff --git a/liblttng-ust/rculfhash.c b/liblttng-ust/rculfhash.c
index cd1c8ed0..b25c1c18 100644
--- a/liblttng-ust/rculfhash.c
+++ b/liblttng-ust/rculfhash.c
@@ -256,6 +256,7 @@
 #include
 #include
+#include <lttng/ust-arch.h>
 #include
 #include
 #include
@@ -405,7 +406,7 @@ unsigned long bit_reverse_ulong(unsigned long v)
  * Returns 0 if no bit is set, else returns the position of the most
  * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
  */
-#if defined(__i386) || defined(__x86_64)
+#if defined(LTTNG_UST_ARCH_X86)
 static inline
 unsigned int fls_u32(uint32_t x)
 {
@@ -421,7 +422,7 @@ unsigned int fls_u32(uint32_t x)
 #define HAS_FLS_U32
 #endif
 
-#if defined(__x86_64)
+#if defined(LTTNG_UST_ARCH_AMD64)
 static inline
 unsigned int fls_u64(uint64_t x)
 {
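
For context, a minimal usage sketch of the new public header follows. It is not part of the commit diff above; it assumes lttng-ust is installed so that lttng/ust-arch.h is on the include path, and the file name and printed strings are made up for illustration. It shows how code can branch on the centralized LTTNG_UST_ARCH_* defines and on LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS instead of testing raw compiler macros such as __x86_64__ or __ARM_ARCH_7A__ directly.

/* arch-info.c: print what lttng/ust-arch.h detected for this build. */
#include <stdio.h>
#include <lttng/ust-arch.h>

int main(void)
{
#if defined(LTTNG_UST_ARCH_X86)
	printf("Detected arch: x86 family\n");
#elif defined(LTTNG_UST_ARCH_AARCH64)
	printf("Detected arch: 64-bit ARM\n");
#elif defined(LTTNG_UST_ARCH_UNKNOWN)
	printf("Detected arch: unrecognised, safe defaults in effect\n");
#else
	printf("Detected arch: other supported architecture\n");
#endif

#if defined(LTTNG_UST_ARCH_HAS_EFFICIENT_UNALIGNED_ACCESS)
	/* Matches ust-tracer.h above: RING_BUFFER_ALIGN is not defined. */
	printf("Unaligned access is efficient: ring buffer fields may be packed\n");
#else
	/* Matches ust-tracer.h above: RING_BUFFER_ALIGN keeps natural alignment. */
	printf("Ring buffer fields keep their natural alignment\n");
#endif
	return 0;
}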