X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=liblttng-ust-libc-wrapper%2Flttng-ust-malloc.c;h=1e9a25e5c89dbbefbf6ff819a3a37a2e7d8d8c24;hb=9fe043d73a3e2112f859ce1fa489021a96efecc9;hp=ba8e4d42ac018e5808e80431aabe213bd9f5df7c;hpb=52c95399b67cf5a0bf4d52ffb4b354d67b8625ae;p=lttng-ust.git

diff --git a/liblttng-ust-libc-wrapper/lttng-ust-malloc.c b/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
index ba8e4d42..1e9a25e5 100644
--- a/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
+++ b/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
@@ -17,7 +17,11 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#define _GNU_SOURCE
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
 #include
 #include
 #include
@@ -28,6 +32,7 @@
 #include
 #include
 #include
+#include
 
 #define TRACEPOINT_DEFINE
 #define TRACEPOINT_CREATE_PROBES
@@ -90,8 +95,8 @@ void ust_malloc_spin_unlock(pthread_mutex_t *lock)
 #define pthread_mutex_lock ust_malloc_spin_lock
 #define pthread_mutex_unlock ust_malloc_spin_unlock
 static DEFINE_URCU_TLS(int, malloc_nesting);
-#undef ust_malloc_spin_unlock
-#undef ust_malloc_spin_lock
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
 #undef calloc
 
 /*
@@ -116,7 +121,7 @@ void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
 	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
 	do {
 		prev_offset = res_offset;
-		aligned_offset = ALIGN(prev_offset + sizeof(size_t), alignment);
+		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
 		new_offset = aligned_offset + nmemb * size;
 		if (new_offset > sizeof(static_calloc_buf)) {
 			abort();
@@ -262,7 +267,7 @@ void *malloc(size_t size)
 	retval = cur_alloc.malloc(size);
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, malloc,
-			size, retval, __builtin_return_address(0));
+			size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -282,7 +287,7 @@ void free(void *ptr)
 
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, free,
-			ptr, __builtin_return_address(0));
+			ptr, LTTNG_UST_CALLER_IP());
 	}
 
 	if (cur_alloc.free == NULL) {
@@ -312,7 +317,7 @@ void *calloc(size_t nmemb, size_t size)
 	retval = cur_alloc.calloc(nmemb, size);
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, calloc,
-			nmemb, size, retval, __builtin_return_address(0));
+			nmemb, size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -365,7 +370,7 @@ void *realloc(void *ptr, size_t size)
 end:
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, realloc,
-			ptr, size, retval, __builtin_return_address(0));
+			ptr, size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -387,7 +392,7 @@ void *memalign(size_t alignment, size_t size)
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, memalign,
 			alignment, size, retval,
-			__builtin_return_address(0));
+			LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -409,12 +414,18 @@ int posix_memalign(void **memptr, size_t alignment, size_t size)
 	if (URCU_TLS(malloc_nesting) == 1) {
 		tracepoint(lttng_ust_libc, posix_memalign,
 			*memptr, alignment, size,
-			retval, __builtin_return_address(0));
+			retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
 }
 
+static
+void lttng_ust_fixup_malloc_nesting_tls(void)
+{
+	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
+}
+
 __attribute__((constructor))
 void lttng_ust_malloc_wrapper_init(void)
 {
@@ -422,6 +433,7 @@ void lttng_ust_malloc_wrapper_init(void)
 	if (cur_alloc.calloc) {
 		return;
 	}
+	lttng_ust_fixup_malloc_nesting_tls();
 	/*
 	 * Ensure the allocator is in place before the process becomes
 	 * multithreaded.
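
Note on the pattern above (not part of the patch): the wrappers fire a tracepoint only when URCU_TLS(malloc_nesting) == 1, i.e. for the outermost allocation call in the current thread. Because the tracer itself may allocate, the counter keeps those nested allocations from being traced recursively. Below is a minimal, self-contained sketch of that technique; traced_malloc and trace_event are hypothetical names, and a plain __thread counter stands in for DEFINE_URCU_TLS/URCU_TLS.

#include <stdio.h>
#include <stdlib.h>

static __thread int malloc_nesting;	/* per-thread re-entrancy counter */

static void *traced_malloc(size_t size);

/* A "tracer" that itself allocates: without the nesting guard,
 * traced_malloc() would recurse into tracing without bound. */
static void trace_event(size_t size, void *ptr)
{
	void *scratch = traced_malloc(16);	/* nested call, not traced */

	fprintf(stderr, "malloc(%zu) = %p\n", size, ptr);
	free(scratch);
}

static void *traced_malloc(size_t size)
{
	void *ret;

	malloc_nesting++;
	ret = malloc(size);
	if (malloc_nesting == 1)	/* trace only the outermost call */
		trace_event(size, ret);
	malloc_nesting--;
	return ret;
}

int main(void)
{
	free(traced_malloc(32));
	return 0;
}

The same re-entrancy concern appears to motivate the added lttng_ust_fixup_malloc_nesting_tls() constructor call: the empty asm statement with an "m" operand forces the thread-local malloc_nesting variable to be resolved and allocated while the process is still single-threaded, so its first access does not happen lazily inside malloc() itself, where allocating the TLS block could re-enter the wrapper.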