X-Git-Url: http://git.lttng.org./?a=blobdiff_plain;f=liblttng-ust-libc-wrapper%2Flttng-ust-malloc.c;h=54b40e6a0a9546cbada853974994ddaf8fdac82e;hb=0a6c2d7c8b98a3fea54bf0f974b6d192b02d8898;hp=14545c2af6a2c43ab07edd584ff83d1878c5cae5;hpb=887bba304db29a6f9eb16d2efd0e821794def6d2;p=lttng-ust.git

diff --git a/liblttng-ust-libc-wrapper/lttng-ust-malloc.c b/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
index 14545c2a..54b40e6a 100644
--- a/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
+++ b/liblttng-ust-libc-wrapper/lttng-ust-malloc.c
@@ -1,24 +1,16 @@
 /*
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers
+ * SPDX-License-Identifier: LGPL-2.1-or-later
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers
  */
 
-#define _GNU_SOURCE
-#include
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
+#include
 #include
 #include
 #include
@@ -27,10 +19,12 @@
 #include
 #include
 #include
-#include
+#include
+#include "ust-compat.h"
 
 #define TRACEPOINT_DEFINE
 #define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
 #include "ust_libc.h"
 
 #define STATIC_CALLOC_LEN 4096
@@ -89,8 +83,8 @@ void ust_malloc_spin_unlock(pthread_mutex_t *lock)
 #define pthread_mutex_lock ust_malloc_spin_lock
 #define pthread_mutex_unlock ust_malloc_spin_unlock
 static DEFINE_URCU_TLS(int, malloc_nesting);
-#undef ust_malloc_spin_unlock
-#undef ust_malloc_spin_lock
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
 #undef calloc
 
 /*
@@ -115,7 +109,7 @@ void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
 	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
 	do {
 		prev_offset = res_offset;
-		aligned_offset = ALIGN(prev_offset + sizeof(size_t), alignment);
+		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
 		new_offset = aligned_offset + nmemb * size;
 		if (new_offset > sizeof(static_calloc_buf)) {
 			abort();
@@ -260,7 +254,8 @@ void *malloc(size_t size)
 	}
 	retval = cur_alloc.malloc(size);
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, malloc, size, retval, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, malloc,
+			size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -279,7 +274,8 @@ void free(void *ptr)
 	}
 
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, free, ptr, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, free,
+			ptr, LTTNG_UST_CALLER_IP());
 	}
 
 	if (cur_alloc.free == NULL) {
@@ -308,7 +304,8 @@ void *calloc(size_t nmemb, size_t size)
 	}
 	retval = cur_alloc.calloc(nmemb, size);
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, calloc, nmemb, size, retval, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, calloc,
+			nmemb, size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -360,7 +357,8 @@ void *realloc(void *ptr, size_t size)
 	retval = cur_alloc.realloc(ptr, size);
 end:
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, realloc, ptr, size, retval, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, realloc,
+			ptr, size, retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -380,7 +378,9 @@ void *memalign(size_t alignment, size_t size)
 	}
 	retval = cur_alloc.memalign(alignment, size);
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, memalign, alignment, size, retval, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, memalign,
+			alignment, size, retval,
+			LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
@@ -400,20 +400,28 @@ int posix_memalign(void **memptr, size_t alignment, size_t size)
 	}
 	retval = cur_alloc.posix_memalign(memptr, alignment, size);
 	if (URCU_TLS(malloc_nesting) == 1) {
-		tracepoint(ust_libc, posix_memalign, *memptr, alignment, size,
-			retval, __builtin_return_address(0));
+		tracepoint(lttng_ust_libc, posix_memalign,
+			*memptr, alignment, size,
+			retval, LTTNG_UST_CALLER_IP());
 	}
 	URCU_TLS(malloc_nesting)--;
 	return retval;
 }
 
+static
+void lttng_ust_fixup_malloc_nesting_tls(void)
+{
+	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
+}
+
 __attribute__((constructor))
-void lttng_ust_malloc_wrapper_init(void)
+void lttng_ust_libc_wrapper_malloc_init(void)
 {
 	/* Initialization already done */
 	if (cur_alloc.calloc) {
 		return;
 	}
+	lttng_ust_fixup_malloc_nesting_tls();
 	/*
 	 * Ensure the allocator is in place before the process becomes
 	 * multithreaded.
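
The hunks above lean on two techniques worth noting: a per-thread malloc_nesting counter, so that allocations performed internally by the tracepoint machinery are not themselves traced, and a constructor-time "fixup" that touches the TLS variable so libc allocates its TLS slot before the first wrapped call could recurse into the allocator. Below is a minimal standalone sketch of that pattern, not lttng-ust code: every name in it is illustrative, and unlike the real wrapper it omits the static_calloc bootstrap buffer that covers allocations made before dlsym() has resolved the real functions.

/*
 * Illustrative sketch only (not part of the patch): an LD_PRELOAD
 * malloc wrapper with a TLS reentrancy guard.
 * Build (assumed): gcc -shared -fPIC sketch.c -o libsketch.so -ldl
 * Use:             LD_PRELOAD=./libsketch.so ls
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

static __thread int nesting;		/* per-thread recursion guard */
static void *(*real_malloc)(size_t);

__attribute__((constructor))
static void wrapper_init(void)
{
	/* Touch the TLS variable so its slot exists before malloc is
	 * first wrapped, mirroring lttng_ust_fixup_malloc_nesting_tls()
	 * in the patch. */
	asm volatile ("" : : "m" (nesting));
	real_malloc = (void *(*)(size_t)) dlsym(RTLD_NEXT, "malloc");
}

void *malloc(size_t size)
{
	void *p;

	nesting++;
	if (!real_malloc)
		real_malloc = (void *(*)(size_t)) dlsym(RTLD_NEXT, "malloc");
	p = real_malloc(size);
	/* Report only the outermost call: fprintf() may itself call
	 * malloc(), and those nested calls must pass through silently. */
	if (nesting == 1)
		fprintf(stderr, "malloc(%zu) = %p, caller %p\n",
			size, p, __builtin_return_address(0));
	nesting--;
	return p;
}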