From: David Goulet Date: Fri, 14 Jun 2013 20:15:51 +0000 (-0400) Subject: [memleak] Add memleak directory and script to start program with it X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=5d20d7f89f46d5f6d14d52e1f64a9bbcb049d08c;p=lttng-tools.git [memleak] Add memleak directory and script to start program with it Signed-off-by: David Goulet --- diff --git a/memleak/README b/memleak/README new file mode 100644 index 000000000..7981015c8 --- /dev/null +++ b/memleak/README @@ -0,0 +1,15 @@ +LTTng memleak finder + +example usage: +LD_PRELOAD=./lttng-memleak-finder.so lttng-sessiond + +If you get e.g.: + +[warning] trying to free unallocated ptr 0x18b2a70 caller 0x7fae7943dd57 +or +[leak] ptr: 0x18b2350 size: 0x160 caller: 0x7fae7943e28f + +If you need to see local symbols as well, try compiling your code with +-Wl,--export-dynamic . + +Mathieu Desnoyers, May 2013 diff --git a/memleak/compiler.h b/memleak/compiler.h new file mode 100644 index 000000000..c4ade9092 --- /dev/null +++ b/memleak/compiler.h @@ -0,0 +1,106 @@ +#ifndef _URCU_COMPILER_H +#define _URCU_COMPILER_H + +/* + * compiler.h + * + * Compiler definitions. + * + * Copyright (c) 2009 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + */ + +#include /* for offsetof */ + +#define caa_likely(x) __builtin_expect(!!(x), 1) +#define caa_unlikely(x) __builtin_expect(!!(x), 0) + +#define cmm_barrier() __asm__ __volatile__ ("" : : : "memory") + +/* + * Instruct the compiler to perform only a single access to a variable + * (prohibits merging and refetching). The compiler is also forbidden to reorder + * successive instances of CMM_ACCESS_ONCE(), but only when the compiler is aware of + * particular ordering. Compiler ordering can be ensured, for example, by + * putting two CMM_ACCESS_ONCE() in separate C statements. + * + * This macro does absolutely -nothing- to prevent the CPU from reordering, + * merging, or refetching absolutely anything at any time. Its main intended + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. + */ +#define CMM_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x)) + +#ifndef caa_max +#define caa_max(a,b) ((a)>(b)?(a):(b)) +#endif + +#ifndef caa_min +#define caa_min(a,b) ((a)<(b)?(a):(b)) +#endif + +#if defined(__SIZEOF_LONG__) +#define CAA_BITS_PER_LONG (__SIZEOF_LONG__ * 8) +#elif defined(_LP64) +#define CAA_BITS_PER_LONG 64 +#else +#define CAA_BITS_PER_LONG 32 +#endif + +/* + * caa_container_of - Get the address of an object containing a field. + * + * @ptr: pointer to the field. + * @type: type of the object. + * @member: name of the field within the object. 
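 *
 * Usage sketch (illustrative; the structure mirrors the mh_entry type that
 * lttng-memleak-finder.c defines later in this patch). Given a
 * struct cds_hlist_node *node embedded in such an entry:
 *
 *   struct mh_entry {
 *           struct cds_hlist_node hlist;
 *           void *ptr;
 *   };
 *
 *   struct mh_entry *e = caa_container_of(node, struct mh_entry, hlist);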
+ */ +#define caa_container_of(ptr, type, member) \ + ({ \ + const __typeof__(((type *) NULL)->member) * __ptr = (ptr); \ + (type *)((char *)__ptr - offsetof(type, member)); \ + }) + +#define CAA_BUILD_BUG_ON_ZERO(cond) (sizeof(struct { int:-!!(cond); })) +#define CAA_BUILD_BUG_ON(cond) ((void)CAA_BUILD_BUG_ON_ZERO(cond)) + +/* + * __rcu is an annotation that documents RCU pointer accesses that need + * to be protected by a read-side critical section. Eventually, a static + * checker will be able to use this annotation to detect incorrect RCU + * usage. + */ +#define __rcu + +#ifdef __cplusplus +#define URCU_FORCE_CAST(type, arg) (reinterpret_cast(arg)) +#else +#define URCU_FORCE_CAST(type, arg) ((type) (arg)) +#endif + +#define caa_is_signed_type(type) (((type) (-1)) < 0) + +#define caa_cast_long_keep_sign(v) \ + (caa_is_signed_type(__typeof__(v)) ? (long) (v) : (unsigned long) (v)) + +#if defined (__GNUC__) \ + && ((__GNUC_MAJOR__ == 4) && (__GNUC_MINOR__ >= 5) \ + || __GNUC_MAJOR__ >= 5) +#define CDS_DEPRECATED(msg) \ + __attribute__((deprecated(msg))) +#else +#define CDS_DEPRECATED(msg) \ + __attribute__((deprecated)) +#endif + +#define CAA_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#endif /* _URCU_COMPILER_H */ diff --git a/memleak/hlist.h b/memleak/hlist.h new file mode 100644 index 000000000..15e672bc1 --- /dev/null +++ b/memleak/hlist.h @@ -0,0 +1,86 @@ +#ifndef _KCOMPAT_HLIST_H +#define _KCOMPAT_HLIST_H + +/* + * Kernel sourcecode compatible lightweight single pointer list head useful + * for implementing hash tables + * + * Copyright (C) 2009 Novell Inc. + * + * Author: Jan Blunck + * + * Copyright (C) 2010 Mathieu Desnoyers + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License version 2.1 as + * published by the Free Software Foundation. + */ + +#include + +struct cds_hlist_head +{ + struct cds_hlist_node *next; +}; + +struct cds_hlist_node +{ + struct cds_hlist_node *next; + struct cds_hlist_node *prev; +}; + +/* Initialize a new list head. */ +static inline void CDS_INIT_HLIST_HEAD(struct cds_hlist_head *ptr) +{ + ptr->next = NULL; +} + +/* Get typed element from list at a given position. */ +#define cds_hlist_entry(ptr, type, member) \ + ((type *) ((char *) (ptr) - (unsigned long) (&((type *) 0)->member))) + +/* Get first entry from a list. Assumes the hlist is not empty. */ +#define cds_hlist_first_entry(ptr, type, member) \ + cds_list_entry((ptr)->next, type, member) + +static inline int cds_hlist_empty(struct cds_hlist_head *head) +{ + return !head->next; +} + +/* Add new element at the head of the list. */ +static inline void cds_hlist_add_head (struct cds_hlist_node *newp, + struct cds_hlist_head *head) +{ + if (head->next) + head->next->prev = newp; + + newp->next = head->next; + newp->prev = (struct cds_hlist_node *)head; + head->next = newp; +} + +/* Remove element from list. 
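 *
 * Usage sketch for this hlist API (the item type is hypothetical; iteration
 * and deletion use the helpers defined just below):
 *
 *   struct item {
 *           int value;
 *           struct cds_hlist_node node;
 *   };
 *
 *   struct item a = { .value = 1 }, b = { .value = 2 };
 *   struct item *entry;
 *   struct cds_hlist_head head;
 *   struct cds_hlist_node *pos;
 *
 *   CDS_INIT_HLIST_HEAD(&head);
 *   cds_hlist_add_head(&a.node, &head);
 *   cds_hlist_add_head(&b.node, &head);
 *   cds_hlist_for_each_entry(entry, pos, &head, node)
 *           printf("%d\n", entry->value);   (prints 2, then 1)
 *   cds_hlist_del(&a.node);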
*/ +static inline void cds_hlist_del (struct cds_hlist_node *elem) +{ + if (elem->next) + elem->next->prev = elem->prev; + + elem->prev->next = elem->next; +} + +#define cds_hlist_for_each_entry(entry, pos, head, member) \ + for (pos = (head)->next, \ + entry = cds_hlist_entry(pos, __typeof__(*entry), member); \ + pos != NULL; \ + pos = pos->next, \ + entry = cds_hlist_entry(pos, __typeof__(*entry), member)) + +#define cds_hlist_for_each_entry_safe(entry, pos, p, head, member) \ + for (pos = (head)->next, \ + entry = cds_hlist_entry(pos, __typeof__(*entry), member); \ + (pos != NULL) && ({ p = pos->next; 1;}); \ + pos = p, \ + entry = cds_hlist_entry(pos, __typeof__(*entry), member)) + +#endif /* _KCOMPAT_HLIST_H */ diff --git a/memleak/jhash.h b/memleak/jhash.h new file mode 100644 index 000000000..ca4643b65 --- /dev/null +++ b/memleak/jhash.h @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2011 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "compiler.h" +#include "ust-endian.h" + +/* + * Hash function + * Source: http://burtleburtle.net/bob/c/lookup3.c + * Originally Public Domain + */ + +#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) + +#define mix(a, b, c) \ +do { \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c, 16); c += b; \ + b -= a; b ^= rot(a, 19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} while (0) + +#define final(a, b, c) \ +{ \ + c ^= b; c -= rot(b, 14); \ + a ^= c; a -= rot(c, 11); \ + b ^= a; b -= rot(a, 25); \ + c ^= b; c -= rot(b, 16); \ + a ^= c; a -= rot(c, 4);\ + b ^= a; b -= rot(a, 14); \ + c ^= b; c -= rot(b, 24); \ +} + +#if (BYTE_ORDER == LITTLE_ENDIAN) +#define HASH_LITTLE_ENDIAN 1 +#else +#define HASH_LITTLE_ENDIAN 0 +#endif + +/* + * + * hashlittle() -- hash a variable-length key into a 32-bit value + * k : the key (the unaligned variable-length array of bytes) + * length : the length of the key, counting by bytes + * initval : can be any 4-byte value + * Returns a 32-bit value. Every bit of the key affects every bit of + * the return value. Two keys differing by one or two bits will have + * totally different hash values. + * + * The best hash table sizes are powers of 2. There is no need to do + * mod a prime (mod is sooo slow!). If you need less than 32 bits, + * use a bitmask. For example, if you need only 10 bits, do + * h = (h & hashmask(10)); + * In which case, the hash table should have hashsize(10) elements. + * + * If you are hashing n strings (uint8_t **)k, do it like this: + * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h); + * + * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this + * code any way you wish, private, educational, or commercial. It's free. 
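 *
 * In this patch, lttng-memleak-finder.c hashes the pointer value itself to
 * pick a bucket in a power-of-two table, masking rather than taking a
 * modulo, as recommended above. A minimal sketch of that pattern (the
 * 20-bit table size matches MH_HASH_BITS used later):
 *
 *   #define MH_HASH_BITS 20
 *   #define MH_TABLE_SIZE (1 << MH_HASH_BITS)
 *
 *   uint32_t hash = jhash(&ptr, sizeof(ptr), 0);
 *   struct cds_hlist_head *head = &mh_table[hash & (MH_TABLE_SIZE - 1)];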
+ * + * Use for hash table lookup, or anything where one collision in 2^^32 is + * acceptable. Do NOT use for cryptographic purposes. + */ +static +uint32_t hashlittle(const void *key, size_t length, uint32_t initval) +{ + uint32_t a, b, c; /* internal state */ + union { + const void *ptr; + size_t i; + } u; + + /* Set up the internal state */ + a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */ + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a, b, c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticably faster for short strings (like English words). + */ +#ifndef VALGRIND + + switch (length) { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + { + const uint8_t *k8; + + k8 = (const uint8_t *) k; + switch (length) { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t) k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t) k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; + } + } +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t) k[1])<<16); + b += k[2] + (((uint32_t) k[3])<<16); + c += k[4] + (((uint32_t) k[5])<<16); + mix(a, b, c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *) k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t) k[5])<<16); + b+=k[2]+(((uint32_t) k[3])<<16); + a+=k[0]+(((uint32_t) k[1])<<16); + break; + case 11: c+=((uint32_t) k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t) k[3])<<16); + a+=k[0]+(((uint32_t) k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t) k[3])<<16); + 
a+=k[0]+(((uint32_t) k[1])<<16); + break; + case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t) k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t) k[1])<<16); + break; + case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length requires no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a, b, c) */ + while (length > 12) { + a += k[0]; + a += ((uint32_t) k[1])<<8; + a += ((uint32_t) k[2])<<16; + a += ((uint32_t) k[3])<<24; + b += k[4]; + b += ((uint32_t) k[5])<<8; + b += ((uint32_t) k[6])<<16; + b += ((uint32_t) k[7])<<24; + c += k[8]; + c += ((uint32_t) k[9])<<8; + c += ((uint32_t) k[10])<<16; + c += ((uint32_t) k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch (length) { /* all the case statements fall through */ + case 12: c+=((uint32_t) k[11])<<24; + case 11: c+=((uint32_t) k[10])<<16; + case 10: c+=((uint32_t) k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t) k[7])<<24; + case 7 : b+=((uint32_t) k[6])<<16; + case 6 : b+=((uint32_t) k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t) k[3])<<24; + case 3 : a+=((uint32_t) k[2])<<16; + case 2 : a+=((uint32_t) k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : return c; + } + } + + final(a, b, c); + return c; +} + +static inline +uint32_t jhash(const void *key, size_t length, uint32_t seed) +{ + return hashlittle(key, length, seed); +} diff --git a/memleak/lttng-malloc-stats.c b/memleak/lttng-malloc-stats.c new file mode 100644 index 000000000..7811ea692 --- /dev/null +++ b/memleak/lttng-malloc-stats.c @@ -0,0 +1,49 @@ +/* + * lttng-malloc-stats.c + * + * LTTng malloc stats printer + * + * Copyright (c) 2013 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
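 *
 * Usage sketch (the target program is only an example): preload this
 * library, then send SIGUSR2 (STAT_SIGNAL below) to have glibc's
 * malloc_stats() dump allocator statistics to stderr:
 *
 *   LD_PRELOAD=./lttng-malloc-stats.so lttng-sessiond &
 *   kill -USR2 $(pidof lttng-sessiond)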
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#include +#include +#include +#include + +#define STAT_SIGNAL SIGUSR2 + +/* Signal handler */ +static void sighandler(int signo, siginfo_t *siginfo, void *context) +{ + malloc_stats(); +} + +static __attribute__((constructor)) +void print_stats_init(void) +{ + /* Attach signal handler on STAT_SIGNAL */ + struct sigaction act; + int ret; + + act.sa_sigaction = sighandler; + act.sa_flags = SA_SIGINFO | SA_RESTART; + sigemptyset(&act.sa_mask); + ret = sigaction(STAT_SIGNAL, &act, NULL); + assert(!ret); +} diff --git a/memleak/lttng-memleak-finder.c b/memleak/lttng-memleak-finder.c new file mode 100644 index 000000000..72e8ce7d6 --- /dev/null +++ b/memleak/lttng-memleak-finder.c @@ -0,0 +1,349 @@ +/* + * lttng-memleak-finder.c + * + * LTTng memory leak finder + * + * Copyright (c) 2013 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include "hlist.h" + +#include "jhash.h" + +static volatile int print_to_console; + +static pthread_mutex_t mh_mutex = PTHREAD_MUTEX_INITIALIZER; + +static void *(*callocp)(size_t, size_t); +static void *(*mallocp)(size_t); +static void *(*reallocp)(void *, size_t); +static void *(*memalignp)(size_t, size_t); +static void (*freep)(void *); + +static volatile int initialized; +static __thread int thread_in_hook; + +#define STATIC_CALLOC_LEN 4096 +static char static_calloc_buf[STATIC_CALLOC_LEN]; +static size_t static_calloc_len; + +#define MH_HASH_BITS 20 /* 1 M entries, hardcoded for now */ +#define MH_TABLE_SIZE (1 << MH_HASH_BITS) +static struct cds_hlist_head mh_table[MH_TABLE_SIZE]; + +struct mh_entry { + struct cds_hlist_node hlist; + void *ptr; + const void *alloc_caller; + char *caller_symbol; + size_t alloc_size; +}; + +static struct mh_entry * +get_mh(const void *ptr) +{ + struct cds_hlist_head *head; + struct cds_hlist_node *node; + struct mh_entry *e; + uint32_t hash; + + hash = jhash(&ptr, sizeof(ptr), 0); + head = &mh_table[hash & (MH_TABLE_SIZE - 1)]; + cds_hlist_for_each_entry(e, node, head, hlist) { + if (ptr == e->ptr) + return e; + } + return NULL; +} + +static void +add_mh(void *ptr, size_t alloc_size, const void *caller) +{ + struct cds_hlist_head *head; + struct cds_hlist_node *node; + struct mh_entry *e; + uint32_t hash; + Dl_info info; + + if (!ptr) + return; + hash = jhash(&ptr, sizeof(ptr), 0); + head = &mh_table[hash & (MH_TABLE_SIZE - 1)]; + cds_hlist_for_each_entry(e, node, head, hlist) { + if (ptr == e->ptr) { + fprintf(stderr, "[warning] add_mh pointer %p is already 
there\n", + ptr); + //assert(0); /* already there */ + } + } + e = malloc(sizeof(*e)); + e->ptr = ptr; + e->alloc_caller = caller; + e->alloc_size = alloc_size; + if (dladdr(caller, &info) && info.dli_sname) { + e->caller_symbol = strdup(info.dli_sname); + } else { + e->caller_symbol = NULL; + } + cds_hlist_add_head(&e->hlist, head); +} + +static void +del_mh(void *ptr, const void *caller) +{ + struct mh_entry *e; + + if (!ptr) + return; + e = get_mh(ptr); + if (!e) { + fprintf(stderr, + "[warning] trying to free unallocated ptr %p caller %p\n", + ptr, caller); + return; + } + cds_hlist_del(&e->hlist); + free(e->caller_symbol); + free(e); +} + +static void __attribute__((constructor)) +do_init(void) +{ + char *env; + + if (initialized) + return; + callocp = (void *(*) (size_t, size_t)) dlsym (RTLD_NEXT, "calloc"); + mallocp = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc"); + reallocp = (void *(*) (void *, size_t)) dlsym (RTLD_NEXT, "realloc"); + memalignp = (void *(*)(size_t, size_t)) dlsym (RTLD_NEXT, "memalign"); + freep = (void (*) (void *)) dlsym (RTLD_NEXT, "free"); + + env = getenv("LTTNG_MEMLEAK_PRINT"); + if (env && strcmp(env, "1") == 0) + print_to_console = 1; + + initialized = 1; +} + +static +void *static_calloc(size_t nmemb, size_t size) +{ + size_t prev_len; + + if (nmemb * size > sizeof(static_calloc_buf) - static_calloc_len) + return NULL; + prev_len = static_calloc_len; + static_calloc_len += nmemb + size; + return &static_calloc_buf[prev_len]; +} + +void * +calloc(size_t nmemb, size_t size) +{ + void *result; + const void *caller = __builtin_return_address(0); + + if (callocp == NULL) { + return static_calloc(nmemb, size); + } + + do_init(); + + if (thread_in_hook) { + return callocp(nmemb, size); + } + + thread_in_hook = 1; + + pthread_mutex_lock(&mh_mutex); + + /* Call resursively */ + result = callocp(nmemb, size); + + add_mh(result, nmemb * size, caller); + + /* printf might call malloc, so protect it too. */ + if (print_to_console) + fprintf(stderr, "calloc(%zu,%zu) returns %p\n", nmemb, size, result); + + pthread_mutex_unlock(&mh_mutex); + + thread_in_hook = 0; + + return result; +} + +void * +malloc(size_t size) +{ + void *result; + const void *caller = __builtin_return_address(0); + + do_init(); + + if (thread_in_hook) { + return mallocp(size); + } + + thread_in_hook = 1; + + pthread_mutex_lock(&mh_mutex); + + /* Call resursively */ + result = mallocp(size); + + add_mh(result, size, caller); + + /* printf might call malloc, so protect it too. */ + if (print_to_console) + fprintf(stderr, "malloc(%zu) returns %p\n", size, result); + + pthread_mutex_unlock(&mh_mutex); + + thread_in_hook = 0; + + return result; +} + +void * +realloc(void *ptr, size_t size) +{ + void *result; + const void *caller = __builtin_return_address(0); + + do_init(); + + if (thread_in_hook) { + return reallocp(ptr, size); + } + + thread_in_hook = 1; + + pthread_mutex_lock(&mh_mutex); + + /* Call resursively */ + result = reallocp(ptr, size); + + if (size == 0 && ptr) { + /* equivalent to free() */ + del_mh(ptr, caller); + } else if (result) { + del_mh(ptr, caller); + add_mh(result, size, caller); + } + + /* printf might call malloc, so protect it too. 
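 *
 * Note on the bootstrap path above: static_calloc() checks the remaining
 * space against nmemb * size but then advances static_calloc_len by
 * nmemb + size, so calloc() calls that arrive before dlsym() has resolved
 * the real allocator (dlsym() itself may allocate) can be handed
 * overlapping memory. A possible corrected version of that helper, keeping
 * the same buffer and variable names (a sketch, not part of the patch):
 *
 *   static
 *   void *static_calloc(size_t nmemb, size_t size)
 *   {
 *           size_t prev_len, req = nmemb * size;
 *
 *           if (req > sizeof(static_calloc_buf) - static_calloc_len)
 *                   return NULL;
 *           prev_len = static_calloc_len;
 *           static_calloc_len += req;
 *           return &static_calloc_buf[prev_len];
 *   }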
*/ + if (print_to_console) + fprintf(stderr, "realloc(%p,%zu) returns %p\n", ptr, size, result); + + pthread_mutex_unlock(&mh_mutex); + + thread_in_hook = 0; + + return result; +} + +void * +memalign(size_t alignment, size_t size) +{ + void *result; + const void *caller = __builtin_return_address(0); + + do_init(); + + if (thread_in_hook) { + return memalignp(alignment, size); + } + + thread_in_hook = 1; + + pthread_mutex_lock(&mh_mutex); + + /* Call resursively */ + result = memalignp(alignment, size); + + add_mh(result, size, caller); + + /* printf might call malloc, so protect it too. */ + if (print_to_console) + fprintf(stderr, "memalign(%zu,%zu) returns %p\n", + alignment, size, result); + + pthread_mutex_unlock(&mh_mutex); + + thread_in_hook = 0; + + return result; +} + +void +free(void *ptr) +{ + const void *caller = __builtin_return_address(0); + + do_init(); + + if (thread_in_hook) { + freep(ptr); + return; + } + + thread_in_hook = 1; + pthread_mutex_lock(&mh_mutex); + + /* Call resursively */ + freep(ptr); + + del_mh(ptr, caller); + + /* printf might call free, so protect it too. */ + if (print_to_console) + fprintf(stderr, "freed pointer %p\n", ptr); + + pthread_mutex_unlock(&mh_mutex); + thread_in_hook = 0; +} + +static __attribute__((destructor)) +void print_leaks(void) +{ + unsigned long i; + + for (i = 0; i < MH_TABLE_SIZE; i++) { + struct cds_hlist_head *head; + struct cds_hlist_node *node; + struct mh_entry *e; + + head = &mh_table[i]; + cds_hlist_for_each_entry(e, node, head, hlist) { + fprintf(stderr, "[leak] ptr: %p size: %zu caller: %p <%s>\n", + e->ptr, e->alloc_size, e->alloc_caller, + e->caller_symbol); + } + } +} diff --git a/memleak/start-bin b/memleak/start-bin new file mode 100755 index 000000000..f948dffc0 --- /dev/null +++ b/memleak/start-bin @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright (C) - 2013 Christian Babeux +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License, version 2 only, as published by +# the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +LIB_MEMLEAK="lttng-memleak-finder.so" +LIB_STATS="lttng-malloc-stats.so" + +ld_preload_lib="./$LIB_MEMLEAK" + +if [ -z "$1" ]; then + echo "Usage: $0 [--stats] PROG" + exit 1 +fi + +if [ ! -f ./$LIB_MEMLEAK -a ! -f ./$LIB_STATS ]; then + echo "Please compile the $LIB_STATS and/or $LIB_MEMLEAK first." + exit 1 +fi + +if [ "$1" = "--stats" ]; then + ld_preload_lib="./$LIB_STATS" + shift +fi + +LD_PRELOAD=$ld_preload_lib $@ diff --git a/memleak/ust-endian.h b/memleak/ust-endian.h new file mode 100644 index 000000000..f676162b0 --- /dev/null +++ b/memleak/ust-endian.h @@ -0,0 +1,46 @@ +#ifndef _LTTNG_UST_ENDIAN_H +#define _LTTNG_UST_ENDIAN_H + +/* + * lttng/ust-endian.h + * + * Copyright 2012 (c) - Mathieu Desnoyers + * + * endian.h compatibility layer. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if (defined(__linux__) || defined(__CYGWIN__))
+#include <endian.h>
+#elif defined(__FreeBSD__)
+#include <machine/endian.h>
+#else
+#error "Please add support for your OS."
+#endif
+
+#ifndef FLOAT_WORD_ORDER
+#ifdef __FLOAT_WORD_ORDER
+#define FLOAT_WORD_ORDER __FLOAT_WORD_ORDER
+#else /* __FLOAT_WORD_ORDER */
+#define FLOAT_WORD_ORDER BYTE_ORDER
+#endif /* __FLOAT_WORD_ORDER */
+#endif /* FLOAT_WORD_ORDER */
+
+#endif /* _LTTNG_UST_ENDIAN_H */
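
This commit does not ship a build recipe for the preload libraries, so the
commands below are only one plausible way to build and exercise them; the
compiler flags and target program are illustrative, not part of the patch:

  gcc -g -O2 -fPIC -shared -o lttng-memleak-finder.so lttng-memleak-finder.c -ldl -lpthread
  gcc -g -O2 -fPIC -shared -o lttng-malloc-stats.so lttng-malloc-stats.c

  # Leak report on process exit (set LTTNG_MEMLEAK_PRINT=1 to also trace each call):
  ./start-bin lttng-sessiond

  # glibc allocator statistics on SIGUSR2 instead:
  ./start-bin --stats lttng-sessiond

Caller addresses in the [leak] and [warning] lines are resolved through
dladdr(), so only dynamically visible symbols get a name; as the README
notes, linking the inspected program with -Wl,--export-dynamic makes more of
its own symbols visible to dladdr().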