summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
92d9f5e)
On a small arm imx6 solo with 256MB RAM it often happens that memory
becomes fragmented rather quickly, so that kmalloc will not be able to
get enough consecutive pages (encountered for example if you enable all
kernel events: lttng enable-event -k syscall --all).
This patch switches the allocation to vmalloc. Tested for x86 on Ubuntu
12.04 LTS and on imx6 solo 256MB RAM.
If this patch is not applied, you can identify low and/or fragmented
memory failures by looking at the kernel ring buffer (please ignore DMA,
it is due to some memory setup misconfiguration, should read Normal):
...
[ 321.993820] lttng-sessiond: page allocation failure: order:4, mode:0x1040d0
...
[ 321.994711] lowmem_reserve[]: 0 0 0
[ 321.994727] DMA: 801*4kB (UEMC) 424*8kB (EMC) 355*16kB (UEMC) 344*32kB (MC) 340*64kB (C) 8*128kB (C) 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB 0*8192kB 0*16384kB 0*32768kB = 46068kB
[ Edit by Mathieu: use vzalloc() rather than vmalloc() + memset 0. ]
Signed-off-by: Martin Leisener <martin@leisener.de>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
#include "wrapper/file.h"
#include <linux/jhash.h>
#include <linux/uaccess.h>
#include "wrapper/file.h"
#include <linux/jhash.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/uuid.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
GFP_KERNEL);
if (!metadata_cache)
goto err_free_session;
- metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
- GFP_KERNEL);
+ metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
if (!metadata_cache->data)
goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
{
struct lttng_metadata_cache *cache =
container_of(kref, struct lttng_metadata_cache, refcount);
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
tmp_cache_alloc_size = max_t(unsigned int,
session->metadata_cache->cache_alloc + len,
session->metadata_cache->cache_alloc << 1);
- tmp_cache_realloc = krealloc(session->metadata_cache->data,
- tmp_cache_alloc_size, GFP_KERNEL);
+ tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
if (!tmp_cache_realloc)
goto err;
if (!tmp_cache_realloc)
goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}
session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
session->metadata_cache->data = tmp_cache_realloc;
}