author    | Otto Moerbeek <otto@cvs.openbsd.org> | 2011-05-12 09:29:31 +0000
committer | Otto Moerbeek <otto@cvs.openbsd.org> | 2011-05-12 09:29:31 +0000
commit    | e357301211b82e6c69a5ecd2d24f6541fc494c0b (patch)
tree      | 624e9d381523031f5cf0283089df07a5bbb34516 /lib/libc/stdlib/malloc.c
parent    | c99efe36da95f31d0b577d488095a9049bc75d42 (diff)
Introduce leak detection code for MALLOC_STATS
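The change records, for every allocation, the address it was called from (the new CALLER macro wraps __builtin_return_address(0)), stores it in struct region_info, and lets malloc_dump() aggregate the still-outstanding allocations per caller so they can be printed as a leak report. Below is a minimal, self-contained sketch of that idea, not part of the commit: it keeps the per-caller totals in a flat table instead of the <sys/tree.h> RB tree used by the diff, and struct leak and leak_malloc() are invented names for the example.

/*
 * Illustrative sketch only: per-caller accounting of outstanding
 * allocations, in the spirit of the diff but with a flat table.
 */
#include <stdio.h>
#include <stdlib.h>

struct leak {
	void	*f;		/* caller address ("where allocated from") */
	size_t	 total_size;	/* sum of outstanding allocation sizes */
	int	 count;		/* number of outstanding allocations */
};

static struct leak leaks[128];

/* keep the attribute so the recorded return address is the real call site */
static void *leak_malloc(size_t) __attribute__((noinline));

static void
putleakinfo(void *f, size_t sz)
{
	int i;

	for (i = 0; i < 128; i++) {
		if (leaks[i].count == 0 || leaks[i].f == f) {
			leaks[i].f = f;
			leaks[i].total_size += sz;
			leaks[i].count++;
			return;
		}
	}
	/* table full: drop the entry */
}

static void *
leak_malloc(size_t sz)
{
	/*
	 * __builtin_return_address(0) is the address this call returns to,
	 * i.e. the allocation site; the same trick as the CALLER macro
	 * added by the diff.
	 */
	void *f = __builtin_return_address(0);
	void *p = malloc(sz);

	if (p != NULL)
		putleakinfo(f, sz);
	return p;
}

static void
dump_leaks(void)
{
	int i;

	printf("Leak report\n%16s %8s %6s %6s\n", "f", "sum", "#", "avg");
	for (i = 0; i < 128; i++)
		if (leaks[i].count != 0)
			printf("%16p %8zu %6d %6zu\n", leaks[i].f,
			    leaks[i].total_size, leaks[i].count,
			    leaks[i].total_size / leaks[i].count);
}

int
main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		(void)leak_malloc(32);	/* never freed: shows up in the report */
	dump_leaks();
	return 0;
}

Running the sketch prints one line per allocation site with the same sum, count and average columns that the new dump_leaks() in the diff emits.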
Diffstat (limited to 'lib/libc/stdlib/malloc.c')
-rw-r--r-- | lib/libc/stdlib/malloc.c | 190
1 file changed, 165 insertions, 25 deletions
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index 025508a3356..d82c3269149 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: malloc.c,v 1.131 2011/05/08 07:08:13 otto Exp $	*/
+/*	$OpenBSD: malloc.c,v 1.132 2011/05/12 09:29:30 otto Exp $	*/
 /*
  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
  *
@@ -43,6 +43,7 @@
 #include <unistd.h>
 
 #ifdef MALLOC_STATS
+#include <sys/tree.h>
 #include <fcntl.h>
 #endif
 
@@ -94,6 +95,9 @@
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
+#ifdef MALLOC_STATS
+	void *f;		/* where allocated from */
+#endif
 };
 
 LIST_HEAD(chunk_head, chunk_info);
@@ -125,9 +129,11 @@ struct dir_info {
 	size_t cheap_reallocs;
 #define STATS_INC(x) ((x)++)
 #define STATS_ZERO(x) ((x) = 0)
+#define STATS_SETF(x,y) ((x)->f = (y))
 #else
 #define STATS_INC(x)	/* nothing */
 #define STATS_ZERO(x)	/* nothing */
+#define STATS_SETF(x,y)	/* nothing */
 #endif /* MALLOC_STATS */
 	u_int32_t canary2;
 };
@@ -195,6 +201,9 @@ extern char *__progname;
 #ifdef MALLOC_STATS
 void malloc_dump(int);
 static void malloc_exit(void);
+#define CALLER	__builtin_return_address(0)
+#else
+#define CALLER	NULL
 #endif
 
 /* low bits of r->p determine size: 0 means >= page size and p->size holding
@@ -696,7 +705,7 @@ alloc_chunk_info(struct dir_info *d)
 }
 
 static int
-insert(struct dir_info *d, void *p, size_t sz)
+insert(struct dir_info *d, void *p, size_t sz, void *f)
 {
 	size_t index;
 	size_t mask;
@@ -717,6 +726,9 @@ insert(struct dir_info *d, void *p, size_t sz)
 	}
 	d->r[index].p = p;
 	d->r[index].size = sz;
+#ifdef MALLOC_STATS
+	d->r[index].f = f;
+#endif
 	d->regions_free--;
 	return 0;
 }
@@ -839,7 +851,7 @@ omalloc_make_chunks(struct dir_info *d, int bits)
 	if ((uintptr_t)pp & bits)
 		wrterror("pp & bits", pp);
 
-	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
+	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
 	return bp;
 }
 
@@ -848,7 +860,7 @@
  * Allocate a chunk
  */
 static void *
-malloc_bytes(struct dir_info *d, size_t size)
+malloc_bytes(struct dir_info *d, size_t size, void *f)
 {
 	int i, j;
 	size_t k;
@@ -908,6 +920,12 @@ malloc_bytes(struct dir_info *d, size_t size)
 		i = 0;
 	}
 	d->chunk_start += i + 1;
+#ifdef MALLOC_STATS
+	if (i == 0) {
+		struct region_info *r = find(d, bp->page);
+		r->f = f;
+	}
+#endif
 
 	*lp ^= u;
 
@@ -980,7 +998,7 @@ free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
 
 static void *
-omalloc(size_t sz, int zero_fill)
+omalloc(size_t sz, int zero_fill, void *f)
 {
 	void *p;
 	size_t psz;
@@ -997,7 +1015,7 @@ omalloc(size_t sz, int zero_fill)
 			errno = ENOMEM;
 			return NULL;
 		}
-		if (insert(g_pool, p, sz)) {
+		if (insert(g_pool, p, sz, f)) {
 			unmap(g_pool, p, psz);
 			errno = ENOMEM;
 			return NULL;
 		}
@@ -1034,7 +1052,7 @@ omalloc(size_t sz, int zero_fill)
 	} else {
 		/* takes care of SOME_JUNK */
-		p = malloc_bytes(g_pool, sz);
+		p = malloc_bytes(g_pool, sz, f);
 		if (zero_fill && p != NULL && sz > 0)
 			memset(p, 0, sz);
 	}
@@ -1090,7 +1108,7 @@ malloc(size_t size)
 		malloc_recurse();
 		return NULL;
 	}
-	r = omalloc(size, mopts.malloc_zero);
+	r = omalloc(size, mopts.malloc_zero, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();
 	if (r == NULL && mopts.malloc_xmalloc) {
@@ -1198,14 +1216,14 @@ free(void *ptr)
 
 static void *
-orealloc(void *p, size_t newsz)
+orealloc(void *p, size_t newsz, void *f)
 {
 	struct region_info *r;
 	size_t oldsz, goldsz, gnewsz;
 	void *q;
 
 	if (p == NULL)
-		return omalloc(newsz, 0);
+		return omalloc(newsz, 0, f);
 
 	r = find(g_pool, p);
 	if (r == NULL) {
@@ -1245,6 +1263,7 @@ orealloc(void *p, size_t newsz)
 				memset(q, SOME_JUNK, rnewsz - roldsz);
 			r->size = newsz;
+			STATS_SETF(r, f);
 			STATS_INC(g_pool->cheap_reallocs);
 			return p;
 		} else if (q != MAP_FAILED)
@@ -1263,29 +1282,34 @@ orealloc(void *p, size_t newsz)
 			}
 			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
 			r->size = gnewsz;
+			STATS_SETF(r, f);
 			return p;
 		} else {
 			if (newsz > oldsz && mopts.malloc_junk)
 				memset((char *)p + newsz, SOME_JUNK,
 				    rnewsz - mopts.malloc_guard - newsz);
 			r->size = gnewsz;
+			STATS_SETF(r, f);
 			return p;
 		}
 	}
 	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
 		if (mopts.malloc_junk && newsz > 0)
 			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
+		STATS_SETF(r, f);
 		return p;
 	} else if (newsz != oldsz || mopts.malloc_realloc) {
-		q = omalloc(newsz, 0);
+		q = omalloc(newsz, 0, f);
 		if (q == NULL)
 			return NULL;
 		if (newsz != 0 && oldsz != 0)
 			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
 		ofree(p);
 		return q;
-	} else
+	} else {
+		STATS_SETF(r, f);
 		return p;
+	}
 }
 
 void *
@@ -1304,7 +1328,7 @@ realloc(void *ptr, size_t size)
 		malloc_recurse();
 		return NULL;
 	}
-	r = orealloc(ptr, size);
+	r = orealloc(ptr, size, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();
 
@@ -1347,7 +1371,7 @@ calloc(size_t nmemb, size_t size)
 	}
 
 	size *= nmemb;
-	r = omalloc(size, 1);
+	r = omalloc(size, 1, CALLER);
 	malloc_active--;
 	_MALLOC_UNLOCK();
 
@@ -1386,17 +1410,110 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
 }
 
 #ifdef MALLOC_STATS
+
+struct malloc_leak {
+	void (*f)();
+	size_t total_size;
+	int count;
+};
+
+struct leaknode {
+	RB_ENTRY(leaknode) entry;
+	struct malloc_leak d;
+};
+
+static int
+leakcmp(struct leaknode *e1, struct leaknode *e2)
+{
+	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
+}
+
+static RB_HEAD(leaktree, leaknode) leakhead;
+RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)
+
+static void
+putleakinfo(void *f, size_t sz, int cnt)
+{
+	struct leaknode key, *p;
+	static struct leaknode *page;
+	static int used;
+
+	if (cnt == 0)
+		return;
+
+	key.d.f = f;
+	p = RB_FIND(leaktree, &leakhead, &key);
+	if (p == NULL) {
+		if (page == NULL ||
+		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
+			page = MMAP(MALLOC_PAGESIZE);
+			if (page == MAP_FAILED)
+				return;
+			used = 0;
+		}
+		p = &page[used++];
+		p->d.f = f;
+		p->d.total_size = sz * cnt;
+		p->d.count = cnt;
+		RB_INSERT(leaktree, &leakhead, p);
+	} else {
+		p->d.total_size += sz * cnt;
+		p->d.count += cnt;
+	}
+}
+
+static struct malloc_leak *malloc_leaks;
+
+static void
+dump_leaks(int fd)
+{
+	struct leaknode *p;
+	char buf[64];
+	int i = 0;
+
+	snprintf(buf, sizeof(buf), "Leak report\n");
+	write(fd, buf, strlen(buf));
+	snprintf(buf, sizeof(buf), "           f     sum      #    avg\n");
+	write(fd, buf, strlen(buf));
+	/* XXX only one page of summary */
+	if (malloc_leaks == NULL)
+		malloc_leaks = MMAP(MALLOC_PAGESIZE);
+	if (malloc_leaks != MAP_FAILED)
+		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
+	RB_FOREACH(p, leaktree, &leakhead) {
+		snprintf(buf, sizeof(buf), "%12p %7zu %6u %6zu\n", p->d.f,
+		    p->d.total_size, p->d.count,
+		    p->d.total_size / p->d.count);
+		write(fd, buf, strlen(buf));
+		if (malloc_leaks == MAP_FAILED ||
+		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
+			continue;
+		malloc_leaks[i].f = p->d.f;
+		malloc_leaks[i].total_size = p->d.total_size;
+		malloc_leaks[i].count = p->d.count;
+		i++;
+	}
+}
+
 static void
-dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
+dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
 {
 	char buf[64];
 
 	while (p != NULL) {
-		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
-		    p->free, p->total, p->page);
+		snprintf(buf, sizeof(buf), "chunk %12p %12p %4d %d/%d\n",
+		    p->page, ((p->bits[0] & 1) ? NULL : f),
+		    p->size, p->free, p->total);
 		write(fd, buf, strlen(buf));
-		if (!fromfreelist)
+		if (!fromfreelist) {
+			if (p->bits[0] & 1)
+				putleakinfo(NULL, p->size, p->total - p->free);
+			else {
+				putleakinfo(f, p->size, 1);
+				putleakinfo(NULL, p->size,
+				    p->total - p->free - 1);
+			}
 			break;
+		}
 		p = LIST_NEXT(p, entries);
 		if (p != NULL) {
			snprintf(buf, sizeof(buf), "        ");
@@ -1418,7 +1535,7 @@ dump_free_chunk_info(int fd, struct dir_info *d)
 		if (p != NULL) {
 			snprintf(buf, sizeof(buf), "%2d) ", i);
 			write(fd, buf, strlen(buf));
-			dump_chunk(fd, p, 1);
+			dump_chunk(fd, p, NULL, 1);
 		}
 	}
 
@@ -1472,35 +1589,58 @@ malloc_dump1(int fd, struct dir_info *d)
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
 	write(fd, buf, strlen(buf));
+	dump_free_chunk_info(fd, d);
+	dump_free_page_info(fd, d);
+	snprintf(buf, sizeof(buf),
+	    "slot)  hash d  type         page            f size [free/n]\n");
+	write(fd, buf, strlen(buf));
 	for (i = 0; i < d->regions_total; i++) {
 		if (d->r[i].p != NULL) {
 			size_t h = hash(d->r[i].p) & (d->regions_total - 1);
-			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
+			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
 			    i, h, h - i);
 			write(fd, buf, strlen(buf));
 			REALSIZE(realsize, &d->r[i]);
 			if (realsize > MALLOC_MAXCHUNK) {
+				putleakinfo(d->r[i].f, realsize, 1);
 				snprintf(buf, sizeof(buf),
-				    "%p: %zu\n", d->r[i].p, realsize);
+				    "pages %12p %12p %zu\n", d->r[i].p,
+				    d->r[i].f, realsize);
 				write(fd, buf, strlen(buf));
 			} else
 				dump_chunk(fd,
-				    (struct chunk_info *)d->r[i].size, 0);
+				    (struct chunk_info *)d->r[i].size,
+				    d->r[i].f, 0);
 		}
 	}
-	dump_free_chunk_info(fd, d);
-	dump_free_page_info(fd, d);
 	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
 	write(fd, buf, strlen(buf));
 	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
 	write(fd, buf, strlen(buf));
+	dump_leaks(fd);
+	write(fd, "\n", 1);
 }
-
 void
 malloc_dump(int fd)
 {
+	int i;
+	void *p;
+	struct region_info *r;
+
+	for (i = 0; i <= MALLOC_DELAYED_CHUNKS; i++) {
+		p = g_pool->delayed_chunks[i];
+		if (p == NULL)
+			continue;
+		r = find(g_pool, p);
+		if (r == NULL)
+			wrterror("bogus pointer in malloc_dump", p);
+		free_bytes(g_pool, r, p);
+		g_pool->delayed_chunks[i] = NULL;
+	}
+	/* XXX leak when run multiple times */
+	RB_INIT(&leakhead);
 	malloc_dump1(fd, g_pool);
 }
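With MALLOC_STATS compiled in, malloc_dump() now ends with the per-caller summary produced by dump_leaks(). The fragment below is a fabricated illustration (the addresses and sizes are invented); only the column layout follows the snprintf() formats in the diff: f is the recorded caller address, sum the outstanding bytes, # the number of outstanding allocations, and avg their mean size. A 0x0 row collects chunk allocations whose caller was not recorded individually, since dump_chunk() attributes only the first live chunk on a page to its caller and the rest to NULL.

Leak report
           f     sum      #    avg
         0x0   12288      6   2048
  0xdeadbeef    1536     48     32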