author	Artur Grabowski <art@cvs.openbsd.org>	2000-06-06 20:18:21 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2000-06-06 20:18:21 +0000
commit	df4ef8b8a6e3a5ae6fcf99555606fd2b2d4978ef (patch)
tree	0f5fa209150547cbdfcc77975bb096379cc80e98 /sys
parent	53362e67cc113df1bdd7eb307573e1532504ad67 (diff)
malloc debugging code. Enabled by option MALLOC_DEBUG.
Make sure you read the docs (malloc(9)) before use.
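For reference, enabling it is a one-line kernel configuration change; a minimal sketch, assuming the standard config(8) option syntax (the GENERIC file name is illustrative, not part of this commit):

	# e.g. in sys/arch/<arch>/conf/GENERIC
	option MALLOC_DEBUG	# guarded-page malloc debugging, see malloc(9)

then rebuild the kernel as usual.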
Diffstat (limited to 'sys')
-rw-r--r--	sys/conf/files	3
-rw-r--r--	sys/kern/kern_malloc.c	23
-rw-r--r--	sys/kern/kern_malloc_debug.c	308
3 files changed, 332 insertions(+), 2 deletions(-)
diff --git a/sys/conf/files b/sys/conf/files
index b38e57b0e06..e559d5d0e12 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-# $OpenBSD: files,v 1.165 2000/05/30 19:34:20 mickey Exp $
+# $OpenBSD: files,v 1.166 2000/06/06 20:18:20 art Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -365,6 +365,7 @@ file kern/kern_lock.c
file kern/kern_lkm.c lkm
file kern/kern_ntptime.c
file kern/kern_malloc.c
+file kern/kern_malloc_debug.c malloc_debug
file kern/kern_physio.c
file kern/kern_proc.c
file kern/kern_prot.c
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 3e1aeb3c0a6..0c0258354ee 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc.c,v 1.19 2000/03/16 22:11:03 art Exp $ */
+/* $OpenBSD: kern_malloc.c,v 1.20 2000/06/06 20:18:20 art Exp $ */
/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
@@ -61,6 +61,12 @@ char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;
#endif
+#ifdef MALLOC_DEBUG
+extern int debug_malloc __P((unsigned long, int, int, void **));
+extern int debug_free __P((void *, int));
+extern void debug_malloc_init __P((void));
+#endif
+
#ifdef DIAGNOSTIC
/*
* This structure provides a set of masks to catch unaligned frees.
@@ -124,6 +130,12 @@ malloc(size, type, flags)
if (((unsigned long)type) > M_LAST)
panic("malloc - bogus type");
#endif
+
+#ifdef MALLOC_DEBUG
+ if (debug_malloc(size, type, flags, (void **)&va))
+ return ((void *) va);
+#endif
+
indx = BUCKETINDX(size);
kbp = &bucket[indx];
s = splimp();
@@ -319,6 +331,11 @@ free(addr, type)
register struct kmemstats *ksp = &kmemstats[type];
#endif
+#ifdef MALLOC_DEBUG
+ if (debug_free(addr, type))
+ return;
+#endif
+
kup = btokup(addr);
size = 1 << kup->ku_indx;
kbp = &bucket[kup->ku_indx];
@@ -457,4 +474,8 @@ kmeminit()
for (indx = 0; indx < M_LAST; indx++)
kmemstats[indx].ks_limit = npg * PAGE_SIZE * 6 / 10;
#endif
+#ifdef MALLOC_DEBUG
+ debug_malloc_init();
+#endif
}
+
diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c
new file mode 100644
index 00000000000..2289c3083a0
--- /dev/null
+++ b/sys/kern/kern_malloc_debug.c
@@ -0,0 +1,308 @@
+/* $OpenBSD: kern_malloc_debug.c,v 1.1 2000/06/06 20:18:20 art Exp $ */
+
+/*
+ * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This really belongs in kern/kern_malloc.c, but it was too much pollution.
+ */
+
+/*
+ * It's only possible to debug one type/size at a time. The question is
+ * whether this is a limitation or a feature. We never want to run this as the
+ * default malloc because we'll run out of memory really fast. Adding
+ * more types will also add to the complexity of the code.
+ *
+ * This is really simple. Every malloc() allocates two virtual pages,
+ * the second page is left unmapped, and the value returned is aligned
+ * so that it ends at (or very close to) the page boundary to catch overflows.
+ * Every free() changes the protection of the first page to VM_PROT_NONE so
+ * that we can catch any dangling writes to it.
+ * To minimize the risk of writes to recycled chunks, we keep an LRU list of
+ * the most recently freed chunks; its length is controlled by MALLOC_DEBUG_CHUNKS.
+ *
+ * Don't expect any performance.
+ *
+ * TODO:
+ * - support for size >= PAGE_SIZE
+ * - add support to the fault handler to give better diagnostics if we fail.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <uvm/uvm.h>
+#include <uvm/uvm_page.h>
+
+/*
+ * malloc_deb_type and malloc_deb_size define the type and size of
+ * memory to be debugged. Use 0 for a wildcard.
+ *
+ * Although these are variables, it's a really bad idea to change the type
+ * while any memory chunks of that type are in use. It's ok to change the
+ * size at runtime.
+ */
+int malloc_deb_type = M_MBUF;
+int malloc_deb_size = 128;
+
+/*
+ * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
+ * freelist before we reuse them.
+ */
+#define MALLOC_DEBUG_CHUNKS 16
+
+/* returns 0 if normal malloc/free should be used */
+int debug_malloc __P((unsigned long, int, int, void **));
+int debug_free __P((void *, int));
+void debug_malloc_init __P((void));
+
+void malloc_deb_allocate_free __P((int));
+void debug_malloc_print __P((void));
+
+struct malloc_deb_entry {
+ TAILQ_ENTRY(malloc_deb_entry) md_list;
+ vaddr_t md_va;
+ paddr_t md_pa;
+ size_t md_size;
+ int md_type;
+};
+
+TAILQ_HEAD(,malloc_deb_entry) malloc_deb_free;
+TAILQ_HEAD(,malloc_deb_entry) malloc_deb_used;
+
+int malloc_deb_allocs;
+int malloc_deb_frees;
+int malloc_deb_pages;
+int malloc_deb_chunks_on_freelist;
+
+#ifndef M_DEBUG
+#define M_DEBUG M_TEMP
+#endif
+
+int
+debug_malloc(size, type, flags, addr)
+ unsigned long size;
+ int type, flags;
+ void **addr;
+{
+ struct malloc_deb_entry *md = NULL;
+ int s;
+ int wait = flags & M_NOWAIT;
+
+ if ((type != malloc_deb_type && malloc_deb_type != 0) ||
+ (size != malloc_deb_size && malloc_deb_size != 0) ||
+ type == M_DEBUG)
+ return 0;
+
+ /* XXX - fix later */
+ if (size > PAGE_SIZE)
+ return 0;
+
+ s = splimp();
+ if (malloc_deb_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
+ malloc_deb_allocate_free(wait);
+
+ md = TAILQ_FIRST(&malloc_deb_free);
+ if (md == NULL) {
+ splx(s);
+ return 0;
+ }
+ TAILQ_REMOVE(&malloc_deb_free, md, md_list);
+ malloc_deb_chunks_on_freelist--;
+
+ TAILQ_INSERT_HEAD(&malloc_deb_used, md, md_list);
+ malloc_deb_allocs++;
+ splx(s);
+
+
+#ifdef PMAP_NEW
+ pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_ALL);
+#else
+ pmap_enter(pmap_kernel(), md->md_va, md->md_pa, VM_PROT_ALL, TRUE,
+ VM_PROT_READ|VM_PROT_WRITE);
+#endif
+
+ md->md_size = size;
+ md->md_type = type;
+
+ /*
+ * Align the returned addr so that it ends where the first page
+ * ends; round up to sizeof(long) to get decent alignment.
+ */
+ *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
+ return 1;
+}
+
+int
+debug_free(addr, type)
+ void *addr;
+ int type;
+{
+ struct malloc_deb_entry *md;
+ int s;
+ vaddr_t va;
+
+ if ((type != malloc_deb_type && malloc_deb_type != 0) ||
+ type == M_DEBUG)
+ return 0;
+
+ /*
+ * trunc_page to get the address of the page.
+ */
+ va = trunc_page((vaddr_t)addr);
+
+ s = splimp();
+ TAILQ_FOREACH(md, &malloc_deb_used, md_list)
+ if (md->md_va == va)
+ break;
+
+ /*
+ * If we are not responsible for this entry, let the normal free
+ * handle it.
+ */
+ if (md == NULL) {
+ /*
+ * Sanity check: catch multiple frees.
+ */
+ TAILQ_FOREACH(md, &malloc_deb_free, md_list)
+ if (md->md_va == va)
+ panic("debug_free: already free");
+ splx(s);
+ return 0;
+ }
+
+ malloc_deb_frees++;
+ TAILQ_REMOVE(&malloc_deb_used, md, md_list);
+
+ TAILQ_INSERT_TAIL(&malloc_deb_free, md, md_list);
+ malloc_deb_chunks_on_freelist++;
+ /*
+ * unmap the page.
+ */
+#ifdef PMAP_NEW
+ pmap_kremove(md->md_va, PAGE_SIZE);
+#else
+ pmap_remove(pmap_kernel(), md->md_va, md->md_va + PAGE_SIZE);
+#endif
+ splx(s);
+
+ return 1;
+}
+
+void
+debug_malloc_init()
+{
+ TAILQ_INIT(&malloc_deb_free);
+ TAILQ_INIT(&malloc_deb_used);
+
+ malloc_deb_allocs = 0;
+ malloc_deb_frees = 0;
+ malloc_deb_pages = 0;
+ malloc_deb_chunks_on_freelist = 0;
+}
+
+/*
+ * Add one chunk to the freelist.
+ *
+ * called at splimp.
+ */
+void
+malloc_deb_allocate_free(wait)
+ int wait;
+{
+ vaddr_t va, offset;
+ struct vm_page *pg;
+ struct malloc_deb_entry *md;
+
+ md = malloc(sizeof(struct malloc_deb_entry), M_DEBUG,
+ wait ? M_WAITOK : M_NOWAIT);
+ if (md == NULL)
+ return;
+
+ va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
+ PAGE_SIZE * 2,
+ UVM_KMF_VALLOC | (wait ? UVM_KMF_NOWAIT : 0));
+ if (va == 0) {
+ free(md, M_DEBUG);
+ return;
+ }
+
+ offset = va - vm_map_min(kernel_map);
+ do {
+ simple_lock(&uvmexp.kmem_object->vmobjlock);
+ pg = uvm_pagealloc(uvmexp.kmem_object, offset, NULL, 0);
+ if (pg) {
+ pg->flags &= ~PG_BUSY; /* new page */
+ UVM_PAGE_OWN(pg, NULL);
+ }
+ simple_unlock(&uvmexp.kmem_object->vmobjlock);
+
+ if (pg)
+ break;
+
+ if (wait == 0) {
+ uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
+ free(md, M_DEBUG);
+ return;
+ }
+ uvm_wait("debug_malloc");
+ } while (1);
+
+ md->md_va = va;
+ md->md_pa = VM_PAGE_TO_PHYS(pg);
+
+ malloc_deb_pages++;
+ TAILQ_INSERT_HEAD(&malloc_deb_free, md, md_list);
+ malloc_deb_chunks_on_freelist++;
+}
+
+void
+debug_malloc_print()
+{
+ struct malloc_deb_entry *md;
+
+ printf("allocs: %d\n", malloc_deb_allocs);
+ printf("frees: %d\n", malloc_deb_frees);
+ printf("pages used: %d\n", malloc_deb_pages);
+ printf("chunks on freelist: %d\n", malloc_deb_chunks_on_freelist);
+
+ printf("\taddr:\tsize:\n");
+ printf("free chunks:\n");
+ TAILQ_FOREACH(md, &malloc_deb_free, md_list)
+ printf("\t0x%x\t0x%x\t%d\n", md->md_va, md->md_size, md->md_type);
+ printf("used chunks:\n");
+ TAILQ_FOREACH(md, &malloc_deb_used, md_list)
+ printf("\t0x%x\t0x%x\t%d\n", md->md_va, md->md_size, md->md_type);
+}
+
+
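For illustration, here is a minimal, self-contained userland sketch of the same guard-page technique, built on POSIX mmap(2)/mprotect(2). This is an analogy under stated assumptions, not the commit's code: the names guarded_alloc() and guarded_free() are hypothetical, and the LRU of MALLOC_DEBUG_CHUNKS freed chunks that debug_free() maintains is omitted. Like debug_malloc(), it backs each allocation with two pages, leaves the second inaccessible so overflows fault, aligns the returned pointer so the chunk ends at the page boundary, and revokes all access on free so dangling writes fault too.

/*
 * Userland sketch of the guard-page malloc debugging above.
 * Assumptions: POSIX mmap/mprotect; guarded_alloc/guarded_free are
 * illustrative names, not part of this commit.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
guarded_alloc(size_t size)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	size_t rounded;
	char *base;

	if (size == 0 || size > pagesz)
		return (NULL);		/* same size >= PAGE_SIZE limitation */

	/* Two pages: the first holds the data, the second is the guard. */
	base = mmap(NULL, 2 * pagesz, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base == MAP_FAILED)
		return (NULL);

	/* Make the second page inaccessible: overflows fault immediately. */
	if (mprotect(base + pagesz, pagesz, PROT_NONE) == -1) {
		(void)munmap(base, 2 * pagesz);
		return (NULL);
	}

	/*
	 * Align the returned addr so that it ends where the first page
	 * ends; round up to sizeof(long) for decent alignment, exactly
	 * as debug_malloc() does.
	 */
	rounded = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
	return (base + pagesz - rounded);
}

static void
guarded_free(void *addr)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	char *base = (char *)((uintptr_t)addr & ~((uintptr_t)pagesz - 1));

	/*
	 * Revoke all access instead of unmapping, so dangling reads and
	 * writes to the freed chunk fault (the kernel code unmaps the
	 * page and keeps the chunk on a freelist instead).
	 */
	(void)mprotect(base, pagesz, PROT_NONE);
}

int
main(void)
{
	char *p = guarded_alloc(128);

	if (p == NULL)
		return (1);
	strcpy(p, "ok");		/* in-bounds write */
	printf("%s\n", p);
	guarded_free(p);
	/* p[0] = 'x'; or p[128] = 'x' before the free would now fault. */
	return (0);
}

The kernel code gets the same effect with uvm_km_kmemalloc() plus pmap_enter()/pmap_remove() (or pmap_kenter_pa()/pmap_kremove() under PMAP_NEW), since it cannot mmap into itself; the alignment arithmetic is identical.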