-rw-r--r--   sys/conf/files                   3
-rw-r--r--   sys/kern/kern_malloc.c          18
-rw-r--r--   sys/kern/kern_malloc_debug.c   364
3 files changed, 2 insertions, 383 deletions
diff --git a/sys/conf/files b/sys/conf/files
index 990be7b73f7..d5fdb71f0fa 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-# $OpenBSD: files,v 1.654 2017/11/03 13:01:20 florian Exp $
+# $OpenBSD: files,v 1.655 2017/11/14 06:46:43 dlg Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -665,7 +665,6 @@ file kern/kern_kthread.c
file kern/kern_ktrace.c ktrace
file kern/kern_lock.c
file kern/kern_malloc.c
-file kern/kern_malloc_debug.c malloc_debug
file kern/kern_mutex.c witness
file kern/kern_rwlock.c
file kern/kern_physio.c
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 60b29ad437b..b448115593f 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc.c,v 1.130 2017/07/10 23:49:10 dlg Exp $ */
+/* $OpenBSD: kern_malloc.c,v 1.131 2017/11/14 06:46:43 dlg Exp $ */
/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
@@ -178,14 +178,6 @@ malloc(size_t size, int type, int flags)
}
#endif
-#ifdef MALLOC_DEBUG
- if (debug_malloc(size, type, flags, (void **)&va)) {
- if ((flags & M_ZERO) && va != NULL)
- memset(va, 0, size);
- return (va);
- }
-#endif
-
if (size > 65535 * PAGE_SIZE) {
if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
@@ -380,11 +372,6 @@ free(void *addr, int type, size_t freedsize)
if (addr == NULL)
return;
-#ifdef MALLOC_DEBUG
- if (debug_free(addr, type))
- return;
-#endif
-
#ifdef DIAGNOSTIC
if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
panic("free: non-malloced addr %p type %s", addr,
@@ -573,9 +560,6 @@ kmeminit(void)
for (indx = 0; indx < M_LAST; indx++)
kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
-#ifdef MALLOC_DEBUG
- debug_malloc_init();
-#endif
}
/*
diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c
deleted file mode 100644
index 5ccf09786ff..00000000000
--- a/sys/kern/kern_malloc_debug.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/* $OpenBSD: kern_malloc_debug.c,v 1.35 2017/07/10 23:49:10 dlg Exp $ */
-
-/*
- * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * This really belongs in kern/kern_malloc.c, but it was too much pollution.
- */
-
-/*
- * It's only possible to debug one type/size at a time. The question is
- * if this is a limitation or a feature. We never want to run this as the
- * default malloc because we'll run out of memory really fast. Adding
- * more types will also add to the complexity of the code.
- *
- * This is really simple. Every malloc() allocates two virtual pages,
- * the second page is left unmapped, and the value returned is aligned
- * so that it ends at (or very close to) the page boundary to catch overflows.
- * Every free() changes the protection of the first page to PROT_NONE so
- * that we can catch any dangling writes to it.
- * To minimize the risk of writes to recycled chunks we keep an LRU of latest
- * freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
- *
- * Don't expect any performance.
- *
- * TODO:
- * - support for size >= PAGE_SIZE
- * - add support to the fault handler to give better diagnostics if we fail.
- */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/systm.h>
-#include <sys/pool.h>
-#include <sys/mutex.h>
-
-#include <uvm/uvm_extern.h>
-#include <uvm/uvm.h>
-
-/*
- * debug_malloc_type and debug_malloc_size define the type and size of
- * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
- * is the lower limit and debug_malloc_size_hi the upper limit of sizes
- * being debugged; 0 will not work as a wildcard for the upper limit.
- * For any debugging to take place, type must be != -1, size must be >= 0,
- * and if the limits are being used, size must be set to 0.
- * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
- *
- * Although those are variables, it's a really bad idea to change the type
- * if any memory chunks of this type are used. It's ok to change the size
- * in runtime.
- */
-int debug_malloc_type = -1;
-int debug_malloc_size = -1;
-int debug_malloc_size_lo = -1;
-int debug_malloc_size_hi = -1;
-
-/*
- * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
- * freelist before we reuse them.
- */
-#define MALLOC_DEBUG_CHUNKS 16
-
-void debug_malloc_allocate_free(int);
-
-struct debug_malloc_entry {
- TAILQ_ENTRY(debug_malloc_entry) md_list;
- vaddr_t md_va;
- paddr_t md_pa;
- size_t md_size;
- int md_type;
-};
-
-TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist;
-TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist;
-
-int debug_malloc_allocs;
-int debug_malloc_frees;
-int debug_malloc_pages;
-int debug_malloc_chunks_on_freelist;
-
-int debug_malloc_initialized;
-
-struct pool debug_malloc_pool;
-struct mutex debug_malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
-
-int
-debug_malloc(unsigned long size, int type, int flags, void **addr)
-{
- struct debug_malloc_entry *md = NULL;
- int wait = (flags & M_NOWAIT) == 0;
-
- /* Careful not to compare unsigned long to int -1 */
- if (((type != debug_malloc_type && debug_malloc_type != 0) ||
- (size != debug_malloc_size && debug_malloc_size != 0) ||
- (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
- (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi) ||
- !debug_malloc_initialized) && type != M_DEBUG)
- return (0);
-
- /* XXX - fix later */
- if (size > PAGE_SIZE)
- return (0);
-
- if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
- debug_malloc_allocate_free(wait);
-
- mtx_enter(&debug_malloc_mtx);
-
- md = TAILQ_FIRST(&debug_malloc_freelist);
- if (md == NULL) {
- mtx_leave(&debug_malloc_mtx);
- return (0);
- }
- TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
- debug_malloc_chunks_on_freelist--;
-
- TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
- debug_malloc_allocs++;
-
- md->md_size = size;
- md->md_type = type;
-
- mtx_leave(&debug_malloc_mtx);
-
- pmap_kenter_pa(md->md_va, md->md_pa, PROT_READ | PROT_WRITE);
- pmap_update(pmap_kernel());
-
- /*
- * Align the returned addr so that it ends where the first page
- * ends. roundup to get decent alignment.
- */
- *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
- return (1);
-}
-
-int
-debug_free(void *addr, int type)
-{
- struct debug_malloc_entry *md;
- vaddr_t va;
-
- if (type != debug_malloc_type && debug_malloc_type != 0 &&
- type != M_DEBUG)
- return (0);
-
- /*
- * trunc_page to get the address of the page.
- */
- va = trunc_page((vaddr_t)addr);
-
- mtx_enter(&debug_malloc_mtx);
- TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
- if (md->md_va == va)
- break;
-
- /*
- * If we are not responsible for this entry, let the normal free
- * handle it
- */
- if (md == NULL) {
- /*
- * sanity check. Check for multiple frees.
- */
- TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
- if (md->md_va == va)
- panic("debug_free: already free");
- mtx_leave(&debug_malloc_mtx);
- return (0);
- }
-
- debug_malloc_frees++;
- TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);
- mtx_leave(&debug_malloc_mtx);
-
- /*
- * unmap the page.
- */
- pmap_kremove(md->md_va, PAGE_SIZE);
- pmap_update(pmap_kernel());
-
- mtx_enter(&debug_malloc_mtx);
- TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
- debug_malloc_chunks_on_freelist++;
- mtx_leave(&debug_malloc_mtx);
-
- return (1);
-}
-
-void
-debug_malloc_init(void)
-{
-
- TAILQ_INIT(&debug_malloc_freelist);
- TAILQ_INIT(&debug_malloc_usedlist);
-
- debug_malloc_allocs = 0;
- debug_malloc_frees = 0;
- debug_malloc_pages = 0;
- debug_malloc_chunks_on_freelist = 0;
-
- pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
- 0, 0, IPL_VM, "mdbepl", NULL);
-
- debug_malloc_initialized = 1;
-}
-
-/*
- * Add one chunk to the freelist.
- *
- * called at splvm.
- */
-void
-debug_malloc_allocate_free(int wait)
-{
- vaddr_t va, offset;
- struct vm_page *pg;
- struct debug_malloc_entry *md;
- int s;
-
- md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
- if (md == NULL)
- return;
-
- s = splvm();
-
- va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
- UVM_KMF_VALLOC | (wait ? 0: UVM_KMF_NOWAIT));
- if (va == 0)
- goto put;
-
- offset = va - vm_map_min(kernel_map);
- for (;;) {
- pg = uvm_pagealloc(NULL, 0, NULL, 0);
- if (pg) {
- atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
- UVM_PAGE_OWN(pg, NULL);
- }
-
- if (pg)
- break;
-
- if (wait == 0)
- goto unmap;
-
- uvm_wait("debug_malloc");
- }
-
- splx(s);
-
- md->md_va = va;
- md->md_pa = VM_PAGE_TO_PHYS(pg);
-
- mtx_enter(&debug_malloc_mtx);
- debug_malloc_pages++;
- TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
- debug_malloc_chunks_on_freelist++;
- mtx_leave(&debug_malloc_mtx);
-
- return;
-
-unmap:
- uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
-put:
- splx(s);
- pool_put(&debug_malloc_pool, md);
- return;
-}
-
-void
-debug_malloc_print(void)
-{
-
- debug_malloc_printit(printf, 0);
-}
-
-void
-debug_malloc_assert_allocated(void *addr, const char *func)
-{
- struct debug_malloc_entry *md;
- vaddr_t va = (vaddr_t)addr;
-
- TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
- if (va >= md->md_va &&
- va < md->md_va + 2 * PAGE_SIZE)
- panic("debug_malloc: (%s): %p - freed", func, addr);
- }
- TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
- if (va >= md->md_va + PAGE_SIZE &&
- va < md->md_va + 2 * PAGE_SIZE)
- panic("debug_malloc: (%s): %p - overflow", func, addr);
- }
-}
-
-void
-debug_malloc_printit(
- int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))),
- vaddr_t addr)
-{
- struct debug_malloc_entry *md;
-
- if (addr) {
- TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
- if (addr >= md->md_va &&
- addr < md->md_va + 2 * PAGE_SIZE) {
- (*pr)("Memory at address 0x%lx is in a freed "
- "area. type %d, size: %zu\n ",
- addr, md->md_type, md->md_size);
- return;
- }
- }
- TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
- if (addr >= md->md_va + PAGE_SIZE &&
- addr < md->md_va + 2 * PAGE_SIZE) {
- (*pr)("Memory at address 0x%lx is just outside "
- "an allocated area. type %d, size: %zu\n",
- addr, md->md_type, md->md_size);
- return;
- }
- }
- (*pr)("Memory at address 0x%lx is outside debugged malloc.\n",
- addr);
- return;
- }
-
- (*pr)("allocs: %d\n", debug_malloc_allocs);
- (*pr)("frees: %d\n", debug_malloc_frees);
- (*pr)("pages used: %d\n", debug_malloc_pages);
- (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
-
- (*pr)("\taddr:\tsize:\n");
- (*pr)("free chunks:\n");
- TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
- (*pr)("\t0x%lx\t0x%lx\t%d\n", md->md_va, md->md_size,
- md->md_type);
- (*pr)("used chunks:\n");
- TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
- (*pr)("\t0x%lx\t0x%lx\t%d\n", md->md_va, md->md_size,
- md->md_type);
-}