author     Ariane van der Steldt <ariane@cvs.openbsd.org>  2012-03-15 17:52:29 +0000
committer  Ariane van der Steldt <ariane@cvs.openbsd.org>  2012-03-15 17:52:29 +0000
commit     f3304ab0f292fb67c2476a45bbc5afc1f66b3bbb (patch)
tree       d18934f04bbbda85ccb283900f1304904f26ccad /sys/uvm
parent     8c2c66ef1254809927c0271da4a7759ebfc9d07b (diff)
Reduce install-media pressure from the new vmmap.
Uses fewer special allocators on install media (where they aren't required anyway). Bonus: makes the vmmap initialization code easier to read.
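The mechanism, visible throughout the diff below, is a compile-time guard: the bestfit, pivot and stack/brk allocators are wrapped in #ifndef SMALL_KERNEL, so install-media (RAMDISK) kernels, which define SMALL_KERNEL, keep only the plain random allocator set up by the new uvm_map_setup_md(). A minimal userland sketch of that pattern follows; the types and names are simplified stand-ins, not the real uvm_addr API.

/*
 * Hypothetical sketch of the SMALL_KERNEL pattern used in this commit;
 * the types and names are stand-ins, not the real uvm_addr API.
 * Compile with -DSMALL_KERNEL to see the install-media behaviour.
 */
#include <stdio.h>

struct uaddr_state { const char *name; };

struct vm_map {
	struct uaddr_state *uaddr_any;       /* general-purpose allocator */
	struct uaddr_state *uaddr_brk_stack; /* brk/stack allocator, optional */
};

/* The simple random allocator is always compiled in. */
static struct uaddr_state uaddr_rnd = { "random" };

#ifndef SMALL_KERNEL
/* The special-purpose allocator only exists on full kernels. */
static struct uaddr_state uaddr_stack_brk = { "stack/brk" };
#endif /* !SMALL_KERNEL */

/*
 * Mirror of what uvm_map_setup_md() does after this commit: every map
 * gets the random allocator; the brk/stack allocator is added only
 * when the kernel was not built with option SMALL_KERNEL.
 */
static void
map_setup_md(struct vm_map *map)
{
	map->uaddr_any = &uaddr_rnd;
	map->uaddr_brk_stack = NULL;
#ifndef SMALL_KERNEL
	map->uaddr_brk_stack = &uaddr_stack_brk;
#endif /* !SMALL_KERNEL */
}

int
main(void)
{
	struct vm_map map;

	map_setup_md(&map);
	printf("general allocator:   %s\n", map.uaddr_any->name);
	printf("brk/stack allocator: %s\n",
	    map.uaddr_brk_stack != NULL ? map.uaddr_brk_stack->name : "none");
	return 0;
}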
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_addr.c   18
-rw-r--r--  sys/uvm/uvm_addr.h    4
-rw-r--r--  sys/uvm/uvm_init.c    4
-rw-r--r--  sys/uvm/uvm_map.c   129
4 files changed, 113 insertions, 42 deletions
diff --git a/sys/uvm/uvm_addr.c b/sys/uvm/uvm_addr.c
index 486198e3891..ccf4f16430e 100644
--- a/sys/uvm/uvm_addr.c
+++ b/sys/uvm/uvm_addr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_addr.c,v 1.1 2012/03/09 13:01:29 ariane Exp $ */
+/* $OpenBSD: uvm_addr.c,v 1.2 2012/03/15 17:52:28 ariane Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -111,8 +111,10 @@ struct uvm_addr_state uaddr_kbootstrap;
* Support functions.
*/
+#ifndef SMALL_KERNEL
struct vm_map_entry *uvm_addr_entrybyspace(struct uaddr_free_rbtree*,
vsize_t);
+#endif /* !SMALL_KERNEL */
void uaddr_kinsert(struct vm_map*, struct uvm_addr_state*,
struct vm_map_entry*);
void uaddr_kremove(struct vm_map*, struct uvm_addr_state*,
@@ -146,6 +148,7 @@ int uaddr_bestfit_select(struct vm_map*,
struct uvm_addr_state*, struct vm_map_entry**,
vaddr_t*, vsize_t, vaddr_t, vaddr_t, vm_prot_t,
vaddr_t);
+#ifndef SMALL_KERNEL
int uaddr_pivot_select(struct vm_map*,
struct uvm_addr_state*, struct vm_map_entry**,
vaddr_t*, vsize_t, vaddr_t, vaddr_t, vm_prot_t,
@@ -154,6 +157,7 @@ int uaddr_stack_brk_select(struct vm_map*,
struct uvm_addr_state*, struct vm_map_entry**,
vaddr_t*, vsize_t, vaddr_t, vaddr_t, vm_prot_t,
vaddr_t);
+#endif /* !SMALL_KERNEL */
void uaddr_rnd_insert(struct vm_map*,
struct uvm_addr_state*, struct vm_map_entry*);
@@ -168,11 +172,13 @@ void uaddr_pivot_insert(struct vm_map*,
void uaddr_pivot_remove(struct vm_map*,
struct uvm_addr_state*, struct vm_map_entry*);
+#ifndef SMALL_KERNEL
vsize_t uaddr_pivot_random(void);
int uaddr_pivot_newpivot(struct vm_map*,
struct uaddr_pivot_state*, struct uaddr_pivot*,
struct vm_map_entry**, vaddr_t*,
vsize_t, vaddr_t, vaddr_t, vsize_t, vsize_t);
+#endif /* !SMALL_KERNEL */
#if defined(DEBUG) || defined(DDB)
void uaddr_pivot_print(struct uvm_addr_state*, boolean_t,
@@ -182,6 +188,7 @@ void uaddr_rnd_print(struct uvm_addr_state*, boolean_t,
#endif /* DEBUG || DDB */
+#ifndef SMALL_KERNEL
/*
* Find smallest entry in tree that will fit sz bytes.
*/
@@ -201,6 +208,7 @@ uvm_addr_entrybyspace(struct uaddr_free_rbtree *free, vsize_t sz)
}
return res;
}
+#endif /* !SMALL_KERNEL */
static __inline vaddr_t
uvm_addr_align_forward(vaddr_t addr, vaddr_t align, vaddr_t offset)
@@ -888,6 +896,7 @@ uaddr_kbootstrap_destroy(struct uvm_addr_state *uaddr)
KASSERT(uaddr == (struct uvm_addr_state*)&uaddr_kbootstrap);
}
+#ifndef SMALL_KERNEL
/*
* Best fit algorithm.
*/
@@ -988,8 +997,10 @@ uaddr_bestfit_select(struct vm_map *map, struct uvm_addr_state *uaddr_p,
min : max);
return 0;
}
+#endif /* !SMALL_KERNEL */
+#ifndef SMALL_KERNEL
/*
* A userspace allocator based on pivots.
*/
@@ -1445,7 +1456,9 @@ uaddr_pivot_print(struct uvm_addr_state *uaddr_p, boolean_t full,
}
}
#endif /* DEBUG || DDB */
+#endif /* !SMALL_KERNEL */
+#ifndef SMALL_KERNEL
/*
* Strategy for uaddr_stack_brk_select.
*/
@@ -1550,7 +1563,10 @@ uaddr_stack_brk_create(vaddr_t minaddr, vaddr_t maxaddr)
uaddr->uaddr_functions = &uaddr_stack_brk_functions;
return uaddr;
}
+#endif /* !SMALL_KERNEL */
+#ifndef SMALL_KERNEL
RB_GENERATE(uaddr_free_rbtree, vm_map_entry, dfree.rbtree,
uvm_mapent_fspace_cmp);
+#endif /* !SMALL_KERNEL */
diff --git a/sys/uvm/uvm_addr.h b/sys/uvm/uvm_addr.h
index 5d94947d5a3..5019523373c 100644
--- a/sys/uvm/uvm_addr.h
+++ b/sys/uvm/uvm_addr.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_addr.h,v 1.1 2012/03/09 13:01:29 ariane Exp $ */
+/* $OpenBSD: uvm_addr.h,v 1.2 2012/03/15 17:52:28 ariane Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -91,9 +91,11 @@ int uvm_addr_invoke(struct vm_map*,
struct uvm_addr_state *uaddr_lin_create(vaddr_t, vaddr_t);
struct uvm_addr_state *uaddr_rnd_create(vaddr_t, vaddr_t);
struct uvm_addr_state *uaddr_hint_create(vaddr_t, vaddr_t, vsize_t);
+#ifndef SMALL_KERNEL
struct uvm_addr_state *uaddr_bestfit_create(vaddr_t, vaddr_t);
struct uvm_addr_state *uaddr_pivot_create(vaddr_t, vaddr_t);
struct uvm_addr_state *uaddr_stack_brk_create(vaddr_t, vaddr_t);
+#endif /* SMALL_KERNEL */
int uvm_addr_fitspace(vaddr_t*, vaddr_t*,
vaddr_t, vaddr_t, vsize_t, vaddr_t, vaddr_t,
vsize_t, vsize_t);
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index 81110d054e8..c7668767fd3 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_init.c,v 1.29 2012/03/09 13:01:29 ariane Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.30 2012/03/15 17:52:28 ariane Exp $ */
/* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -179,6 +179,7 @@ uvm_init(void)
*/
uvm_anon_init();
+#ifndef SMALL_KERNEL
/*
* Switch kernel and kmem_map over to a best-fit allocator,
* instead of walking the tree.
@@ -189,4 +190,5 @@ uvm_init(void)
uvm_map_set_uaddr(kmem_map, &kmem_map->uaddr_any[3],
uaddr_bestfit_create(vm_map_min(kmem_map),
vm_map_max(kmem_map)));
+#endif /* !SMALL_KERNEL */
}
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index bc6b9df0281..ce96b5b6a4a 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.148 2012/03/09 13:01:29 ariane Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.149 2012/03/15 17:52:28 ariane Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -136,6 +136,7 @@ int uvm_map_pageable_wire(struct vm_map*,
struct vm_map_entry*, struct vm_map_entry*,
vaddr_t, vaddr_t, int);
void uvm_map_setup_entries(struct vm_map*);
+void uvm_map_setup_md(struct vm_map*);
void uvm_map_teardown(struct vm_map*);
void uvm_map_vmspace_update(struct vm_map*,
struct uvm_map_deadq*, int);
@@ -2236,44 +2237,9 @@ uvm_map_setup(struct vm_map *map, vaddr_t min, vaddr_t max, int flags)
/*
* Configure the allocators.
*/
- if (flags & VM_MAP_ISVMSPACE) {
- /*
- * Setup hint areas.
- */
-#if 0 /* Don't use the cool stuff yet. */
-#ifdef __LP64__
- /* Hinted allocations above 4GB */
- map->uaddr_any[0] =
- uaddr_hint_create(0x100000000ULL, max, 1024 * 1024 * 1024);
- /* Hinted allocations below 4GB */
- map->uaddr_any[1] =
- uaddr_hint_create(MAX(min, VMMAP_MIN_ADDR), 0x100000000ULL,
- 1024 * 1024 * 1024);
-#else
- map->uaddr_any[1] =
- uaddr_hint_create(MAX(min, VMMAP_MIN_ADDR), max,
- 1024 * 1024 * 1024);
-#endif
-
-#ifdef __i386__
- map->uaddr_exe = uaddr_rnd_create(min, I386_MAX_EXE_ADDR);
- map->uaddr_any[3] = uaddr_pivot_create(2 * I386_MAX_EXE_ADDR,
- max);
-#elif defined(__LP64__)
- map->uaddr_any[3] =
- uaddr_pivot_create(MAX(min, 0x100000000ULL), max);
-#else
- map->uaddr_any[3] = uaddr_pivot_create(min, max);
-#endif
-#else /* Don't use the cool stuff yet. */
- /*
- * Use the really crappy stuff at first commit.
- * Browsers like crappy stuff.
- */
- map->uaddr_any[0] = uaddr_rnd_create(min, max);
-#endif
- map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
- } else
+ if (flags & VM_MAP_ISVMSPACE)
+ uvm_map_setup_md(map);
+ else
map->uaddr_any[3] = &uaddr_kbootstrap;
/*
@@ -5026,3 +4992,88 @@ vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
RB_GENERATE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
uvm_mapentry_addrcmp);
+
+
+/*
+ * MD code: vmspace allocator setup.
+ */
+
+
+#ifdef __i386__
+void
+uvm_map_setup_md(struct vm_map *map)
+{
+ vaddr_t min, max;
+
+ min = map->min_offset;
+ max = map->max_offset;
+
+#if 0 /* Cool stuff, not yet */
+ /* Hinted allocations. */
+ map->uaddr_any[1] = uaddr_hint_create(MAX(min, VMMAP_MIN_ADDR), max,
+ 1024 * 1024 * 1024);
+
+ /* Executable code is special. */
+ map->uaddr_exe = uaddr_rnd_create(min, I386_MAX_EXE_ADDR);
+ /* Place normal allocations beyond executable mappings. */
+ map->uaddr_any[3] = uaddr_pivot_create(2 * I386_MAX_EXE_ADDR, max);
+#else /* Crappy stuff, for now */
+ map->uaddr_any[0] = uaddr_rnd_create(min, max);
+#endif
+
+#ifndef SMALL_KERNEL
+ map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
+#endif /* !SMALL_KERNEL */
+}
+#elif __LP64__
+void
+uvm_map_setup_md(struct vm_map *map)
+{
+ vaddr_t min, max;
+
+ min = map->min_offset;
+ max = map->max_offset;
+
+#if 0 /* Cool stuff, not yet */
+ /* Hinted allocations above 4GB */
+ map->uaddr_any[0] =
+ uaddr_hint_create(0x100000000ULL, max, 1024 * 1024 * 1024);
+ /* Hinted allocations below 4GB */
+ map->uaddr_any[1] =
+ uaddr_hint_create(MAX(min, VMMAP_MIN_ADDR), 0x100000000ULL,
+ 1024 * 1024 * 1024);
+ /* Normal allocations, always above 4GB */
+ map->uaddr_any[3] =
+ uaddr_pivot_create(MAX(min, 0x100000000ULL), max);
+#else /* Crappy stuff, for now */
+ map->uaddr_any[0] = uaddr_rnd_create(min, max);
+#endif
+
+#ifndef SMALL_KERNEL
+ map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
+#endif /* !SMALL_KERNEL */
+}
+#else /* non-i386, 32 bit */
+void
+uvm_map_setup_md(struct vm_map *map)
+{
+ vaddr_t min, max;
+
+ min = map->min_offset;
+ max = map->max_offset;
+
+#if 0 /* Cool stuff, not yet */
+ /* Hinted allocations. */
+ map->uaddr_any[1] = uaddr_hint_create(MAX(min, VMMAP_MIN_ADDR), max,
+ 1024 * 1024 * 1024);
+ /* Normal allocations. */
+ map->uaddr_any[3] = uaddr_pivot_create(min, max);
+#else /* Crappy stuff, for now */
+ map->uaddr_any[0] = uaddr_rnd_create(min, max);
+#endif
+
+#ifndef SMALL_KERNEL
+ map->uaddr_brk_stack = uaddr_stack_brk_create(min, max);
+#endif /* !SMALL_KERNEL */
+}
+#endif