Diffstat (limited to 'sys/uvm/uvm_km.c')
-rw-r--r--  sys/uvm/uvm_km.c  184
1 file changed, 146 insertions(+), 38 deletions(-)
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index a908f8f53aa..a5bb21b2a7b 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.22 1999/03/26 21:58:39 mycroft Exp $ */
+/* $NetBSD: uvm_km.c,v 1.27 1999/06/04 23:38:41 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -156,12 +156,16 @@
vm_map_t kernel_map = NULL;
+struct vmi_list vmi_list;
+simple_lock_data_t vmi_list_slock;
+
/*
* local functions
*/
static int uvm_km_get __P((struct uvm_object *, vaddr_t,
- vm_page_t *, int *, int, vm_prot_t, int, int));
+ vm_page_t *, int *, int, vm_prot_t, int, int));
+
/*
 * local data structures
*/
@@ -414,29 +418,43 @@ uvm_km_init(start, end)
vaddr_t base = VM_MIN_KERNEL_ADDRESS;
/*
- * first, init kernel memory objects.
+ * first, initialize the interrupt-safe map list.
+ */
+ LIST_INIT(&vmi_list);
+ simple_lock_init(&vmi_list_slock);
+
+ /*
+ * next, init kernel memory objects.
*/
/* kernel_object: for pageable anonymous kernel memory */
uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
- /* kmem_object: for malloc'd memory (wired, protected by splimp) */
+ /*
+ * kmem_object: for use by the kernel malloc(). Memory is always
+ * wired, and this object (and the kmem_map) can be accessed at
+ * interrupt time.
+ */
simple_lock_init(&kmem_object_store.vmobjlock);
kmem_object_store.pgops = &km_pager;
TAILQ_INIT(&kmem_object_store.memq);
kmem_object_store.uo_npages = 0;
/* we are special. we never die */
- kmem_object_store.uo_refs = UVM_OBJ_KERN;
+ kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
uvmexp.kmem_object = &kmem_object_store;
- /* mb_object: for mbuf memory (always wired, protected by splimp) */
+ /*
+ * mb_object: for mbuf cluster pages on platforms which use the
+ * mb_map. Memory is always wired, and this object (and the mb_map)
+ * can be accessed at interrupt time.
+ */
simple_lock_init(&mb_object_store.vmobjlock);
mb_object_store.pgops = &km_pager;
TAILQ_INIT(&mb_object_store.memq);
mb_object_store.uo_npages = 0;
/* we are special. we never die */
- mb_object_store.uo_refs = UVM_OBJ_KERN;
+ mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
uvmexp.mb_object = &mb_object_store;
/*
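
Note: the hunk above retags kmem_object and mb_object with UVM_OBJ_KERN_INTRSAFE in
place of UVM_OBJ_KERN. The uvm_object.h side of that tag is not part of this diff; a
minimal sketch of what it presumably looks like (the values and the macro body are
assumptions, not taken from this change):

	/* assumed definitions, not shown in this diff -- values are guesses */
	#define UVM_OBJ_KERN			(-2)	/* assumed */
	#define UVM_OBJ_KERN_INTRSAFE		(-3)	/* assumed */

	/* assumed form of the macro used by the DIAGNOSTIC checks below */
	#define UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) \
		((uobj)->uo_refs == UVM_OBJ_KERN_INTRSAFE)
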
@@ -444,7 +462,7 @@ uvm_km_init(start, end)
* before installing.
*/
- uvm_map_setup(&kernel_map_store, base, end, FALSE);
+ uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
kernel_map_store.pmap = pmap_kernel();
if (uvm_map(&kernel_map_store, &base, start - base, NULL,
UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
@@ -469,11 +487,11 @@ uvm_km_init(start, end)
* alloc a new map
*/
struct vm_map *
-uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
+uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
struct vm_map *map;
vaddr_t *min, *max; /* OUT, OUT */
vsize_t size;
- boolean_t pageable;
+ int flags;
boolean_t fixed;
struct vm_map *submap;
{
@@ -503,11 +521,11 @@ uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
pmap_reference(vm_map_pmap(map));
if (submap == NULL) {
- submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
+ submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
if (submap == NULL)
panic("uvm_km_suballoc: unable to create submap");
} else {
- uvm_map_setup(submap, *min, *max, pageable);
+ uvm_map_setup(submap, *min, *max, flags);
submap->pmap = vm_map_pmap(map);
}
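
Note: uvm_km_suballoc() now takes an int flags argument in place of the old boolean
pageable, so callers pass map flags such as VM_MAP_PAGEABLE (used earlier in this diff).
A hypothetical caller creating a wired, interrupt-safe submap might look like the sketch
below; VM_MAP_INTRSAFE and the surrounding names are assumptions, not taken from this diff:

	/* hypothetical caller sketch -- names and size are illustrative */
	static struct vm_map sub_map_store;
	struct vm_map *sub_map;
	vaddr_t sub_min, sub_max;
	vsize_t sub_size = 16 * PAGE_SIZE;	/* illustrative size */

	sub_map = uvm_km_suballoc(kernel_map, &sub_min, &sub_max,
	    sub_size, VM_MAP_INTRSAFE /* assumed flag */, FALSE,
	    &sub_map_store);
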
@@ -535,15 +553,17 @@ uvm_km_pgremove(uobj, start, end)
struct uvm_object *uobj;
vaddr_t start, end;
{
- boolean_t by_list, is_aobj;
+ boolean_t by_list;
struct vm_page *pp, *ppnext;
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
simple_lock(&uobj->vmobjlock); /* lock object */
- /* is uobj an aobj? */
- is_aobj = uobj->pgops == &aobj_pager;
+#ifdef DIAGNOSTIC
+ if (uobj->pgops != &aobj_pager)
+ panic("uvm_km_pgremove: object %p not an aobj", uobj);
+#endif
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
@@ -561,26 +581,24 @@ uvm_km_pgremove(uobj, start, end)
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
+
/* now do the actual work */
- if (pp->flags & PG_BUSY)
+ if (pp->flags & PG_BUSY) {
/* owner must check for this when done */
pp->flags |= PG_RELEASED;
- else {
- pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);
+ } else {
+ /* free the swap slot... */
+ uao_dropswap(uobj, curoff >> PAGE_SHIFT);
/*
- * if this kernel object is an aobj, free the swap slot.
+ * ...and free the page; note it may be on the
+ * active or inactive queues.
*/
- if (is_aobj) {
- uao_dropswap(uobj, curoff >> PAGE_SHIFT);
- }
-
uvm_lock_pageq();
uvm_pagefree(pp);
uvm_unlock_pageq();
}
/* done */
-
}
simple_unlock(&uobj->vmobjlock);
return;
@@ -588,7 +606,6 @@ uvm_km_pgremove(uobj, start, end)
loop_by_list:
for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
-
ppnext = pp->listq.tqe_next;
if (pp->offset < start || pp->offset >= end) {
continue;
@@ -596,26 +613,111 @@ loop_by_list:
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
+
/* now do the actual work */
- if (pp->flags & PG_BUSY)
+ if (pp->flags & PG_BUSY) {
/* owner must check for this when done */
pp->flags |= PG_RELEASED;
- else {
- pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);
+ } else {
+ /* free the swap slot... */
+ uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
/*
- * if this kernel object is an aobj, free the swap slot.
+ * ...and free the page; note it may be on the
+ * active or inactive queues.
*/
- if (is_aobj) {
- uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
- }
-
uvm_lock_pageq();
uvm_pagefree(pp);
uvm_unlock_pageq();
}
/* done */
+ }
+ simple_unlock(&uobj->vmobjlock);
+ return;
+}
+
+
+/*
+ * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
+ * objects
+ *
+ * => when you unmap a part of anonymous kernel memory you want to toss
+ * the pages right away. (this gets called from uvm_unmap_...).
+ * => none of the pages will ever be busy, and none of them will ever
+ * be on the active or inactive queues (because these objects are
+ * never allowed to "page").
+ */
+void
+uvm_km_pgremove_intrsafe(uobj, start, end)
+ struct uvm_object *uobj;
+ vaddr_t start, end;
+{
+ boolean_t by_list;
+ struct vm_page *pp, *ppnext;
+ vaddr_t curoff;
+ UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
+
+ simple_lock(&uobj->vmobjlock); /* lock object */
+
+#ifdef DIAGNOSTIC
+ if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
+ panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
+#endif
+
+ /* choose cheapest traversal */
+ by_list = (uobj->uo_npages <=
+ ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
+
+ if (by_list)
+ goto loop_by_list;
+
+ /* by hash */
+
+ for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
+ pp = uvm_pagelookup(uobj, curoff);
+ if (pp == NULL)
+ continue;
+
+ UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
+ pp->flags & PG_BUSY, 0, 0);
+#ifdef DIAGNOSTIC
+ if (pp->flags & PG_BUSY)
+ panic("uvm_km_pgremove_intrsafe: busy page");
+ if (pp->pqflags & PQ_ACTIVE)
+ panic("uvm_km_pgremove_intrsafe: active page");
+ if (pp->pqflags & PQ_INACTIVE)
+ panic("uvm_km_pgremove_intrsafe: inactive page");
+#endif
+
+ /* free the page */
+ uvm_pagefree(pp);
+ }
+ simple_unlock(&uobj->vmobjlock);
+ return;
+
+loop_by_list:
+
+ for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
+ ppnext = pp->listq.tqe_next;
+ if (pp->offset < start || pp->offset >= end) {
+ continue;
+ }
+
+ UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
+ pp->flags & PG_BUSY, 0, 0);
+
+#ifdef DIAGNOSTIC
+ if (pp->flags & PG_BUSY)
+ panic("uvm_km_pgremove_intrsafe: busy page");
+ if (pp->pqflags & PQ_ACTIVE)
+ panic("uvm_km_pgremove_intrsafe: active page");
+ if (pp->pqflags & PQ_INACTIVE)
+ panic("uvm_km_pgremove_intrsafe: inactive page");
+#endif
+
+ /* free the page */
+ uvm_pagefree(pp);
}
simple_unlock(&uobj->vmobjlock);
return;
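
Note: with the split above, page removal for kernel objects has two entry points:
uvm_km_pgremove() for pageable aobj-backed memory and uvm_km_pgremove_intrsafe() for
interrupt-safe objects. The actual call sites live in the unmap path (uvm_map.c) and
are not part of this diff; a hedged sketch of the expected selection logic:

	/* sketch only -- the real call site is in the uvm_unmap_ path */
	if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj))
		uvm_km_pgremove_intrsafe(uobj, entry_start, entry_end);
	else
		uvm_km_pgremove(uobj, entry_start, entry_end);
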
@@ -725,12 +827,18 @@ uvm_km_kmemalloc(map, obj, size, flags)
* (because if pmap_enter wants to allocate out of kmem_object
* it will need to lock it itself!)
*/
+ if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
#if defined(PMAP_NEW)
- pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
+ pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_ALL);
#else
- pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
- UVM_PROT_ALL, TRUE, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
+ UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
+ } else {
+ pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
+ UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
+ }
loopva += PAGE_SIZE;
offset += PAGE_SIZE;
size -= PAGE_SIZE;
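
Note: the hunk above makes uvm_km_kmemalloc() map pages into interrupt-safe objects
with pmap_kenter_pa() under PMAP_NEW (a wired, unmanaged mapping the pmap layer can
install without allocating), and uses plain pmap_enter() for everything else. The same
logic restated as a hypothetical helper, not present in the diff, which open-codes it:

	static __inline void
	km_enter_page(map, va, pg, intrsafe)
		vm_map_t map;
		vaddr_t va;
		struct vm_page *pg;
		boolean_t intrsafe;
	{
	#if defined(PMAP_NEW)
		if (intrsafe) {
			/* wired, unmanaged mapping; never sleeps or allocates */
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
			return;
		}
	#endif
		pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
	}
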
@@ -861,8 +969,8 @@ uvm_km_alloc1(map, size, zeroit)
* map it in; note we're never called with an intrsafe
* object, so we always use regular old pmap_enter().
*/
- pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
- UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
+ UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
loopva += PAGE_SIZE;
offset += PAGE_SIZE;