Diffstat (limited to 'sys/uvm/uvm_km.c')
-rw-r--r--  sys/uvm/uvm_km.c  92
1 file changed, 24 insertions, 68 deletions
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 08f373e54de..aa5895fb37c 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.19 2001/11/07 01:18:01 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.39 2000/09/13 15:00:25 thorpej Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.20 2001/11/07 02:55:50 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.40 2000/11/24 07:07:27 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -328,12 +328,8 @@ uvm_km_pgremove(uobj, start, end)
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
- simple_lock(&uobj->vmobjlock); /* lock object */
-
-#ifdef DIAGNOSTIC
- if (__predict_false(uobj->pgops != &aobj_pager))
- panic("uvm_km_pgremove: object %p not an aobj", uobj);
-#endif
+ KASSERT(uobj->pgops == &aobj_pager);
+ simple_lock(&uobj->vmobjlock);
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
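This hunk shows the conversion the whole commit repeats: a hand-rolled #ifdef DIAGNOSTIC / __predict_false / panic block collapses into a single KASSERT, and the check moves above simple_lock() since uobj->pgops is fixed when the object is set up and needs no lock. The two forms behave the same because KASSERT itself only checks under DIAGNOSTIC; a minimal sketch of the idiom (illustrative only, the real macro lives in <sys/systm.h>):

    #ifdef DIAGNOSTIC
    #define KASSERT(e)                                              \
        do {                                                        \
            if (__predict_false(!(e)))                              \
                panic("kernel assertion \"%s\" failed: "            \
                    "file \"%s\", line %d",                         \
                    #e, __FILE__, __LINE__);                        \
        } while (0)
    #else
    #define KASSERT(e)  /* compiled out on non-DIAGNOSTIC kernels */
    #endif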
@@ -368,15 +364,14 @@ uvm_km_pgremove(uobj, start, end)
uvm_pagefree(pp);
uvm_unlock_pageq();
}
- /* done */
}
simple_unlock(&uobj->vmobjlock);
return;
loop_by_list:
- for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
- ppnext = pp->listq.tqe_next;
+ for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
+ ppnext = TAILQ_NEXT(pp, listq);
if (pp->offset < start || pp->offset >= end) {
continue;
}
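The other mechanical change: open-coded tqh_first/tqe_next field accesses become the <sys/queue.h> TAILQ_FIRST()/TAILQ_NEXT() macros. The loop shape is the important part: the successor is captured before the body runs, because freeing pp unlinks it from uobj->memq. A self-contained sketch of the same pattern, with a hypothetical element type standing in for struct vm_page:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct page {
        TAILQ_ENTRY(page) listq;   /* linkage, like vm_page's listq */
        unsigned long offset;
    };
    TAILQ_HEAD(pglist, page);

    /*
     * Free every element whose offset falls in [start, end).  Safe
     * against removal because ppnext is saved before pp is destroyed.
     */
    static void
    prune_range(struct pglist *q, unsigned long start, unsigned long end)
    {
        struct page *pp, *ppnext;

        for (pp = TAILQ_FIRST(q); pp != NULL; pp = ppnext) {
            ppnext = TAILQ_NEXT(pp, listq);
            if (pp->offset < start || pp->offset >= end)
                continue;
            TAILQ_REMOVE(q, pp, listq);
            free(pp);
        }
    }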
@@ -384,7 +379,6 @@ loop_by_list:
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
- /* now do the actual work */
if (pp->flags & PG_BUSY) {
/* owner must check for this when done */
pp->flags |= PG_RELEASED;
@@ -400,10 +394,8 @@ loop_by_list:
uvm_pagefree(pp);
uvm_unlock_pageq();
}
- /* done */
}
simple_unlock(&uobj->vmobjlock);
- return;
}
@@ -428,13 +420,9 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
vaddr_t curoff;
UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
+ KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
simple_lock(&uobj->vmobjlock); /* lock object */
-#ifdef DIAGNOSTIC
- if (__predict_false(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0))
- panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
-#endif
-
/* choose cheapest traversal */
by_list = (uobj->uo_npages <=
((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);
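Both removal functions pick their traversal with the same cost heuristic: one hash lookup (uvm_pagelookup()) is treated as roughly UKM_HASH_PENALTY list steps, so walking the object's entire memq only pays off while the object holds few pages relative to the range being removed. Annotated, the test reads:

    /*
     * by_list != 0: walk every page on uobj->memq once.
     * by_list == 0: one hash lookup per page in [start, end).
     * The list walk wins only while the object is small compared
     * to the range, scaled by the relative cost of a lookup.
     */
    by_list = (uobj->uo_npages <=
        ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);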
@@ -446,21 +434,15 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
pp = uvm_pagelookup(uobj, curoff);
- if (pp == NULL)
+ if (pp == NULL) {
continue;
+ }
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
-#ifdef DIAGNOSTIC
- if (__predict_false(pp->flags & PG_BUSY))
- panic("uvm_km_pgremove_intrsafe: busy page");
- if (__predict_false(pp->pqflags & PQ_ACTIVE))
- panic("uvm_km_pgremove_intrsafe: active page");
- if (__predict_false(pp->pqflags & PQ_INACTIVE))
- panic("uvm_km_pgremove_intrsafe: inactive page");
-#endif
-
- /* free the page */
+ KASSERT((pp->flags & PG_BUSY) == 0);
+ KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
+ KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
uvm_pagefree(pp);
}
simple_unlock(&uobj->vmobjlock);
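The three assertions state what interrupt-safe means for an object's pages: they are never busy and never on the active or inactive paging queues, which is why this path can call uvm_pagefree() without the uvm_lock_pageq()/uvm_unlock_pageq() bracket that the non-intrsafe function above needs. Condensed (a sketch, folding the two queue flags together):

    /* An intrsafe object's pages take no part in paging: */
    KASSERT((pp->flags & PG_BUSY) == 0);
    KASSERT((pp->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) == 0);
    uvm_pagefree(pp);   /* hence no page-queue locking here */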
@@ -468,29 +450,20 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
loop_by_list:
- for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
- ppnext = pp->listq.tqe_next;
+ for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
+ ppnext = TAILQ_NEXT(pp, listq);
if (pp->offset < start || pp->offset >= end) {
continue;
}
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(pp->flags & PG_BUSY))
- panic("uvm_km_pgremove_intrsafe: busy page");
- if (__predict_false(pp->pqflags & PQ_ACTIVE))
- panic("uvm_km_pgremove_intrsafe: active page");
- if (__predict_false(pp->pqflags & PQ_INACTIVE))
- panic("uvm_km_pgremove_intrsafe: inactive page");
-#endif
-
- /* free the page */
+ KASSERT((pp->flags & PG_BUSY) == 0);
+ KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
+ KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
uvm_pagefree(pp);
}
simple_unlock(&uobj->vmobjlock);
- return;
}
@@ -518,14 +491,9 @@ uvm_km_kmemalloc(map, obj, size, flags)
struct vm_page *pg;
UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);
-
UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
- map, obj, size, flags);
-#ifdef DIAGNOSTIC
- /* sanity check */
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_kmemalloc: invalid map");
-#endif
+ map, obj, size, flags);
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
/*
* setup for call
@@ -554,6 +522,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
return(kva);
}
+
/*
* recover object offset from virtual address
*/
@@ -597,6 +566,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
* (because if pmap_enter wants to allocate out of kmem_object
* it will need to lock it itself!)
*/
+
if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
VM_PROT_ALL);
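This branch is where the intrsafe distinction earns its keep: pmap_kenter_pa() installs a wired kernel mapping without allocating or sleeping, so it is safe to call while the object is locked, whereas pmap_enter() may need to allocate pmap structures, potentially out of kmem_object itself, and therefore must be able to take that lock on its own. The shape of the branch, with the pmap_enter() argument list abbreviated since its exact flags vary by release:

    if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
        /* never allocates, never sleeps -- safe under the lock */
        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
    } else {
        /* may allocate, so it must do its own locking */
        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
            VM_PROT_ALL, /* wiring flags elided */ 0);
    }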
@@ -609,7 +579,6 @@ uvm_km_kmemalloc(map, obj, size, flags)
offset += PAGE_SIZE;
size -= PAGE_SIZE;
}
-
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
return(kva);
}
@@ -624,7 +593,6 @@ uvm_km_free(map, addr, size)
vaddr_t addr;
vsize_t size;
{
-
uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
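uvm_km_free() widens the request to page granularity before unmapping: the start address is truncated down and the end rounded up, so a request that straddles page boundaries still releases every page it touches. The two helpers are machine-dependent macros; their typical shape, plus a worked example assuming 4 KB pages:

    #define PAGE_MASK       (PAGE_SIZE - 1)
    #define trunc_page(va)  ((va) & ~PAGE_MASK)                /* round down */
    #define round_page(va)  (((va) + PAGE_MASK) & ~PAGE_MASK)  /* round up */

    /*
     * addr = 0x1010, size = 0x20:
     *   trunc_page(0x1010)        == 0x1000
     *   round_page(0x1010 + 0x20) == 0x2000
     * so the whole page at 0x1000 is unmapped.
     */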
@@ -670,11 +638,7 @@ uvm_km_alloc1(map, size, zeroit)
UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
-
-#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
- panic("uvm_km_alloc1");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
kva = vm_map_min(map); /* hint */
@@ -771,11 +735,7 @@ uvm_km_valloc(map, size)
UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_valloc");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
kva = vm_map_min(map); /* hint */
@@ -814,11 +774,7 @@ uvm_km_valloc_prefer_wait(map, size, prefer)
UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
-
-#ifdef DIAGNOSTIC
- if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
- panic("uvm_km_valloc_wait");
-#endif
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
size = round_page(size);
if (size > vm_map_max(map) - vm_map_min(map))
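The remaining hunks apply the same conversion to the kernel-map sanity check that opens uvm_km_kmemalloc(), uvm_km_alloc1(), uvm_km_valloc() and uvm_km_valloc_prefer_wait(): each allocator only makes sense on a map backed by the kernel pmap, so each now asserts that up front in one line. Reduced to its essentials (a sketch of the shared prologue, not a real uvm function):

    /*
     * Shared prologue of the uvm_km_* allocators after this commit:
     * assert the map is backed by the kernel pmap, then round the
     * request to whole pages and start the space search low.
     */
    KASSERT(vm_map_pmap(map) == pmap_kernel());
    size = round_page(size);
    kva = vm_map_min(map);      /* hint */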