summaryrefslogtreecommitdiff
path: root/sys/uvm/uvm_page.c
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>2000-03-16 22:11:06 +0000
committerArtur Grabowski <art@cvs.openbsd.org>2000-03-16 22:11:06 +0000
commit6270ed032c8d513c60cd21bb0356766633014a3b (patch)
tree3566a5fad92c120888db8b8da8da3c9b1006d48c /sys/uvm/uvm_page.c
parent20caed58266ef751b064092b7925dd0ae32fee9d (diff)
Bring in some new UVM code from NetBSD (not current).
- Introduce a new type of map that is interrupt safe and never allows faults in it. mb_map and kmem_map are made intrsafe. - Add "access protection" to uvm_vslock (to be passed down to uvm_fault and later to pmap_enter). - madvise(2) now works. - various cleanups.
Diffstat (limited to 'sys/uvm/uvm_page.c')
-rw-r--r--sys/uvm/uvm_page.c20
1 file changed, 7 insertions, 13 deletions
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index fa85122f307..c60017de35b 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.19 1999/05/20 20:07:55 thorpej Exp $ */
+/* $NetBSD: uvm_page.c,v 1.23 1999/05/25 01:34:13 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -847,9 +847,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
panic("uvm_pagealloc: obj and anon != NULL");
#endif
- s = splimp();
-
- uvm_lock_fpageq(); /* lock free page queue */
+ s = uvm_lock_fpageq(); /* lock free page queue */
/*
* check to see if we need to generate some free pages waking
@@ -870,7 +868,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
*/
use_reserve = (flags & UVM_PGA_USERESERVE) ||
- (obj && obj->uo_refs == UVM_OBJ_KERN);
+ (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
(uvmexp.free <= uvmexp.reserve_pagedaemon &&
!(use_reserve && curproc == uvm.pagedaemon_proc)))
@@ -919,8 +917,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
TAILQ_REMOVE(freeq, pg, pageq);
uvmexp.free--;
- uvm_unlock_fpageq(); /* unlock free page queue */
- splx(s);
+ uvm_unlock_fpageq(s); /* unlock free page queue */
pg->offset = off;
pg->uobject = obj;
@@ -945,8 +942,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
return(pg);
fail:
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
return (NULL);
}
@@ -1137,8 +1133,7 @@ struct vm_page *pg;
* and put on free queue
*/
- s = splimp();
- uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
pg, pageq);
pg->pqflags = PQ_FREE;
@@ -1148,8 +1143,7 @@ struct vm_page *pg;
pg->uanon = (void *)0xdeadbeef;
#endif
uvmexp.free++;
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
}
#if defined(UVM_PAGE_TRKOWN)