Diffstat (limited to 'sys/uvm/uvm_km.c')
-rw-r--r--  sys/uvm/uvm_km.c  56
1 file changed, 29 insertions(+), 27 deletions(-)
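
Every hunk below applies the same pattern: the DIAGNOSTIC sanity checks and allocation-failure branches are wrapped in __predict_false(), and the one expected-success uvm_map() call in uvm_km_valloc_wait() in __predict_true(), so the compiler can favor the common path. A sketch of how these hint macros conventionally expand follows the diff.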
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 2acbc0507f3..ac76b56f1a2 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.13 2001/07/26 19:37:13 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.34 2000/01/11 06:57:50 chs Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.14 2001/08/11 10:57:22 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.35 2000/05/08 23:10:20 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -335,7 +335,7 @@ uvm_km_pgremove(uobj, start, end)
simple_lock(&uobj->vmobjlock); /* lock object */
#ifdef DIAGNOSTIC
- if (uobj->pgops != &aobj_pager)
+ if (__predict_false(uobj->pgops != &aobj_pager))
panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif
@@ -435,7 +435,7 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
simple_lock(&uobj->vmobjlock); /* lock object */
#ifdef DIAGNOSTIC
- if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
+ if (__predict_false(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0))
panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif
@@ -456,11 +456,11 @@ uvm_km_pgremove_intrsafe(uobj, start, end)
UVMHIST_LOG(maphist," page 0x%x, busy=%d", pp,
pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
- if (pp->flags & PG_BUSY)
+ if (__predict_false(pp->flags & PG_BUSY))
panic("uvm_km_pgremove_intrsafe: busy page");
- if (pp->pqflags & PQ_ACTIVE)
+ if (__predict_false(pp->pqflags & PQ_ACTIVE))
panic("uvm_km_pgremove_intrsafe: active page");
- if (pp->pqflags & PQ_INACTIVE)
+ if (__predict_false(pp->pqflags & PQ_INACTIVE))
panic("uvm_km_pgremove_intrsafe: inactive page");
#endif
@@ -482,11 +482,11 @@ loop_by_list:
pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
- if (pp->flags & PG_BUSY)
+ if (__predict_false(pp->flags & PG_BUSY))
panic("uvm_km_pgremove_intrsafe: busy page");
- if (pp->pqflags & PQ_ACTIVE)
+ if (__predict_false(pp->pqflags & PQ_ACTIVE))
panic("uvm_km_pgremove_intrsafe: active page");
- if (pp->pqflags & PQ_INACTIVE)
+ if (__predict_false(pp->pqflags & PQ_INACTIVE))
panic("uvm_km_pgremove_intrsafe: inactive page");
#endif
@@ -527,7 +527,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
map, obj, size, flags);
#ifdef DIAGNOSTIC
/* sanity check */
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_kmemalloc: invalid map");
#endif
@@ -542,10 +542,10 @@ uvm_km_kmemalloc(map, obj, size, flags)
* allocate some virtual space
*/
- if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
+ if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
- != KERN_SUCCESS) {
+ != KERN_SUCCESS)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
}
@@ -584,7 +584,7 @@ uvm_km_kmemalloc(map, obj, size, flags)
* out of memory?
*/
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
if (flags & UVM_KMF_NOWAIT) {
/* free everything! */
uvm_unmap(map, kva, kva + size);
@@ -687,9 +687,10 @@ uvm_km_alloc1(map, size, zeroit)
* allocate some virtual space
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
- UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
+ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
+ UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ 0)) != KERN_SUCCESS)) {
UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
return(0);
}
@@ -730,7 +731,7 @@ uvm_km_alloc1(map, size, zeroit)
UVM_PAGE_OWN(pg, NULL);
}
simple_unlock(&uvm.kernel_object->vmobjlock);
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
uvm_wait("km_alloc1w"); /* wait for memory */
continue;
}
@@ -776,7 +777,7 @@ uvm_km_valloc(map, size)
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_valloc");
#endif
@@ -787,9 +788,10 @@ uvm_km_valloc(map, size)
* allocate some virtual space. will be demand filled by kernel_object.
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
- UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
+ if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
+ UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ 0)) != KERN_SUCCESS)) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
return(0);
}
@@ -817,7 +819,7 @@ uvm_km_valloc_wait(map, size)
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
#ifdef DIAGNOSTIC
- if (vm_map_pmap(map) != pmap_kernel())
+ if (__predict_false(vm_map_pmap(map) != pmap_kernel()))
panic("uvm_km_valloc_wait");
#endif
@@ -833,10 +835,10 @@ uvm_km_valloc_wait(map, size)
* by kernel_object.
*/
- if (uvm_map(map, &kva, size, uvm.kernel_object,
+ if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
- == KERN_SUCCESS) {
+ == KERN_SUCCESS)) {
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
return(kva);
}
@@ -876,7 +878,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
again:
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
- if (pg == NULL) {
+ if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
goto again;
@@ -884,7 +886,7 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
return (0);
}
va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
- if (va == 0)
+ if (__predict_false(va == 0))
uvm_pagefree(pg);
return (va);
#else
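
The hunks above lean entirely on the __predict_false()/__predict_true() hint macros. Below is a minimal standalone sketch, assuming the usual BSD-style definitions from <sys/cdefs.h> (GCC's __builtin_expect() with a plain fallback on other compilers); the PG_BUSY value and the pgcheck() helper here are hypothetical, invented only to mirror the diagnostic pattern in the diff:

#include <stdio.h>

/*
 * Sketch of the branch-prediction hints, assuming the usual BSD-style
 * definitions: on GCC they expand to __builtin_expect(), telling the
 * compiler which way the branch almost always goes; elsewhere they
 * degrade to the bare expression, so semantics never change.
 */
#ifdef __GNUC__
#define __predict_true(exp)	__builtin_expect(((exp) != 0), 1)
#define __predict_false(exp)	__builtin_expect(((exp) != 0), 0)
#else
#define __predict_true(exp)	((exp) != 0)
#define __predict_false(exp)	((exp) != 0)
#endif

#define PG_BUSY	0x0001		/* hypothetical flag bit, for illustration */

static void
pgcheck(int flags)
{
	/*
	 * Mirrors the DIAGNOSTIC checks in the diff: the failure branch
	 * is marked unlikely, so the compiler lays out the fall-through
	 * (common) path first.
	 */
	if (__predict_false(flags & PG_BUSY))
		fprintf(stderr, "pgcheck: busy page\n");
}

int
main(void)
{
	pgcheck(0);		/* common, well-predicted path */
	pgcheck(PG_BUSY);	/* rare diagnostic path */
	return (0);
}

Because the hint only influences static branch layout and prediction, never semantics, the diff can wrap the existing conditions verbatim without changing behavior.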