author:    Artur Grabowski <art@cvs.openbsd.org>    2001-11-06 13:36:53 +0000
committer: Artur Grabowski <art@cvs.openbsd.org>    2001-11-06 13:36:53 +0000
commit:    dce408d18a14842aa7836a626a151e98665f04e7
tree:      a3ddb948fd94211a9af9df05d8de6c60a2081605    /sys/uvm
parent:    789e8c48d1472795faa6a959068b32a1749b315d
More sync to NetBSD.
- Use malloc/free instead of MALLOC/FREE for variable sized allocations.
- Move the memory inheritance code to sys/mman.h and rename from VM_* to MAP_*.
- Various cleanups and simplifications.
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_amap.c     39
-rw-r--r--  sys/uvm/uvm_anon.c     15
-rw-r--r--  sys/uvm/uvm_aobj.c     10
-rw-r--r--  sys/uvm/uvm_extern.h   19
-rw-r--r--  sys/uvm/uvm_fault.c    36
-rw-r--r--  sys/uvm/uvm_glue.c      4
-rw-r--r--  sys/uvm/uvm_km.c       19
-rw-r--r--  sys/uvm/uvm_map.c      16
-rw-r--r--  sys/uvm/uvm_map.h      11
-rw-r--r--  sys/uvm/uvm_page.c      8
-rw-r--r--  sys/uvm/uvm_pdaemon.c  10
-rw-r--r--  sys/uvm/uvm_unix.c     11
12 files changed, 106 insertions, 92 deletions
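
The first item in the commit message accounts for most of the diff that follows. The MALLOC()/FREE() macros were geared toward fixed-size requests (the macro form can only short-circuit the allocator when the size is a compile-time constant), so for the variable-sized arrays in the amap, aobj and page code a plain malloc(9)/free(9) call is simpler and no slower. A minimal sketch of the conversion, assuming the era's kernel malloc interface in which free() takes only the pointer and the malloc type:

#include <sys/param.h>
#include <sys/malloc.h>

/*
 * Sketch only (kernel context assumed): the same variable-sized
 * allocation written the old and the new way.  "slotneed" is not a
 * compile-time constant, so the macro form buys nothing here.
 */
static void
ppref_roundtrip(int slotneed)
{
	int *newppref;

	/*
	 * Old form -- the macro did the cast and the assignment:
	 *	MALLOC(newppref, int *, slotneed * sizeof(int),
	 *	    M_UVMAMAP, M_NOWAIT);
	 * New form -- an ordinary call, NULL on failure with M_NOWAIT:
	 */
	newppref = malloc(slotneed * sizeof(int), M_UVMAMAP, M_NOWAIT);
	if (newppref == NULL)
		return;			/* caller drops the feature */

	/* ... fill in the per-page reference counts ... */

	/* Old form was FREE(newppref, M_UVMAMAP); new form: */
	free(newppref, M_UVMAMAP);
}
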
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 0a9abcbde4f..800a0dec20c 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_amap.c,v 1.12 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_amap.c,v 1.24 2000/06/27 17:29:17 mrg Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.13 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.26 2000/08/03 00:47:02 thorpej Exp $ */
/*
*
@@ -258,12 +258,12 @@ amap_free(amap)
panic("amap_free");
#endif
- FREE(amap->am_slots, M_UVMAMAP);
- FREE(amap->am_bckptr, M_UVMAMAP);
- FREE(amap->am_anon, M_UVMAMAP);
+ free(amap->am_slots, M_UVMAMAP);
+ free(amap->am_bckptr, M_UVMAMAP);
+ free(amap->am_anon, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
- FREE(amap->am_ppref, M_UVMAMAP);
+ free(amap->am_ppref, M_UVMAMAP);
#endif
amap_unlock(amap); /* mainly for lock debugging */
pool_put(&uvm_amap_pool, amap);
@@ -367,19 +367,18 @@ amap_extend(entry, addsize)
#ifdef UVM_AMAP_PPREF
newppref = NULL;
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- MALLOC(newppref, int *, slotneed * sizeof(int), M_UVMAMAP,
- M_NOWAIT);
+ newppref = malloc(slotneed * sizeof(int), M_UVMAMAP, M_NOWAIT);
if (newppref == NULL) {
/* give up if malloc fails */
- FREE(amap->am_ppref, M_UVMAMAP);
- amap->am_ppref = PPREF_NONE;
+ free(amap->am_ppref, M_UVMAMAP);
+ amap->am_ppref = PPREF_NONE;
}
}
#endif
- MALLOC(newsl, int *, slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
- MALLOC(newbck, int *, slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
- MALLOC(newover, struct vm_anon **, slotneed * sizeof(struct vm_anon *),
- M_UVMAMAP, M_WAITOK);
+ newsl = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
+ newbck = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
+ newover = malloc(slotneed * sizeof(struct vm_anon *),
+ M_UVMAMAP, M_WAITOK);
amap_lock(amap); /* re-lock! */
#ifdef DIAGNOSTIC
@@ -433,12 +432,12 @@ amap_extend(entry, addsize)
amap_unlock(amap);
/* and free */
- FREE(oldsl, M_UVMAMAP);
- FREE(oldbck, M_UVMAMAP);
- FREE(oldover, M_UVMAMAP);
+ free(oldsl, M_UVMAMAP);
+ free(oldbck, M_UVMAMAP);
+ free(oldover, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
if (oldppref && oldppref != PPREF_NONE)
- FREE(oldppref, M_UVMAMAP);
+ free(oldppref, M_UVMAMAP);
#endif
UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
amap, slotneed, 0, 0);
@@ -839,7 +838,7 @@ ReStart:
} /* end of 'for' loop */
- return;
+ amap_unlock(amap);
}
/*
@@ -900,7 +899,7 @@ amap_pp_establish(amap)
struct vm_amap *amap;
{
- MALLOC(amap->am_ppref, int *, sizeof(int) * amap->am_maxslot,
+ amap->am_ppref = malloc(sizeof(int) * amap->am_maxslot,
M_UVMAMAP, M_NOWAIT);
/*
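
A side note on the amap_extend() hunk above: the per-page reference array (am_ppref) is optional bookkeeping, so it is allocated with M_NOWAIT and simply discarded (PPREF_NONE) when memory is tight, whereas the slot, backpointer and anon arrays the amap cannot function without use M_WAITOK, which may sleep; that is also why the amap lock is only re-taken afterwards ("re-lock!"). A condensed sketch of the policy, with illustrative names rather than the real fields:

#include <sys/param.h>
#include <sys/malloc.h>

/*
 * Sketch of the allocation policy: optional data may fail quietly,
 * mandatory data waits.  M_TEMP and the names below are illustrative.
 */
static void
grow_arrays(int **optionalp, int **requiredp, int nslots)
{
	int *optional, *required;

	/* Optional side structure: never sleeps, gives up on failure. */
	optional = malloc(nslots * sizeof(int), M_TEMP, M_NOWAIT);

	/*
	 * Mandatory data: M_WAITOK may sleep until the request can be
	 * satisfied, so the caller must not hold a simple_lock across it.
	 */
	required = malloc(nslots * sizeof(int), M_TEMP, M_WAITOK);

	*optionalp = optional;	/* may be NULL; caller drops the feature */
	*requiredp = required;
}
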
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index 0f74c4eff86..84ddfaf8235 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_anon.c,v 1.12 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_anon.c,v 1.7 2000/06/27 17:29:18 mrg Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.13 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.9 2000/08/06 00:21:57 thorpej Exp $ */
/*
*
@@ -478,10 +478,13 @@ anon_pagein(anon)
struct uvm_object *uobj;
int rv;
UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);
-
+
/* locked: anon */
rv = uvmfault_anonget(NULL, NULL, anon);
- /* unlocked: anon */
+ /*
+ * if rv == VM_PAGER_OK, anon is still locked, else anon
+ * is unlocked
+ */
switch (rv) {
case VM_PAGER_OK:
@@ -498,9 +501,11 @@ anon_pagein(anon)
return FALSE;
-#ifdef DIAGNOSTIC
default:
+#ifdef DIAGNOSTIC
panic("anon_pagein: uvmfault_anonget -> %d", rv);
+#else
+ return FALSE;
#endif
}
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 3b0a07dccb5..bae91b851e3 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_aobj.c,v 1.17 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_aobj.c,v 1.33 2000/06/27 17:29:19 mrg Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.18 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.34 2000/08/02 20:23:23 thorpej Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -434,7 +434,7 @@ uao_free(aobj)
pool_put(&uao_swhash_elt_pool, elt);
}
}
- FREE(aobj->u_swhash, M_UVMAOBJ);
+ free(aobj->u_swhash, M_UVMAOBJ);
} else {
int i;
@@ -454,7 +454,7 @@ uao_free(aobj)
simple_unlock(&uvm.swap_data_lock);
}
}
- FREE(aobj->u_swslots, M_UVMAOBJ);
+ free(aobj->u_swslots, M_UVMAOBJ);
}
/*
@@ -527,7 +527,7 @@ uao_create(size, flags)
if (aobj->u_swhash == NULL)
panic("uao_create: hashinit swhash failed");
} else {
- MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
+ aobj->u_swslots = malloc(pages * sizeof(int),
M_UVMAOBJ, mflags);
if (aobj->u_swslots == NULL)
panic("uao_create: malloc swslots failed");
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 0d7b9122a8e..249713f25f6 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.26 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.45 2000/06/27 16:16:43 mrg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.27 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.48 2000/08/12 22:41:55 thorpej Exp $ */
/*
*
@@ -174,17 +174,6 @@ typedef int vm_prot_t;
#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
-/*
- * Enumeration of valid values for vm_inherit_t.
- */
-
-#define VM_INHERIT_SHARE ((vm_inherit_t)0) /* share with child */
-#define VM_INHERIT_COPY ((vm_inherit_t)1) /* copy into child */
-#define VM_INHERIT_NONE ((vm_inherit_t)2) /* absent from child */
-#define VM_INHERIT_DONATE_COPY ((vm_inherit_t)3) /* copy and delete */
-
-#define VM_INHERIT_DEFAULT VM_INHERIT_COPY
-
/* advice: matches MADV_* from sys/mman.h */
#define UVM_ADV_NORMAL 0x0 /* 'normal' */
#define UVM_ADV_RANDOM 0x1 /* 'random' */
@@ -496,6 +485,8 @@ struct vm_map *uvm_km_suballoc __P((vm_map_t, vaddr_t *,
boolean_t, vm_map_t));
vaddr_t uvm_km_valloc __P((vm_map_t, vsize_t));
vaddr_t uvm_km_valloc_wait __P((vm_map_t, vsize_t));
+vaddr_t uvm_km_valloc_prefer_wait __P((vm_map_t, vsize_t,
+ voff_t));
vaddr_t uvm_km_alloc_poolpage1 __P((vm_map_t,
struct uvm_object *, boolean_t));
void uvm_km_free_poolpage1 __P((vm_map_t, vaddr_t));
@@ -552,7 +543,7 @@ void uvm_page_physload __P((paddr_t, paddr_t,
void uvm_setpagesize __P((void));
/* uvm_pdaemon.c */
-void uvm_pageout __P((void));
+void uvm_pageout __P((void *));
/* uvm_pglist.c */
int uvm_pglistalloc __P((psize_t, paddr_t,
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index 1f61a2e45a5..98d3be5024a 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_fault.c,v 1.21 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_fault.c,v 1.50 2000/06/27 17:29:21 mrg Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.22 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
*
@@ -276,7 +276,7 @@ uvmfault_amapcopy(ufi)
* page in that anon.
*
* => maps, amap, and anon locked by caller.
- * => if we fail (result != VM_PAGER_OK) we unlock everything except anon.
+ * => if we fail (result != VM_PAGER_OK) we unlock everything.
* => if we are successful, we return with everything still locked.
* => we don't move the page on the queues [gets moved later]
* => if we allocate a new page [we_own], it gets put on the queues.
@@ -1062,7 +1062,7 @@ ReFault:
/*
* let uvmfault_anonget do the dirty work.
- * if it fails (!OK) it will unlock all but the anon for us.
+ * if it fails (!OK) it will unlock everything for us.
* if it succeeds, locks are still valid and locked.
* also, if it is OK, then the anon's page is on the queues.
* if the page is on loan from a uvm_object, then anonget will
@@ -1070,20 +1070,28 @@ ReFault:
*/
result = uvmfault_anonget(&ufi, amap, anon);
- if (result != VM_PAGER_OK) {
- simple_unlock(&anon->an_lock);
- }
+ switch (result) {
+ case VM_PAGER_OK:
+ break;
- if (result == VM_PAGER_REFAULT)
+ case VM_PAGER_REFAULT:
goto ReFault;
- if (result == VM_PAGER_AGAIN) {
- tsleep((caddr_t)&lbolt, PVM, "fltagain1", 0);
- goto ReFault;
- }
+ case VM_PAGER_ERROR:
+ /*
+ * An error occurred while trying to bring in the
+ * page -- this is the only error we return right
+ * now.
+ */
+ return (KERN_PROTECTION_FAILURE); /* XXX */
- if (result != VM_PAGER_OK)
- return (KERN_PROTECTION_FAILURE); /* XXX??? */
+ default:
+#ifdef DIAGNOSTIC
+ panic("uvm_fault: uvmfault_anonget -> %d", result);
+#else
+ return (KERN_PROTECTION_FAILURE);
+#endif
+ }
/*
* uobj is non null if the page is on loan from an object (i.e. uobj)
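
The uvm_fault.c hunks above also record a change in uvmfault_anonget()'s locking contract -- on failure it now unlocks everything, so the caller no longer unlocks the anon itself -- and the chain of ifs on its return value becomes an explicit switch, with unexpected codes caught by a DIAGNOSTIC panic instead of being folded into a generic failure. A stripped-down sketch of that dispatch pattern (the enum and function names are stand-ins, not the real pager codes):

#include <sys/param.h>
#include <sys/systm.h>		/* panic(9) */

/* Illustrative stand-ins for the VM_PAGER_* result codes. */
enum pg_result { PGR_OK, PGR_REFAULT, PGR_ERROR, PGR_AGAIN };

static int
dispatch_result(enum pg_result result)
{
	switch (result) {
	case PGR_OK:
		break;			/* success: locks still valid */
	case PGR_REFAULT:
		return (1);		/* caller restarts the fault */
	case PGR_ERROR:
		return (-1);		/* the only error propagated */
	default:
#ifdef DIAGNOSTIC
		panic("dispatch_result: unexpected code %d", result);
#else
		return (-1);		/* fail safely without DIAGNOSTIC */
#endif
	}
	return (0);
}
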
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 71c28281e64..ba10fa92b50 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_glue.c,v 1.22 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_glue.c,v 1.38 2000/06/27 17:29:22 mrg Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.23 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.40 2000/08/21 02:29:32 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 729fcced29b..3ff1ea1140a 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_km.c,v 1.17 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_km.c,v 1.37 2000/06/27 17:29:24 mrg Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.18 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_km.c,v 1.38 2000/07/24 20:10:53 jeffs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -805,12 +805,13 @@ uvm_km_valloc(map, size)
*/
vaddr_t
-uvm_km_valloc_wait(map, size)
+uvm_km_valloc_prefer_wait(map, size, prefer)
vm_map_t map;
vsize_t size;
+ voff_t prefer;
{
vaddr_t kva;
- UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
@@ -832,7 +833,7 @@ uvm_km_valloc_wait(map, size)
*/
if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
- UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
+ prefer, UVM_MAPFLAG(UVM_PROT_ALL,
UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
== KERN_SUCCESS)) {
UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
@@ -849,6 +850,14 @@ uvm_km_valloc_wait(map, size)
/*NOTREACHED*/
}
+vaddr_t
+uvm_km_valloc_wait(map, size)
+ vm_map_t map;
+ vsize_t size;
+{
+ return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
+}
+
/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
(!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
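
The uvm_km.c change is a generalize-and-wrap: the body of uvm_km_valloc_wait() moves into the new uvm_km_valloc_prefer_wait(), which feeds a caller-supplied offset hint to uvm_map() instead of the fixed UVM_UNKNOWN_OFFSET, and the old name survives as a one-line wrapper so existing callers need no changes. A purely hypothetical caller of the new interface might look like the sketch below; the real consumers are not part of this diff, and the hint is simply whatever offset the caller wants uvm_map() to take into account when choosing a virtual address (e.g. on cache-colouring pmaps):

#include <sys/param.h>
#include <uvm/uvm_extern.h>	/* header placement assumed */

/*
 * Hypothetical wrapper around the new interface: pass a real offset
 * hint when one is known, otherwise fall back to the old behaviour.
 */
static vaddr_t
kva_alloc_hinted(vm_map_t map, vsize_t size, voff_t off, int have_hint)
{
	return (uvm_km_valloc_prefer_wait(map, size,
	    have_hint ? off : UVM_UNKNOWN_OFFSET));
}
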
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 6144ef4a222..b754d9ccde4 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.c,v 1.26 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_map.c,v 1.79 2000/06/27 17:29:26 mrg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.27 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_map.c,v 1.80 2000/08/01 00:53:11 wiz Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1841,9 +1841,9 @@ uvm_map_inherit(map, start, end, new_inheritance)
map, start, end, new_inheritance);
switch (new_inheritance) {
- case VM_INHERIT_NONE:
- case VM_INHERIT_COPY:
- case VM_INHERIT_SHARE:
+ case MAP_INHERIT_NONE:
+ case MAP_INHERIT_COPY:
+ case MAP_INHERIT_SHARE:
break;
default:
UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
@@ -2930,13 +2930,13 @@ uvmspace_fork(vm1)
switch (old_entry->inheritance) {
- case VM_INHERIT_NONE:
+ case MAP_INHERIT_NONE:
/*
* drop the mapping
*/
break;
- case VM_INHERIT_SHARE:
+ case MAP_INHERIT_SHARE:
/*
* share the mapping: this means we want the old and
* new entries to share amaps and backing objects.
@@ -2993,7 +2993,7 @@ uvmspace_fork(vm1)
break;
- case VM_INHERIT_COPY:
+ case MAP_INHERIT_COPY:
/*
* copy-on-write the mapping (using mmap's
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 40ad444cac8..951c282cfd7 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_map.h,v 1.11 2001/11/06 00:27:01 art Exp $ */
-/* $NetBSD: uvm_map.h,v 1.19 2000/06/26 17:18:40 mrg Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.12 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_map.h,v 1.21 2000/08/16 16:32:06 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -240,7 +240,6 @@ struct vm_map {
#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
/* XXX: number of kernel maps and entries to statically allocate */
-#define MAX_KMAP 10
#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
@@ -425,11 +424,9 @@ vm_map_lock(map)
try_again:
simple_lock(&map->flags_lock);
- if (map->flags & VM_MAP_BUSY) {
+ while (map->flags & VM_MAP_BUSY) {
map->flags |= VM_MAP_WANTLOCK;
- simple_unlock(&map->flags_lock);
- (void) tsleep(&map->flags, PVM, "vmmapbsy", 0);
- goto try_again;
+ ltsleep(&map->flags, PVM, "vmmapbsy", 0, &map->flags_lock);
}
error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
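
The vm_map_lock() hunk above is more than a style change: ltsleep(9) releases the given interlock and puts the thread to sleep as one atomic step, then reacquires the interlock before returning, so a wakeup can no longer slip in between simple_unlock() and the sleep as it could with the old unlock/tsleep/goto sequence, and the busy flag is always rechecked under the interlock. A reduced sketch of the pattern, with a hypothetical busy flag and interlock (header placement is assumed):

#include <sys/param.h>
#include <sys/proc.h>		/* ltsleep(9) -- placement assumed */
#include <sys/lock.h>		/* struct simplelock */

/* Illustrative only: a busy flag guarded by a spin interlock. */
struct busy_obj {
	struct simplelock bo_slock;
	int		  bo_flags;
#define	BO_BUSY		0x01
};

static void
wait_until_idle(struct busy_obj *bo)
{
	simple_lock(&bo->bo_slock);
	/*
	 * ltsleep() drops bo_slock and sleeps atomically, then retakes
	 * it before returning, so the flag is re-tested under the lock
	 * on every wakeup.
	 */
	while (bo->bo_flags & BO_BUSY)
		ltsleep(bo, PVM, "busywt", 0, &bo->bo_slock);
	simple_unlock(&bo->bo_slock);
}
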
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 56ff88e56fa..1c2e7157baf 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_page.c,v 1.26 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_page.c,v 1.39 2000/06/27 17:29:31 mrg Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.27 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_page.c,v 1.40 2000/08/02 20:25:11 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -645,8 +645,8 @@ uvm_page_physload(start, end, avail_start, avail_end, free_list)
/* XXXCDC: need some sort of lockout for this case */
paddr_t paddr;
npages = end - start; /* # of pages */
- MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
- M_VMPAGE, M_NOWAIT);
+ pgs = malloc(sizeof(struct vm_page) * npages,
+ M_VMPAGE, M_NOWAIT);
if (pgs == NULL) {
printf("uvm_page_physload: can not malloc vm_page "
"structs for segment\n");
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 612d8b8d04d..42f141b17c0 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.15 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_pdaemon.c,v 1.21 2000/06/27 17:29:33 mrg Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.16 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -170,6 +170,10 @@ uvmpd_tune()
uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
+ /* Make sure there's always a user page free. */
+ if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
+ uvmexp.freemin = uvmexp.reserve_kernel + 1;
+
uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
if (uvmexp.freetarg <= uvmexp.freemin)
uvmexp.freetarg = uvmexp.freemin + 1;
@@ -186,7 +190,7 @@ uvmpd_tune()
*/
void
-uvm_pageout()
+uvm_pageout(void *arg)
{
int npages = 0;
int s;
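
The uvm_pageout() signature change (also visible in the uvm_extern.h hunk earlier) gives the pagedaemon the usual kernel-thread entry-point shape, void (*)(void *); the argument itself goes unused. Presumably this is so the function can be handed straight to the thread-creation code. A minimal, self-contained sketch of that convention, with a hypothetical start_kthread() standing in for the real spawning interface:

typedef void (*kthread_fn)(void *);

/*
 * Hypothetical stand-in for a kernel-thread spawner; a real one
 * would run fn(arg) in a newly created thread.
 */
static void
start_kthread(kthread_fn fn, void *arg)
{
	fn(arg);
}

static void
pagedaemon_entry(void *arg)
{
	(void)arg;		/* the pagedaemon ignores its argument */
	/* ... the page-scanning loop would live here ... */
}

static void
spawn_pagedaemon(void)
{
	/* Possible only because the entry point now takes (void *). */
	start_kthread(pagedaemon_entry, NULL);
}
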
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index 69c8dc5210b..1cccce72d29 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_unix.c,v 1.14 2001/11/06 01:35:04 art Exp $ */
-/* $NetBSD: uvm_unix.c,v 1.13 2000/06/27 17:29:36 mrg Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.15 2001/11/06 13:36:52 art Exp $ */
+/* $NetBSD: uvm_unix.c,v 1.17 2000/09/07 05:01:43 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -77,8 +77,8 @@ sys_obreak(p, v, retval)
} */ *uap = v;
struct vmspace *vm = p->p_vmspace;
vaddr_t new, old;
+ ssize_t diff;
int rv;
- long diff;
old = (vaddr_t)vm->vm_daddr;
new = round_page((vaddr_t)SCARG(uap, nsize));
@@ -197,12 +197,13 @@ uvm_coredump(p, vp, cred, chdr)
struct vmspace *vm = p->p_vmspace;
vm_map_t map = &vm->vm_map;
vm_map_entry_t entry;
- vaddr_t start, end;
+ vaddr_t start, end, maxstack;
struct coreseg cseg;
off_t offset;
int flag, error = 0;
offset = chdr->c_hdrsize + chdr->c_seghdrsize + chdr->c_cpusize;
+ maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {
@@ -253,7 +254,7 @@ uvm_coredump(p, vp, cred, chdr)
offset += chdr->c_seghdrsize;
error = vn_rdwr(UIO_WRITE, vp,
- (caddr_t)cseg.c_addr, (int)cseg.c_size,
+ (caddr_t)(u_long)cseg.c_addr, (int)cseg.c_size,
offset, UIO_USERSPACE,
IO_NODELOCKED|IO_UNIT, cred, NULL, p);
if (error)