Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pglist.h     |    8
-rw-r--r--  sys/vm/pmap.h       |   80
-rw-r--r--  sys/vm/swap_pager.c |    4
-rw-r--r--  sys/vm/vm.h         |    6
-rw-r--r--  sys/vm/vm_extern.h  |   31
-rw-r--r--  sys/vm/vm_fault.c   |   18
-rw-r--r--  sys/vm/vm_glue.c    |   79
-rw-r--r--  sys/vm/vm_init.c    |   18
-rw-r--r--  sys/vm/vm_kern.c    |    8
-rw-r--r--  sys/vm/vm_kern.h    |   13
-rw-r--r--  sys/vm/vm_map.c     |   39
-rw-r--r--  sys/vm/vm_map.h     |   41
-rw-r--r--  sys/vm/vm_meter.c   |    4
-rw-r--r--  sys/vm/vm_object.c  |   10
-rw-r--r--  sys/vm/vm_object.h  |    8
-rw-r--r--  sys/vm/vm_page.c    | 1279
-rw-r--r--  sys/vm/vm_page.h    |  464
-rw-r--r--  sys/vm/vm_pageout.c |   47
-rw-r--r--  sys/vm/vm_pageout.h |   16
-rw-r--r--  sys/vm/vm_pager.h   |   31
-rw-r--r--  sys/vm/vm_param.h   |   28
21 files changed, 1648 insertions(+), 584 deletions(-)
diff --git a/sys/vm/pglist.h b/sys/vm/pglist.h
new file mode 100644
index 00000000000..2e17c434f92
--- /dev/null
+++ b/sys/vm/pglist.h
@@ -0,0 +1,8 @@
+/* $OpenBSD: pglist.h,v 1.1 1998/03/01 00:37:58 niklas Exp $ */
+
+#ifndef _PGLIST_H_
+#define _PGLIST_H_
+
+TAILQ_HEAD(pglist, vm_page);
+
+#endif
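
The new pglist.h gives the page-list type a home of its own, so headers that only need the list head can declare a struct pglist without pulling in all of vm_page.h. A minimal sketch of walking such a list with the <sys/queue.h> accessors used elsewhere in this diff; the pageq linkage field on struct vm_page is assumed here and is not part of this change:

/*
 * Illustrative only: count the pages on a pglist.  Uses the raw
 * tqh_first/tqe_next accessors, matching the style of this tree.
 * The pageq member of struct vm_page is an assumption of this sketch.
 */
int
count_pages_example(struct pglist *pglp)
{
	struct vm_page *pg;
	int n = 0;

	for (pg = pglp->tqh_first; pg != NULL; pg = pg->pageq.tqe_next)
		n++;
	return (n);
}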
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 07053e3047a..8edd35b77a8 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
+/* $OpenBSD: pmap.h,v 1.5 1998/03/01 00:37:58 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */
/*
@@ -87,6 +87,26 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
+/*
+ * PMAP_PGARG hack
+ *
+ * operations that take place on managed pages used to take PAs.
+ * this caused us to translate the PA back to a page (or pv_head).
+ * PMAP_NEW avoids this by passing the vm_page in (pv_head should be
+ * pointed to by vm_page (or be a part of it)).
+ *
+ * applies to: pmap_page_protect, pmap_is_referenced, pmap_is_modified,
+ * pmap_clear_reference, pmap_clear_modify.
+ *
+ * the latter two functions are boolean_t in PMAP_NEW. they return
+ * TRUE if something was cleared.
+ */
+#if defined(PMAP_NEW)
+#define PMAP_PGARG(PG) (PG)
+#else
+#define PMAP_PGARG(PG) (VM_PAGE_TO_PHYS(PG))
+#endif
+
#ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */
#ifdef _KERNEL
__BEGIN_DECLS
@@ -96,33 +116,74 @@ void *pmap_bootstrap_alloc __P((int));
void pmap_bootstrap( /* machine dependent */ );
#endif
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
+
+#if defined(PMAP_NEW)
+#if !defined(pmap_clear_modify)
+boolean_t pmap_clear_modify __P((struct vm_page *));
+#endif
+#if !defined(pmap_clear_reference)
+boolean_t pmap_clear_reference __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
+#endif /* PMAP_NEW */
+
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t,
pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
+#if defined(PMAP_NEW)
+struct pmap *pmap_create __P((void));
+#else
pmap_t pmap_create __P((vm_size_t));
+#endif
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
-#ifndef pmap_page_index
-int pmap_page_index __P((vm_offset_t));
+#if defined(PMAP_NEW) && defined(PMAP_GROWKERNEL)
+void pmap_growkernel __P((vm_offset_t));
#endif
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((void));
#endif
+
+#if defined(PMAP_NEW)
+void pmap_kenter_pa __P((vm_offset_t, vm_offset_t, vm_prot_t));
+void pmap_kenter_pgs __P((vm_offset_t, struct vm_page **, int));
+void pmap_kremove __P((vm_offset_t, vm_size_t));
+#if !defined(pmap_is_modified)
+boolean_t pmap_is_modified __P((struct vm_page *));
+#endif
+#if !defined(pmap_is_referenced)
+boolean_t pmap_is_referenced __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+#endif /* PMAP_NEW */
+
+#if !defined(MACHINE_NEW_NONCONTIG)
+#ifndef pmap_page_index
+int pmap_page_index __P((vm_offset_t));
+#endif
+#endif /* ! MACHINE_NEW_NONCONTIG */
+
+#if defined(PMAP_NEW)
+void pmap_page_protect __P((struct vm_page *, vm_prot_t));
+#else
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
+#endif
+
void pmap_pageable __P((pmap_t,
vm_offset_t, vm_offset_t, boolean_t));
+#if !defined(pmap_phys_address)
vm_offset_t pmap_phys_address __P((int));
+#endif
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t));
@@ -135,10 +196,15 @@ void pmap_zero_page __P((vm_offset_t));
#ifdef MACHINE_NONCONTIG
u_int pmap_free_pages __P((void));
boolean_t pmap_next_page __P((vm_offset_t *));
-void pmap_startup __P((vm_offset_t *, vm_offset_t *));
-vm_offset_t pmap_steal_memory __P((vm_size_t));
+#endif
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+#if defined(PMAP_STEAL_MEMORY)
+vm_offset_t pmap_steal_memory __P((vm_size_t, vm_offset_t *,
+ vm_offset_t *));
+#else
void pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#endif
+#endif
__END_DECLS
#endif /* kernel*/
#endif /* PMAP_EXCLUDE_DECLS */
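
The PMAP_PGARG() hack above lets machine-independent code call the page-attribute pmap hooks the same way whether or not a port defines PMAP_NEW: with PMAP_NEW the vm_page pointer is passed straight through, otherwise the macro expands to VM_PAGE_TO_PHYS(). A hedged sketch of a caller (the function name is illustrative):

/*
 * Sketch only: one MI call site serving both pmap interfaces.
 */
void
deactivate_page_example(vm_page_t pg)
{
	/* revoke all mappings of the page */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

	/* under PMAP_NEW these return TRUE if a bit was actually cleared */
	(void)pmap_clear_reference(PMAP_PGARG(pg));
	(void)pmap_clear_modify(PMAP_PGARG(pg));
}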
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 65ad9d9a921..8ffef6d549e 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: swap_pager.c,v 1.12 1997/12/02 16:55:51 csapuntz Exp $ */
+/* $OpenBSD: swap_pager.c,v 1.13 1998/03/01 00:38:00 niklas Exp $ */
/* $NetBSD: swap_pager.c,v 1.27 1996/03/16 23:15:20 christos Exp $ */
/*
@@ -127,6 +127,8 @@ struct swpclean swap_pager_inuse; /* list of pending page cleans */
struct swpclean swap_pager_free; /* list of free pager clean structs */
struct pagerlst swap_pager_list; /* list of "named" anon regions */
+extern struct buf bswlist; /* import from vm_swap.c */
+
static void swap_pager_init __P((void));
static vm_pager_t swap_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 1f63279b69d..07c188f3043 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm.h,v 1.5 1997/11/06 05:59:31 csapuntz Exp $ */
+/* $OpenBSD: vm.h,v 1.6 1998/03/01 00:38:01 niklas Exp $ */
/* $NetBSD: vm.h,v 1.13 1994/06/29 06:47:52 cgd Exp $ */
/*
@@ -39,6 +39,8 @@
#ifndef VM_H
#define VM_H
+/* XXX remove this later when the simple locks are not here! */
+
typedef int vm_inherit_t; /* XXX: inheritance codes */
union vm_map_object;
@@ -62,10 +64,12 @@ typedef struct pager_struct *vm_pager_t;
/*
* MACH VM locking type mappings to kernel types
*/
+#if !defined(UVM)
typedef struct simplelock simple_lock_data_t;
typedef struct simplelock *simple_lock_t;
typedef struct lock lock_data_t;
typedef struct lock *lock_t;
+#endif
#include <sys/vmmeter.h>
#include <sys/queue.h>
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 86462654a49..d6265da7915 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_extern.h,v 1.14 1997/11/06 05:59:31 csapuntz Exp $ */
+/* $OpenBSD: vm_extern.h,v 1.15 1998/03/01 00:38:02 niklas Exp $ */
/* $NetBSD: vm_extern.h,v 1.20 1996/04/23 12:25:23 christos Exp $ */
/*-
@@ -45,7 +45,7 @@ struct mount;
struct vnode;
struct core;
-#ifdef KGDB
+#if defined(KGDB) && !defined(UVM)
void chgkprot __P((caddr_t, int, int));
#endif
@@ -65,9 +65,13 @@ int sstk __P((struct proc *, void *, int *));
#endif
void assert_wait __P((void *, boolean_t));
+#if !defined(UVM)
int grow __P((struct proc *, vm_offset_t));
+#endif
void iprintf __P((int (*)(const char *, ...), const char *, ...));
+#if !defined(UVM)
int kernacc __P((caddr_t, int, int));
+#endif
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
@@ -80,26 +84,33 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
+#if !defined(UVM)
void munmapfd __P((struct proc *, int));
+#endif
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
+#if !defined(UVM)
#ifdef __GNUC__
void scheduler __P((void)) __attribute ((noreturn));
#else
void scheduler __P((void));
#endif
+#endif
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
+#if !defined(UVM)
void swapout __P((struct proc *));
void swapout_threads __P((void));
+#endif
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
-void thread_block __P((void));
+void thread_block __P((char *));
void thread_sleep_msg __P((void *, simple_lock_t,
- boolean_t, char *));
+ boolean_t, char *, int));
+
/* backwards compatibility */
#define thread_sleep(event, lock, ruptible) \
@@ -110,6 +121,7 @@ void thread_sleep_msg __P((void *, simple_lock_t,
* was solely a wrapper around wakeup.
*/
#define thread_wakeup wakeup
+#if !defined(UVM)
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
boolean_t));
@@ -118,35 +130,46 @@ int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
struct core *));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
+#endif
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
+#if !defined(UVM)
#ifdef __FORK_BRAINDAMAGE
int vm_fork __P((struct proc *, struct proc *));
#else
void vm_fork __P((struct proc *, struct proc *));
#endif
+#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
+#if !defined(UVM)
void vm_init_limits __P((struct proc *));
+#endif
void vm_mem_init __P((void));
+#if !defined(UVM)
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
+#endif
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
+#if !defined(UVM)
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
+#endif
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
+#if !defined(UVM)
int vslock __P((caddr_t, u_int));
int vsunlock __P((caddr_t, u_int));
+#endif
/* Machine dependent portion */
void vmapbuf __P((struct buf *, vm_size_t));
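
Two prototypes change shape here: thread_block() now takes a wait-message string, and thread_sleep_msg() gains a trailing int, passed 0 at the call sites updated below (presumably the tsleep() timeout). A sketch of the new calling convention, following the pattern of the vm_fault.c and vm_object.h hunks in this commit; the names are illustrative:

/*
 * Sketch only: the updated thread wait calls.  "exwait" is an
 * illustrative wait message; 0 means "no timeout" as at the call
 * sites in this diff.
 */
void
wait_for_event_example(void *event, simple_lock_t slock)
{
	assert_wait(event, TRUE);
	thread_block("exwait");

	/* or: atomically release a simple lock and sleep on the event */
	thread_sleep_msg(event, slock, TRUE, "exwait", 0);
}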
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 0e2a13c6291..cbf765aab9e 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_fault.c,v 1.13 1997/11/06 05:59:32 csapuntz Exp $ */
-/* $NetBSD: vm_fault.c,v 1.20 1997/02/18 13:39:33 mrg Exp $ */
+/* $OpenBSD: vm_fault.c,v 1.14 1998/03/01 00:38:04 niklas Exp $ */
+/* $NetBSD: vm_fault.c,v 1.21 1998/01/31 04:02:39 ross Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -245,7 +245,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
PAGE_ASSERT_WAIT(m, !change_wiring);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mFltbsy");
wait_result = current_thread()->wait_result;
vm_object_deallocate(first_object);
if (wait_result != THREAD_AWAKENED)
@@ -255,7 +255,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
PAGE_ASSERT_WAIT(m, !change_wiring);
UNLOCK_THINGS;
cnt.v_intrans++;
- thread_block();
+ thread_block("mFltbsy2");
vm_object_deallocate(first_object);
goto RetryFault;
#endif
@@ -300,7 +300,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
- VM_WAIT;
+ vm_wait("fVfault1");
goto RetryFault;
}
}
@@ -574,7 +574,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mCpybsy");
wait_result =
current_thread()->wait_result;
vm_object_deallocate(first_object);
@@ -592,7 +592,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mCpybsy2");
vm_object_deallocate(first_object);
goto RetryFault;
#endif
@@ -627,7 +627,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_AND_DEALLOCATE;
- VM_WAIT;
+ vm_wait("fCopy");
goto RetryFault;
}
@@ -986,7 +986,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
dst_m = vm_page_alloc(dst_object, dst_offset);
if (dst_m == NULL) {
vm_object_unlock(dst_object);
- VM_WAIT;
+ vm_wait("fVm_copy");
vm_object_lock(dst_object);
}
} while (dst_m == NULL);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2542fa235fe..88c3322a6d7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_glue.c,v 1.28 1998/02/23 20:15:54 niklas Exp $ */
+/* $OpenBSD: vm_glue.c,v 1.29 1998/03/01 00:38:05 niklas Exp $ */
/* $NetBSD: vm_glue.c,v 1.55.4.1 1996/06/13 17:25:45 cgd Exp $ */
/*
@@ -441,7 +441,7 @@ loop:
p->p_pid, p->p_comm, cnt.v_free_count);
#endif
(void)splhigh();
- VM_WAIT;
+ vm_wait("fLowmem");
(void)spl0();
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
@@ -557,78 +557,3 @@ swapout(p)
p->p_swtime = 0;
++cnt.v_swpout;
}
-
-/*
- * The rest of these routines fake thread handling
- */
-
-void
-assert_wait(event, ruptible)
- void *event;
- boolean_t ruptible;
-{
-#ifdef lint
- ruptible++;
-#endif
- curproc->p_thread = event;
-}
-
-void
-thread_block()
-{
- int s = splhigh();
-
- if (curproc->p_thread)
- tsleep(curproc->p_thread, PVM, "thrd_block", 0);
- splx(s);
-}
-
-void
-thread_sleep_msg(event, lock, ruptible, msg)
- void *event;
- simple_lock_t lock;
- boolean_t ruptible;
- char *msg;
-{
- int s = splhigh();
-
-#ifdef lint
- ruptible++;
-#endif
- curproc->p_thread = event;
- simple_unlock(lock);
- if (curproc->p_thread)
- tsleep(event, PVM, msg, 0);
- splx(s);
-}
-
-/*
- * DEBUG stuff
- */
-
-int indent = 0;
-
-#include <machine/stdarg.h> /* see subr_prf.c */
-
-/*ARGSUSED2*/
-void
-#if __STDC__
-iprintf(int (*pr)(const char *, ...), const char *fmt, ...)
-#else
-iprintf(pr, fmt /* , va_alist */)
- void (*pr)();
- char *fmt;
- /* va_dcl */
-#endif
-{
- register int i;
- va_list ap;
-
- for (i = indent; i >= 8; i -= 8)
- (*pr)("\t");
- while (--i >= 0)
- (*pr)(" ");
- va_start(ap, fmt);
- (*pr)("%:", fmt, ap);
- va_end(ap);
-}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 8f353eba930..6b46acbf01d 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_init.c,v 1.2 1996/08/02 00:06:00 niklas Exp $ */
-/* $NetBSD: vm_init.c,v 1.9 1994/06/29 06:48:00 cgd Exp $ */
+/* $OpenBSD: vm_init.c,v 1.3 1998/03/01 00:38:06 niklas Exp $ */
+/* $NetBSD: vm_init.c,v 1.11 1998/01/09 06:00:50 thorpej Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -70,6 +70,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -84,7 +85,7 @@
void vm_mem_init()
{
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
#else
@@ -96,8 +97,11 @@ void vm_mem_init()
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
- vm_set_page_size();
-#ifndef MACHINE_NONCONTIG
+ if (page_shift == 0) {
+ printf("vm_mem_init: WARN: MD code did not set page size\n");
+ vm_set_page_size();
+ }
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_page_startup(&avail_start, &avail_end);
#else
vm_page_bootstrap(&start, &end);
@@ -106,13 +110,13 @@ void vm_mem_init()
/*
* Initialize other VM packages
*/
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_object_init(virtual_end - VM_MIN_KERNEL_ADDRESS);
#else
vm_object_init(end - VM_MIN_KERNEL_ADDRESS);
#endif
vm_map_startup();
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
#else
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 51be546be26..ea3953c457d 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_kern.c,v 1.9 1998/02/23 20:22:17 niklas Exp $ */
+/* $OpenBSD: vm_kern.c,v 1.10 1998/03/01 00:38:08 niklas Exp $ */
/* $NetBSD: vm_kern.c,v 1.17.6.1 1996/06/13 17:21:28 cgd Exp $ */
/*
@@ -177,7 +177,7 @@ kmem_alloc(map, size)
while ((mem = vm_page_alloc(kernel_object, offset + i)) ==
NULL) {
vm_object_unlock(kernel_object);
- VM_WAIT;
+ vm_wait("fKmwire");
vm_object_lock(kernel_object);
}
vm_page_zero_fill(mem);
@@ -241,7 +241,7 @@ kmem_suballoc(parent, min, max, size, pageable)
size = round_page(size);
- *min = (vm_offset_t) vm_map_min(parent);
+ *min = (vm_offset_t)vm_map_min(parent);
ret = vm_map_find(parent, NULL, (vm_offset_t)0, min, size, TRUE);
if (ret != KERN_SUCCESS) {
printf("kmem_suballoc: bad status return of %d.\n", ret);
@@ -417,7 +417,7 @@ kmem_alloc_wait(map, size)
}
assert_wait(map, TRUE);
vm_map_unlock(map);
- thread_block();
+ thread_block("mKmwait");
}
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
vm_map_unlock(map);
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index e46ee18b458..37b47261a57 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_kern.h,v 1.2 1996/08/02 00:06:01 niklas Exp $ */
-/* $NetBSD: vm_kern.h,v 1.9 1994/06/29 06:48:03 cgd Exp $ */
+/* $OpenBSD: vm_kern.h,v 1.3 1998/03/01 00:38:09 niklas Exp $ */
+/* $NetBSD: vm_kern.h,v 1.11 1998/02/10 14:08:58 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -67,9 +67,18 @@
/* Kernel memory management definitions. */
+#if defined(UVM)
+extern vm_map_t buffer_map;
+extern vm_map_t exec_map;
+extern vm_map_t kernel_map;
+extern vm_map_t kmem_map;
+extern vm_map_t mb_map;
+extern vm_map_t phys_map;
+#else
vm_map_t buffer_map;
vm_map_t exec_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
vm_map_t phys_map;
+#endif
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 9c9aca5d679..ebe5fa82592 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_map.c,v 1.12 1998/02/03 01:27:09 millert Exp $ */
+/* $OpenBSD: vm_map.c,v 1.13 1998/03/01 00:38:11 niklas Exp $ */
/* $NetBSD: vm_map.c,v 1.23 1996/02/10 00:08:08 christos Exp $ */
/*
@@ -136,8 +136,16 @@
* maps and requires map entries.
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+u_int8_t kentry_data_store[MAX_KMAP*sizeof(struct vm_map) +
+ MAX_KMAPENT*sizeof(struct vm_map_entry)];
+vm_offset_t kentry_data = (vm_offset_t) kentry_data_store;
+vm_size_t kentry_data_size = sizeof(kentry_data_store);
+#else
+/* NUKE NUKE NUKE */
vm_offset_t kentry_data;
vm_size_t kentry_data_size;
+#endif
vm_map_entry_t kentry_free;
vm_map_t kmap_free;
@@ -160,6 +168,12 @@ vm_map_startup()
vm_map_t mp;
/*
+ * zero kentry area
+ * XXX necessary?
+ */
+ bzero((caddr_t)kentry_data, kentry_data_size);
+
+ /*
* Static map structures for allocation before initialization of
* kernel map or kmem map. vm_map_create knows how to deal with them.
*/
@@ -197,11 +211,24 @@ vmspace_alloc(min, max, pageable)
register struct vmspace *vm;
if (mapvmpgcnt == 0 && mapvm == 0) {
-#ifndef MACHINE_NONCONTIG
- mapvmpgcnt = ((last_page-first_page) * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
-#else
- mapvmpgcnt = (vm_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
-#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ int vm_page_count = 0;
+ int lcv;
+
+ for (lcv = 0; lcv < vm_nphysseg; lcv++)
+ vm_page_count += (vm_physmem[lcv].end -
+ vm_physmem[lcv].start);
+
+ mapvmpgcnt = (vm_page_count *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+#elif defined(MACHINE_NONCONTIG)
+ mapvmpgcnt = (vm_page_count *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+#else /* must be contig */
+ mapvmpgcnt = ((last_page-first_page) *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+#endif /* contig */
mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
mapvmpgcnt * PAGE_SIZE);
mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 7140ad1be98..63ca52ac0db 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_map.h,v 1.7 1997/11/11 20:16:41 millert Exp $ */
+/* $OpenBSD: vm_map.h,v 1.8 1998/03/01 00:38:12 niklas Exp $ */
/* $NetBSD: vm_map.h,v 1.11 1995/03/26 20:39:10 jtc Exp $ */
/*
@@ -72,6 +72,10 @@
#ifndef _VM_MAP_
#define _VM_MAP_
+#ifdef UVM
+#include <uvm/uvm_anon.h>
+#endif
+
/*
* Types defined:
*
@@ -84,12 +88,17 @@
* Objects which live in maps may be either VM objects, or
* another map (called a "sharing map") which denotes read-write
* sharing with other maps.
+ *
+ * XXXCDC: private pager data goes here now
*/
union vm_map_object {
struct vm_object *vm_object; /* object object */
struct vm_map *share_map; /* share map */
struct vm_map *sub_map; /* belongs to another map */
+#ifdef UVM
+ struct uvm_object *uvm_obj; /* UVM OBJECT */
+#endif /* UVM */
};
/*
@@ -105,16 +114,30 @@ struct vm_map_entry {
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
+#if defined(UVM)
+ /* etype is a bitmap that replaces the following 4 items */
+ int etype; /* entry type */
+#else
boolean_t is_a_map; /* Is "object" a map? */
boolean_t is_sub_map; /* Is "object" a submap? */
/* Only in sharing maps: */
boolean_t copy_on_write; /* is data copy-on-write */
boolean_t needs_copy; /* does object need to be copied */
+#endif
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
+#ifdef UVM
+ struct vm_aref aref; /* anonymous overlay */
+ int advice; /* madvise advice */
+#define uvm_map_entry_stop_copy flags
+ u_int8_t flags; /* flags */
+
+#define UVM_MAP_STATIC 0x01 /* static map entry */
+
+#endif /* UVM */
};
/*
@@ -198,6 +221,22 @@ typedef struct {
(map)->lk_flags &= ~LK_CANRECURSE; \
simple_unlock(&(map)->lk_interlock); \
}
+#if defined(UVM) && defined(_KERNEL)
+/* XXX: clean up later */
+static boolean_t vm_map_lock_try __P((vm_map_t));
+
+static __inline boolean_t vm_map_lock_try(map)
+
+vm_map_t map;
+
+{
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) != 0)
+ return(FALSE);
+ map->timestamp++;
+ return(TRUE);
+}
+#endif
+
/*
* Functions implemented as macros
*/
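
The vm_map_lock_try() inline added above gives UVM a non-blocking way to take a map's exclusive lock, bumping the map timestamp on success. A usage sketch (names and error handling are illustrative):

/*
 * Sketch only: take the map lock without sleeping, back off if busy.
 */
boolean_t
try_map_op_example(vm_map_t map)
{
	if (!vm_map_lock_try(map))
		return (FALSE);		/* busy; caller retries later */

	/* ... operate on the exclusively locked map ... */

	vm_map_unlock(map);
	return (TRUE);
}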
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 0d63c2bc9cf..0364c169ea9 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_meter.c,v 1.7 1997/11/06 05:59:35 csapuntz Exp $ */
+/* $OpenBSD: vm_meter.c,v 1.8 1998/03/01 00:38:14 niklas Exp $ */
/* $NetBSD: vm_meter.c,v 1.18 1996/02/05 01:53:59 christos Exp $ */
/*
@@ -47,7 +47,7 @@
struct loadavg averunnable; /* load average, of runnable procs */
int maxslp = MAXSLP;
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
int saferss = SAFERSS;
#endif /* MACHINE_NONCONTIG */
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index b8b6d2e4196..9b96a03f4a2 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_object.c,v 1.20 1997/11/06 05:59:35 csapuntz Exp $ */
+/* $OpenBSD: vm_object.c,v 1.21 1998/03/01 00:38:15 niklas Exp $ */
/* $NetBSD: vm_object.c,v 1.46 1997/03/30 20:56:12 mycroft Exp $ */
/*-
@@ -391,7 +391,7 @@ vm_object_terminate(object)
* Wait until the pageout daemon is through with the object or a
* potential collapse operation is finished.
*/
- vm_object_paging_wait(object);
+ vm_object_paging_wait(object,"vmterm");
/*
* Detach the object from its shadow if we are the shadow's
@@ -507,7 +507,7 @@ again:
/*
* Wait until the pageout daemon is through with the object.
*/
- vm_object_paging_wait(object);
+ vm_object_paging_wait(object,"vclean");
/*
* Loop through the object page list cleaning as necessary.
@@ -1201,7 +1201,7 @@ vm_object_overlay(object)
vm_object_unlock(object);
retry:
- vm_object_paging_wait(backing_object);
+ vm_object_paging_wait(backing_object,"vpagew");
/*
* While we were asleep, the parent object might have been deleted. If
@@ -1318,7 +1318,7 @@ retry:
paged_offset);
if (backing_page == NULL) {
vm_object_unlock(backing_object);
- VM_WAIT;
+ vm_wait("fVmcollapse");
vm_object_lock(backing_object);
goto retry;
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 7c4522a0740..53114a4e8c1 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_object.h,v 1.8 1997/11/06 05:59:36 csapuntz Exp $ */
+/* $OpenBSD: vm_object.h,v 1.9 1998/03/01 00:38:17 niklas Exp $ */
/* $NetBSD: vm_object.h,v 1.16 1995/03/29 22:10:28 briggs Exp $ */
/*
@@ -150,7 +150,7 @@ vm_object_t kmem_object;
do { \
(object)->flags |= OBJ_WAITING; \
thread_sleep_msg((event), &(object)->Lock, \
- (interruptible), (where)); \
+ (interruptible), (where), 0); \
} while (0)
#define vm_object_wakeup(object) \
@@ -184,11 +184,11 @@ vm_object_t kmem_object;
vm_object_wakeup((object)); \
} while (0)
-#define vm_object_paging_wait(object) \
+#define vm_object_paging_wait(object,msg) \
do { \
while (vm_object_paging((object))) { \
vm_object_sleep((object), (object), FALSE, \
- "vospgw"); \
+ (msg)); \
vm_object_lock((object)); \
} \
} while (0)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5e68e78d814..365acb01bdb 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_page.c,v 1.10 1998/02/06 08:32:47 niklas Exp $ */
-/* $NetBSD: vm_page.c,v 1.31 1997/06/06 23:10:23 thorpej Exp $ */
+/* $OpenBSD: vm_page.c,v 1.11 1998/03/01 00:38:18 niklas Exp $ */
+/* $NetBSD: vm_page.c,v 1.41 1998/02/08 18:24:52 thorpej Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
@@ -75,7 +75,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
+ * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -105,12 +105,13 @@
*/
/*
- * Resident memory management module.
+ * Resident memory management module.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -119,25 +120,49 @@
#include <machine/cpu.h>
-#ifdef MACHINE_NONCONTIG
+#define VERY_LOW_MEM() (cnt.v_free_count <= vm_page_free_reserved)
+#define KERN_OBJ(object) ((object) == kernel_object || (object) == kmem_object)
+
+int vm_page_free_reserved = 10;
+
+#if defined(MACHINE_NEW_NONCONTIG)
+
/*
- * These variables record the values returned by vm_page_bootstrap,
- * for debugging purposes. The implementation of pmap_steal_memory
- * and pmap_startup here also uses them internally.
+ * physical memory config is stored in vm_physmem.
*/
-vm_offset_t virtual_space_start;
-vm_offset_t virtual_space_end;
-#endif /* MACHINE_NONCONTIG */
+struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
+int vm_nphysseg = 0;
+static int vm_page_lost_count = 0; /* XXXCDC: DEBUG DEBUG */
+
+#endif
+
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
/*
- * Associated with page of user-allocatable memory is a
- * page structure.
+ * These variables record the values returned by vm_page_bootstrap,
+ * for debugging purposes.
+ *
+ * The implementation of vm_bootstrap_steal_memory here also uses
+ * them internally.
+ */
+static vm_offset_t virtual_space_start;
+static vm_offset_t virtual_space_end;
+
+vm_offset_t vm_bootstrap_steal_memory __P((vm_size_t));
+#endif
+
+/*
+ * Associated with page of user-allocatable memory is a
+ * page structure.
*/
struct pglist *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count = 0; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
+#if defined(MACHINE_NEW_NONCONTIG)
+struct pglist vm_page_bootbucket; /* bootstrap bucket */
+#endif
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
@@ -149,26 +174,55 @@ simple_lock_data_t vm_page_queue_free_lock;
boolean_t vm_page_startup_initialized;
vm_page_t vm_page_array;
+#if defined(MACHINE_NEW_NONCONTIG)
+ /* NOTHING NEEDED HERE */
+#elif defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+u_long first_page;
int vm_page_count;
-#ifndef MACHINE_NONCONTIG
+#else
+/* OLD NCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
-#else
-u_long first_page;
-#endif /* MACHINE_NONCONTIG */
+int vm_page_count;
+#endif
vm_size_t page_mask;
int page_shift;
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * local prototypes
+ */
+
+#if !defined(PMAP_STEAL_MEMORY)
+static boolean_t vm_page_physget __P((vm_offset_t *));
+#endif
+#endif
+
+/*
+ * macros
+ */
+
/*
- * vm_set_page_size:
+ * vm_page_hash:
*
- * Sets the page size, perhaps based upon the memory
- * size. Must be called before any use of page-size
- * dependent functions.
+ * Distributes the object/offset key pair among hash buckets.
*
- * Sets page_shift and page_mask from cnt.v_page_size.
+ * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
+ */
+#define vm_page_hash(object, offset) \
+ (((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
+
+/*
+ * vm_set_page_size:
+ *
+ * Sets the page size, perhaps based upon the memory
+ * size. Must be called before any use of page-size
+ * dependent functions.
+ *
+ * Sets page_shift and page_mask from cnt.v_page_size.
*/
void
vm_set_page_size()
@@ -184,73 +238,611 @@ vm_set_page_size()
break;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * vm_page_bootstrap: initialize the resident memory module (called
+ * from vm_mem_init()).
+ *
+ * - startp and endp are out params which return the boundaries of the
+ * free part of the kernel's virtual address space.
+ */
+void
+vm_page_bootstrap(startp, endp)
+ vm_offset_t *startp, *endp; /* OUT, OUT */
+{
+ vm_offset_t paddr;
+ vm_page_t pagearray;
+ int lcv, freepages, pagecount, n, i;
+
+ /*
+ * first init all the locks and queues.
+ */
+ simple_lock_init(&vm_page_queue_free_lock);
+ simple_lock_init(&vm_page_queue_lock);
+ TAILQ_INIT(&vm_page_queue_free);
+ TAILQ_INIT(&vm_page_queue_active);
+ TAILQ_INIT(&vm_page_queue_inactive);
+
+ /*
+ * init the <OBJ,OFFSET> => <PAGE> hash table buckets. for now
+ * we just have one bucket (the bootstrap bucket). later on we
+ * will malloc() new buckets as we dynamically resize the hash table.
+ */
+ vm_page_bucket_count = 1;
+ vm_page_hash_mask = 0;
+ vm_page_buckets = &vm_page_bootbucket;
+ TAILQ_INIT(vm_page_buckets);
+ simple_lock_init(&bucket_lock);
+
+ /*
+ * before calling this function the MD code is expected to register
+ * some free RAM with the vm_page_physload() function. our job
+ * now is to allocate vm_page structures for this preloaded memory.
+ */
+ if (vm_nphysseg == 0)
+ panic("vm_page_bootstrap: no memory pre-allocated");
+
+ /*
+ * first calculate the number of free pages... note that start/end
+ * are inclusive so you have to add one to get the number of pages.
+ *
+ * note that we use start/end rather than avail_start/avail_end.
+ * this allows us to allocate extra vm_page structures in case we
+ * want to return some memory to the pool after booting.
+ */
+ freepages = 0;
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ freepages = freepages +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start);
+ }
+
+ /*
+ * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
+ * use. for each page of memory we use we need a vm_page structure.
+ * thus, the total number of pages we can use is the total size of
+ * the memory divided by the PAGE_SIZE plus the size of the vm_page
+ * structure. we add one to freepages as a fudge factor to avoid
+ * truncation errors (since we can only allocate in terms of whole
+ * pages).
+ */
+ pagecount = (PAGE_SIZE * (freepages + 1)) /
+ (PAGE_SIZE + sizeof(struct vm_page));
+ pagearray = (vm_page_t)
+ vm_bootstrap_steal_memory(pagecount * sizeof(struct vm_page));
+ bzero(pagearray, pagecount * sizeof(struct vm_page));
+
+ /*
+ * now init the page frames
+ */
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+
+ n = vm_physmem[lcv].end - vm_physmem[lcv].start;
+ if (n > pagecount) {
+ printf("vm_init: lost %d page(s) in init\n",
+ n - pagecount);
+ vm_page_lost_count += (n - pagecount);
+ n = pagecount;
+ }
+
+ /* set up page array pointers */
+ vm_physmem[lcv].pgs = pagearray;
+ pagearray += n;
+ pagecount -= n;
+ vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
+
+ /* init and free vm_pages (we've already bzero'd them) */
+ paddr = ptoa(vm_physmem[lcv].start);
+ for (i = 0; i < n; i++, paddr += PAGE_SIZE) {
+ vm_physmem[lcv].pgs[i].phys_addr = paddr;
+ if (atop(paddr) >= vm_physmem[lcv].avail_start &&
+ atop(paddr) <= vm_physmem[lcv].avail_end)
+ vm_page_free(&vm_physmem[lcv].pgs[i]);
+ }
+ }
+
+ /*
+ * pass up the values of virtual_space_start and virtual_space_end
+ * (obtained by vm_bootstrap_steal_memory) to the upper layers of
+ * the VM.
+ */
+ *startp = round_page(virtual_space_start);
+ *endp = trunc_page(virtual_space_end);
+
+ /*
+ * init pagedaemon lock
+ */
+ simple_lock_init(&vm_pages_needed_lock);
+}
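
To make the pagecount calculation above concrete: assuming 4 KB pages, a (hypothetical) 64-byte struct vm_page and 32768 preloaded free pages, pagecount = (4096 * (32768 + 1)) / (4096 + 64) = 32264, so roughly 500 of the preloaded pages end up holding the vm_page array itself and the rest become managed pages. The +1 fudge only has to absorb the truncation of the integer division; if a segment turns out to contain more frames than there are vm_page structures left, the loop above reports them as lost pages.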
+
+/*
+ * vm_bootstrap_steal_memory: steal memory from physmem for bootstrapping
+ */
+vm_offset_t
+vm_bootstrap_steal_memory(size)
+ vm_size_t size;
+{
+#if defined(PMAP_STEAL_MEMORY)
+ vm_offset_t addr;
+
+ /*
+ * Defer this to machine-dependent code; we may need to allocate
+ * from a direct-mapped segment.
+ */
+ addr = pmap_steal_memory(size, &virtual_space_start,
+ &virtual_space_end);
+
+ /* round it the way we like it */
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+
+ return (addr);
+#else /* ! PMAP_STEAL_MEMORY */
+ vm_offset_t addr, vaddr, paddr;
+
+ /* round to page size */
+ size = round_page(size);
+
+ /*
+ * on first call to this function init ourselves. we detect this
+ * by checking virtual_space_start/end which are in the zero'd BSS
+ * area.
+ */
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /* round it the way we like it */
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * allocate virtual memory for this request
+ */
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * allocate and mapin physical pages to back new virtual pages
+ */
+ for (vaddr = round_page(addr); vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!vm_page_physget(&paddr))
+ panic("vm_bootstrap_steal_memory: out of memory");
+
+ /* XXX: should be wired, but some pmaps don't like that ... */
+ pmap_enter(pmap_kernel(), vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+ return(addr);
+#endif /* PMAP_STEAL_MEMORY */
+}
+
+#if !defined(PMAP_STEAL_MEMORY)
+/*
+ * vm_page_physget: "steal" one page from the vm_physmem structure.
+ *
+ * - attempt to allocate it off the end of a segment in which the "avail"
+ * values match the start/end values. if we can't do that, then we
+ * will advance both values (making them equal, and removing some
+ * vm_page structures from the non-avail area).
+ * - return false if out of memory.
+ */
+static boolean_t
+vm_page_physget(paddrp)
+ vm_offset_t *paddrp;
+
+{
+ int lcv, x;
+
+ /* pass 1: try allocating from a matching end */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+ for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+#else
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+#endif
+ {
+ if (vm_physmem[lcv].pgs)
+ panic("vm_page_physget: called _after_ bootstrap");
+
+ /* try from front */
+ if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
+ vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
+ *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ vm_physmem[lcv].avail_start++;
+ vm_physmem[lcv].start++;
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_start ==
+ vm_physmem[lcv].end) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x+1];
+ }
+ return(TRUE);
+ }
+
+ /* try from rear */
+ if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
+ vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
+ *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
+ vm_physmem[lcv].avail_end--;
+ vm_physmem[lcv].end--;
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_end ==
+ vm_physmem[lcv].start) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x+1];
+ }
+ return(TRUE);
+ }
+ }
+
+ /* pass2: forget about matching ends, just allocate something */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+ for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+#else
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+#endif
+ {
+ /* any room in this bank? */
+ if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
+ continue; /* nope */
+
+ *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ vm_physmem[lcv].avail_start++;
+ vm_physmem[lcv].start = vm_physmem[lcv].avail_start; /* truncate! */
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ vm_physmem[x] = vm_physmem[x+1]; /* structure copy */
+ }
+ return(TRUE);
+ }
+
+ return(FALSE); /* whoops! */
+}
+#endif /* ! PMAP_STEAL_MEMORY */
+
+/*
+ * vm_page_physload: load physical memory into VM system
+ *
+ * - all args are PFs
+ * - all pages in start/end get vm_page structures
+ * - areas marked by avail_start/avail_end get added to the free page pool
+ * - we are limited to VM_PHYSSEG_MAX physical memory segments
+ */
+void
+vm_page_physload(start, end, avail_start, avail_end)
+ vm_offset_t start, end, avail_start, avail_end;
+{
+ struct vm_page *pgs;
+ struct vm_physseg *ps;
+ int preload, lcv, npages, x;
+
+ if (page_shift == 0)
+ panic("vm_page_physload: page size not set!");
+
+ /*
+ * do we have room?
+ */
+ if (vm_nphysseg == VM_PHYSSEG_MAX) {
+ printf("vm_page_physload: unable to load physical memory segment\n");
+ printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
+ VM_PHYSSEG_MAX, start, end);
+ return;
+ }
+
+ /*
+ * check to see if this is a "preload" (i.e. vm_mem_init hasn't been
+ * called yet, so malloc is not available).
+ */
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ if (vm_physmem[lcv].pgs)
+ break;
+ }
+ preload = (lcv == vm_nphysseg);
+
+ /*
+ * if VM is already running, attempt to malloc() vm_page structures
+ */
+ if (!preload) {
+#if defined(VM_PHYSSEG_NOADD)
+ panic("vm_page_physload: tried to add RAM after vm_mem_init");
+#else
+/* XXXCDC: need some sort of lockout for this case */
+ vm_offset_t paddr;
+
+ /* # of pages */
+ npages = end - start;
+ MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
+ M_VMPAGE, M_NOWAIT);
+ if (pgs == NULL) {
+ printf("vm_page_physload: can not malloc vm_page structs for segment\n");
+ printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
+ return;
+ }
+ /* zero data, init phys_addr, and free pages */
+ bzero(pgs, sizeof(struct vm_page) * npages);
+ for (lcv = 0, paddr = ptoa(start); lcv < npages;
+ lcv++, paddr += PAGE_SIZE) {
+ pgs[lcv].phys_addr = paddr;
+ if (atop(paddr) >= avail_start &&
+ atop(paddr) <= avail_end)
+ vm_page_free(&pgs[lcv]);
+ }
+/* XXXCDC: incomplete: need to update v_free_count, what else? */
+/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
+#endif
+ } else {
+ /* XXX/gcc complains if these don't get init'd */
+ pgs = NULL;
+ npages = 0;
+ }
+
+ /*
+ * now insert us in the proper place in vm_physmem[]
+ */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
+ /* random: put it at the end (easy!) */
+ ps = &vm_physmem[vm_nphysseg];
+
+#else
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+
+ /* sort by address for binary search */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ if (start < vm_physmem[lcv].start)
+ break;
+ ps = &vm_physmem[lcv];
+
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysseg ; x > lcv ; x--)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x - 1];
+
+#else
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+
+ /* sort by largest segment first */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ if ((end - start) >
+ (vm_physmem[lcv].end - vm_physmem[lcv].start))
+ break;
+ ps = &vm_physmem[lcv];
+
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysseg ; x > lcv ; x--)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x - 1];
+
+#else
+
+ panic("vm_page_physload: unknown physseg strategy selected!");
+
+#endif
+#endif
+#endif
+
+ ps->start = start;
+ ps->end = end;
+ ps->avail_start = avail_start;
+ ps->avail_end = avail_end;
+ if (preload) {
+ ps->pgs = NULL;
+ } else {
+ ps->pgs = pgs;
+ ps->lastpg = pgs + npages - 1;
+ }
+ vm_nphysseg++;
+
+ /*
+ * done!
+ */
+ return;
+}
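
vm_page_physload() is the hook a MACHINE_NEW_NONCONTIG port calls from its machine-dependent bootstrap, once per chunk of managed RAM and normally before vm_mem_init(), so that vm_page_bootstrap() finds the memory pre-registered. All four arguments are page frame numbers, with avail_start/avail_end bounding the part that may go on the free list. A hedged sketch of such a caller; the byte-address variables are placeholders for whatever the port's machdep code computed:

/*
 * Sketch of an MD bootstrap registering one segment of RAM.
 * physmem_start/physmem_end/avail_start/avail_end are illustrative
 * names for byte addresses computed by locore/machdep.
 */
void
register_ram_example(vm_offset_t physmem_start, vm_offset_t physmem_end,
    vm_offset_t avail_start, vm_offset_t avail_end)
{
	/* the interface takes page frame numbers, hence atop() */
	vm_page_physload(atop(physmem_start), atop(physmem_end),
	    atop(avail_start), atop(avail_end));
}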
+
+/*
+ * vm_page_physrehash: reallocate hash table based on number of
+ * free pages.
+ */
+void
+vm_page_physrehash()
+{
+ struct pglist *newbuckets, *oldbuckets;
+ struct vm_page *pg;
+ int freepages, lcv, bucketcount, s, oldcount;
+
+ /*
+ * compute number of pages that can go in the free pool
+ */
+ freepages = 0;
+ for (lcv = 0; lcv < vm_nphysseg; lcv++)
+ freepages = freepages + (vm_physmem[lcv].avail_end -
+ vm_physmem[lcv].avail_start);
+
+ /*
+ * compute number of buckets needed for this number of pages
+ */
+ bucketcount = 1;
+ while (bucketcount < freepages)
+ bucketcount = bucketcount * 2;
+
+ /*
+ * malloc new buckets
+ */
+ MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
+ M_VMPBUCKET, M_NOWAIT);
+ if (newbuckets == NULL) {
+ printf("vm_page_physrehash: WARNING: could not grow page hash table\n");
+ return;
+ }
+ for (lcv = 0; lcv < bucketcount; lcv++)
+ TAILQ_INIT(&newbuckets[lcv]);
+
+ /*
+ * now replace the old buckets with the new ones and rehash everything
+ */
+ s = splimp();
+ simple_lock(&bucket_lock);
+ /* swap old for new ... */
+ oldbuckets = vm_page_buckets;
+ oldcount = vm_page_bucket_count;
+ vm_page_buckets = newbuckets;
+ vm_page_bucket_count = bucketcount;
+ vm_page_hash_mask = bucketcount - 1; /* power of 2 */
+
+ /* ... and rehash */
+ for (lcv = 0 ; lcv < oldcount ; lcv++) {
+ while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
+ TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
+ TAILQ_INSERT_TAIL(&vm_page_buckets[
+ vm_page_hash(pg->object, pg->offset)], pg, hashq);
+ }
+ }
+ simple_unlock(&bucket_lock);
+ splx(s);
+
+ /*
+ * free old bucket array if we malloc'd it previously
+ */
+ if (oldbuckets != &vm_page_bootbucket)
+ FREE(oldbuckets, M_VMPBUCKET);
+
+ /*
+ * done
+ */
+ return;
+}
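
vm_page_physrehash() always sizes the table to a power of two, which is what lets vm_page_hash() reduce the object/offset key with vm_page_hash_mask instead of a modulo. A sketch of the lookup a bucket supports, using the same hashq linkage the rehash loop above moves pages by (locking via splimp()/bucket_lock is omitted for brevity):

/*
 * Sketch only: search one hash bucket for <object,offset>.
 * The real lookup also takes bucket_lock at splimp().
 */
vm_page_t
lookup_page_example(vm_object_t object, vm_offset_t offset)
{
	struct pglist *bucket;
	vm_page_t pg;

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	for (pg = bucket->tqh_first; pg != NULL; pg = pg->hashq.tqe_next)
		if (pg->object == object && pg->offset == offset)
			return (pg);
	return (NULL);
}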
+
+#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
+
+void vm_page_physdump __P((void)); /* SHUT UP GCC */
+
+/* call from DDB */
+void
+vm_page_physdump()
+{
+ int lcv;
+
+ printf("rehash: physical memory config [segs=%d of %d]:\n",
+ vm_nphysseg, VM_PHYSSEG_MAX);
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
+ vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
+ vm_physmem[lcv].avail_end);
+ printf("STRATEGY = ");
+
+ switch (VM_PHYSSEG_STRAT) {
+ case VM_PSTRAT_RANDOM:
+ printf("RANDOM\n");
+ break;
+
+ case VM_PSTRAT_BSEARCH:
+ printf("BSEARCH\n");
+ break;
+
+ case VM_PSTRAT_BIGFIRST:
+ printf("BIGFIRST\n");
+ break;
+
+ default:
+ printf("<<UNKNOWN>>!!!!\n");
+ }
+ printf("number of buckets = %d\n", vm_page_bucket_count);
+ printf("number of lost pages = %d\n", vm_page_lost_count);
+}
+#endif
+
+#elif defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+
+/*
+ * We implement vm_page_bootstrap and vm_bootstrap_steal_memory with
+ * the help of two simpler functions:
+ *
+ * pmap_virtual_space and pmap_next_page
+ */
-#ifdef MACHINE_NONCONTIG
/*
- * vm_page_bootstrap:
+ * vm_page_bootstrap:
*
- * Initializes the resident memory module.
+ * Initializes the resident memory module.
*
- * Allocates memory for the page cells, and
- * for the object/offset-to-page hash table headers.
- * Each page cell is initialized and placed on the free list.
- * Returns the range of available kernel virtual memory.
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
+ * Returns the range of available kernel virtual memory.
*/
void
vm_page_bootstrap(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
- int i;
+ unsigned int i, freepages;
register struct pglist *bucket;
-
+ vm_offset_t paddr;
+
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+
/*
- * Initialize the locks
+ * Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
- * Initialize the queue headers for the free queue,
- * the active queue and the inactive queue.
+ * Initialize the queue headers for the free queue,
+ * the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Pre-allocate maps and map entries that cannot be dynamically
- * allocated via malloc(). The maps include the kernel_map and
- * kmem_map which must be initialized before malloc() will
- * work (obviously). Also could include pager maps which would
- * be allocated before kmeminit.
+ * Pre-allocate maps and map entries that cannot be dynamically
+ * allocated via malloc(). The maps include the kernel_map and
+ * kmem_map which must be initialized before malloc() will
+ * work (obviously). Also could include pager maps which would
+ * be allocated before kmeminit.
*
- * Allow some kernel map entries... this should be plenty
- * since people shouldn't be cluttering up the kernel
- * map (they should use their own maps).
+ * Allow some kernel map entries... this should be plenty
+ * since people shouldn't be cluttering up the kernel
+ * map (they should use their own maps).
*/
+
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
MAX_KMAPENT*sizeof(struct vm_map_entry));
- kentry_data = (vm_offset_t)pmap_steal_memory(kentry_data_size);
+ kentry_data = vm_bootstrap_steal_memory(kentry_data_size);
/*
- * Validate these zone addresses.
+ * Validate these zone addresses.
*/
- bzero((caddr_t)kentry_data, kentry_data_size);
+ bzero((caddr_t) kentry_data, kentry_data_size);
/*
- * Allocate (and initialize) the virtual-to-physical
- * table hash buckets.
+ * Allocate (and initialize) the virtual-to-physical
+ * table hash buckets.
*
- * The number of buckets MUST BE a power of 2, and
- * the actual value is the next power of 2 greater
- * than the number of physical pages in the system.
+ * The number of buckets MUST BE a power of 2, and
+ * the actual value is the next power of 2 greater
+ * than the number of physical pages in the system.
*
- * Note:
- * This computation can be tweaked if desired.
+ * Note:
+ * This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
unsigned int npages = pmap_free_pages();
@@ -263,9 +855,10 @@ vm_page_bootstrap(startp, endp)
vm_page_hash_mask = vm_page_bucket_count - 1;
vm_page_buckets = (struct pglist *)
- pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));
- bucket = vm_page_buckets;
-
+ vm_bootstrap_steal_memory(vm_page_bucket_count *
+ sizeof(*vm_page_buckets));
+ bucket = vm_page_buckets;
+
for (i = vm_page_bucket_count; i--;) {
TAILQ_INIT(bucket);
bucket++;
@@ -274,13 +867,83 @@ vm_page_bootstrap(startp, endp)
simple_lock_init(&bucket_lock);
/*
- * Machine-dependent code allocates the resident page table.
- * It uses VM_PAGE_INIT to initialize the page frames.
- * The code also returns to us the virtual space available
- * to the kernel. We don't trust the pmap module
- * to get the alignment right.
+ * We calculate how many page frames we will have and
+ * then allocate the page structures in one chunk.
+ * The calculation is non-trivial. We want:
+ *
+ * vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
+ *
+ * ...which, with some algebra, becomes:
+ *
+ * vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
+ *
+ * The value of vm_page_count need not be exact, but must
+ * be large enough so vm_page_array handles the index range.
+ */
+
+ freepages = pmap_free_pages();
+ /* Fudge slightly to deal with truncation error. */
+ freepages += 1; /* fudge */
+
+ vm_page_count = (PAGE_SIZE * freepages) /
+ (PAGE_SIZE + sizeof(*vm_page_array));
+
+ vm_page_array = (vm_page_t)
+ vm_bootstrap_steal_memory(vm_page_count * sizeof(*vm_page_array));
+ bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
+
+#ifdef DIAGNOSTIC
+ /*
+ * Initialize everything in case the holes are stepped in,
+ * and set PA to something that will cause a panic...
+ */
+ for (i = 0; i < vm_page_count; i++)
+ vm_page_array[i].phys_addr = 0xdeadbeef;
+#endif
+
+ /*
+ * Initialize the page frames. Note that some page
+ * indices may not be usable when pmap_free_pages()
+ * counts pages in a hole.
+ */
+
+ if (!pmap_next_page(&paddr))
+ panic("vm_page_bootstrap: can't get first page");
+
+ first_page = pmap_page_index(paddr);
+ for (i = 0;;) {
+ /*
+ * Initialize a page array element.
+ */
+
+ VM_PAGE_INIT(&vm_page_array[i], NULL, NULL);
+ vm_page_array[i].phys_addr = paddr;
+ vm_page_free(&vm_page_array[i]);
+
+ /*
+ * Are there any more physical pages?
+ */
+
+ if (!pmap_next_page(&paddr))
+ break;
+ i = pmap_page_index(paddr) - first_page;
+
+ /*
+ * Don't trust pmap_page_index()...
+ */
+
+ if (
+#if 0
+ i < 0 || /* can't happen, i is unsigned */
+#endif
+ i >= vm_page_count)
+ panic("vm_page_bootstrap: bad i = 0x%x", i);
+ }
+
+ /*
+ * Make sure we have nice, round values.
*/
- pmap_startup(&virtual_space_start, &virtual_space_end);
+
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
@@ -290,16 +953,75 @@ vm_page_bootstrap(startp, endp)
simple_lock_init(&vm_pages_needed_lock);
}
+vm_offset_t
+vm_bootstrap_steal_memory(size)
+ vm_size_t size;
+{
+ vm_offset_t addr, vaddr, paddr;
+
+ /*
+ * We round to page size.
+ */
+
+ size = round_page(size);
+
+ /*
+ * If this is the first call to vm_bootstrap_steal_memory,
+ * we have to initialize ourself.
+ */
+
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /*
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
+ */
+
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * Allocate virtual memory for this request.
+ */
+
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * Allocate and map physical pages to back new virtual pages.
+ */
+
+ for (vaddr = round_page(addr);
+ vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!pmap_next_page(&paddr))
+ panic("vm_bootstrap_steal_memory");
+
+ /*
+ * XXX Logically, these mappings should be wired,
+ * but some pmap modules barf if they are.
+ */
+
+ pmap_enter(pmap_kernel(), vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+
+ return addr;
+}
+
#else /* MACHINE_NONCONTIG */
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
/*
- * vm_page_startup:
+ * vm_page_startup:
*
- * Initializes the resident memory module.
+ * Initializes the resident memory module.
*
- * Allocates memory for the page cells, and
- * for the object/offset-to-page hash table headers.
- * Each page cell is initialized and placed on the free list.
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
*/
void
vm_page_startup(start, end)
@@ -314,29 +1036,30 @@ vm_page_startup(start, end)
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+
/*
- * Initialize the locks
+ * Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
- * Initialize the queue headers for the free queue,
- * the active queue and the inactive queue.
+ * Initialize the queue headers for the free queue,
+ * the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Calculate the number of hash table buckets.
+ * Calculate the number of hash table buckets.
*
- * The number of buckets MUST BE a power of 2, and
- * the actual value is the next power of 2 greater
- * than the number of physical pages in the system.
+ * The number of buckets MUST BE a power of 2, and
+ * the actual value is the next power of 2 greater
+ * than the number of physical pages in the system.
*
- * Note:
- * This computation can be tweaked if desired.
+ * Note:
+ * This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
@@ -347,7 +1070,7 @@ vm_page_startup(start, end)
vm_page_hash_mask = vm_page_bucket_count - 1;
/*
- * Allocate (and initialize) the hash table buckets.
+ * Allocate (and initialize) the hash table buckets.
*/
vm_page_buckets = (struct pglist *)
pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
@@ -361,55 +1084,56 @@ vm_page_startup(start, end)
simple_lock_init(&bucket_lock);
/*
- * Truncate the remainder of physical memory to our page size.
+ * Truncate the remainder of physical memory to our page size.
*/
*end = trunc_page(*end);
/*
- * Pre-allocate maps and map entries that cannot be dynamically
- * allocated via malloc(). The maps include the kernel_map and
- * kmem_map which must be initialized before malloc() will
- * work (obviously). Also could include pager maps which would
- * be allocated before kmeminit.
+ * Pre-allocate maps and map entries that cannot be dynamically
+ * allocated via malloc(). The maps include the kernel_map and
+ * kmem_map which must be initialized before malloc() will
+ * work (obviously). Also could include pager maps which would
+ * be allocated before kmeminit.
*
- * Allow some kernel map entries... this should be plenty
- * since people shouldn't be cluttering up the kernel
- * map (they should use their own maps).
+ * Allow some kernel map entries... this should be plenty
+ * since people shouldn't be cluttering up the kernel
+ * map (they should use their own maps).
*/
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
- MAX_KMAPENT*sizeof(struct vm_map_entry));
+ MAX_KMAPENT*sizeof(struct vm_map_entry));
kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
/*
- * Compute the number of pages of memory that will be
- * available for use (taking into account the overhead
- * of a page structure per page).
+ * Compute the number of pages of memory that will be
+ * available for use (taking into account the overhead
+ * of a page structure per page).
*/
- cnt.v_free_count = vm_page_count = (*end - *start +
- sizeof(struct vm_page)) / (PAGE_SIZE + sizeof(struct vm_page));
+ cnt.v_free_count = vm_page_count =
+ (*end - *start + sizeof(struct vm_page)) /
+ (PAGE_SIZE + sizeof(struct vm_page));
/*
- * Record the extent of physical memory that the
- * virtual memory system manages.
+ * Record the extent of physical memory that the
+ * virtual memory system manages.
*/
first_page = *start;
first_page += vm_page_count * sizeof(struct vm_page);
first_page = atop(round_page(first_page));
- last_page = first_page + vm_page_count - 1;
+ last_page = first_page + vm_page_count - 1;
first_phys_addr = ptoa(first_page);
- last_phys_addr = ptoa(last_page) + PAGE_MASK;
+ last_phys_addr = ptoa(last_page) + PAGE_MASK;
/*
- * Allocate and clear the mem entry structures.
+ * Allocate and clear the mem entry structures.
*/
m = vm_page_array = (vm_page_t)
- pmap_bootstrap_alloc(vm_page_count * sizeof(struct vm_page));
+ pmap_bootstrap_alloc(vm_page_count * sizeof(struct vm_page));
bzero(vm_page_array, vm_page_count * sizeof(struct vm_page));
/*
- * Initialize the mem entry structures now, and
- * put them in the free queue.
+ * Initialize the mem entry structures now, and
+ * put them in the free queue.
*/
pa = first_phys_addr;
npages = vm_page_count;
@@ -423,8 +1147,8 @@ vm_page_startup(start, end)
}
/*
- * Initialize vm_pages_needed lock here - don't wait for pageout
- * daemon XXX
+ * Initialize vm_pages_needed lock here - don't wait for pageout
+ * daemon XXX
*/
simple_lock_init(&vm_pages_needed_lock);
@@ -433,161 +1157,13 @@ vm_page_startup(start, end)
}
#endif /* MACHINE_NONCONTIG */
-#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
- */
-vm_offset_t
-pmap_steal_memory(size)
- vm_size_t size;
-{
- vm_offset_t addr, vaddr, paddr;
-
-#ifdef i386 /* XXX i386 calls pmap_steal_memory before vm_mem_init() */
- if (cnt.v_page_size == 0) /* XXX */
- vm_set_page_size();
-#endif
-
- /*
- * We round the size to an integer multiple.
- */
- size = (size + 3) &~ 3; /* XXX */
-
- /*
- * If this is the first call to pmap_steal_memory,
- * we have to initialize ourself.
- */
- if (virtual_space_start == virtual_space_end) {
- pmap_virtual_space(&virtual_space_start, &virtual_space_end);
-
- /*
- * The initial values must be aligned properly, and
- * we don't trust the pmap module to do it right.
- */
- virtual_space_start = round_page(virtual_space_start);
- virtual_space_end = trunc_page(virtual_space_end);
- }
-
- /*
- * Allocate virtual memory for this request.
- */
- addr = virtual_space_start;
- virtual_space_start += size;
-
- /*
- * Allocate and map physical pages to back new virtual pages.
- */
- for (vaddr = round_page(addr); vaddr < addr + size;
- vaddr += PAGE_SIZE) {
- if (!pmap_next_page(&paddr))
- panic("pmap_steal_memory");
-
- /*
- * XXX Logically, these mappings should be wired,
- * but some pmap modules barf if they are.
- */
- pmap_enter(pmap_kernel(), vaddr, paddr,
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- }
-
- return addr;
-}
-
-void
-pmap_startup(startp, endp)
- vm_offset_t *startp;
- vm_offset_t *endp;
-{
- unsigned int i, freepages;
- vm_offset_t paddr;
-
- /*
- * We calculate how many page frames we will have
- * and then allocate the page structures in one chunk.
- * The calculation is non-trivial. We want:
- *
- * vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
- *
- * which, with some algebra, becomes:
- *
- * vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
- *
- * The value of vm_page_count need not be exact, but must be
- * large enough so vm_page_array handles the index range.
- */
- freepages = pmap_free_pages();
- /* Fudge slightly to deal with truncation error. */
- freepages += 1; /* fudge */
-
- vm_page_count = (PAGE_SIZE * freepages) /
- (PAGE_SIZE + sizeof(*vm_page_array));
-
- vm_page_array = (vm_page_t)
- pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
- bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
-
-#ifdef DIAGNOSTIC
- /*
- * Initialize everyting in case the holes are stepped in,
- * and set PA to something that will cause a panic...
- */
- for (i = 0; i < vm_page_count; i++)
- vm_page_array[i].phys_addr = 0xdeadbeef;
-#endif
-
- /*
- * Initialize the page frames.
- * Note that some page indices may not be usable
- * when pmap_free_pages() counts pages in a hole.
- */
- if (!pmap_next_page(&paddr))
- panic("pmap_startup: can't get first page");
- first_page = pmap_page_index(paddr);
- i = 0;
- for (;;) {
- /* Initialize a page array element. */
- VM_PAGE_INIT(&vm_page_array[i], NULL, 0);
- vm_page_array[i].phys_addr = paddr;
- vm_page_free(&vm_page_array[i]);
-
- /* Are there more physical pages? */
- if (!pmap_next_page(&paddr))
- break;
- i = pmap_page_index(paddr) - first_page;
-
- /* Don't trust pmap_page_index()... */
- if (
-#if 0
- /* Cannot happen; i is unsigned */
- i < 0 ||
-#endif
- i >= vm_page_count)
- panic("pmap_startup: bad i=0x%x", i);
- }
-
- *startp = virtual_space_start;
- *endp = virtual_space_end;
-}
-#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
-
-/*
- * vm_page_hash:
- *
- * Distributes the object/offset key pair among hash buckets.
- *
- * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
- */
-#define vm_page_hash(object, offset) \
- (((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
-
-/*
- * vm_page_insert: [ internal use only ]
+ * vm_page_insert: [ internal use only ]
*
- * Inserts the given mem entry into the object/object-page
- * table and object list.
+ * Inserts the given mem entry into the object/object-page
+ * table and object list.
*
- * The object and page must be locked.
+ * The object and page must be locked.
*/
void
vm_page_insert(mem, object, offset)
@@ -604,42 +1180,46 @@ vm_page_insert(mem, object, offset)
panic("vm_page_insert: already inserted");
/*
- * Record the object/offset pair in this page
+ * Record the object/offset pair in this page
*/
+
mem->object = object;
mem->offset = offset;
/*
- * Insert it into the object_object/offset hash table
+ * Insert it into the object_object/offset hash table
*/
+
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
spl = splimp();
simple_lock(&bucket_lock);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void)splx(spl);
+ (void) splx(spl);
/*
- * Now link into the object's list of backed pages.
+ * Now link into the object's list of backed pages.
*/
+
TAILQ_INSERT_TAIL(&object->memq, mem, listq);
mem->flags |= PG_TABLED;
/*
- * And show that the object has one more resident
- * page.
+ * And show that the object has one more resident
+ * page.
*/
+
object->resident_page_count++;
}
/*
- * vm_page_remove: [ internal use only ]
+ * vm_page_remove: [ internal use only ]
* XXX: used by device pager as well
*
- * Removes the given mem entry from the object/offset-page
- * table and the object page list.
+ * Removes the given mem entry from the object/offset-page
+ * table and the object page list.
*
- * The object and page must be locked.
+ * The object and page must be locked.
*/
void
vm_page_remove(mem)
@@ -659,8 +1239,9 @@ vm_page_remove(mem)
return;
/*
- * Remove from the object_object/offset hash table
+ * Remove from the object_object/offset hash table
*/
+
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
spl = splimp();
simple_lock(&bucket_lock);
@@ -669,26 +1250,28 @@ vm_page_remove(mem)
(void) splx(spl);
/*
- * Now remove from the object's list of backed pages.
+ * Now remove from the object's list of backed pages.
*/
+
TAILQ_REMOVE(&mem->object->memq, mem, listq);
/*
- * And show that the object has one fewer resident
- * page.
+ * And show that the object has one fewer resident
+ * page.
*/
+
mem->object->resident_page_count--;
mem->flags &= ~PG_TABLED;
}
/*
- * vm_page_lookup:
+ * vm_page_lookup:
*
- * Returns the page associated with the object/offset
- * pair specified; if none is found, NULL is returned.
+ * Returns the page associated with the object/offset
+ * pair specified; if none is found, NULL is returned.
*
- * The object must be locked. No side effects.
+ * The object must be locked. No side effects.
*/
vm_page_t
vm_page_lookup(object, offset)
@@ -700,8 +1283,9 @@ vm_page_lookup(object, offset)
int spl;
/*
- * Search the hash table for this object/offset pair
+ * Search the hash table for this object/offset pair
*/
+
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
spl = splimp();
@@ -721,12 +1305,12 @@ vm_page_lookup(object, offset)
}
/*
- * vm_page_rename:
+ * vm_page_rename:
*
- * Move the given memory entry from its
- * current object to the specified target object/offset.
+ * Move the given memory entry from its
+ * current object to the specified target object/offset.
*
- * The object must be locked.
+ * The object must be locked.
*/
void
vm_page_rename(mem, new_object, new_offset)
@@ -734,25 +1318,26 @@ vm_page_rename(mem, new_object, new_offset)
register vm_object_t new_object;
vm_offset_t new_offset;
{
+
if (mem->object == new_object)
return;
- /* Keep page from moving out from under pageout daemon */
- vm_page_lock_queues();
-
- vm_page_remove(mem);
+	/* keep page from moving out from under pageout daemon */
+	vm_page_lock_queues();
+ vm_page_remove(mem);
vm_page_insert(mem, new_object, new_offset);
vm_page_unlock_queues();
}
/*
- * vm_page_alloc:
+ * vm_page_alloc:
*
- * Allocate and return a memory cell associated
- * with this VM object/offset pair.
+ * Allocate and return a memory cell associated
+ * with this VM object/offset pair.
*
- * Object must be locked.
+ * Object must be locked.
*/
+
vm_page_t
vm_page_alloc(object, offset)
vm_object_t object;
@@ -763,13 +1348,20 @@ vm_page_alloc(object, offset)
spl = splimp(); /* XXX */
simple_lock(&vm_page_queue_free_lock);
- if (vm_page_queue_free.tqh_first == NULL) {
- simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
- return(NULL);
- }
-
mem = vm_page_queue_free.tqh_first;
+
+ if (VERY_LOW_MEM()) {
+ if ((!KERN_OBJ(object) && curproc != pageout_daemon)
+ || mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ splx(spl);
+ return(NULL);
+ }
+ }
+#ifdef DIAGNOSTIC
+ if (mem == NULL) /* because we now depend on VERY_LOW_MEM() */
+ panic("vm_page_alloc");
+#endif
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
cnt.v_free_count--;
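The new check above reserves the last few free pages: once memory is very low, only allocations for kernel objects or requests from the pageout daemon itself may take a page, so the daemon can always make forward progress. A minimal sketch of what the two predicates could look like, assuming they compare cnt.v_free_count against the vm_pages_reserved threshold declared in vm_pageout.h and test the object against the kernel objects (the real VERY_LOW_MEM() and KERN_OBJ() definitions live elsewhere in this patch):

/* hypothetical sketch only -- not the patch's actual definitions */
#define VERY_LOW_MEM()	(cnt.v_free_count <= vm_pages_reserved)
#define KERN_OBJ(obj)	((obj) == kernel_object || (obj) == kmem_object)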
@@ -779,34 +1371,36 @@ vm_page_alloc(object, offset)
VM_PAGE_INIT(mem, object, offset);
/*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
+ * Decide if we should poke the pageout daemon.
+ * We do this if the free count is less than the low
+ * water mark, or if the free count is less than the high
+ * water mark (but above the low water mark) and the inactive
+ * count is less than its target.
*
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
+ * We don't have the counts locked ... if they change a little,
+ * it doesn't really matter.
*/
+
if (cnt.v_free_count < cnt.v_free_min ||
(cnt.v_free_count < cnt.v_free_target &&
- cnt.v_inactive_count < cnt.v_inactive_target))
+ cnt.v_inactive_count < cnt.v_inactive_target))
thread_wakeup(&vm_pages_needed);
return (mem);
}
/*
- * vm_page_free:
+ * vm_page_free:
*
- * Returns the given page to the free list,
- * disassociating it with any VM object.
+ * Returns the given page to the free list,
+ *	disassociating it from any VM object.
*
- * Object and page must be locked prior to entry.
+ * Object and page must be locked prior to entry.
*/
void
vm_page_free(mem)
register vm_page_t mem;
{
+
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -835,18 +1429,19 @@ vm_page_free(mem)
}
/*
- * vm_page_wire:
+ * vm_page_wire:
*
- * Mark this page as wired down by yet
- * another map, removing it from paging queues
- * as necessary.
+ * Mark this page as wired down by yet
+ * another map, removing it from paging queues
+ * as necessary.
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_wire(mem)
register vm_page_t mem;
{
+
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
@@ -866,17 +1461,18 @@ vm_page_wire(mem)
}
/*
- * vm_page_unwire:
+ * vm_page_unwire:
*
- * Release one wiring of this page, potentially
- * enabling it to be paged again.
+ * Release one wiring of this page, potentially
+ * enabling it to be paged again.
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_unwire(mem)
register vm_page_t mem;
{
+
VM_PAGE_CHECK(mem);
mem->wire_count--;
@@ -889,24 +1485,26 @@ vm_page_unwire(mem)
}
/*
- * vm_page_deactivate:
+ * vm_page_deactivate:
*
- * Returns the given page to the inactive list,
- * indicating that no physical maps have access
- * to this page. [Used by the physical mapping system.]
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_deactivate(m)
register vm_page_t m;
{
+
VM_PAGE_CHECK(m);
/*
- * Only move active pages -- ignore locked or already
- * inactive ones.
+ * Only move active pages -- ignore locked or already
+ * inactive ones.
*/
+
if (m->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
m->flags &= ~PG_ACTIVE;
@@ -929,16 +1527,17 @@ vm_page_deactivate(m)
}
/*
- * vm_page_activate:
+ * vm_page_activate:
*
- * Put the specified page on the active list (if appropriate).
+ * Put the specified page on the active list (if appropriate).
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_activate(m)
register vm_page_t m;
{
+
VM_PAGE_CHECK(m);
if (m->flags & PG_INACTIVE) {
@@ -957,16 +1556,17 @@ vm_page_activate(m)
}
/*
- * vm_page_zero_fill:
+ * vm_page_zero_fill:
*
- * Zero-fill the specified page.
- * Written as a standard pagein routine, to
- * be used by the zero-fill object.
+ * Zero-fill the specified page.
+ * Written as a standard pagein routine, to
+ * be used by the zero-fill object.
*/
boolean_t
vm_page_zero_fill(m)
vm_page_t m;
{
+
VM_PAGE_CHECK(m);
m->flags &= ~PG_CLEAN;
@@ -975,15 +1575,16 @@ vm_page_zero_fill(m)
}
/*
- * vm_page_copy:
+ * vm_page_copy:
*
- * Copy one page to another
+ * Copy one page to another
*/
void
vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
+
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dest_m);
@@ -1039,14 +1640,18 @@ u_long vm_page_alloc_memory_npages;
* XXX allocates a single segment.
*/
int
-vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
- waitok)
+vm_page_alloc_memory(size, low, high, alignment, boundary,
+ rlist, nsegs, waitok)
vm_size_t size;
vm_offset_t low, high, alignment, boundary;
struct pglist *rlist;
int nsegs, waitok;
{
vm_offset_t try, idxpa, lastidxpa;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int psi;
+ struct vm_page *vm_page_array;
+#endif
int s, tryidx, idx, end, error;
vm_page_t m;
u_long pagemask;
@@ -1101,6 +1706,19 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
/*
* Make sure this is a managed physical page.
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+
+ if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
+ continue; /* managed? */
+ if (vm_physseg_find(atop(try + size), NULL) != psi)
+ continue; /* end must be in this segment */
+
+ tryidx = idx;
+ end = idx + (size / PAGE_SIZE);
+ vm_page_array = vm_physmem[psi].pgs;
+ /* XXX: emulates old global vm_page_array */
+
+#else
if (IS_VM_PHYSADDR(try) == 0)
continue;
@@ -1112,6 +1730,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
*/
goto out;
}
+#endif
/*
 	 * Found a suitable starting page. See if the range
@@ -1127,6 +1746,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
idxpa = VM_PAGE_TO_PHYS(&vm_page_array[idx]);
+#if !defined(MACHINE_NEW_NONCONTIG)
/*
* Make sure this is a managed physical page.
* XXX Necessary? I guess only if there
@@ -1134,6 +1754,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
*/
if (IS_VM_PHYSADDR(idxpa) == 0)
break;
+#endif
if (idx > tryidx) {
lastidxpa =
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e4780cdbb8b..ab48d7dffd6 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_page.h,v 1.4 1997/09/22 20:44:53 niklas Exp $ */
-/* $NetBSD: vm_page.h,v 1.20 1997/06/06 23:10:25 thorpej Exp $ */
+/* $OpenBSD: vm_page.h,v 1.5 1998/03/01 00:38:20 niklas Exp $ */
+/* $NetBSD: vm_page.h,v 1.24 1998/02/10 14:09:03 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -68,7 +68,6 @@
/*
* Resident memory system definitions.
*/
-
#ifndef _VM_PAGE_
#define _VM_PAGE_
@@ -94,24 +93,60 @@
*
* Fields in this structure are locked either by the lock on the
* object that the page belongs to (O) or by the lock on the page
- * queues (P).
+ * queues (P) [or both].
+ */
+
+#if defined(UVM)
+/*
+ * locking note: the mach version of this data structure had bit
+ * fields for the flags, and the bit fields were divided into two
+ * items (depending on who locked what). at some point, in BSD, the bit
+ * fields were dumped and all the flags were lumped into one short.
+ * that is fine for a single-threaded uniprocessor OS, but bad if you
+ * want to actually make use of locking (simple_lock's). so, we've
+ * separated things back out again.
+ *
+ * note the page structure has no lock of its own.
*/
+#include <uvm/uvm_extern.h>
+#include <vm/pglist.h>
+#else
TAILQ_HEAD(pglist, vm_page);
+#endif /* UVM */
struct vm_page {
- TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
- * queue or free list (P) */
- TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
- TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
+ TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
+ * queue or free list (P) */
+ TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
+ TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
- vm_object_t object; /* which object am I in (O,P)*/
- vm_offset_t offset; /* offset into object (O,P) */
-
- u_short wire_count; /* wired down maps refs (P) */
- u_short flags; /* see below */
+#if !defined(UVM) /* uvm uses obju */
+ vm_object_t object; /* which object am I in (O,P)*/
+#endif
+ vm_offset_t offset; /* offset into object (O,P) */
+
+#if defined(UVM)
+ struct uvm_object *uobject; /* object (O,P) */
+ struct vm_anon *uanon; /* anon (O,P) */
+ u_short flags; /* object flags [O] */
+ u_short version; /* version count [O] */
+ u_short wire_count; /* wired down map refs [P] */
+ u_short pqflags; /* page queue flags [P] */
+ u_int loan_count; /* number of active loans
+ * to read: [O or P]
+ * to modify: [O _and_ P] */
+#else
+ u_short wire_count; /* wired down maps refs (P) */
+ u_short flags; /* see below */
+#endif
- vm_offset_t phys_addr; /* physical address of page */
+ vm_offset_t phys_addr; /* physical address of page */
+#if defined(UVM) && defined(UVM_PAGE_TRKOWN)
+ /* debugging fields to track page ownership */
+ pid_t owner; /* proc that set PG_BUSY */
+ char *owner_tag; /* why it was set busy */
+#endif
};
/*
@@ -119,6 +154,38 @@ struct vm_page {
*
* Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
+#if defined(UVM)
+
+/*
+ * locking rules:
+ * PG_ ==> locked by object lock
+ * PQ_ ==> lock by page queue lock
+ * PQ_FREE is locked by free queue lock and is mutex with all other PQs
+ *
+ * possible deadwood: PG_FAULTING, PQ_LAUNDRY
+ */
+#define PG_CLEAN 0x0008 /* page has not been modified */
+#define PG_BUSY 0x0010 /* page is in transit */
+#define PG_WANTED 0x0020 /* someone is waiting for page */
+#define PG_TABLED 0x0040 /* page is in VP table */
+#define PG_FAKE 0x0200 /* page is placeholder for pagein */
+#define PG_FILLED 0x0400 /* client flag to set when filled */
+#define PG_DIRTY 0x0800 /* client flag to set when dirty */
+#define PG_RELEASED 0x1000 /* page released while paging */
+#define PG_FAULTING 0x2000 /* page is being faulted in */
+#define PG_CLEANCHK 0x4000 /* clean bit has been checked */
+
+#define PQ_FREE 0x0001 /* page is on free list */
+#define PQ_INACTIVE 0x0002 /* page is in inactive list */
+#define PQ_ACTIVE 0x0004 /* page is in active list */
+#define PQ_LAUNDRY 0x0008 /* page is being cleaned now */
+#define PQ_ANON 0x0010 /* page is part of an anon, rather
+ than an uvm_object */
+#define PQ_AOBJ 0x0020 /* page is part of an anonymous
+ uvm_object */
+#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ)
+
+#else
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P) */
@@ -144,32 +211,44 @@ struct vm_page {
#define PG_FAULTING 0x2000 /* page is being faulted in */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
+#endif
-#if VM_PAGE_DEBUG
-#ifndef MACHINE_NONCONTIG
-#define VM_PAGE_CHECK(mem) { \
- if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
- (((unsigned int) mem) > \
- ((unsigned int) &vm_page_array[last_page-first_page])) || \
- ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
- (PG_ACTIVE | PG_INACTIVE))) \
- panic("vm_page_check: not valid!"); \
-}
-#else /* MACHINE_NONCONTIG */
-#define VM_PAGE_CHECK(mem) { \
- if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
- (((unsigned int) mem) > \
- ((unsigned int) &vm_page_array[vm_page_count])) || \
- ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
- (PG_ACTIVE | PG_INACTIVE))) \
- panic("vm_page_check: not valid!"); \
-}
-#endif /* MACHINE_NONCONTIG */
-#else /* VM_PAGE_DEBUG */
-#define VM_PAGE_CHECK(mem)
-#endif /* VM_PAGE_DEBUG */
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * physical memory layout structure
+ *
+ * MD vmparam.h must #define:
+ *	VM_PHYSSEG_MAX = max number of physical memory segments we support
+ *	(if this is "1" then we revert to a "contig" case)
+ *	VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
+ * - VM_PSTRAT_RANDOM: linear search (random order)
+ * - VM_PSTRAT_BSEARCH: binary search (sorted by address)
+ * - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
+ * - others?
+ * XXXCDC: eventually we should remove contig and old non-contig cases
+ * and purge all left-over global variables...
+ */
+#define VM_PSTRAT_RANDOM 1
+#define VM_PSTRAT_BSEARCH 2
+#define VM_PSTRAT_BIGFIRST 3
+
+/*
+ * vm_physseg: describes one segment of physical memory
+ */
+struct vm_physseg {
+ vm_offset_t start; /* PF# of first page in segment */
+ vm_offset_t end; /* (PF# of last page in segment) + 1 */
+ vm_offset_t avail_start; /* PF# of first free page in segment */
+ vm_offset_t avail_end; /* (PF# of last free page in segment) +1 */
+ struct vm_page *pgs; /* vm_page structures (from start) */
+ struct vm_page *lastpg; /* vm_page structure for end */
+ struct pmap_physseg pmseg; /* pmap specific (MD) data */
+};
+
+#endif /* MACHINE_NEW_NONCONTIG */
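Machine-dependent bootstrap code is expected to describe each chunk of RAM to the VM system with vm_page_physload() (prototyped below), which fills in one struct vm_physseg per chunk. A hedged sketch of such a call, assuming the four arguments are the start, end, avail_start and avail_end page frame numbers, and with hypothetical placeholder names for the port's own variables:

/* illustrative only; function and variable names are hypothetical */
void
md_physload_sketch(first_avail_pa, last_avail_pa)
	vm_offset_t first_avail_pa, last_avail_pa;	/* physical addresses */
{
	vm_page_physload(atop(first_avail_pa), atop(last_avail_pa),
	    atop(first_avail_pa), atop(last_avail_pa));
}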
+
+#if defined(_KERNEL)
-#ifdef _KERNEL
/*
* Each pageable resident page falls into one of three lists:
*
@@ -193,51 +272,226 @@ struct pglist vm_page_queue_active; /* active memory queue */
extern
struct pglist vm_page_queue_inactive; /* inactive memory queue */
+
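Pages migrate between these queues only with the page-queue lock held. A hedged sketch of that discipline, assuming m is a page obtained from vm_page_lookup() and using the non-UVM flag names defined above:

/* hypothetical helper, for illustration of the locking rule only */
static void
page_queue_sketch(m)
	vm_page_t m;
{
	vm_page_lock_queues();		/* (P) fields need the queue lock */
	if (m->flags & PG_ACTIVE)
		vm_page_deactivate(m);	/* active -> inactive list */
	else if (m->flags & PG_INACTIVE)
		vm_page_activate(m);	/* inactive -> active list */
	vm_page_unlock_queues();
}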
+#if defined(MACHINE_NEW_NONCONTIG)
+
+/*
+ * physical memory config is stored in vm_physmem.
+ */
+
+extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
+extern int vm_nphysseg;
+
+#else
+#if defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+extern
+u_long first_page; /* first physical page number */
+extern
+int vm_page_count; /* How many pages do we manage? */
extern
vm_page_t vm_page_array; /* First resident page in table */
-#ifndef MACHINE_NONCONTIG
+#define VM_PAGE_INDEX(pa) \
+ (pmap_page_index((pa)) - first_page)
+#else
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
extern
-long first_page; /* first physical page number */
+long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern
-long last_page; /* last physical page number */
+long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern
-vm_offset_t first_phys_addr; /* physical address for first_page */
+vm_offset_t first_phys_addr; /* physical address for first_page */
extern
-vm_offset_t last_phys_addr; /* physical address for last_page */
-#else /* MACHINE_NONCONTIG */
-extern
-u_long first_page; /* first physical page number */
+vm_offset_t last_phys_addr; /* physical address for last_page */
extern
-int vm_page_count; /* How many pages do we manage? */
+vm_page_t vm_page_array; /* First resident page in table */
+
+#define VM_PAGE_INDEX(pa) \
+ (atop((pa)) - first_page)
+
#endif /* MACHINE_NONCONTIG */
+#endif /* MACHINE_NEW_NONCONTIG */
+
+/*
+ * prototypes
+ */
+
+#if defined(MACHINE_NEW_NONCONTIG)
+static struct vm_page *PHYS_TO_VM_PAGE __P((vm_offset_t));
+static int vm_physseg_find __P((vm_offset_t, int *));
+#endif
+void vm_page_activate __P((vm_page_t));
+vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
+int vm_page_alloc_memory __P((vm_size_t size, vm_offset_t low,
+ vm_offset_t high, vm_offset_t alignment, vm_offset_t boundary,
+ struct pglist *rlist, int nsegs, int waitok));
+void vm_page_free_memory __P((struct pglist *list));
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
+#endif
+void vm_page_copy __P((vm_page_t, vm_page_t));
+void vm_page_deactivate __P((vm_page_t));
+void vm_page_free __P((vm_page_t));
+void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
+vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
+#if defined(MACHINE_NEW_NONCONTIG)
+void vm_page_physload __P((vm_offset_t, vm_offset_t,
+ vm_offset_t, vm_offset_t));
+void vm_page_physrehash __P((void));
+#endif
+void vm_page_remove __P((vm_page_t));
+void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
+void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
+#endif
+void vm_page_unwire __P((vm_page_t));
+void vm_page_wire __P((vm_page_t));
+boolean_t vm_page_zero_fill __P((vm_page_t));
+
+/*
+ * macros and inlines
+ */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
-#ifndef MACHINE_NONCONTIG
+#if defined(MACHINE_NEW_NONCONTIG)
+
+/*
+ * when VM_PHYSSEG_MAX is 1, we can simplify these functions
+ */
+
+/*
+ * vm_physseg_find: find vm_physseg structure that belongs to a PA
+ */
+static __inline int
+vm_physseg_find(pframe, offp)
+ vm_offset_t pframe;
+ int *offp;
+{
+#if VM_PHYSSEG_MAX == 1
+
+ /* 'contig' case */
+ if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[0].start;
+ return(0);
+ }
+ return(-1);
+
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+ /* binary search for it */
+ int start, len, try;
+
+ /*
+	 * if try is too large (thus target is less than try) we reduce
+ * the length to trunc(len/2) [i.e. everything smaller than "try"]
+ *
+ * if the try is too small (thus target is greater than try) then
+ * we set the new start to be (try + 1). this means we need to
+ * reduce the length to (round(len/2) - 1).
+ *
+ * note "adjust" below which takes advantage of the fact that
+ * (round(len/2) - 1) == trunc((len - 1) / 2)
+ * for any value of len we may have
+ */
+
+ for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
+ try = start + (len / 2); /* try in the middle */
+
+ /* start past our try? */
+ if (pframe >= vm_physmem[try].start) {
+ /* was try correct? */
+ if (pframe < vm_physmem[try].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[try].start;
+ return(try); /* got it */
+ }
+ start = try + 1; /* next time, start here */
+ len--; /* "adjust" */
+ } else {
+ /*
+ * pframe before try, just reduce length of
+ * region, done in "for" loop
+ */
+ }
+ }
+ return(-1);
+
+#else
+ /* linear search for it */
+ int lcv;
+
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ if (pframe >= vm_physmem[lcv].start &&
+ pframe < vm_physmem[lcv].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[lcv].start;
+ return(lcv); /* got it */
+ }
+ }
+ return(-1);
+
+#endif
+}
+
+
+/*
+ * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap.
+ */
+
+#define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)
+
+/*
+ * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
+ * back from an I/O mapping (ugh!). used in some MD code as well.
+ */
+static __inline struct vm_page *
+PHYS_TO_VM_PAGE(pa)
+ vm_offset_t pa;
+{
+ vm_offset_t pf = atop(pa);
+ int off;
+ int psi;
+
+ psi = vm_physseg_find(pf, &off);
+ if (psi != -1)
+ return(&vm_physmem[psi].pgs[off]);
+ return(NULL);
+}
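Taken together, IS_VM_PHYSADDR() and PHYS_TO_VM_PAGE() let MI code decide whether a physical address is managed and, if so, recover its vm_page. A small sketch of that use (the helper name is hypothetical):

static __inline struct vm_page *
managed_page_sketch(pa)
	vm_offset_t pa;
{
	if (!IS_VM_PHYSADDR(pa))
		return (NULL);		/* not inside any vm_physseg */
	return (PHYS_TO_VM_PAGE(pa));	/* segment lookup succeeds here */
}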
+
+#elif defined(MACHINE_NONCONTIG)
+
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define IS_VM_PHYSADDR(pa) \
- ((pa) >= first_phys_addr && (pa) <= last_phys_addr)
+ (pmap_page_index(pa) >= 0)
+
+#define PHYS_TO_VM_PAGE(pa) \
+ (&vm_page_array[pmap_page_index(pa) - first_page])
-#define VM_PAGE_INDEX(pa) \
- (atop((pa)) - first_page)
#else
-#define IS_VM_PHYSADDR(pa) \
-({ \
- int __pmapidx = pmap_page_index(pa); \
- (__pmapidx >= 0 && __pmapidx >= first_page); \
-})
-#define VM_PAGE_INDEX(pa) \
- (pmap_page_index((pa)) - first_page)
-#endif /* MACHINE_NONCONTIG */
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define IS_VM_PHYSADDR(pa) \
+ ((pa) >= first_phys_addr && (pa) <= last_phys_addr)
+
+#define PHYS_TO_VM_PAGE(pa) \
+	(&vm_page_array[atop(pa) - first_page])
+
+#endif /* (OLD) MACHINE_NONCONTIG */
+
+#if defined(UVM)
+
+#define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE)
+
+#else /* UVM */
-#define PHYS_TO_VM_PAGE(pa) \
- (&vm_page_array[VM_PAGE_INDEX((pa))])
+#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
-#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
+#endif /* UVM */
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
@@ -245,10 +499,6 @@ simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
extern /* lock on free page queue */
simple_lock_data_t vm_page_queue_free_lock;
-/*
- * Functions implemented as macros
- */
-
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
assert_wait((m), (interruptible)); \
@@ -267,7 +517,10 @@ simple_lock_data_t vm_page_queue_free_lock;
#define vm_page_set_modified(m) { (m)->flags &= ~PG_CLEAN; }
-#ifndef MACHINE_NONCONTIG
+/*
+ * XXXCDC: different versions of this should die
+ */
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
vm_page_insert((mem), (obj), (offset)); \
@@ -284,37 +537,58 @@ simple_lock_data_t vm_page_queue_free_lock;
}
#endif /* MACHINE_NONCONTIG */
-/* XXX what is this here for? */
-void vm_set_page_size __P((void));
+#if VM_PAGE_DEBUG
+#if defined(MACHINE_NEW_NONCONTIG)
-/* XXX probably should be elsewhere. */
-#ifdef MACHINE_NONCONTIG
-vm_offset_t pmap_steal_memory __P((vm_size_t));
-void pmap_startup __P((vm_offset_t *, vm_offset_t *));
-#endif
+/*
+ * VM_PAGE_CHECK: debugging check of a vm_page structure
+ */
+static __inline void
+VM_PAGE_CHECK(mem)
+ struct vm_page *mem;
+{
+ int lcv;
+
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ if ((unsigned int) mem >= (unsigned int) vm_physmem[lcv].pgs &&
+ (unsigned int) mem <= (unsigned int) vm_physmem[lcv].lastpg)
+ break;
+ }
+ if (lcv == vm_nphysseg ||
+ (mem->flags & (PG_ACTIVE|PG_INACTIVE)) == (PG_ACTIVE|PG_INACTIVE))
+ panic("vm_page_check: not valid!");
+ return;
+}
+
+#elif defined(MACHINE_NONCONTIG)
+
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define VM_PAGE_CHECK(mem) { \
+ if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
+ (((unsigned int) mem) > \
+ ((unsigned int) &vm_page_array[vm_page_count])) || \
+ ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
+ (PG_ACTIVE | PG_INACTIVE))) \
+ panic("vm_page_check: not valid!"); \
+}
+
+#else
+
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define VM_PAGE_CHECK(mem) { \
+ if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
+ (((unsigned int) mem) > \
+ ((unsigned int) &vm_page_array[last_page-first_page])) || \
+ ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
+ (PG_ACTIVE | PG_INACTIVE))) \
+ panic("vm_page_check: not valid!"); \
+}
-void vm_page_activate __P((vm_page_t));
-vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
-int vm_page_alloc_memory __P((vm_size_t, vm_offset_t,
- vm_offset_t, vm_offset_t, vm_offset_t,
- struct pglist *, int, int));
-void vm_page_free_memory __P((struct pglist *));
-#ifdef MACHINE_NONCONTIG
-void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
-#endif
-void vm_page_copy __P((vm_page_t, vm_page_t));
-void vm_page_deactivate __P((vm_page_t));
-void vm_page_free __P((vm_page_t));
-void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
-vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
-void vm_page_remove __P((vm_page_t));
-void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
-#ifndef MACHINE_NONCONTIG
-void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
#endif
-void vm_page_unwire __P((vm_page_t));
-void vm_page_wire __P((vm_page_t));
-boolean_t vm_page_zero_fill __P((vm_page_t));
+
+#else /* VM_PAGE_DEBUG */
+#define VM_PAGE_CHECK(mem)
+#endif /* VM_PAGE_DEBUG */
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b1b41394611..e6a9fef00bb 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pageout.c,v 1.8 1997/11/06 05:59:36 csapuntz Exp $ */
+/* $OpenBSD: vm_pageout.c,v 1.9 1998/03/01 00:38:21 niklas Exp $ */
/* $NetBSD: vm_pageout.c,v 1.23 1996/02/05 01:54:07 christos Exp $ */
/*
@@ -99,6 +99,33 @@ int doclustered_pageout = 1;
#endif
/*
+ * Activate the pageout daemon and sleep awaiting more free memory
+ */
+void
+vm_wait(msg)
+	char *msg;
+{
+ int timo = 0;
+
+	if (curproc == pageout_daemon) {
+ /*
+ * We might be toast here, but IF some paging operations
+ * are pending then pages will magically appear. We
+ * usually can't return an error because callers of
+ * malloc who can wait generally don't check for
+ * failure.
+ *
+ * Only the pageout_daemon wakes up this channel!
+ */
+ printf("pageout daemon has stalled\n");
+ timo = hz >> 3;
+ }
+ simple_lock(&vm_pages_needed_lock);
+ thread_wakeup(&vm_pages_needed);
+ thread_sleep_msg(&cnt.v_free_count, &vm_pages_needed_lock, FALSE, msg,
+ timo);
+}
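Callers that must not fail are expected to loop: attempt the allocation, and when no page is available call vm_wait() to wake the daemon and sleep on cnt.v_free_count. A hedged sketch of that pattern (the helper name and wait message are made up):

static vm_page_t
alloc_page_waitok(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, offset)) == NULL)
		vm_wait("pgwait");	/* sleep until pages are freed */
	return (m);
}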
+
+/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
void
@@ -202,7 +229,6 @@ vm_pageout_scan()
object = m->object;
if (!vm_object_lock_try(object))
continue;
- cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
if (object->pager &&
vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
@@ -294,9 +320,10 @@ vm_pageout_page(m, object)
vm_object_unlock(object);
/*
- * Do a wakeup here in case the following operations block.
+ * We _used_ to wakeup page consumers here, "in case the following
+ * operations block". That leads to livelock if the pageout fails,
+ * which is actually quite a common thing for NFS paging.
*/
- thread_wakeup(&cnt.v_free_count);
/*
* If there is no pager for the page, use the default pager.
@@ -317,6 +344,9 @@ vm_pageout_page(m, object)
switch (pageout_status) {
case VM_PAGER_OK:
case VM_PAGER_PEND:
+ /* hmm, don't wakeup if memory is _very_ low? */
+ thread_wakeup(&cnt.v_free_count);
+ cnt.v_pageouts++;
cnt.v_pgpgout++;
m->flags &= ~PG_LAUNDRY;
break;
@@ -340,7 +370,7 @@ vm_pageout_page(m, object)
* XXX could get stuck here.
*/
(void)tsleep((caddr_t)&vm_pages_needed, PZERO|PCATCH,
- "pageout", hz);
+ "pageout", hz>>3);
break;
}
case VM_PAGER_FAIL:
@@ -391,6 +421,7 @@ vm_pageout_cluster(m, object)
vm_page_t plist[MAXPOCLUSTER], *plistp, p;
int postatus, ix, count;
+ cnt.v_pageouts++;
/*
* Determine the range of pages that can be part of a cluster
* for this object/offset. If it is only our single page, just
@@ -524,7 +555,8 @@ again:
void
vm_pageout()
{
- (void)spl0();
+ pageout_daemon = curproc;
+ (void) spl0();
/*
* Initialize some paging parameters.
@@ -557,7 +589,8 @@ vm_pageout()
simple_lock(&vm_pages_needed_lock);
while (TRUE) {
- thread_sleep(&vm_pages_needed, &vm_pages_needed_lock, FALSE);
+ thread_sleep_msg(&vm_pages_needed, &vm_pages_needed_lock,
+ FALSE, "paged", 0);
/*
* Compute the inactive target for this scan.
* We need to keep a reasonable amount of memory in the
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 566be52e9eb..7451f5f75b0 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_pageout.h,v 1.5 1997/11/06 05:59:37 csapuntz Exp $ */
-/* $NetBSD: vm_pageout.h,v 1.11 1995/03/26 20:39:14 jtc Exp $ */
+/* $OpenBSD: vm_pageout.h,v 1.6 1998/03/01 00:38:22 niklas Exp $ */
+/* $NetBSD: vm_pageout.h,v 1.14 1998/02/10 14:09:04 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -76,6 +76,9 @@
extern int vm_pages_needed; /* should be some "event" structure */
simple_lock_data_t vm_pages_needed_lock;
+struct proc *pageout_daemon; /* watch for this in vm_fault()!! */
+u_int32_t vm_pages_reserved; /* i.e., reserved for pageout_daemon */
+
/*
* Exported routines.
@@ -85,15 +88,12 @@ simple_lock_data_t vm_pages_needed_lock;
* Signal pageout-daemon and wait for it.
*/
-#define VM_WAIT { \
- simple_lock(&vm_pages_needed_lock); \
- thread_wakeup(&vm_pages_needed); \
- thread_sleep(&cnt.v_free_count, \
- &vm_pages_needed_lock, FALSE); \
- }
+#if !defined(UVM)
#ifdef _KERNEL
+void vm_wait __P((char *));
void vm_pageout __P((void));
void vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
#endif
+#endif
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 6b3ab9059c8..a612d9f2cee 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pager.h,v 1.5 1997/11/06 05:59:38 csapuntz Exp $ */
+/* $OpenBSD: vm_pager.h,v 1.6 1998/03/01 00:38:24 niklas Exp $ */
/* $NetBSD: vm_pager.h,v 1.10 1995/03/26 20:39:15 jtc Exp $ */
/*
@@ -105,19 +105,24 @@ struct pagerops {
/*
* get/put return values
- * OK operation was successful
- * BAD specified data was out of the accepted range
- * FAIL specified data was in range, but doesn't exist
- * PEND operations was initiated but not completed
- * ERROR error while accessing data that is in range and exists
- * AGAIN temporary resource shortage prevented operation from happening
+ * OK operation was successful
+ * BAD specified data was out of the accepted range
+ * FAIL specified data was in range, but doesn't exist
+ *	PEND	 operation was initiated but not completed
+ * ERROR error while accessing data that is in range and exists
+ * AGAIN temporary resource shortage prevented operation from happening
+ * UNLOCK unlock the map and try again
+ * REFAULT [uvm_fault internal use only!] unable to relock data structures,
+ *	    thus the mapping needs to be reverified before we can proceed
*/
-#define VM_PAGER_OK 0
-#define VM_PAGER_BAD 1
-#define VM_PAGER_FAIL 2
-#define VM_PAGER_PEND 3
-#define VM_PAGER_ERROR 4
-#define VM_PAGER_AGAIN 5
+#define VM_PAGER_OK 0
+#define VM_PAGER_BAD 1
+#define VM_PAGER_FAIL 2
+#define VM_PAGER_PEND 3
+#define VM_PAGER_ERROR 4
+#define VM_PAGER_AGAIN 5
+#define VM_PAGER_UNLOCK 6
+#define VM_PAGER_REFAULT 7
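Callers of the pager get/put entry points dispatch on these codes, as the switch in vm_pageout_page() does. A minimal sketch, assuming the classic vm_pager_put(pager, page, sync) entry point declared later in this header (the helper name is hypothetical):

static void
pageout_status_sketch(pager, m)
	vm_pager_t pager;
	vm_page_t m;
{
	switch (vm_pager_put(pager, m, FALSE)) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:
		break;			/* page is clean, or will be soon */
	case VM_PAGER_AGAIN:
		break;			/* transient shortage; retry later */
	default:
		break;			/* BAD, FAIL or ERROR */
	}
}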
#ifdef _KERNEL
extern struct pagerops *dfltpagerops;
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
index 74124a226cf..97eeaaf24b1 100644
--- a/sys/vm/vm_param.h
+++ b/sys/vm/vm_param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_param.h,v 1.13 1997/12/12 08:46:00 deraadt Exp $ */
+/* $OpenBSD: vm_param.h,v 1.14 1998/03/01 00:38:25 niklas Exp $ */
/* $NetBSD: vm_param.h,v 1.12 1995/03/26 20:39:16 jtc Exp $ */
/*
@@ -96,10 +96,17 @@ typedef int boolean_t;
* or PAGE_SHIFT. The fact they are variables is hidden here so that
* we can easily make them constant if we so desire.
*/
+#if defined(UVM)
+#define PAGE_SIZE uvmexp.pagesize /* size of page */
+#define PAGE_MASK uvmexp.pagemask /* size of page - 1 */
+#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */
+#else
#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
-#ifdef _KERNEL
+#endif
+
+#if defined(_KERNEL) && !defined(UVM)
extern vm_size_t page_mask;
extern int page_shift;
#endif
@@ -110,6 +117,7 @@ extern int page_shift;
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#define VM_PSSTRINGS 3 /* PSSTRINGS */
+#if !defined(UVM)
#define VM_MAXID 4 /* number of valid vm ids */
#define CTL_VM_NAMES { \
@@ -119,6 +127,22 @@ extern int page_shift;
{ "psstrings", CTLTYPE_STRUCT }, \
}
+#else
+
+#define VM_UVMEXP 4 /* struct uvmexp */
+#define VM_MAXID 5 /* number of valid vm ids */
+
+#define CTL_VM_NAMES { \
+ { 0, 0 }, \
+ { "vmmeter", CTLTYPE_STRUCT }, \
+ { "loadavg", CTLTYPE_STRUCT }, \
+ { "psstrings", CTLTYPE_STRUCT }, \
+ { "uvmexp", CTLTYPE_STRUCT }, \
+}
+
+#endif
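With UVM configured, userland can read the new node with sysctl(3) using the mib { CTL_VM, VM_UVMEXP }. A hedged sketch, assuming struct uvmexp is exported to userland via <uvm/uvm_extern.h> (error handling left to the caller):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <uvm/uvm_extern.h>

/* illustrative only: fetch the UVM counters from a user program */
static int
fetch_uvmexp_sketch(struct uvmexp *up)
{
	int mib[2] = { CTL_VM, VM_UVMEXP };
	size_t len = sizeof(*up);

	return (sysctl(mib, 2, up, &len, NULL, 0));
}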
+
+
struct _ps_strings {
void *val;
};