path: root/sys/vm/vm_extern.h
author    Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 00:38:26 +0000
committer Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 00:38:26 +0000
commit    b92b419a6a8ef401c8a4e022115bf3e18426eea0 (patch)
tree      ff214e6b334202d15c2b303427a2a5d2f16af4f0 /sys/vm/vm_extern.h
parent    4f215f167e35940141001f7f31bfce350266d153 (diff)
Merge of MACHINE_NEW_CONTIG (aka MNN) code from Chuck Cranor
<chuck@openbsd.org>.  This code is as yet disabled on all platforms, and
actually not yet supported on more than mvme68k, although other platforms
are expected soon, as code is already available.  This code makes handling
of multiple physical memory regions consistent across all platforms, while
keeping the performance of maintaining a single contiguous memory chunk.
It is also a requirement for the upcoming UVM replacement VM system.

What I did in this merge: just declared the pmap_map function in an MD
include file per port that needs it.  It's not an exported pmap interface,
says Chuck.  It ended up in different include files on different ports, as
I tried to follow the current policy on a per-arch basis.
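The commit message describes moving the pmap_map declaration into a
machine-dependent header on each port that needs it.  As a rough
illustration only, such a declaration could look like the sketch below;
the header name and parameter names are assumptions, following the
classic Mach-derived pmap_map interface rather than any specific port's
file:

    /* Hypothetical per-port declaration, e.g. in <machine/pmap.h>.
     * pmap_map() enters the physical range [spa, epa) into kernel
     * virtual space starting at va with protection prot, and returns
     * the next unused virtual address. */
    vm_offset_t pmap_map __P((vm_offset_t va, vm_offset_t spa,
            vm_offset_t epa, int prot));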
Diffstat (limited to 'sys/vm/vm_extern.h')
-rw-r--r--  sys/vm/vm_extern.h  31
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 86462654a49..d6265da7915 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_extern.h,v 1.14 1997/11/06 05:59:31 csapuntz Exp $ */
+/* $OpenBSD: vm_extern.h,v 1.15 1998/03/01 00:38:02 niklas Exp $ */
/* $NetBSD: vm_extern.h,v 1.20 1996/04/23 12:25:23 christos Exp $ */
/*-
@@ -45,7 +45,7 @@ struct mount;
struct vnode;
struct core;
-#ifdef KGDB
+#if defined(KGDB) && !defined(UVM)
void chgkprot __P((caddr_t, int, int));
#endif
@@ -65,9 +65,13 @@ int sstk __P((struct proc *, void *, int *));
#endif
void assert_wait __P((void *, boolean_t));
+#if !defined(UVM)
int grow __P((struct proc *, vm_offset_t));
+#endif
void iprintf __P((int (*)(const char *, ...), const char *, ...));
+#if !defined(UVM)
int kernacc __P((caddr_t, int, int));
+#endif
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
@@ -80,26 +84,33 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
+#if !defined(UVM)
void munmapfd __P((struct proc *, int));
+#endif
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
+#if !defined(UVM)
#ifdef __GNUC__
void scheduler __P((void)) __attribute ((noreturn));
#else
void scheduler __P((void));
#endif
+#endif
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
+#if !defined(UVM)
void swapout __P((struct proc *));
void swapout_threads __P((void));
+#endif
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
-void thread_block __P((void));
+void thread_block __P((char *));
void thread_sleep_msg __P((void *, simple_lock_t,
- boolean_t, char *));
+ boolean_t, char *, int));
+
/* backwards compatibility */
#define thread_sleep(event, lock, ruptible) \
@@ -110,6 +121,7 @@ void thread_sleep_msg __P((void *, simple_lock_t,
* was solely a wrapper around wakeup.
*/
#define thread_wakeup wakeup
+#if !defined(UVM)
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
boolean_t));
@@ -118,35 +130,46 @@ int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
struct core *));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
+#endif
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
+#if !defined(UVM)
#ifdef __FORK_BRAINDAMAGE
int vm_fork __P((struct proc *, struct proc *));
#else
void vm_fork __P((struct proc *, struct proc *));
#endif
+#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
+#if !defined(UVM)
void vm_init_limits __P((struct proc *));
+#endif
void vm_mem_init __P((void));
+#if !defined(UVM)
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
+#endif
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
+#if !defined(UVM)
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
+#endif
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
+#if !defined(UVM)
int vslock __P((caddr_t, u_int));
int vsunlock __P((caddr_t, u_int));
+#endif
/* Machine dependent portion */
void vmapbuf __P((struct buf *, vm_size_t));
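Besides the UVM guards, the diff changes two synchronization prototypes:
thread_block() now takes a wait-message string, and thread_sleep_msg()
gains a trailing int argument.  A hedged sketch of how a caller would be
updated follows; the identifiers and the reading of the new int as a
tsleep(9)-style timeout are assumptions, not taken from the tree:

    /* Old calls, matching the removed prototypes: */
    thread_block();
    thread_sleep_msg(event, &slock, TRUE, "vmwait");

    /* New calls: thread_block() takes a wait message, and
     * thread_sleep_msg() takes an extra int, assumed here to be a
     * tsleep(9)-style timeout where 0 means no timeout. */
    thread_block("thrdblk");
    thread_sleep_msg(event, &slock, TRUE, "vmwait", 0);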