summaryrefslogtreecommitdiff
path: root/sys/vm/pmap.h
diff options
context:
space:
mode:
authorNiklas Hallqvist <niklas@cvs.openbsd.org>1998-03-01 00:38:26 +0000
committerNiklas Hallqvist <niklas@cvs.openbsd.org>1998-03-01 00:38:26 +0000
commitb92b419a6a8ef401c8a4e022115bf3e18426eea0 (patch)
treeff214e6b334202d15c2b303427a2a5d2f16af4f0 /sys/vm/pmap.h
parent4f215f167e35940141001f7f31bfce350266d153 (diff)
Merge of MACHINE_NEW_CONTIG (aka MNN) code from Chuck Cranor,
<chuck@openbsd.org>. This code is as yet disabled on all platforms, actually not yet supported on more than mvme68k, although other platforms are expected soon, as code is already available. This code makes handling of multiple physical memory regions consistent over all platforms, as well as keeping the performance of maintaining a single continuous memory chunk. It is also a requirement for the upcoming UVM replacement VM system. What I did in this merge: just declared the pmap_map function in a MD include file per port that needs it. It's not an exported pmap interface, says Chuck. It ended up in different include files on different ports, as I tried to follow the current policy on a per-arch basis.
Diffstat (limited to 'sys/vm/pmap.h')
-rw-r--r--sys/vm/pmap.h80
1 files changed, 73 insertions, 7 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 07053e3047a..8edd35b77a8 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
+/* $OpenBSD: pmap.h,v 1.5 1998/03/01 00:37:58 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */
/*
@@ -87,6 +87,26 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
+/*
+ * PMAP_PGARG hack
+ *
+ * operations that take place on managed pages used to take PAs.
+ * this caused us to translate the PA back to a page (or pv_head).
+ * PMAP_NEW avoids this by passing the vm_page in (pv_head should be
+ * pointed to by vm_page (or be a part of it)).
+ *
+ * applies to: pmap_page_protect, pmap_is_referenced, pmap_is_modified,
+ * pmap_clear_reference, pmap_clear_modify.
+ *
+ * the latter two functions are boolean_t in PMAP_NEW. they return
+ * TRUE if something was cleared.
+ */
+#if defined(PMAP_NEW)
+#define PMAP_PGARG(PG) (PG)
+#else
+#define PMAP_PGARG(PG) (VM_PAGE_TO_PHYS(PG))
+#endif
+
#ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */
#ifdef _KERNEL
__BEGIN_DECLS
@@ -96,33 +116,74 @@ void *pmap_bootstrap_alloc __P((int));
void pmap_bootstrap( /* machine dependent */ );
#endif
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
+
+#if defined(PMAP_NEW)
+#if !defined(pmap_clear_modify)
+boolean_t pmap_clear_modify __P((struct vm_page *));
+#endif
+#if !defined(pmap_clear_reference)
+boolean_t pmap_clear_reference __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
+#endif /* PMAP_NEW */
+
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t,
pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
+#if defined(PMAP_NEW)
+struct pmap *pmap_create __P((void));
+#else
pmap_t pmap_create __P((vm_size_t));
+#endif
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
-#ifndef pmap_page_index
-int pmap_page_index __P((vm_offset_t));
+#if defined(PMAP_NEW) && defined(PMAP_GROWKERNEL)
+void pmap_growkernel __P((vm_offset_t));
#endif
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((void));
#endif
+
+#if defined(PMAP_NEW)
+void pmap_kenter_pa __P((vm_offset_t, vm_offset_t, vm_prot_t));
+void pmap_kenter_pgs __P((vm_offset_t, struct vm_page **, int));
+void pmap_kremove __P((vm_offset_t, vm_size_t));
+#if !defined(pmap_is_modified)
+boolean_t pmap_is_modified __P((struct vm_page *));
+#endif
+#if !defined(pmap_is_referenced)
+boolean_t pmap_is_referenced __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+#endif /* PMAP_NEW */
+
+#if !defined(MACHINE_NEW_NONCONTIG)
+#ifndef pmap_page_index
+int pmap_page_index __P((vm_offset_t));
+#endif
+#endif /* ! MACHINE_NEW_NONCONTIG */
+
+#if defined(PMAP_NEW)
+void pmap_page_protect __P((struct vm_page *, vm_prot_t));
+#else
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
+#endif
+
void pmap_pageable __P((pmap_t,
vm_offset_t, vm_offset_t, boolean_t));
+#if !defined(pmap_phys_address)
vm_offset_t pmap_phys_address __P((int));
+#endif
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t));
@@ -135,10 +196,15 @@ void pmap_zero_page __P((vm_offset_t));
#ifdef MACHINE_NONCONTIG
u_int pmap_free_pages __P((void));
boolean_t pmap_next_page __P((vm_offset_t *));
-void pmap_startup __P((vm_offset_t *, vm_offset_t *));
-vm_offset_t pmap_steal_memory __P((vm_size_t));
+#endif
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+#if defined(PMAP_STEAL_MEMORY)
+vm_offset_t pmap_steal_memory __P((vm_size_t, vm_offset_t *,
+ vm_offset_t *));
+#else
void pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#endif
+#endif
__END_DECLS
#endif /* kernel*/
#endif /* PMAP_EXCLUDE_DECLS */