summary refs log tree commit diff
path: root/sys/arch/hppa64/include/pmap.h
diff options
context:
space:
mode:
author Michael Shalayeff <mickey@cvs.openbsd.org> 2005-04-01 10:40:50 +0000
committer Michael Shalayeff <mickey@cvs.openbsd.org> 2005-04-01 10:40:50 +0000
commit 086e003dd29b7c42921a44eb7af336bacfc10d4d (patch)
tree a901551a395f9d10ee4d44cef80ec86a4dd5ae0c /sys/arch/hppa64/include/pmap.h
parent 4082d6812410636300be23e2010448febcf6e5d9 (diff)
small batch early bottling hppa64 port
matured in mighty ukrainian oak for 23 months
Diffstat (limited to 'sys/arch/hppa64/include/pmap.h')
-rw-r--r-- sys/arch/hppa64/include/pmap.h | 117
1 files changed, 117 insertions, 0 deletions
diff --git a/sys/arch/hppa64/include/pmap.h b/sys/arch/hppa64/include/pmap.h
new file mode 100644
index 00000000000..1014076b7e2
--- /dev/null
+++ b/sys/arch/hppa64/include/pmap.h
@@ -0,0 +1,117 @@
+/* $OpenBSD: pmap.h,v 1.1 2005/04/01 10:40:48 mickey Exp $ */
+
+/*
+ * Copyright (c) 2005 Michael Shalayeff
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+
+#include <machine/pte.h>
+#include <uvm/uvm_pglist.h>
+#include <uvm/uvm_object.h>
+
+/*
+ * Physical map: one per address space.  Machine-dependent half of a
+ * UVM vmspace for hppa64.
+ */
+struct pmap {
+ simple_lock_data_t pm_lock; /* lock on the mutable fields below */
+ int pm_refcount; /* reference count */
+ struct vm_page *pm_ptphint; /* presumably the most recently used page table page — TODO confirm in pmap.c */
+ struct pglist pm_pglist; /* list of pages backing this pmap's page tables */
+ volatile u_int32_t *pm_pdir; /* page dir (read-only after create) */
+ pa_space_t pm_space; /* space id (read-only after create) */
+
+ struct pmap_statistics pm_stats; /* resident/wired counts; see pmap_resident_count() */
+};
+typedef struct pmap *pmap_t;
+
+/*
+ * One pv_entry exists for each active mapping of a managed page;
+ * entries for the same physical page are chained through pv_next.
+ */
+struct pv_entry { /* locked by its list's pvh_lock */
+ struct pv_entry *pv_next; /* next mapping of the same page */
+ struct pmap *pv_pmap; /* the pmap */
+ vaddr_t pv_va; /* the virtual address */
+ struct vm_page *pv_ptp; /* the vm_page of the PTP */
+};
+
+#ifdef _KERNEL
+
+/* the statically allocated kernel pmap; see pmap_kernel() below */
+extern struct pmap kernel_pmap_store;
+
+/*
+ * pool quickmaps
+ *
+ * Direct PA<->VA conversion: pool pages are addressed through the
+ * physical address itself, so no mapping needs to be entered.
+ */
+#define pmap_map_direct(pg) ((vaddr_t)VM_PAGE_TO_PHYS(pg))
+#define pmap_unmap_direct(va) PHYS_TO_VM_PAGE((paddr_t)(va))
+#define __HAVE_PMAP_DIRECT
+
+/*
+ * according to the parisc manual aliased va's should be
+ * different by high 12 bits only.
+ * PMAP_PREFER rounds the hint *h up to the next address that is
+ * congruent to offset o modulo HPPA_PGALIAS.
+ */
+#define PMAP_PREFER(o,h) do { \
+ vaddr_t pmap_prefer_hint; \
+ pmap_prefer_hint = (*(h) & HPPA_PGAMASK) | ((o) & HPPA_PGAOFF); \
+ if (pmap_prefer_hint < *(h)) \
+ pmap_prefer_hint += HPPA_PGALIAS; \
+ *(h) = pmap_prefer_hint; \
+} while(0)
+
+#define PMAP_GROWKERNEL
+#define PMAP_STEAL_MEMORY
+
+/* space id -> protection id; NOTE(review): shift/bias convention, confirm against pmap.c users */
+#define pmap_sid2pid(s) (((s) + 1) << 1)
+#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_update(pm) (void)(pm)
+#define pmap_copy(dpmap,spmap,da,len,sa)
+
+/* reference/modify tracking via PTE bits; pmap_changebit(pg, set, clear) argument order assumed — TODO confirm */
+#define pmap_clear_modify(pg) pmap_changebit(pg, 0, PTE_DIRTY)
+#define pmap_clear_reference(pg) pmap_changebit(pg, PTE_REFTRAP, 0)
+#define pmap_is_modified(pg) pmap_testbit(pg, PTE_DIRTY)
+#define pmap_is_referenced(pg) pmap_testbit(pg, PTE_REFTRAP)
+#define pmap_phys_address(ppn) ((ppn) << PAGE_SHIFT)
+
+#define pmap_proc_iflush(p,va,len) /* nothing */
+#define pmap_unuse_final(p) /* nothing */
+
+void pmap_bootstrap(vaddr_t); /* early MD initialization of the kernel pmap */
+boolean_t pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t); /* set/clear PTE bits in all mappings of pg */
+boolean_t pmap_testbit(struct vm_page *, pt_entry_t); /* test a PTE bit across pg's mappings */
+void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t); /* downgrade a VA range to read-only */
+void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva); /* unmap [sva, eva) */
+void pmap_page_remove(struct vm_page *pg); /* remove every mapping of pg */
+
+/*
+ * Lower the protection of every mapping of a physical page to `prot'.
+ * If write access is retained this is a no-op; if only read/execute
+ * remains, the write bit is cleared in all mappings; if no access
+ * remains, all mappings of the page are removed.
+ */
+static __inline void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+ if ((prot & UVM_PROT_WRITE) == 0) {
+ if (prot & (UVM_PROT_RX))
+ pmap_changebit(pg, 0, PTE_WRITE);
+ else
+ pmap_page_remove(pg);
+ }
+}
+
+/*
+ * Lower the protection of the VA range [sva, eva) in `pmap' to `prot'.
+ * Retaining write access is a no-op; read/execute only write-protects
+ * the range; no access at all removes the mappings entirely.
+ */
+static __inline void
+pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
+{
+ if ((prot & UVM_PROT_WRITE) == 0) {
+ if (prot & (UVM_PROT_RX))
+ pmap_write_protect(pmap, sva, eva, prot);
+ else
+ pmap_remove(pmap, sva, eva);
+ }
+}
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_PMAP_H_ */