Diffstat (limited to 'sys/arch/vax/include/pmap.h')
-rw-r--r--  sys/arch/vax/include/pmap.h  125
1 file changed, 75 insertions(+), 50 deletions(-)
diff --git a/sys/arch/vax/include/pmap.h b/sys/arch/vax/include/pmap.h
index 3022c97e207..412aefaff0f 100644
--- a/sys/arch/vax/include/pmap.h
+++ b/sys/arch/vax/include/pmap.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: pmap.h,v 1.8 1998/03/01 12:09:02 maja Exp $ */
-/* $NetBSD: pmap.h,v 1.19 1997/07/06 22:38:29 ragge Exp $ */
+/* $OpenBSD: pmap.h,v 1.9 2000/04/26 03:08:42 bjc Exp $ */
+/* $NetBSD: pmap.h,v 1.37 1999/08/01 13:48:07 ragge Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -44,28 +44,37 @@
*/
-#ifndef PMAP_H
-#define PMAP_H
+#ifndef PMAP_H
+#define PMAP_H
+#include <machine/types.h>
+#include <machine/pte.h>
#include <machine/mtpr.h>
+#include <machine/pcb.h>
-
-#define VAX_PAGE_SIZE NBPG
-#define VAX_SEG_SIZE NBSEG
+/*
+ * Some constants to make life easier.
+ */
+#define LTOHPS (PGSHIFT - VAX_PGSHIFT)
+#define LTOHPN (1 << LTOHPS)
+#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
+#define NPTEPGS (USRPTSIZE / (sizeof(struct pte) * LTOHPN))
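
For concreteness, with the usual VAX values these constants work out as
below (the numbers are assumptions for illustration, not part of this diff):

	/* Assumed: PGSHIFT = 12 (4 KB logical page), VAX_PGSHIFT = 9
	 * (512-byte hardware page).  Then:
	 *   LTOHPS = 12 - 9 = 3
	 *   LTOHPN = 1 << 3 = 8 hardware pages per logical page */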
/*
- * Pmap structure
- *
- * p0br == PR_P0BR in user struct, p0br is also == SBR in pmap_kernel()
- * p1br is the same for stack space, stack is base of alloced pte mem
+ * Pmap structure
+ * pm_stack holds the lowest allocated memory for the process stack.
*/
typedef struct pmap {
- vm_offset_t pm_stack; /* Base of alloced p1 pte space */
- struct pcb *pm_pcb; /* Pointer to PCB for this pmap */
- int ref_count; /* reference count */
- struct pmap_statistics stats; /* statistics */
- simple_lock_data_t pm_lock; /* lock on pmap */
+ vaddr_t pm_stack; /* Base of alloced p1 pte space */
+ int ref_count; /* reference count */
+ struct pte *pm_p0br; /* page 0 base register */
+ long pm_p0lr; /* page 0 length register */
+ struct pte *pm_p1br; /* page 1 base register */
+ long pm_p1lr; /* page 1 length register */
+ int pm_lock; /* Lock entry in MP environment */
+ struct pmap_statistics pm_stats; /* Some statistics */
+ u_char pm_refcnt[NPTEPGS]; /* Refcount per pte page */
} *pmap_t;
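
A minimal sketch of how the new base/length pair is meant to be consulted,
assuming one PTE per 512-byte hardware page; the helper name is hypothetical,
nothing like it is declared in this header:

	/* Hypothetical helper: find the PTE mapping a P0-space virtual
	 * address.  pm_p0lr counts the valid P0 PTEs. */
	static __inline struct pte *
	vatopte_p0(struct pmap *pm, vaddr_t va)
	{
		long idx = va >> VAX_PGSHIFT;	/* one PTE per hw page */

		if (idx >= pm->pm_p0lr)
			return NULL;		/* beyond mapped P0 region */
		return &pm->pm_p0br[idx];
	}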
/*
@@ -73,55 +82,71 @@ typedef struct pmap {
* mappings of that page. An entry is a pv_entry_t, the list is pv_table.
*/
-typedef struct pv_entry {
- struct pv_entry *pv_next; /* next pv_entry */
- struct pmap *pv_pmap;/* if not NULL, pmap where mapping lies */
- vm_offset_t pv_va; /* virtual address for mapping */
- int pv_flags; /* flags */
-} *pv_entry_t;
-
-#define PV_REF 0x00000001 /* Simulated phys ref bit */
-
-#define PHYS_TO_PV(phys_page) (&pv_table[((phys_page)>>PAGE_SHIFT)])
+struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pte *pv_pte; /* pte for this physical page */
+ struct pmap *pv_pmap; /* pmap this entry belongs to */
+ int pv_attr; /* write/modified bits */
+};
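
A sketch of how such a chain is typically walked; the helper below is
illustrative only and is not declared anywhere in this diff:

	/* Illustrative: merge the recorded attribute (write/modified)
	 * bits of every mapping of one physical page. */
	static __inline int
	pv_attrs(struct pv_entry *pv)
	{
		int attr = 0;

		for (; pv != NULL; pv = pv->pv_next)
			attr |= pv->pv_attr;
		return attr;
	}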
/* ROUND_PAGE used before vm system is initialized */
-#define ROUND_PAGE(x) (((uint)(x) + PAGE_SIZE-1)& ~(PAGE_SIZE - 1))
-#define TRUNC_PAGE(x) ((uint)(x) & ~(PAGE_SIZE - 1))
+#define ROUND_PAGE(x) (((uint)(x) + PGOFSET) & ~PGOFSET)
+#define TRUNC_PAGE(x) ((uint)(x) & ~PGOFSET)
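
For example, assuming 4 KB logical pages (PGOFSET == 0xfff):

	/* ROUND_PAGE(0x1234) == 0x2000, TRUNC_PAGE(0x1234) == 0x1000 */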
/* Mapping macros used when allocating SPT */
-#define MAPVIRT(ptr, count) \
+#define MAPVIRT(ptr, count) \
(vm_offset_t)ptr = virtual_avail; \
- virtual_avail += (count) * NBPG;
+ virtual_avail += (count) * VAX_NBPG;
-#define MAPPHYS(ptr, count, perm) \
- pmap_map(virtual_avail, avail_start, avail_start + \
- (count) * NBPG, perm); \
- (vm_offset_t)ptr = virtual_avail; \
- virtual_avail += (count) * NBPG; \
- avail_start += (count) * NBPG;
+#define MAPPHYS(ptr, count, perm) \
+ (vm_offset_t)ptr = avail_start + KERNBASE; \
+ avail_start += (count) * VAX_NBPG;
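
A bootstrap-time usage sketch; the function and variable names and the page
count are assumptions, and note that the rewritten MAPPHYS no longer uses
its perm argument:

	/* Illustrative: how pmap_bootstrap() might carve out memory. */
	void
	bootstrap_sketch(void)
	{
		char *pv_area;		/* physical, via KERNBASE direct map */
		struct pte *sptp;	/* virtual space only, no backing yet */
		int n = 16;		/* assumed number of hardware pages */

		MAPPHYS(pv_area, n, VM_PROT_READ|VM_PROT_WRITE);
		MAPVIRT(sptp, n);
	}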
#ifdef _KERNEL
-#define pa_index(pa) atop(pa)
-#define pa_to_pvh(pa) (&pv_table[atop(pa)])
extern struct pmap kernel_pmap_store;
-#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_kernel() (&kernel_pmap_store)
-#endif /* _KERNEL */
+/*
+ * Real nice (fast) routines to get the virtual address of a physical page
+ * (and vice versa).
+ */
+#define PMAP_MAP_POOLPAGE(pa) ((pa) | KERNBASE)
+#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
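
Because physical memory is direct-mapped at KERNBASE (0x80000000 on the VAX),
both directions are a single bit operation, e.g.:

	/* PMAP_MAP_POOLPAGE(0x00123000)   == 0x80123000
	 * PMAP_UNMAP_POOLPAGE(0x80123000) == 0x00123000 */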
+
+#define PMAP_STEAL_MEMORY
+
+/*
+ * This is by far the most used pmap routine. Make it inline.
+ */
/* Routines that are best to define as macros */
-#define pmap_copy(a,b,c,d,e) /* Dont do anything */
-#define pmap_update() mtpr(0,PR_TBIA) /* Update buffes */
-#define pmap_pageable(a,b,c,d) /* Dont do anything */
-#define pmap_collect(pmap) /* No need so far */
-#define pmap_reference(pmap) if(pmap) (pmap)->ref_count++
-#define pmap_pinit(pmap) (pmap)->ref_count=1;
-#define pmap_phys_address(phys) ((u_int)(phys)<<PAGE_SHIFT)
+#define pmap_phys_address(phys) ((u_int)(phys) << PGSHIFT)
+#define pmap_unwire(pmap, v) /* no need */
+#define pmap_copy(a,b,c,d,e) /* Don't do anything */
+#define pmap_update() mtpr(0,PR_TBIA) /* Update translation buffers */
+#define pmap_collect(pmap) /* No need so far */
+#define pmap_remove(pmap, start, slut) pmap_protect(pmap, start, slut, 0)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_deactivate(p) /* Don't do anything */
+#define pmap_reference(pmap) (pmap)->ref_count++
+
+/* These can be done as efficient inline macros */
+#define pmap_copy_page(src, dst) \
+ __asm__("addl3 $0x80000000,%0,r0;addl3 $0x80000000,%1,r1; \
+ movc3 $4096,(r0),(r1)" \
+ :: "r"(src),"r"(dst):"r0","r1","r2","r3","r4","r5");
+
+#define pmap_zero_page(phys) \
+ __asm__("addl3 $0x80000000,%0,r0;movc5 $0,(r0),$0,$4096,(r0)" \
+ :: "r"(phys): "r0","r1","r2","r3","r4","r5");
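
For reference, a rough C rendering of the two asm macros above, assuming the
KERNBASE direct map and 4 KB logical pages (the asm versions exist because
movc3/movc5 are fast VAX string instructions):

	/* Hypothetical C equivalents, illustration only. */
	static __inline void
	pmap_copy_page_c(paddr_t src, paddr_t dst)
	{
		memcpy((void *)(dst + KERNBASE), (void *)(src + KERNBASE), 4096);
	}

	static __inline void
	pmap_zero_page_c(paddr_t phys)
	{
		memset((void *)(phys + KERNBASE), 0, 4096);
	}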
/* Prototypes */
void pmap_bootstrap __P((void));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
-void pmap_expandp0 __P((struct pmap *, int));
-void pmap_expandp1 __P((struct pmap *));
+vaddr_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+void pmap_pinit __P((pmap_t));
+
+#endif /* _KERNEL */
+
#endif /* PMAP_H */