author    Miod Vallat <miod@cvs.openbsd.org>  2001-12-24 04:12:41 +0000
committer Miod Vallat <miod@cvs.openbsd.org>  2001-12-24 04:12:41 +0000
commit    523094721d7129d36b81523b4fe6bb581f2a21d8 (patch)
tree      7ca56319ca0adf43dbbda64735db235241532ee0 /sys/arch
parent    46ffb725921a53936f9fd04901c908c61d3c1bbc (diff)
- completely change the mmu segment and page table structure definitions,
  to use constant bitmasks instead of bitfields (a before/after sketch of
  the two styles follows the diffstat below).
- remove unnecessary (as long as we are not running SMP) locks on the
  physsegs.
- update the pmap code to take these changes into account, and gratuitously
  change several names and code paths to be closer to existing m68k pmaps.
  It's a bit faster now.
- change pmap.c's usage of vm_{offset,size}_t to {p,v}{addr,size}_t.
- remove dead or unused stuff from pmap.c, fix typos, etc.

Tested on 187 and 188, should not make things worse for 197.
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/mvme88k/include/mmu.h      |  292
-rw-r--r--  sys/arch/mvme88k/include/pmap.h     |   71
-rw-r--r--  sys/arch/mvme88k/include/pte.h      |  138
-rw-r--r--  sys/arch/mvme88k/include/vmparam.h  |    3
-rw-r--r--  sys/arch/mvme88k/mvme88k/m88110.c   |   94
-rw-r--r--  sys/arch/mvme88k/mvme88k/m8820x.c   |   99
-rw-r--r--  sys/arch/mvme88k/mvme88k/machdep.c  |    8
-rw-r--r--  sys/arch/mvme88k/mvme88k/pmap.c     | 1681
-rw-r--r--  sys/arch/mvme88k/mvme88k/trap.c     |   12
9 files changed, 835 insertions(+), 1563 deletions(-)
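
The heart of the change is visible in the mmu.h diff below: structured
bitfields give way to flat 32-bit words tested against constant masks.
Here is a minimal user-space sketch of the before/after styles, not code
from the tree; the bitfield layout is a simplified stand-in for the
removed struct pt_entry, while PG_V, PG_PROT and PG_FRAME carry the
values this patch defines.

#include <stdio.h>
#include <stdint.h>

/*
 * Old style (simplified; not the exact struct pt_entry being removed):
 * the descriptor layout is implicit in a compiler-dependent bitfield.
 */
struct pte_bitfield {
	unsigned int pfn:20;	/* page frame number */
	unsigned int rsv:10;	/* everything else, collapsed here */
	unsigned int prot:1;	/* write protect */
	unsigned int dtype:1;	/* valid */
};

/*
 * New style: a descriptor is a plain word probed with constant masks.
 * The values below are those defined in the new mmu.h.
 */
typedef uint32_t pt_entry_t;
#define PG_V		0x00000001
#define PG_PROT		0x00000004
#define PG_FRAME	0xfffff000

int
main(void)
{
	struct pte_bitfield ob = { .pfn = 0x12345, .dtype = 1 };
	pt_entry_t pte = 0x12345000 | PG_V;

	/* Bitfield reads depend on how the compiler packs the struct;
	 * the mask tests are layout-independent and+branch operations. */
	printf("bitfield: valid=%u pfn=0x%05x\n", ob.dtype, ob.pfn);
	printf("bitmask:  valid=%u frame=0x%08x %s\n",
	    (unsigned)(pte & PG_V), (unsigned)(pte & PG_FRAME),
	    (pte & PG_PROT) ? "ro" : "rw");
	return 0;
}

Building a mapping then reduces to OR-ing a template with a frame
address, the pattern the reworked pmap_map() relies on below
(*pte = template | page).
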
diff --git a/sys/arch/mvme88k/include/mmu.h b/sys/arch/mvme88k/include/mmu.h
index c5a8826ccad..a59fb8eebcc 100644
--- a/sys/arch/mvme88k/include/mmu.h
+++ b/sys/arch/mvme88k/include/mmu.h
@@ -1,18 +1,85 @@
-/* $OpenBSD: mmu.h,v 1.15 2001/12/22 19:17:01 smurph Exp $ */
+/* $OpenBSD: mmu.h,v 1.16 2001/12/24 04:12:37 miod Exp $ */
-#ifndef __MACHINE_MMU_H__
-#define __MACHINE_MMU_H__
+/*
+ * This file bears almost no resemblance to the original m68k file,
+ * so the following copyright notice is questionable, but we are
+ * nice people.
+ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: pte.h 1.13 92/01/20$
+ *
+ * @(#)pte.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MACHINE_MMU_H_
+#define _MACHINE_MMU_H_
/*
- * Parameters which determine the 'geometry' of the M88K page tables in memory.
+ * Parameters which determine the 'geometry' of the m88k page tables in memory.
*/
-#define SDT_BITS 10 /* M88K segment table size bits */
-#define PDT_BITS 10 /* M88K page table size bits */
+
+#define SDT_BITS 10 /* M88K segment table size bits */
+#define PDT_BITS 10 /* M88K page table size bits */
#define PG_BITS PAGE_SHIFT /* M88K hardware page size bits */
/*
- * M88K area descriptors
+ * Common fields for APR, SDT and PTE
*/
+
+/* address frame */
+#define PG_FRAME 0xfffff000
+#define PG_SHIFT PG_BITS
+#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
+
+/* cache control bits */
+#define CACHE_DFL 0x0000000
+#define CACHE_INH 0x0000040 /* cache inhibit */
+#define CACHE_GLOBAL 0x0000080 /* global scope */
+#define CACHE_WT 0x0000200 /* write through */
+
+#define CACHE_MASK (~(CACHE_INH | CACHE_GLOBAL | CACHE_WT))
+
+/*
+ * Area descriptors
+ */
+
typedef struct cmmu_apr {
unsigned long
st_base:20, /* segment table base address */
@@ -31,71 +98,15 @@ typedef union apr_template {
} apr_template_t;
/*
- * M88K segment descriptors
- */
-typedef struct sdt_entry {
- unsigned long
- table_addr:20, /* page table base address */
- rsvA:2, /* reserved */
- wt:1, /* writethrough (cache control) */
- sup:1, /* supervisor protection */
- g:1, /* global (cache control) */
- no_cache:1, /* cache inhibit */
- rsvB:3, /* reserved */
- prot:1, /* write protect */
- rsvC:1, /* reserved */
- dtype:1; /* valid */
-} sdt_entry_t;
-
-typedef union sdt_entry_template {
- sdt_entry_t sdt_desc;
- unsigned long bits;
-} sdt_entry_template_t;
-
-#define SDT_ENTRY_NULL ((sdt_entry_t *) 0)
-
-/*
- * M88K page descriptors
- */
-typedef struct pt_entry {
- unsigned long
- pfn:20, /* page frame address */
- rsvA:1, /* reserved (U1 on mc88110) */
- wired:1, /* wired bit <<software>> */
- wt:1, /* writethrough (cache control) */
- sup:1, /* supervisor protection */
- g:1, /* global (cache control) */
- ci:1, /* cache inhibit */
- rsvB:1, /* reserved */
- modified:1, /* modified */
- pg_used:1, /* used (referenced) */
- prot:1, /* write protect */
- dtype:2; /* descriptor type (bit 2 only on mc88110) */
-} pt_entry_t;
-
-/* mc88110 indirect descriptors */
-typedef struct pt_ind_entry {
- unsigned long
- pda:30, /* page descriptor address */
- dtype:2; /* valid */
-} pt_ind_entry_t;
-
-typedef union pte_template {
- pt_entry_t pte;
- unsigned long bits;
-} pte_template_t;
-
-#define PT_ENTRY_NULL ((pt_entry_t *) 0)
-
-/*
* 88200 PATC (TLB)
*/
#define PATC_ENTRIES 56
/*
- * M88K BATC entries
+ * BATC entries
*/
+
typedef struct {
unsigned long
lba:13, /* logical block address */
@@ -114,143 +125,166 @@ typedef union batc_template {
} batc_template_t;
/*
- * Parameters and macros for BATC
+ * Segment table entries
*/
-#define BATC_BLKBYTES (512*1024) /* 'block' size of a BATC entry mapping */
-#define BATC_BLKSHIFT 19 /* number of bits to BATC shift (log2(BATC_BLKBYTES)) */
-#define BATC_BLKMASK (BATC_BLKBYTES-1) /* BATC block mask */
-#define BATC_MAX 8 /* number of BATC entries */
+typedef u_int32_t sdt_entry_t;
-#define BATC_BLK_ALIGNED(x) ((x & BATC_BLKMASK) == 0)
+#define SDT_ENTRY_NULL ((sdt_entry_t *) 0)
-#define M88K_BTOBLK(x) (x >> BATC_BLKSHIFT)
+#define SG_V 0x00000001
+#define SG_NV 0x00000000
+#define SG_PROT 0x00000004
+#define SG_RO 0x00000004
+#define SG_RW 0x00000000
+#define SG_SO 0x00000100
-/*
- * protection codes (prot field)
- */
-#define M88K_RO 1 /* read only */
-#define M88K_RW 0 /* read/write */
+#define SDT_VALID(sdt) (*(sdt) & SG_V)
+#define SDT_SUP(sdt) (*(sdt) & SG_SO)
+#define SDT_WP(sdt) (*(sdt) & SG_PROT)
/*
- * protection codes (sup field)
+ * Page table entries
*/
-#define M88K_SUPV 1 /* translation can only be done in supervisor mode */
-#define M88K_USER 0 /* translation can be done supv. or user mode */
+
+typedef u_int32_t pt_entry_t;
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#define PG_V 0x00000001
+#define PG_NV 0x00000000
+#define PG_PROT 0x00000004
+#define PG_U 0x00000008
+#define PG_M 0x00000010
+#define PG_RO 0x00000004
+#define PG_RW 0x00000000
+#define PG_SO 0x00000100
+#define PG_W 0x00000400 /* XXX unused but reserved field */
+#define PG_U1 0x00000800 /* U1 bit for M88110 */
+
+#define PDT_VALID(pte) (*(pte) & PG_V)
+#define PDT_SUP(pte) (*(pte) & PG_SO)
+#define PDT_WP(pte) (*(pte) & PG_PROT)
/*
- * descriptor types
+ * Indirect descriptors (mc88110)
*/
-#define DT_INVALID 0
-#define DT_VALID 1
-#define DT_IND_MASKED 2
-#define DT_IND_UMASKED 3
+
+typedef u_int32_t pt_ind_entry_t;
+
+/* validity bits */
+#define IND_V 0x00000001
+#define IND_NV 0x00000000
+#define IND_MASKED 0x00000002
+#define IND_UNMASKED 0x00000003
+#define IND_MASK 0x00000003
+
+#define IND_FRAME 0xfffffffc
+#define IND_SHIFT 2
+
+#define IND_PDA(x) (((x) & IND_FRAME) >> IND_SHIFT)
/*
* Number of entries in a page table.
*/
+
#define SDT_ENTRIES (1<<(SDT_BITS))
#define PDT_ENTRIES (1<<(PDT_BITS))
/*
* Size in bytes of a single page table.
*/
+
#define SDT_SIZE (sizeof(sdt_entry_t) * SDT_ENTRIES)
#define PDT_SIZE (sizeof(pt_entry_t) * PDT_ENTRIES)
/*
* Shifts and masks
*/
+
#define SDT_SHIFT (PDT_BITS + PG_BITS)
#define PDT_SHIFT (PG_BITS)
#define SDT_MASK (((1<<SDT_BITS)-1) << SDT_SHIFT)
#define PDT_MASK (((1<<PDT_BITS)-1) << PDT_SHIFT)
-#define SDT_NEXT(va) ((va + (1<<SDT_SHIFT)) & SDT_MASK)
-#define PDT_NEXT(va) ((va + (1<<PDT_SHIFT)) & (SDT_MASK|PDT_MASK))
+#define SDT_NEXT(va) (((va) + (1<<SDT_SHIFT)) & SDT_MASK)
+#define PDT_NEXT(va) (((va) + (1<<PDT_SHIFT)) & (SDT_MASK|PDT_MASK))
-#define SDTIDX(va) ((va & SDT_MASK) >> SDT_SHIFT)
-#define PDTIDX(va) ((va & PDT_MASK) >> PDT_SHIFT)
+#define SDTIDX(va) (((va) & SDT_MASK) >> SDT_SHIFT)
+#define PDTIDX(va) (((va) & PDT_MASK) >> PDT_SHIFT)
-#define SDTENT(map, va) ((sdt_entry_t *)(map->sdt_vaddr + SDTIDX(va)))
+/* XXX uses knowledge of pmap structure */
+#define SDTENT(map, va) ((sdt_entry_t *)((map)->sdt_vaddr + SDTIDX(va)))
/*
* Size of a PDT table group.
*/
+
#define LOG2_PDT_SIZE (PDT_BITS + 2)
#define LOG2_PDT_TABLE_GROUP_SIZE (PAGE_SHIFT - LOG2_PDT_SIZE)
#define PDT_TABLE_GROUP_SIZE (1 << LOG2_PDT_TABLE_GROUP_SIZE)
-#define PT_FREE(tbl) uvm_km_free(kernel_map, (vaddr_t)tbl, PAGE_SIZE)
+#define PT_FREE(tbl) uvm_km_free(kernel_map, (vaddr_t)tbl, PAGE_SIZE)
/*
* Va spaces mapped by tables and PDT table group.
*/
+
#define PDT_VA_SPACE (PDT_ENTRIES * PAGE_SIZE)
#define PDT_TABLE_GROUP_VA_SPACE (PDT_VA_SPACE * PDT_TABLE_GROUP_SIZE)
/*
* Number of sdt entries used to map user and kernel space.
*/
+
#define USER_SDT_ENTRIES SDTIDX(VM_MIN_KERNEL_ADDRESS)
#define KERNEL_SDT_ENTRIES (SDT_ENTRIES - USER_SDT_ENTRIES)
/*
- * Macros to check if the descriptor is valid.
+ * Parameters and macros for BATC
*/
-#define SDT_VALID(sd_ptr) ((sd_ptr)->dtype == DT_VALID)
-#define PDT_VALID(pd_ptr) ((pd_ptr)->dtype == DT_VALID)
-/*
- * Alignment checks for pages (must lie on page boundaries).
- */
-#define PAGE_ALIGNED(ad) (((vm_offset_t)(ad) & PAGE_MASK) == 0)
-#define CHECK_PAGE_ALIGN(ad,who) \
- if (!PAGE_ALIGNED(ad)) \
- printf("%s: addr %x not page aligned.\n", who, ad)
-
-/*
- * Parameters for ATC(TLB) fulsh
- */
+/* number of bits to BATC shift (log2(BATC_BLKBYTES)) */
+#define BATC_BLKSHIFT 19
+/* 'block' size of a BATC entry mapping */
+#define BATC_BLKBYTES (1 << BATC_BLKSHIFT)
+/* BATC block mask */
+#define BATC_BLKMASK (BATC_BLKBYTES-1)
+/* number of BATC entries */
+#define BATC_MAX 8
-#define CMMU_SCR 0x004
+#define BATC_BLK_ALIGNED(x) ((x & BATC_BLKMASK) == 0)
-#define FLUSH_SUP_ALL 0x37
-#define FLUSH_USR_ALL 0x33
-#define FLUSH_SUP_SEG 0x36
-#define FLUSH_USR_SEG 0x32
-#define FLUSH_SUP_PG 0x35
-#define FLUSH_USR_PG 0x31
+#define M88K_BTOBLK(x) (x >> BATC_BLKSHIFT)
/*
- * Cache coontrol bits for pte
+ * DMA and caching control
*/
-#define CACHE_DFL 0
-#define CACHE_INH 0x40
-#define CACHE_GLOBAL 0x80
-#define CACHE_WT 0x200
+#define DMA_CACHE_SYNC 0x1
+#define DMA_CACHE_SYNC_INVAL 0x2
+#define DMA_CACHE_INV 0x3
-#define CACHE_MASK (~(unsigned)(CACHE_INH | CACHE_GLOBAL | CACHE_WT))
+void dma_cachectl __P((vm_offset_t, int, int));
/*
- * Prototype for invalidate_pte found in locore_asm_routines.S
+ * Alignment checks for pages (must lie on page boundaries).
*/
-unsigned invalidate_pte(pt_entry_t *pointer);
+
+#define PAGE_ALIGNED(ad) (((vm_offset_t)(ad) & PAGE_MASK) == 0)
+#define CHECK_PAGE_ALIGN(ad,who) \
+ if (!PAGE_ALIGNED(ad)) \
+ printf("%s: addr %x not page aligned.\n", who, ad)
+
+unsigned invalidate_pte __P((pt_entry_t *));
extern vm_offset_t kmapva;
#define kvtopte(va) \
-({ \
- sdt_entry_t *sdt; \
- sdt = (sdt_entry_t *)kmapva + SDTIDX(va) + SDT_ENTRIES; \
- (pte_template_t *)(sdt->table_addr << PDT_SHIFT) + PDTIDX(va); \
-})
-extern u_int kvtop __P((vm_offset_t));
+ ((pt_entry_t *)(PG_PFNUM(*((sdt_entry_t *)kmapva + \
+ SDTIDX(va) + SDT_ENTRIES)) << PDT_SHIFT) + PDTIDX(va))
-#define DMA_CACHE_SYNC 0x1
-#define DMA_CACHE_SYNC_INVAL 0x2
-#define DMA_CACHE_INV 0x3
+u_int kvtop __P((vm_offset_t));
#endif /* __MACHINE_MMU_H__ */
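
As an aside, the new shift/mask macros decompose a virtual address with
no struct access at all. A small sketch, assuming PAGE_SHIFT is 12 as on
these machines; the constants are copied from the definitions above
(with a U suffix added so the mask arithmetic stays in unsigned range):

#include <stdio.h>
#include <stdint.h>

#define PG_BITS		12
#define SDT_BITS	10
#define PDT_BITS	10
#define SDT_SHIFT	(PDT_BITS + PG_BITS)
#define PDT_SHIFT	PG_BITS
#define SDT_MASK	(((1U << SDT_BITS) - 1) << SDT_SHIFT)
#define PDT_MASK	(((1U << PDT_BITS) - 1) << PDT_SHIFT)
#define SDTIDX(va)	(((va) & SDT_MASK) >> SDT_SHIFT)
#define PDTIDX(va)	(((va) & PDT_MASK) >> PDT_SHIFT)

int
main(void)
{
	uint32_t va = 0x805ff123;

	/* A va splits into segment table index, page table index and
	 * byte offset; the first two select the sdt_entry_t and
	 * pt_entry_t during the two-level walk done by pmap_pte(). */
	printf("va 0x%08x -> sdt[%u] pdt[%u] offset 0x%03x\n",
	    va, (unsigned)SDTIDX(va), (unsigned)PDTIDX(va),
	    (unsigned)(va & ((1U << PG_BITS) - 1)));
	return 0;
}
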
diff --git a/sys/arch/mvme88k/include/pmap.h b/sys/arch/mvme88k/include/pmap.h
index ab00e0491bb..ab3fea00c1d 100644
--- a/sys/arch/mvme88k/include/pmap.h
+++ b/sys/arch/mvme88k/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.23 2001/12/22 10:22:13 smurph Exp $ */
+/* $OpenBSD: pmap.h,v 1.24 2001/12/24 04:12:37 miod Exp $ */
/*
* Mach Operating System
* Copyright (c) 1991 Carnegie Mellon University
@@ -20,7 +20,7 @@
#include <machine/pcb.h> /* pcb_t, etc.*/
#include <machine/psl.h> /* get standard goodies */
-typedef struct sdt_entry *sdt_ptr_t;
+typedef sdt_entry_t *sdt_ptr_t;
/*
* PMAP structure
@@ -28,23 +28,23 @@ typedef struct sdt_entry *sdt_ptr_t;
typedef struct pmap *pmap_t;
struct pmap {
- sdt_ptr_t sdt_paddr; /* physical pointer to sdt */
- sdt_ptr_t sdt_vaddr; /* virtual pointer to sdt */
- int ref_count; /* reference count */
- struct simplelock lock;
- struct pmap_statistics stats; /* pmap statistics */
+ sdt_ptr_t sdt_paddr; /* physical pointer to sdt */
+ sdt_ptr_t sdt_vaddr; /* virtual pointer to sdt */
+ int ref_count; /* reference count */
+ struct simplelock lock;
+ struct pmap_statistics stats; /* pmap statistics */
/* cpus using of this pmap; NCPU must be <= 32 */
- unsigned long cpus_using;
+ u_int32_t cpus_using;
#ifdef DEBUG
- pmap_t next;
- pmap_t prev;
+ pmap_t next;
+ pmap_t prev;
#endif
/* for OMRON_PMAP */
- batc_template_t i_batc[BATC_MAX]; /* instruction BATCs */
- batc_template_t d_batc[BATC_MAX]; /* data BATCs */
+ batc_template_t i_batc[BATC_MAX]; /* instruction BATCs */
+ batc_template_t d_batc[BATC_MAX]; /* data BATCs */
/* end OMRON_PMAP */
};
@@ -61,7 +61,7 @@ struct pmap {
typedef struct pv_entry {
struct pv_entry *next; /* next pv_entry */
pmap_t pmap; /* pmap where mapping lies */
- vm_offset_t va; /* virtual address for mapping */
+ vaddr_t va; /* virtual address for mapping */
} *pv_entry_t;
#ifdef _KERNEL
@@ -70,51 +70,18 @@ extern pmap_t kernel_pmap;
extern struct pmap kernel_pmap_store;
extern caddr_t vmmap;
-#define pmap_kernel() (&kernel_pmap_store)
-#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
-/* Used in builtin/device_pager.c */
-#define pmap_phys_address(frame) ((vm_offset_t) (ptoa(frame)))
+#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((paddr_t)(ptoa(frame)))
#define pmap_update(pmap) /* nothing (yet) */
#define PMAP_ACTIVATE(proc) pmap_activate(proc)
#define PMAP_DEACTIVATE(proc) pmap_deactivate(proc)
-#define PMAP_CONTEXT(pmap, thread)
-/*
- * Modes used when calling pmap_cache_flush().
- */
-#define FLUSH_CACHE 0
-#define FLUSH_CODE_CACHE 1
-#define FLUSH_DATA_CACHE 2
-#define FLUSH_LOCAL_CACHE 3
-#define FLUSH_LOCAL_CODE_CACHE 4
-#define FLUSH_LOCAL_DATA_CACHE 5
-
-/**************************************************************************/
-/*** Prototypes for public functions defined in pmap.c ********************/
-/**************************************************************************/
-
-void pmap_bootstrap __P((vm_offset_t, vm_offset_t *, vm_offset_t *,
- vm_offset_t *, vm_offset_t *));
-void pmap_cache_ctrl __P((pmap_t, vm_offset_t, vm_offset_t, unsigned));
-pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
-void pmap_cache_ctrl __P((pmap_t, vm_offset_t, vm_offset_t, unsigned));
-void pmap_zero_page __P((vm_offset_t));
-void pmap_remove_all __P((vm_offset_t));
-vm_offset_t pmap_extract_unlocked __P((pmap_t, vm_offset_t));
-void copy_to_phys __P((vm_offset_t, vm_offset_t, int));
-void copy_from_phys __P((vm_offset_t, vm_offset_t, int));
-void pmap_redzone __P((pmap_t, vm_offset_t));
-void icache_flush __P((vm_offset_t));
-void pmap_dcache_flush __P((pmap_t, vm_offset_t));
-void pmap_cache_flush __P((pmap_t, vm_offset_t, int, int));
-void pmap_print __P((pmap_t));
-void pmap_print_trace __P((pmap_t, vm_offset_t, boolean_t));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t,
- vm_prot_t, unsigned int));
-vm_offset_t pmap_map_batc __P((vm_offset_t, vm_offset_t, vm_offset_t,
- vm_prot_t, unsigned int));
+void pmap_bootstrap __P((vaddr_t, paddr_t *, paddr_t *, vaddr_t *, vaddr_t *));
+void pmap_cache_ctrl __P((pmap_t, vaddr_t, vaddr_t, unsigned));
+
#endif /* _KERNEL */
#endif /* _MACHINE_PMAP_H_ */
diff --git a/sys/arch/mvme88k/include/pte.h b/sys/arch/mvme88k/include/pte.h
deleted file mode 100644
index 7757aad7f07..00000000000
--- a/sys/arch/mvme88k/include/pte.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* $OpenBSD: pte.h,v 1.2 1999/02/09 06:36:27 smurph Exp $ */
-
-/*
- * Copyright (c) 1988 University of Utah.
- * Copyright (c) 1982, 1986, 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: Utah $Hdr: pte.h 1.13 92/01/20$
- *
- * @(#)pte.h 8.1 (Berkeley) 6/10/93
- */
-
-#ifndef _MVME88K_PTE_H_
-#define _MVME88K_PTE_H_
-
-/*
- * m88k hardware segment/page table entries
- */
-
-#if 0 /* FIXME */
-struct ste {
- unsigned int sg_pfnum:20; /* page table frame number */
- unsigned int :8; /* reserved at 0 */
- unsigned int :1; /* reserved at 1 */
- unsigned int sg_prot:1; /* write protect bit */
- unsigned int sg_v:2; /* valid bits */
-};
-
-struct ste40 {
- unsigned int sg_ptaddr:24; /* page table page addr */
- unsigned int :4; /* reserved at 0 */
- unsigned int sg_u; /* hardware modified (dirty) bit */
- unsigned int sg_prot:1; /* write protect bit */
- unsigned int sg_v:2; /* valid bits */
-};
-
-struct pte {
- unsigned int pg_pfnum:20; /* page frame number or 0 */
- unsigned int :3;
- unsigned int pg_w:1; /* is wired */
- unsigned int :1; /* reserved at zero */
- unsigned int pg_ci:1; /* cache inhibit bit */
- unsigned int :1; /* reserved at zero */
- unsigned int pg_m:1; /* hardware modified (dirty) bit */
- unsigned int pg_u:1; /* hardware used (reference) bit */
- unsigned int pg_prot:1; /* write protect bit */
- unsigned int pg_v:2; /* valid bit */
-};
-#endif /* 0 */
-
-#if 0 /* FIXME */
-typedef int st_entry_t; /* segment table entry */
-typedef int pt_entry_t; /* Mach page table entry */
-#endif /* 0 */
-
-#define PT_ENTRY_NULL ((pt_entry_t *) 0)
-#define ST_ENTRY_NULL ((st_entry_t *) 0)
-
-#define SG_V 0x00000002 /* segment is valid */
-#define SG_NV 0x00000000
-#define SG_PROT 0x00000004 /* access protection mask */
-#define SG_RO 0x00000004
-#define SG_RW 0x00000000
-#define SG_U 0x00000008 /* modified bit (68040) */
-#define SG_FRAME 0xfffff000
-#define SG_IMASK 0xffc00000
-#define SG_ISHIFT 22
-#define SG_PMASK 0x003ff000
-#define SG_PSHIFT 12
-
-#define PG_V 0x00000001
-#define PG_NV 0x00000000
-#define PG_PROT 0x00000004
-#define PG_U 0x00000008
-#define PG_M 0x00000010
-#define PG_W 0x00000100
-#define PG_RO 0x00000004
-#define PG_RW 0x00000000
-#define PG_FRAME 0xfffff000
-#define PG_CI 0x00000040
-#define PG_SHIFT 12
-#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
-
-#if 0 /* FIXME */
-#define M88K_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
- /* user process segment table size */
-#define M88K_MAX_PTSIZE 0x400000 /* max size of UPT */
-#define M88K_MAX_KPTSIZE 0x100000 /* max memory to allocate to KPT */
-#define M88K_PTBASE 0x10000000 /* UPT map base address */
-#define M88K_PTMAXSIZE 0x70000000 /* UPT map maximum size */
-#endif /* 0 */
-
-/*
- * Kernel virtual address to page table entry and to physical address.
- */
-
-#if 0 /* FIXME */
-#define kvtopte(va) \
- (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
-#define ptetokv(pt) \
- ((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
-#define kvtophys(va) \
- ((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
-#endif /* 0 */
-
-#endif /* !_MVME88K_PTE_H_ */
diff --git a/sys/arch/mvme88k/include/vmparam.h b/sys/arch/mvme88k/include/vmparam.h
index 1ebb8a72f5c..4e8882be664 100644
--- a/sys/arch/mvme88k/include/vmparam.h
+++ b/sys/arch/mvme88k/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.18 2001/12/05 16:25:44 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.19 2001/12/24 04:12:37 miod Exp $ */
/*
* Mach Operating System
* Copyright (c) 1992 Carnegie Mellon University
@@ -112,7 +112,6 @@
struct pmap_physseg {
struct pv_entry *pvent; /* pv table for this seg */
char *attrs; /* page modify list for this seg */
- struct simplelock *plock; /* page lock for this seg */
};
#endif /* _LOCORE */
diff --git a/sys/arch/mvme88k/mvme88k/m88110.c b/sys/arch/mvme88k/mvme88k/m88110.c
index a0295b313b8..ff3b1a482c2 100644
--- a/sys/arch/mvme88k/mvme88k/m88110.c
+++ b/sys/arch/mvme88k/mvme88k/m88110.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: m88110.c,v 1.4 2001/12/22 09:49:39 smurph Exp $ */
+/* $OpenBSD: m88110.c,v 1.5 2001/12/24 04:12:40 miod Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* All rights reserved.
@@ -786,9 +786,9 @@ m88110_cmmu_show_translation(unsigned address,
union {
unsigned bits;
struct {
- unsigned segment_table_index:10,
- page_table_index:10,
- page_offset:12;
+ unsigned segment_table_index:SDT_BITS,
+ page_table_index:PDT_BITS,
+ page_offset:PG_BITS;
} field;
} virtual_address;
unsigned value;
@@ -856,7 +856,7 @@ m88110_cmmu_show_translation(unsigned address,
DEBUG_MSG("<would report an error, valid bit not set>\n");
return;
}
- value = apr_template.field.st_base << 12; /* now point to seg page */
+ value = apr_template.field.st_base << PG_BITS; /* now point to seg page */
}
/* translate value from physical to virtual */
@@ -868,43 +868,43 @@ m88110_cmmu_show_translation(unsigned address,
/****** ACCESS SEGMENT TABLE AND INTERPRET SEGMENT DESCRIPTOR *******/
{
- union sdt_entry_template std_template;
+ sdt_entry_t sdt;
if (verbose_flag)
DEBUG_MSG("will follow to entry %d of page at 0x%x...\n",
virtual_address.field.segment_table_index, value);
value |= virtual_address.field.segment_table_index *
- sizeof(struct sdt_entry);
+ sizeof(sdt_entry_t);
if (badwordaddr((vm_offset_t)value)) {
DEBUG_MSG("ERROR: unable to access page at 0x%08x.\n", value);
return;
}
- std_template.bits = *(unsigned *)value;
+ sdt = *(sdt_entry_t *)value;
if (verbose_flag > 1)
- DEBUG_MSG("SEG DESC @0x%x is 0x%08x\n", value, std_template.bits);
+ DEBUG_MSG("SEG DESC @0x%x is 0x%08x\n", value, sdt);
DEBUG_MSG("SEG DESC @0x%x: PgTbl: 0x%x000",
- value, std_template.sdt_desc.table_addr);
- if (std_template.sdt_desc.wt) DEBUG_MSG(", WTHRU");
+ value, PG_PFNUM(sdt));
+ if (sdt & CACHE_WT) DEBUG_MSG(", WTHRU");
else DEBUG_MSG(", !wthru");
- if (std_template.sdt_desc.sup) DEBUG_MSG(", S-PROT");
+ if (sdt & SG_SO) DEBUG_MSG(", S-PROT");
else DEBUG_MSG(", UserOk");
- if (std_template.sdt_desc.g) DEBUG_MSG(", GLOBAL");
+ if (sdt & CACHE_GLOBAL) DEBUG_MSG(", GLOBAL");
else DEBUG_MSG(", !global");
- if (std_template.sdt_desc.no_cache) DEBUG_MSG(", $INHIBIT");
+ if (sdt & CACHE_INH) DEBUG_MSG(", $INHIBIT");
else DEBUG_MSG(", $ok");
- if (std_template.sdt_desc.prot) DEBUG_MSG(", W-PROT");
+ if (sdt & SG_PROT) DEBUG_MSG(", W-PROT");
else DEBUG_MSG(", WriteOk");
- if (std_template.sdt_desc.dtype) DEBUG_MSG(", VALID");
+ if (sdt & SG_V) DEBUG_MSG(", VALID");
else DEBUG_MSG(", !valid");
DEBUG_MSG(".\n");
/* if not valid, done now */
- if (std_template.sdt_desc.dtype == 0) {
+ if (!(sdt & SG_V)) {
DEBUG_MSG("<would report an error, STD entry not valid>\n");
return;
}
- value = std_template.sdt_desc.table_addr << 12;
+ value = ptoa(PG_PFNUM(sdt));
}
/* translate value from physical to virtual */
@@ -914,51 +914,50 @@ m88110_cmmu_show_translation(unsigned address,
/******* PAGE TABLE *********/
{
- union pte_template pte_template;
+ pt_entry_t pte;
if (verbose_flag)
DEBUG_MSG("will follow to entry %d of page at 0x%x...\n",
virtual_address.field.page_table_index, value);
value |= virtual_address.field.page_table_index *
- sizeof(struct pt_entry);
+ sizeof(pt_entry_t);
if (badwordaddr((vm_offset_t)value)) {
DEBUG_MSG("error: unable to access page at 0x%08x.\n", value);
-
return;
}
- pte_template.bits = *(unsigned *)value;
+ pte = *(pt_entry_t *)value;
if (verbose_flag > 1)
- DEBUG_MSG("PAGE DESC @0x%x is 0x%08x.\n", value, pte_template.bits);
+ DEBUG_MSG("PAGE DESC @0x%x is 0x%08x.\n", value, pte);
DEBUG_MSG("PAGE DESC @0x%x: page @%x000",
- value, pte_template.pte.pfn);
- if (pte_template.pte.wired) DEBUG_MSG(", WIRE");
- else DEBUG_MSG(", !wire");
- if (pte_template.pte.wt) DEBUG_MSG(", WTHRU");
- else DEBUG_MSG(", !wthru");
- if (pte_template.pte.sup) DEBUG_MSG(", S-PROT");
- else DEBUG_MSG(", UserOk");
- if (pte_template.pte.g) DEBUG_MSG(", GLOBAL");
- else DEBUG_MSG(", !global");
- if (pte_template.pte.ci) DEBUG_MSG(", $INHIBIT");
- else DEBUG_MSG(", $ok");
- if (pte_template.pte.modified) DEBUG_MSG(", MOD");
- else DEBUG_MSG(", !mod");
- if (pte_template.pte.pg_used) DEBUG_MSG(", USED");
- else DEBUG_MSG(", !used");
- if (pte_template.pte.prot) DEBUG_MSG(", W-PROT");
- else DEBUG_MSG(", WriteOk");
- if (pte_template.pte.dtype) DEBUG_MSG(", VALID");
- else DEBUG_MSG(", !valid");
+ value, PG_PFNUM(pte));
+ if (pte & PG_W) DEBUG_MSG(", WIRE");
+ else DEBUG_MSG(", !wire");
+ if (pte & CACHE_WT) DEBUG_MSG(", WTHRU");
+ else DEBUG_MSG(", !wthru");
+ if (pte & PG_SO) DEBUG_MSG(", S-PROT");
+ else DEBUG_MSG(", UserOk");
+ if (pte & CACHE_GLOBAL) DEBUG_MSG(", GLOBAL");
+ else DEBUG_MSG(", !global");
+ if (pte & CACHE_INH) DEBUG_MSG(", $INHIBIT");
+ else DEBUG_MSG(", $ok");
+ if (pte & PG_M) DEBUG_MSG(", MOD");
+ else DEBUG_MSG(", !mod");
+ if (pte & PG_U) DEBUG_MSG(", USED");
+ else DEBUG_MSG(", !used");
+ if (pte & PG_PROT) DEBUG_MSG(", W-PROT");
+ else DEBUG_MSG(", WriteOk");
+ if (pte & PG_V) DEBUG_MSG(", VALID");
+ else DEBUG_MSG(", !valid");
DEBUG_MSG(".\n");
/* if not valid, done now */
- if (pte_template.pte.dtype == 0) {
+ if (!(pte & PG_V)) {
DEBUG_MSG("<would report an error, PTE entry not valid>\n");
return;
}
- value = pte_template.pte.pfn << 12;
+ value = ptoa(PG_PFNUM(pte));
if (verbose_flag)
DEBUG_MSG("will follow to byte %d of page at 0x%x...\n",
virtual_address.field.page_offset, value);
@@ -1018,11 +1017,6 @@ m88110_load_patc(int entry, vm_offset_t vaddr, vm_offset_t paddr, int kernel)
set_dppl(lpa);
}
-#define SDT_WP(sd_ptr) ((sd_ptr)->prot != 0)
-#define SDT_SUP(sd_ptr) ((sd_ptr)->sup != 0)
-#define PDT_WP(pte_ptr) ((pte_ptr)->prot != 0)
-#define PDT_SUP(pte_ptr) ((pte_ptr)->sup != 0)
-
int
m88110_table_search(pmap_t map, vm_offset_t virt, int write, int kernel, int data)
{
@@ -1048,7 +1042,7 @@ m88110_table_search(pmap_t map, vm_offset_t virt, int write, int kernel, int dat
if (write && SDT_WP(sdt))
return (7); /* Write Violation */
- pte = (pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) + PDTIDX(virt);
+ pte = (pt_entry_t *)(PG_PFNUM(*(sdt_entry_t *)(sdt + SDT_ENTRIES))<<PDT_SHIFT) + PDTIDX(virt);
/*
* Check whether page frame exist or not.
*/
diff --git a/sys/arch/mvme88k/mvme88k/m8820x.c b/sys/arch/mvme88k/mvme88k/m8820x.c
index 003b67e5fc2..82cbe46e5f3 100644
--- a/sys/arch/mvme88k/mvme88k/m8820x.c
+++ b/sys/arch/mvme88k/mvme88k/m8820x.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: m8820x.c,v 1.9 2001/12/22 09:49:39 smurph Exp $ */
+/* $OpenBSD: m8820x.c,v 1.10 2001/12/24 04:12:40 miod Exp $ */
/*
* Copyright (c) 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -444,7 +444,8 @@ m8820x_setup_cmmu_config()
dprintf(DB_CMMU,("cmmu_init: testing CMMU %d for CPU %d\n",
num*cpu_cmmu_ratio+i, num));
if (!m8820x_cmmu_alive(num*cpu_cmmu_ratio + i)) {
- printf("CMMU %d attached to CPU %d is not working\n");
+ printf("CMMU %d attached to CPU %d is not working\n",
+ num*cpu_cmmu_ratio+i, num);
panic("m8820x_setup_cmmu_config");
}
}
@@ -866,7 +867,7 @@ m8820x_cmmu_init()
m8820x_cmmu[cmmu_num].cmmu_regs->sctr = tmp;
tmp =
- (0x00000 << 12) | /* segment table base address */
+ (0x00000 << PG_BITS) | /* segment table base address */
AREA_D_WT | /* write through */
AREA_D_G | /* global */
AREA_D_CI | /* cache inhibit */
@@ -942,7 +943,7 @@ m8820x_cmmu_init()
* set up yet....
*/
tmp =
- (0x00000 << 12) | /* segment table base address */
+ (0x00000 << PG_BITS) | /* segment table base address */
AREA_D_WT | /* write through */
AREA_D_G | /* global */
AREA_D_CI | /* cache inhibit */
@@ -989,7 +990,7 @@ m8820x_cmmu_shutdown_now()
m8820x_cmmu[cmmu_num].cmmu_regs->sctr = tmp;
tmp =
- (0x00000 << 12) | /* segment table base address */
+ (0x00000 << PG_BITS) | /* segment table base address */
! AREA_D_WT | /* write through */
! AREA_D_G | /* global */
AREA_D_CI | /* cache inhibit */
@@ -1756,9 +1757,9 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
union {
unsigned bits;
struct {
- unsigned segment_table_index:10,
- page_table_index:10,
- page_offset:12;
+ unsigned segment_table_index:SDT_BITS,
+ page_table_index:PDT_BITS,
+ page_offset:PG_BITS;
} field;
} virtual_address;
unsigned value;
@@ -1950,7 +1951,7 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
return;
}
- value = apr_template.field.st_base << 12; /* now point to seg page */
+ value = apr_template.field.st_base << PG_BITS; /* now point to seg page */
}
/* translate value from physical to virtual */
@@ -1962,46 +1963,44 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
/****** ACCESS SEGMENT TABLE AND INTERPRET SEGMENT DESCRIPTOR *******/
{
- union sdt_entry_template std_template;
+ sdt_entry_t sdt;
if (verbose_flag)
DEBUG_MSG("will follow to entry %d of page at 0x%x...\n",
virtual_address.field.segment_table_index, value);
value |= virtual_address.field.segment_table_index *
- sizeof(struct sdt_entry);
+ sizeof(sdt_entry_t);
if (badwordaddr((vm_offset_t)value)) {
DEBUG_MSG("ERROR: unable to access page at 0x%08x.\n", value);
-
return;
}
- std_template.bits = *(unsigned *)value;
+ sdt = *(sdt_entry_t *)value;
if (verbose_flag > 1)
- DEBUG_MSG("SEG DESC @0x%x is 0x%08x\n", value, std_template.bits);
+ DEBUG_MSG("SEG DESC @0x%x is 0x%08x\n", value, sdt);
DEBUG_MSG("SEG DESC @0x%x: PgTbl: 0x%x000",
- value, std_template.sdt_desc.table_addr);
- if (std_template.sdt_desc.wt) DEBUG_MSG(", WTHRU");
+ value, PG_PFNUM(sdt));
+ if (sdt & CACHE_WT) DEBUG_MSG(", WTHRU");
else DEBUG_MSG(", !wthru");
- if (std_template.sdt_desc.sup) DEBUG_MSG(", S-PROT");
+ if (sdt & SG_SO) DEBUG_MSG(", S-PROT");
else DEBUG_MSG(", UserOk");
- if (std_template.sdt_desc.g) DEBUG_MSG(", GLOBAL");
+ if (sdt & CACHE_GLOBAL) DEBUG_MSG(", GLOBAL");
else DEBUG_MSG(", !global");
- if (std_template.sdt_desc.no_cache) DEBUG_MSG(", $INHIBIT");
+ if (sdt & CACHE_INH) DEBUG_MSG(", $INHIBIT");
else DEBUG_MSG(", $ok");
- if (std_template.sdt_desc.prot) DEBUG_MSG(", W-PROT");
+ if (sdt & SG_PROT) DEBUG_MSG(", W-PROT");
else DEBUG_MSG(", WriteOk");
- if (std_template.sdt_desc.dtype) DEBUG_MSG(", VALID");
+ if (sdt & SG_V) DEBUG_MSG(", VALID");
else DEBUG_MSG(", !valid");
DEBUG_MSG(".\n");
/* if not valid, done now */
- if (std_template.sdt_desc.dtype == 0) {
+ if (!(sdt & SG_V)) {
DEBUG_MSG("<would report an error, STD entry not valid>\n");
-
return;
}
- value = std_template.sdt_desc.table_addr << 12;
+ value = ptoa(PG_PFNUM(sdt));
}
/* translate value from physical to virtual */
@@ -2011,12 +2010,12 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
/******* PAGE TABLE *********/
{
- union pte_template pte_template;
+ pt_entry_t pte;
if (verbose_flag)
DEBUG_MSG("will follow to entry %d of page at 0x%x...\n",
virtual_address.field.page_table_index, value);
value |= virtual_address.field.page_table_index *
- sizeof(struct pt_entry);
+ sizeof(pt_entry_t);
if (badwordaddr((vm_offset_t)value)) {
DEBUG_MSG("error: unable to access page at 0x%08x.\n", value);
@@ -2024,39 +2023,38 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
return;
}
- pte_template.bits = *(unsigned *)value;
+ pte = *(pt_entry_t *)value;
if (verbose_flag > 1)
- DEBUG_MSG("PAGE DESC @0x%x is 0x%08x.\n", value, pte_template.bits);
+ DEBUG_MSG("PAGE DESC @0x%x is 0x%08x.\n", value, pte);
DEBUG_MSG("PAGE DESC @0x%x: page @%x000",
- value, pte_template.pte.pfn);
- if (pte_template.pte.wired) DEBUG_MSG(", WIRE");
- else DEBUG_MSG(", !wire");
- if (pte_template.pte.wt) DEBUG_MSG(", WTHRU");
- else DEBUG_MSG(", !wthru");
- if (pte_template.pte.sup) DEBUG_MSG(", S-PROT");
- else DEBUG_MSG(", UserOk");
- if (pte_template.pte.g) DEBUG_MSG(", GLOBAL");
- else DEBUG_MSG(", !global");
- if (pte_template.pte.ci) DEBUG_MSG(", $INHIBIT");
- else DEBUG_MSG(", $ok");
- if (pte_template.pte.modified) DEBUG_MSG(", MOD");
- else DEBUG_MSG(", !mod");
- if (pte_template.pte.pg_used) DEBUG_MSG(", USED");
- else DEBUG_MSG(", !used");
- if (pte_template.pte.prot) DEBUG_MSG(", W-PROT");
- else DEBUG_MSG(", WriteOk");
- if (pte_template.pte.dtype) DEBUG_MSG(", VALID");
- else DEBUG_MSG(", !valid");
+ value, PG_PFNUM(pte));
+ if (pte & PG_W) DEBUG_MSG(", WIRE");
+ else DEBUG_MSG(", !wire");
+ if (pte & CACHE_WT) DEBUG_MSG(", WTHRU");
+ else DEBUG_MSG(", !wthru");
+ if (pte & PG_SO) DEBUG_MSG(", S-PROT");
+ else DEBUG_MSG(", UserOk");
+ if (pte & CACHE_GLOBAL) DEBUG_MSG(", GLOBAL");
+ else DEBUG_MSG(", !global");
+ if (pte & CACHE_INH) DEBUG_MSG(", $INHIBIT");
+ else DEBUG_MSG(", $ok");
+ if (pte & PG_M) DEBUG_MSG(", MOD");
+ else DEBUG_MSG(", !mod");
+ if (pte & PG_U) DEBUG_MSG(", USED");
+ else DEBUG_MSG(", !used");
+ if (pte & PG_PROT) DEBUG_MSG(", W-PROT");
+ else DEBUG_MSG(", WriteOk");
+ if (pte & PG_V) DEBUG_MSG(", VALID");
+ else DEBUG_MSG(", !valid");
DEBUG_MSG(".\n");
/* if not valid, done now */
- if (pte_template.pte.dtype == 0) {
+ if (!(pte & PG_V)) {
DEBUG_MSG("<would report an error, PTE entry not valid>\n");
-
return;
}
- value = pte_template.pte.pfn << 12;
+ value = ptoa(PG_PFNUM(pte));
if (verbose_flag)
DEBUG_MSG("will follow to byte %d of page at 0x%x...\n",
virtual_address.field.page_offset, value);
@@ -2064,7 +2062,6 @@ m8820x_cmmu_show_translation(address, supervisor_flag, verbose_flag, cmmu_num)
if (badwordaddr((vm_offset_t)value)) {
DEBUG_MSG("error: unable to access page at 0x%08x.\n", value);
-
return;
}
}
diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c
index 1c5ccafe92f..7bc22855bbc 100644
--- a/sys/arch/mvme88k/mvme88k/machdep.c
+++ b/sys/arch/mvme88k/mvme88k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.81 2001/12/22 17:57:11 smurph Exp $ */
+/* $OpenBSD: machdep.c,v 1.82 2001/12/24 04:12:40 miod Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -408,19 +408,19 @@ save_u_area(struct proc *p, vm_offset_t va)
{
int i;
for (i=0; i<UPAGES; i++) {
- p->p_md.md_upte[i] = kvtopte((va + (i * NBPG)))->bits;
+ p->p_md.md_upte[i] = *((pt_entry_t *)kvtopte((va + (i * NBPG))));
}
}
void
load_u_area(struct proc *p)
{
- pte_template_t *t;
+ pt_entry_t *t;
int i;
for (i=0; i<UPAGES; i++) {
t = kvtopte((UADDR + (i * NBPG)));
- t->bits = p->p_md.md_upte[i];
+ *t = p->p_md.md_upte[i];
}
for (i=0; i<UPAGES; i++) {
cmmu_flush_tlb(1, (UADDR + (i * NBPG)), NBPG);
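
Since pt_entry_t is now a bare u_int32_t, the u-area save/restore above
reduces to plain word copies. A self-contained sketch of the pattern;
kvtopte_stub and the UPAGES/NBPG values here are illustrative stand-ins,
not the kernel's:

#include <stdint.h>

typedef uint32_t pt_entry_t;

#define UPAGES	4		/* illustrative, not machine/param.h's */
#define NBPG	4096

static pt_entry_t fake_pt[UPAGES];	/* pretend kernel page table */

/* Stand-in for the real kvtopte(); it just indexes the fake table so
 * the copy loops below are runnable. */
static pt_entry_t *
kvtopte_stub(uint32_t va)
{
	return &fake_pt[(va / NBPG) % UPAGES];
}

/* The new save_u_area()/load_u_area() shape: with a flat PTE there is
 * no pte_template_t union or ->bits member on either side. */
void
save_u_area_sketch(pt_entry_t *md_upte, uint32_t va)
{
	int i;

	for (i = 0; i < UPAGES; i++)
		md_upte[i] = *kvtopte_stub(va + i * NBPG);
}

void
load_u_area_sketch(const pt_entry_t *md_upte, uint32_t uaddr)
{
	int i;

	for (i = 0; i < UPAGES; i++)
		*kvtopte_stub(uaddr + i * NBPG) = md_upte[i];
}
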
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index 82525c94451..e652d1dacc9 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,5 +1,7 @@
-/* $OpenBSD: pmap.c,v 1.59 2001/12/24 00:25:17 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.60 2001/12/24 04:12:40 miod Exp $ */
/*
+ * Copyright (c) 2001 Miodrag Vallat
+ * Copyright (c) 1998-2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
*
@@ -63,13 +65,12 @@
#include <machine/cmmu.h>
#include <machine/cpu_number.h>
#include <machine/pmap_table.h>
-#include <machine/pte.h>
/*
* VM externals
*/
-extern vm_offset_t avail_start, avail_end;
-extern vm_offset_t virtual_avail, virtual_end;
+extern vaddr_t avail_start, avail_end;
+extern vaddr_t virtual_avail, virtual_end;
/*
* Macros to operate cpus_using field
@@ -97,7 +98,7 @@ extern vm_offset_t virtual_avail, virtual_end;
#define CD_BOOT 0x0000080 /* pmap_bootstrap */
#define CD_INIT 0x0000100 /* pmap_init */
#define CD_CREAT 0x0000200 /* pmap_create */
-#define CD_FREE 0x0000400 /* pmap_free_tables */
+#define CD_FREE 0x0000400 /* pmap_release */
#define CD_DESTR 0x0000800 /* pmap_destroy */
#define CD_RM 0x0001000 /* pmap_remove */
#define CD_RMAL 0x0002000 /* pmap_remove_all */
@@ -121,20 +122,20 @@ int pmap_con_dbg = CD_NONE;
struct pool pmappool, pvpool;
-caddr_t vmmap;
-pt_entry_t *vmpte, *msgbufmap;
+caddr_t vmmap;
+pt_entry_t *vmpte, *msgbufmap;
-struct pmap kernel_pmap_store;
+struct pmap kernel_pmap_store;
pmap_t kernel_pmap = &kernel_pmap_store;
typedef struct kpdt_entry *kpdt_entry_t;
struct kpdt_entry {
- kpdt_entry_t next;
- vm_offset_t phys;
+ kpdt_entry_t next;
+ paddr_t phys;
};
#define KPDT_ENTRY_NULL ((kpdt_entry_t)0)
-kpdt_entry_t kpdt_free;
+kpdt_entry_t kpdt_free;
/*
* MAX_KERNEL_VA_SIZE must fit into the virtual address space between
@@ -170,10 +171,10 @@ kpdt_entry_t kpdt_free;
* Two pages of scratch space.
* Used in pmap_copy_page() and pmap_zero_page().
*/
-vm_offset_t phys_map_vaddr1, phys_map_vaddr2;
+vaddr_t phys_map_vaddr1, phys_map_vaddr2;
/*
- * The Modify List
+ * The Modify List
*
* This is an array, one byte per physical page, which keeps track
* of modified flags for pages which are no longer containd in any
@@ -183,97 +184,37 @@ char *pmap_modify_list;
#define PV_ENTRY_NULL ((pv_entry_t) 0)
-struct simplelock *pv_lock_table; /* array */
-
pv_entry_t pv_head_table; /* array of entries, one per page */
-#define PMAP_MANAGED(pa) (pmap_initialized && \
- vm_physseg_find(atop((pa)), NULL) != -1)
+#define PMAP_MANAGED(pa) \
+ (pmap_initialized && IS_VM_PHYSADDR(pa))
-#define PA_TO_PVH(pa) \
+#define pa_to_pvh(pa) \
({ \
int bank_, pg_; \
\
bank_ = vm_physseg_find(atop((pa)), &pg_); \
&vm_physmem[bank_].pmseg.pvent[pg_]; \
})
-#define LOCK_PVH(pa) \
-({ \
- int bank_, pg_; \
- \
- bank_ = vm_physseg_find(atop((pa)), &pg_); \
- simple_lock(&vm_physmem[bank_].pmseg.plock[pg_]); \
-})
-#define UNLOCK_PVH(pa) \
-({ \
- int bank_, pg_; \
- \
- bank_ = vm_physseg_find(atop((pa)), &pg_); \
- simple_unlock(&vm_physmem[bank_].pmseg.plock[pg_]); \
-})
-#define PA_TO_ATTRIB(pa) \
-({ \
- int bank_, pg_; \
- \
- bank_ = vm_physseg_find(atop((pa)), &pg_); \
- vm_physmem[bank_].pmseg.attrs[pg_]; \
-})
-#define SET_ATTRIB(pa, attr) \
+#define pa_to_attribute(pa) \
({ \
int bank_, pg_; \
\
bank_ = vm_physseg_find(atop((pa)), &pg_); \
- vm_physmem[bank_].pmseg.attrs[pg_] = (attr); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
})
/*
- * Locking and TLB invalidation primitives
+ * Locking primitives
*/
/*
- * Locking Protocols:
- *
- * There are two structures in the pmap module that need locking:
- * the pmaps themselves, and the per-page pv_lists (which are locked
- * by locking the pv_lock_table entry that corresponds to the pv_head
- * for the list in question.) Most routines want to lock a pmap and
- * then do operations in it that require pv_list locking -- however
- * pmap_remove_all and pmap_copy_on_write operate on a physical page
- * basis and want to do the locking in the reverse order, i.e. lock
- * a pv_list and then go through all the pmaps referenced by that list.
- * To protect against deadlock between these two cases, the pmap_lock
- * is used. There are three different locking protocols as a result:
- *
- * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
- * the pmap.
- *
- * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
- * lock on the pmap_lock (shared read), then lock the pmap
- * and finally the pv_lists as needed [i.e. pmap lock before
- * pv_list lock.]
- *
- * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
- * Get a write lock on the pmap_lock (exclusive write); this
- * also guaranteees exclusive access to the pv_lists. Lock the
- * pmaps as needed.
- *
- * At no time may any routine hold more than one pmap lock or more than
- * one pv_list lock. Because interrupt level routines can allocate
- * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
- * kernel_pmap can only be held at splvm.
- */
-/* DCR: 12/18/91 - The above explanation is no longer true. The pmap
- * system lock has been removed in favor of a backoff strategy to
- * avoid deadlock. Now, pv_list-based operations first get the
- * pv_list lock, then try to get the pmap lock, but if they can't,
- * they release the pv_list lock and retry the whole operation.
- */
-/*
* We raise the interrupt level to splvm, to block interprocessor
* interrupts during pmap operations.
*/
#define SPLVM(spl) spl = splvm();
#define SPLX(spl) splx(spl);
+
#define PMAP_LOCK(pmap,spl) \
do { \
SPLVM(spl); \
@@ -285,8 +226,7 @@ pv_entry_t pv_head_table; /* array of entries, one per page */
SPLX(spl); \
} while (0)
-#define PV_LOCK_TABLE_SIZE(n) ((vm_size_t)((n) * sizeof(struct simplelock)))
-#define PV_TABLE_SIZE(n) ((vm_size_t)((n) * sizeof(struct pv_entry)))
+#define PV_TABLE_SIZE(n) ((vsize_t)((n) * sizeof(struct pv_entry)))
#define ETHERPAGES 16
void *etherbuf=NULL;
@@ -302,16 +242,17 @@ boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
#ifdef DEBUG
-void check_pv_list __P((vm_offset_t, pv_entry_t, char *));
+void check_pv_list __P((vaddr_t, pv_entry_t, char *));
void check_pmap_consistency __P((char *));
+void check_map __P((pmap_t, vaddr_t, vaddr_t, char *));
- #define CHECK_PV_LIST(phys,pv_h,who) \
+#define CHECK_PV_LIST(phys,pv_h,who) \
if (pmap_con_dbg & CD_CHKPV) check_pv_list(phys,pv_h,who)
- #define CHECK_PMAP_CONSISTENCY(who) \
+#define CHECK_PMAP_CONSISTENCY(who) \
if (pmap_con_dbg & CD_CHKPM) check_pmap_consistency(who)
#else
- #define CHECK_PV_LIST(phys,pv_h,who)
- #define CHECK_PMAP_CONSISTENCY(who)
+#define CHECK_PV_LIST(phys,pv_h,who)
+#define CHECK_PMAP_CONSISTENCY(who)
#endif /* DEBUG */
/*
@@ -324,21 +265,65 @@ int batc_used;
*/
batc_entry_t batc_entry[BATC_MAX];
-vm_offset_t kmapva = 0;
-extern vm_offset_t bugromva;
-extern vm_offset_t sramva;
-extern vm_offset_t obiova;
-
-void flush_atc_entry __P((long, vm_offset_t, boolean_t));
-unsigned int m88k_protection __P((pmap_t, vm_prot_t));
-pt_entry_t *pmap_expand_kmap __P((vm_offset_t, vm_prot_t));
-void pmap_free_tables __P((pmap_t));
-void pmap_remove_range __P((pmap_t, vm_offset_t, vm_offset_t));
-void pmap_copy_on_write __P((vm_offset_t));
-void pmap_expand __P((pmap_t, vm_offset_t));
-void cache_flush_loop __P((int, vm_offset_t, int));
-void pmap_pinit __P((pmap_t));
+vaddr_t kmapva = 0;
+extern vaddr_t bugromva;
+extern vaddr_t sramva;
+extern vaddr_t obiova;
+
+/*
+ * Internal routines
+ */
+void flush_atc_entry __P((long, vaddr_t, boolean_t));
+pt_entry_t *pmap_expand_kmap __P((vaddr_t, vm_prot_t));
+void pmap_remove_range __P((pmap_t, vaddr_t, vaddr_t));
+void pmap_copy_on_write __P((paddr_t));
+void pmap_expand __P((pmap_t, vaddr_t));
void pmap_release __P((pmap_t));
+vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t, unsigned int));
+vaddr_t pmap_map_batc __P((vaddr_t, paddr_t, paddr_t, vm_prot_t, unsigned int));
+pt_entry_t *pmap_pte __P((pmap_t, vaddr_t));
+void pmap_remove_all __P((paddr_t));
+
+/*
+ * quick PTE field checking macros
+ */
+#define pmap_pte_w(pte) (*(pte) & PG_W)
+#define pmap_pte_m(pte) (*(pte) & PG_M)
+#define pmap_pte_u(pte) (*(pte) & PG_U)
+#define pmap_pte_prot(pte) (*(pte) & PG_PROT)
+
+#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
+
+/*
+ * Convert machine-independent protection code to M88K protection bits.
+ */
+static __inline u_int32_t
+m88k_protection(pmap_t map, vm_prot_t prot)
+{
+ pt_entry_t p;
+
+ p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
+ /*
+ * XXX this should not be necessary anymore now that pmap_enter
+ * does the correct thing... -- miod
+ */
+#ifdef M88110
+ if (cputyp == CPU_88110) {
+ p |= PG_U;
+ /* if the map is the kernel's map and since this
+ * is not a paged kernel, we go ahead and mark
+ * the page as modified to avoid an exception
+ * upon writing to the page the first time. XXX smurph
+ */
+ if (map == kernel_pmap) {
+ if (p & PG_PROT)
+ p |= PG_M;
+ }
+ }
+#endif
+ return (p);
+} /* m88k_protection */
/*
* Routine: FLUSH_ATC_ENTRY
@@ -352,13 +337,16 @@ void pmap_release __P((pmap_t));
* users bit patterns of the CPUs which may hold the TLB, and
* should be flushed
* va virtual address that should be flushed
- * kernel nonzero if supervisor mode, zero if user mode
+ * kernel TRUE if supervisor mode, FALSE if user mode
*/
void
-flush_atc_entry(long users, vm_offset_t va, boolean_t kernel)
+flush_atc_entry(users, va, kernel)
+ long users;
+ vaddr_t va;
+ boolean_t kernel;
{
- register int cpu;
- long tusers = users;
+ int cpu;
+ long tusers = users;
#ifdef DIAGNOSTIC
if ((tusers != 0) && (ff1(tusers) >= MAX_CPUS)) {
@@ -375,34 +363,6 @@ flush_atc_entry(long users, vm_offset_t va, boolean_t kernel)
}
/*
- * Convert machine-independent protection code to M88K protection bits.
- */
-
-unsigned int
-m88k_protection(pmap_t map, vm_prot_t prot)
-{
- pte_template_t p;
-
- p.bits = 0;
- p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
-#ifdef M88110
- if (cputyp == CPU_88110) {
- p.pte.pg_used = 1;
- p.pte.modified = p.pte.prot ? 0 : 1;
- /* if the map is the kernel's map and since this
- * is not a paged kernel, we go ahead and mark
- * the page as modified to avoid an exception
- * upon writing to the page the first time. XXX smurph
- */
- if (map == kernel_pmap) {
- p.pte.modified = p.pte.prot ? 0 : 1;
- }
- }
-#endif
- return (p.bits);
-} /* m88k_protection */
-
-/*
* Routine: PMAP_PTE
*
* Function:
@@ -417,17 +377,14 @@ m88k_protection(pmap_t map, vm_prot_t prot)
*
* Otherwise the page table address is extracted from the segment table,
* the page table index is added, and the result is returned.
- *
- * Calls:
- * SDTENT
- * SDT_VALID
- * PDT_IDX
*/
-
pt_entry_t *
-pmap_pte(pmap_t map, vm_offset_t virt)
+pmap_pte(map, virt)
+ pmap_t map;
+ vaddr_t virt;
{
- sdt_entry_t *sdt;
+ sdt_entry_t *sdt;
+
#ifdef DIAGNOSTIC
/*XXX will this change if physical memory is not contiguous? */
/* take a look at PDTIDX XXXnivas */
@@ -441,9 +398,9 @@ pmap_pte(pmap_t map, vm_offset_t virt)
*/
if (!SDT_VALID(sdt))
return (PT_ENTRY_NULL);
- else
- return ((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) +
- PDTIDX(virt));
+
+ return ((pt_entry_t *)(PG_PFNUM(*(sdt + SDT_ENTRIES))<<PDT_SHIFT) +
+ PDTIDX(virt));
} /* pmap_pte */
@@ -470,9 +427,6 @@ pmap_pte(pmap_t map, vm_offset_t virt)
*
* Calls:
* m88k_protection
- * SDTENT
- * SDT_VALID
- * PDT_IDX
*
* This routine simply dequeues a table from the kpdt_free list,
* initializes all its entries (invalidates them), and sets the
@@ -482,23 +436,22 @@ pmap_pte(pmap_t map, vm_offset_t virt)
* space for the kernel virtual address space).
*
*/
-
pt_entry_t *
-pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
+pmap_expand_kmap(virt, prot)
+ vaddr_t virt;
+ vm_prot_t prot;
{
- int aprot;
- sdt_entry_t *sdt;
- kpdt_entry_t kpdt_ent;
- pmap_t map = kernel_pmap;
+ sdt_entry_t template, *sdt;
+ kpdt_entry_t kpdt_ent;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
#endif
- aprot = m88k_protection (map, prot);
+ template = m88k_protection(kernel_pmap, prot) | SG_V;
/* segment table entry derivate from map and virt. */
- sdt = SDTENT(map, virt);
+ sdt = SDTENT(kernel_pmap, virt);
if (SDT_VALID(sdt))
panic("pmap_expand_kmap: segment table entry VALID");
@@ -509,14 +462,14 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
}
kpdt_free = kpdt_free->next;
/* physical table */
- ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
+ *sdt = kpdt_ent->phys | template;
/* virtual table */
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
+ *(sdt + SDT_ENTRIES) = (vaddr_t)kpdt_ent | template;
(unsigned)(kpdt_ent->phys) = 0;
(unsigned)(kpdt_ent->next) = 0;
return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
-}/* pmap_expand_kmap() */
+} /* pmap_expand_kmap() */
/*
* Routine: PMAP_MAP
@@ -546,7 +499,6 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
* pmap_expand_kmap is called to allocate it. Finally, the page table
* entry is set to point to the physical page.
*
- *
* initialize template with paddr, prot, dt
* look for number of phys pages in range
* {
@@ -557,18 +509,17 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
* }
*
*/
-
-vm_offset_t
+vaddr_t
pmap_map(virt, start, end, prot, cmode)
- vm_offset_t virt, start, end;
+ vaddr_t virt;
+ paddr_t start, end;
vm_prot_t prot;
unsigned int cmode;
{
- int aprot;
unsigned npages;
unsigned num_phys_pages;
- pt_entry_t *pte;
- pte_template_t template;
+ pt_entry_t template, *pte;
+ paddr_t page;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_MAP | CD_NORM)) == (CD_MAP | CD_NORM))
@@ -581,12 +532,10 @@ pmap_map(virt, start, end, prot, cmode)
panic("pmap_map: start greater than end address");
#endif
- aprot = m88k_protection (kernel_pmap, prot);
-
- template.bits = trunc_page(start) | aprot | cmode | DT_VALID;
-
- npages = atop(round_page(end) - trunc_page(start));
+ template = m88k_protection(kernel_pmap, prot) | cmode | PG_V;
+ page = trunc_page(start);
+ npages = atop(round_page(end) - page);
for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
@@ -594,14 +543,14 @@ pmap_map(virt, start, end, prot, cmode)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
- if (pte->dtype)
+ if (PDT_VALID(pte))
printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
- *pte = template.pte;
+ *pte = template | page;
+ page += PAGE_SIZE;
virt += PAGE_SIZE;
- template.bits += PAGE_SIZE;
}
- return (virt);
+ return virt;
} /* pmap_map() */
/*
@@ -623,7 +572,6 @@ pmap_map(virt, start, end, prot, cmode)
*
* Calls:
* m88k_protection
- * BATC_BLK_ALIGNED
* pmap_pte
* pmap_expand_kmap
*
@@ -647,19 +595,19 @@ pmap_map(virt, start, end, prot, cmode)
* }
*
*/
-vm_offset_t
+vaddr_t
pmap_map_batc(virt, start, end, prot, cmode)
- vm_offset_t virt, start, end;
+ vaddr_t virt;
+ paddr_t start, end;
vm_prot_t prot;
unsigned int cmode;
{
- int aprot;
+ unsigned npages;
unsigned num_phys_pages;
- vm_offset_t phys;
- pt_entry_t *pte;
- pte_template_t template;
+ pt_entry_t template, *pte;
+ paddr_t page;
batc_template_t batctmp;
- register int i;
+ int i;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
@@ -672,35 +620,41 @@ pmap_map_batc(virt, start, end, prot, cmode)
panic("pmap_map_batc: start greater than end address");
#endif
- aprot = m88k_protection (kernel_pmap, prot);
- template.bits = trunc_page(start) | aprot | DT_VALID | cmode;
- phys = start;
+
+ template = m88k_protection(kernel_pmap, prot) | cmode | PG_V;
+
batctmp.bits = 0;
batctmp.field.sup = 1; /* supervisor */
- batctmp.field.wt = template.pte.wt; /* write through */
- batctmp.field.g = template.pte.g; /* global */
- batctmp.field.ci = template.pte.ci; /* cache inhibit */
- batctmp.field.wp = template.pte.prot; /* protection */
+ if (template & CACHE_WT)
+ batctmp.field.wt = 1; /* write through */
+ if (template & CACHE_GLOBAL)
+ batctmp.field.g = 1; /* global */
+ if (template & CACHE_INH)
+ batctmp.field.ci = 1; /* cache inhibit */
+ if (template & PG_PROT)
+ batctmp.field.wp = 1; /* protection */
batctmp.field.v = 1; /* valid */
- num_phys_pages = atop(round_page(end) -
- trunc_page(start));
-
- while (num_phys_pages > 0) {
+ page = trunc_page(start);
+ npages = atop(round_page(end) - page);
+ for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
- num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
+ printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, "
+ "align V=%d, page=%x, align P=%d\n",
+ curproc, num_phys_pages, virt,
+ BATC_BLK_ALIGNED(virt), page,
+ BATC_BLK_ALIGNED(page));
#endif
- if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
+ if (BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(page) &&
num_phys_pages >= BATC_BLKBYTES/PAGE_SIZE &&
batc_used < BATC_MAX ) {
/*
* map by BATC
*/
batctmp.field.lba = M88K_BTOBLK(virt);
- batctmp.field.pba = M88K_BTOBLK(phys);
+ batctmp.field.pba = M88K_BTOBLK(page);
for (i = 0; i < max_cpus; i++)
if (cpu_sets[i])
@@ -715,15 +669,14 @@ pmap_map_batc(virt, start, end, prot, cmode)
for (i = 0; i < BATC_BLKBYTES; i += PAGE_SIZE ) {
pte = pmap_pte(kernel_pmap, virt+i);
- if (pte->dtype)
- printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
+ if (PDT_VALID(pte))
+ printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, *pte);
}
}
#endif
batc_used++;
virt += BATC_BLKBYTES;
- phys += BATC_BLKBYTES;
- template.pte.pfn = atop(phys);
+ page += BATC_BLKBYTES;
num_phys_pages -= BATC_BLKBYTES/PAGE_SIZE;
continue;
}
@@ -733,17 +686,15 @@ pmap_map_batc(virt, start, end, prot, cmode)
#ifdef DEBUG
if (pmap_con_dbg & CD_MAPB)
- if (pte->dtype)
+ if (PDT_VALID(pte))
printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
- *pte = template.pte;
+ *pte = template | trunc_page(page);
virt += PAGE_SIZE;
- phys += PAGE_SIZE;
- template.bits += PAGE_SIZE;
- num_phys_pages--;
+ page += PAGE_SIZE;
}
- return (round_page(virt));
+ return virt;
} /* pmap_map_batc() */
/*
@@ -751,26 +702,18 @@ pmap_map_batc(virt, start, end, prot, cmode)
*
* Function:
* Set the cache-control bits in the page table entries(PTE) which maps
- * the specifid virutal address range.
- *
- * mode
- * writethrough 0x200
- * global 0x80
- * cache inhibit 0x40
+ * the specified virtual address range.
*
* Parameters:
* pmap_t map
- * vm_offset_t s
- * vm_offset_t e
+ * vaddr_t s
+ * vaddr_t e
* unsigned mode
*
* Calls:
- * PMAP_LOCK
- * PMAP_UNLOCK
* pmap_pte
* invalidate_pte
* flush_atc_entry
- * dcachefall
*
* This routine sequences through the pages of the specified range.
* For each, it calls pmap_pte to acquire a pointer to the page table
@@ -779,15 +722,17 @@ pmap_map_batc(virt, start, end, prot, cmode)
*
*/
void
-pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
+pmap_cache_ctrl(pmap, s, e, mode)
+ pmap_t pmap;
+ vaddr_t s, e;
+ unsigned mode;
{
int spl;
pt_entry_t *pte;
- vm_offset_t va;
+ vaddr_t va;
boolean_t kflush;
int cpu;
- register unsigned users;
- register pte_template_t opte;
+ unsigned users;
#ifdef DEBUG
if (mode & CACHE_MASK) {
@@ -823,19 +768,18 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
#endif /* DEBUG */
/*
-	 * Invalidate pte temporarily to avoid being written back
-	 * the modified bit and/or the reference bit by other cpu.
-	 * XXX
+	 * Invalidate the pte temporarily to avoid having the modified
+	 * bit and/or the reference bit written back by any other cpu.
+	 * XXX
*/
- opte.bits = invalidate_pte(pte);
- ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
+ *pte = (invalidate_pte(pte) & CACHE_MASK) | mode;
flush_atc_entry(users, va, kflush);
/*
* Data cache should be copied back and invalidated.
*/
- for (cpu=0; cpu<max_cpus; cpu++)
+ for (cpu = 0; cpu < max_cpus; cpu++)
if (cpu_sets[cpu])
- cmmu_flush_remote_cache(cpu, ptoa(pte->pfn),
+ cmmu_flush_remote_cache(cpu, ptoa(PG_PFNUM(*pte)),
PAGE_SIZE);
}
PMAP_UNLOCK(pmap, spl);
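
The update above is a clear-and-set on a live PTE: invalidate_pte() atomically zaps the entry and hands back the old word, the old cache bits are masked off with CACHE_MASK, and the new mode is ORed in before the ATC flush. A standalone sketch of that read-modify-write; the bit values are made up, and the 88k's atomic xmem exchange is reduced to a plain, non-atomic swap:

	#include <stdio.h>
	#include <stdint.h>

	#define CACHE_WT	0x200U		/* hypothetical cache-control bits */
	#define CACHE_GLOBAL	0x080U
	#define CACHE_INH	0x040U
	#define CACHE_MASK	(~(CACHE_WT | CACHE_GLOBAL | CACHE_INH))
	#define PG_V		0x001U

	/* stand-in for invalidate_pte(): clear the entry, return the old word */
	static uint32_t
	invalidate_pte(volatile uint32_t *pte)
	{
		uint32_t opte = *pte;

		*pte = 0;
		return (opte);
	}

	int
	main(void)
	{
		volatile uint32_t pte = 0x00123000U | CACHE_GLOBAL | PG_V;

		/* drop the old cache bits, set the new mode, keep the rest */
		pte = (invalidate_pte(&pte) & CACHE_MASK) | CACHE_INH;
		printf("pte now %08x\n", (unsigned)pte);	/* 00123041 */
		return (0);
	}
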
@@ -867,8 +811,7 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
*
* Calls:
* simple_lock_init
- * pmap_map
- * pmap_map_batc
+ * pmap_map or pmap_map_batc
*
* The physical address 'load_start' is mapped at
* VM_MIN_KERNEL_ADDRESS, which maps the kernel code and data at the
@@ -890,21 +833,20 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
*/
void
-pmap_bootstrap(vm_offset_t load_start,
- vm_offset_t *phys_start,
- vm_offset_t *phys_end,
- vm_offset_t *virt_start,
- vm_offset_t *virt_end)
+pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end)
+ vaddr_t load_start;
+ paddr_t *phys_start, *phys_end;
+ vaddr_t *virt_start, *virt_end;
{
kpdt_entry_t kpdt_virt;
sdt_entry_t *kmap;
- vm_offset_t vaddr,
+ vaddr_t vaddr,
virt,
- kpdt_phys,
- s_text,
- e_text,
kernel_pmap_size,
pdt_size;
+ paddr_t s_text,
+ e_text,
+ kpdt_phys;
apr_template_t apr_data;
pt_entry_t *pte;
int i;
@@ -1027,8 +969,8 @@ pmap_bootstrap(vm_offset_t load_start,
* init the kpdt queue
*/
kpdt_free = kpdt_virt;
- for (i = pdt_size/PDT_SIZE; i > 0; i--) {
- kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
+ for (i = pdt_size / PDT_SIZE; i > 0; i--) {
+ kpdt_virt->next = (kpdt_entry_t)((vaddr_t)kpdt_virt + PDT_SIZE);
kpdt_virt->phys = kpdt_phys;
kpdt_virt = kpdt_virt->next;
kpdt_phys += PDT_SIZE;
@@ -1046,30 +988,22 @@ pmap_bootstrap(vm_offset_t load_start,
e_text = round_page(e_text);
#ifdef OMRON_PMAP
- #define PMAPER pmap_map
+#define PMAPER pmap_map
#else
- #define PMAPER pmap_map_batc
+#define PMAPER pmap_map_batc
#endif
/* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
- vaddr = PMAPER(0,
- 0,
- 0x10000,
- VM_PROT_WRITE | VM_PROT_READ,
- CACHE_INH);
+ vaddr = PMAPER(0, 0, 0x10000, VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
/* map the kernel text read only */
- vaddr = PMAPER((vm_offset_t)trunc_page(((unsigned)&kernelstart)),
- s_text,
- e_text,
- VM_PROT_WRITE | VM_PROT_READ,
- CACHE_GLOBAL); /* shouldn't it be RO? XXX*/
-
- vaddr = PMAPER(vaddr,
- e_text,
- (vm_offset_t)kmap,
- VM_PROT_WRITE | VM_PROT_READ,
- CACHE_GLOBAL);
+ vaddr = PMAPER((vaddr_t)trunc_page(((unsigned)&kernelstart)),
+ s_text, e_text, VM_PROT_WRITE | VM_PROT_READ,
+ CACHE_GLOBAL); /* shouldn't it be RO? XXX*/
+
+ vaddr = PMAPER(vaddr, e_text, (paddr_t)kmap,
+ VM_PROT_WRITE | VM_PROT_READ, CACHE_GLOBAL);
+
/*
* Map system segment & page tables - should be cache inhibited?
* 88200 manual says that CI bit is driven on the Mbus while accessing
@@ -1085,8 +1019,8 @@ pmap_bootstrap(vm_offset_t load_start,
while (vaddr < (*virt_start - kernel_pmap_size))
vaddr = round_page(vaddr + 1);
}
- vaddr = PMAPER(vaddr, (vm_offset_t)kmap, *phys_start,
- VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
+ vaddr = PMAPER(vaddr, (paddr_t)kmap, *phys_start,
+ VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
if (vaddr != *virt_start) {
/*
@@ -1111,13 +1045,13 @@ pmap_bootstrap(vm_offset_t load_start,
* used by the 1x7 ethernet driver. Remove this when that is fixed.
* XXX -nivas
*/
- if (brdtyp != BRD_188) { /* != BRD_188 */
+ if (brdtyp == BRD_187 || brdtyp == BRD_197) {
*phys_start = vaddr;
etherlen = ETHERPAGES * NBPG;
etherbuf = (void *)vaddr;
vaddr = PMAPER(vaddr, *phys_start, *phys_start + etherlen,
- VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
+ VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);
*virt_start += etherlen;
*phys_start += etherlen;
@@ -1176,34 +1110,34 @@ pmap_bootstrap(vm_offset_t load_start,
/*
	 * size-1, because pmap_map rounds up to the next page number
*/
- PMAPER(ptable->virt_start,
- ptable->phys_start,
- ptable->phys_start + (ptable->size - 1),
- ptable->prot, ptable->cacheability);
- }
+ PMAPER(ptable->virt_start, ptable->phys_start,
+ ptable->phys_start + (ptable->size - 1),
+ ptable->prot, ptable->cacheability);
}
- /*
- * Allocate all the submaps we need. Note that SYSMAP just allocates
- * kernel virtual address with no physical backing memory. The idea
- * is physical memory will be mapped at this va before using that va.
- * This means that if different physical pages are going to be mapped
- * at different times, we better do a tlb flush before using it -
- * else we will be referencing the wrong page.
- */
+ }
+#undef PMAPER
+
+ /*
+	 * Allocate all the submaps we need. Note that SYSMAP just allocates
+	 * kernel virtual addresses with no physical backing memory. The idea
+	 * is that physical memory will be mapped at this va before the va is
+	 * used. This means that if different physical pages are going to be
+	 * mapped at different times, we had better do a tlb flush before
+	 * using it - else we will be referencing the wrong page.
+ */
#define SYSMAP(c, p, v, n) \
({ \
v = (c)virt; \
if ((p = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL) \
- pmap_expand_kmap(virt, (VM_PROT_READ|VM_PROT_WRITE)|(CACHE_GLOBAL << 16)); \
+ pmap_expand_kmap(virt, VM_PROT_READ | VM_PROT_WRITE); \
virt += ((n) * PAGE_SIZE); \
})
virt = *virt_start;
SYSMAP(caddr_t, vmpte, vmmap, 1);
- vmpte->pfn = -1;
- vmpte->dtype = DT_INVALID;
+ *vmpte = PG_NV;
SYSMAP(struct msgbuf *, msgbufmap, msgbufp, btoc(MSGBUFSIZE));
@@ -1224,7 +1158,7 @@ pmap_bootstrap(vm_offset_t load_start,
}
#endif
if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
+ pmap_expand_kmap(virt, VM_PROT_READ | VM_PROT_WRITE);
}
/*
* Switch to using new page tables
@@ -1259,11 +1193,9 @@ pmap_bootstrap(vm_offset_t load_start,
* pmap_remove_range().
*/
pte = pmap_pte(kernel_pmap, phys_map_vaddr1);
- pte->pfn = -1;
- pte->dtype = DT_INVALID;
+ *pte = PG_NV;
pte = pmap_pte(kernel_pmap, phys_map_vaddr2);
- pte->dtype = DT_INVALID;
- pte->pfn = -1;
+ *pte = PG_NV;
/* Load supervisor pointer to segment table. */
cmmu_remote_set_sapr(i, apr_data.bits);
#ifdef DEBUG
@@ -1281,7 +1213,6 @@ pmap_bootstrap(vm_offset_t load_start,
#endif
} /* pmap_bootstrap() */
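
One detail worth isolating from the function above is the kpdt setup: a contiguous block is threaded into a singly linked free list of PDT_SIZE chunks, each chunk remembering its own physical address. A self-contained sketch of that loop, with simplified types and invented sizes:

	#include <stdio.h>
	#include <stdlib.h>

	#define PDT_SIZE	256		/* hypothetical chunk size */
	#define NCHUNKS		8

	struct kpdt_entry {
		struct kpdt_entry *next;
		unsigned long phys;
	};

	int
	main(void)
	{
		char *region = malloc(NCHUNKS * PDT_SIZE);
		struct kpdt_entry *kpdt_virt = (struct kpdt_entry *)region;
		struct kpdt_entry *kpdt_free = kpdt_virt;
		unsigned long kpdt_phys = 0x200000;	/* pretend physical base */
		int i, n;

		for (i = NCHUNKS; i > 0; i--) {
			kpdt_virt->next =
			    (struct kpdt_entry *)((char *)kpdt_virt + PDT_SIZE);
			kpdt_virt->phys = kpdt_phys;
			kpdt_virt = kpdt_virt->next;
			kpdt_phys += PDT_SIZE;
		}
		/* the loop leaves the last next pointer dangling; terminate it */
		((struct kpdt_entry *)(region + (NCHUNKS - 1) * PDT_SIZE))->next = NULL;

		for (n = 0; kpdt_free != NULL; kpdt_free = kpdt_free->next)
			n++;
		printf("%d chunks on the free list\n", n);	/* 8 */
		free(region);
		return (0);
	}
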
-
/*
* Routine: PMAP_INIT
*
@@ -1296,7 +1227,6 @@ pmap_bootstrap(vm_offset_t load_start,
*
* Extern/Globals
* pv_head_table
- * pv_lock_table
* pmap_modify_list
* pmap_phys_start
* pmap_phys_end
@@ -1306,7 +1236,7 @@ pmap_bootstrap(vm_offset_t load_start,
* uvm_km_zalloc
*
* This routine does not really have much to do. It allocates space
- * for the pv_head_table, pv_lock_table, pmap_modify_list; and sets these
+ * for the pv_head_table and pmap_modify_list, and sets these
* pointers. It also initializes zones for pmap structures, pv_entry
* structures, and segment tables.
*
@@ -1316,36 +1246,31 @@ pmap_bootstrap(vm_offset_t load_start,
* list. (All other pages are kernel-specific and are permanently
* wired.)
*
- *
* uvm_km_zalloc() memory for pv_table
* uvm_km_zalloc() memory for modify_bits
- *
*/
void
pmap_init(void)
{
- register long npages;
- register vm_offset_t addr;
- register vm_size_t s;
- register int i;
- struct pv_entry *pv;
- char *attr;
- struct simplelock *lock;
- int bank;
+ long npages;
+ vaddr_t addr;
+ vsize_t s;
+ pv_entry_t pv;
+ char *attr;
+ int bank;
#ifdef DEBUG
if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
printf("pmap_init()\n");
#endif
/*
- * Allocate memory for the pv_head_table and its lock bits,
+ * Allocate memory for the pv_head_table,
* the modify bit array, and the pte_page table.
*/
for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
npages += vm_physmem[bank].end - vm_physmem[bank].start;
s = PV_TABLE_SIZE(npages); /* pv_list */
- s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */
s += npages * sizeof(char); /* pmap_modify_list */
#ifdef DEBUG
@@ -1362,34 +1287,19 @@ pmap_init(void)
pv_head_table = (pv_entry_t)addr;
addr += PV_TABLE_SIZE(npages);
- /*
- * Assume that 'simple_lock' is used to lock pv_lock_table
- */
- pv_lock_table = (struct simplelock *)addr; /* XXX */
- addr += PV_LOCK_TABLE_SIZE(npages);
-
pmap_modify_list = (char *)addr;
/*
- * Initialize pv_lock_table
- */
- for (i = 0; i < npages; i++)
- simple_lock_init(&(pv_lock_table[i]));
-
- /*
- * Now that the pv, attribute, and lock tables have been allocated,
+ * Now that the pv and attribute tables have been allocated,
* assign them to the memory segments.
*/
pv = pv_head_table;
- lock = pv_lock_table;
attr = pmap_modify_list;
for (bank = 0; bank < vm_nphysseg; bank++) {
npages = vm_physmem[bank].end - vm_physmem[bank].start;
vm_physmem[bank].pmseg.pvent = pv;
vm_physmem[bank].pmseg.attrs = attr;
- vm_physmem[bank].pmseg.plock = lock;
pv += npages;
- lock += npages;
attr += npages;
}
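
The assignment loop above hands out slices of two arrays carved from a single allocation: pv entries first, attribute bytes after them, one slot per managed page in each physical segment. A user-space sketch of the same carving, with invented segment sizes:

	#include <stdio.h>
	#include <stdlib.h>

	struct pv_entry {
		struct pv_entry *next;
	};

	struct physseg {
		long npages;
		struct pv_entry *pvent;
		char *attrs;
	} seg[2] = { { 4 }, { 12 } };

	int
	main(void)
	{
		long npages = 0;
		int bank;
		char *addr, *attr;
		struct pv_entry *pv;

		for (bank = 0; bank < 2; bank++)
			npages += seg[bank].npages;

		/* one zeroed block: pv entries, then one attribute byte per page */
		addr = calloc(npages, sizeof(struct pv_entry) + sizeof(char));
		pv = (struct pv_entry *)addr;
		attr = addr + npages * sizeof(struct pv_entry);

		for (bank = 0; bank < 2; bank++) {
			seg[bank].pvent = pv;
			seg[bank].attrs = attr;
			pv += seg[bank].npages;
			attr += seg[bank].npages;
		}
		printf("bank 1 pv slice starts at slot %ld\n",
		    (long)(seg[1].pvent - seg[0].pvent));	/* 4 */
		free(addr);
		return (0);
	}
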
@@ -1415,8 +1325,6 @@ pmap_init(void)
*
* Calls:
* m88k_protection
- * cmmu_sflush_page
- * bzero
*
* Special Assumptions:
* no locking required
@@ -1426,26 +1334,25 @@ pmap_init(void)
* mappings effective, and zeros all the bits.
*/
void
-pmap_zero_page(vm_offset_t phys)
+pmap_zero_page(phys)
+ paddr_t phys;
{
- vm_offset_t srcva;
- pte_template_t template;
- unsigned int spl;
+ vaddr_t srcva;
+ int spl;
int cpu;
pt_entry_t *srcpte;
cpu = cpu_number();
- srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu * PAGE_SIZE));
+ srcva = (vaddr_t)(phys_map_vaddr1 + (cpu * PAGE_SIZE));
srcpte = pmap_pte(kernel_pmap, srcva);
- template.bits = trunc_page(phys)
- | m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
- | DT_VALID | CACHE_GLOBAL;
-
SPLVM(spl);
cmmu_flush_tlb(1, srcva, PAGE_SIZE);
- *srcpte = template.pte;
+ *srcpte = trunc_page(phys) |
+ m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) |
+ CACHE_GLOBAL | PG_V;
SPLX(spl);
+
bzero((void*)srcva, PAGE_SIZE);
/* force the data out */
cmmu_flush_remote_data_cache(cpu, phys, PAGE_SIZE);
@@ -1460,33 +1367,22 @@ pmap_zero_page(vm_offset_t phys)
* by the hardware. If the size specified is non-zero, the map will be
* used in software only, and is bounded by that size.
*
- * Parameters:
- * size size of the map
- *
 * This routine allocates a pmap structure.
*/
struct pmap *
pmap_create(void)
{
- struct pmap *p;
+ pmap_t p;
+ pmap_statistics_t stats;
+ sdt_entry_t *segdt;
+ int i;
+ unsigned int s;
CHECK_PMAP_CONSISTENCY("pmap_create");
p = (struct pmap *)pool_get(&pmappool, PR_WAITOK);
bzero(p, sizeof(*p));
- pmap_pinit(p);
- return (p);
-
-} /* pmap_create() */
-void
-pmap_pinit(pmap_t p)
-{
- pmap_statistics_t stats;
- sdt_entry_t *segdt;
- int i;
- unsigned int s;
-
/*
* Allocate memory for *actual* segment table and *shadow* table.
*/
@@ -1525,9 +1421,8 @@ pmap_pinit(pmap_t p)
* memory for page tables should be CACHE DISABLED on MVME188
*/
pmap_cache_ctrl(kernel_pmap,
- (vm_offset_t)segdt,
- (vm_offset_t)segdt+ (SDT_SIZE*2),
- CACHE_INH);
+ (vaddr_t)segdt, (vaddr_t)segdt+ (SDT_SIZE*2),
+ CACHE_INH);
}
#endif
/*
@@ -1568,10 +1463,11 @@ pmap_pinit(pmap_t p)
p->next->prev = p;
#endif
-} /* pmap_pinit() */
+ return (p);
+} /* pmap_create() */
/*
- * Routine: PMAP_FREE_TABLES (internal)
+ * Routine: PMAP_RELEASE
*
 * Internal procedure used by pmap_destroy() to actually deallocate
* the tables.
@@ -1581,7 +1477,7 @@ pmap_pinit(pmap_t p)
*
* Calls:
* pmap_pte
- * kmem_free
+ * uvm_km_free
* PT_FREE
*
* Special Assumptions:
@@ -1595,7 +1491,8 @@ pmap_pinit(pmap_t p)
*
*/
void
-pmap_free_tables(pmap_t pmap)
+pmap_release(pmap)
+ pmap_t pmap;
{
unsigned long sdt_va; /* outer loop index */
sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
@@ -1604,7 +1501,7 @@ pmap_free_tables(pmap_t pmap)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
- printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
+ printf("(pmap_release :%x) pmap %x\n", curproc, pmap);
#endif
sdttbl = pmap->sdt_vaddr; /* addr of segment table */
@@ -1621,10 +1518,10 @@ pmap_free_tables(pmap_t pmap)
/* Segment table Loop */
for (; i < j; i++) {
sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
- if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
+ if ((gdttbl = pmap_pte(pmap, (vaddr_t)sdt_va)) != PT_ENTRY_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_free_tables :%x) free page table = 0x%x\n",
+ printf("(pmap_release :%x) free page table = 0x%x\n",
curproc, gdttbl);
#endif
PT_FREE(gdttbl);
@@ -1633,27 +1530,22 @@ pmap_free_tables(pmap_t pmap)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_free_tables :%x) free segment table = 0x%x\n",
+ printf("(pmap_release :%x) free segment table = 0x%x\n",
curproc, sdttbl);
#endif
/*
* Freeing both *actual* and *shadow* segment tables
*/
- uvm_km_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
-} /* pmap_free_tables() */
+ uvm_km_free(kernel_map, (vaddr_t)sdttbl, 2*SDT_SIZE);
-void
-pmap_release(register pmap_t p)
-{
- pmap_free_tables(p);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
- printf("(pmap_destroy :%x) ref_count = 0\n", curproc);
+ if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
+ printf("(pmap_release :%x) ref_count = 0\n", curproc);
/* unlink from list of pmap structs */
- p->prev->next = p->next;
- p->next->prev = p->prev;
+ pmap->prev->next = pmap->next;
+ pmap->next->prev = pmap->prev;
#endif
-}
+} /* pmap_release() */
/*
* Routine: PMAP_DESTROY
@@ -1666,23 +1558,22 @@ pmap_release(register pmap_t p)
* pmap pointer to pmap structure
*
* Calls:
- * CHECK_PMAP_CONSISTENCY
- * PMAP_LOCK, PMAP_UNLOCK
- * pmap_free_tables
- * free
+ * pmap_release
+ * pool_put
*
* Special Assumptions:
* Map contains no valid mappings.
*
* This routine decrements the reference count in the pmap
- * structure. If it goes to zero, pmap_free_tables is called to release
- * the memory space to the system. Then, call kmem_free to free the
+ * structure. If it goes to zero, pmap_release is called to release
+ * the memory space to the system. Then pool_put is called to free the
* pmap structure.
*/
void
-pmap_destroy(pmap_t p)
+pmap_destroy(p)
+ pmap_t p;
{
- register int c, s;
+ int c, s;
if (p == PMAP_NULL) {
#ifdef DEBUG
@@ -1707,7 +1598,6 @@ pmap_destroy(pmap_t p)
pmap_release(p);
pool_put(&pmappool, p);
}
-
} /* pmap_destroy() */
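
The body above is the usual reference-count teardown: decrement under the pmap lock, and only when the count reaches zero release the tables and return the structure to its pool. A pthreads sketch of the same shape, with hypothetical names and pmap_release reduced to a comment:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pmap {
		pthread_mutex_t lock;
		int ref_count;
	};

	static void
	pmap_destroy(struct pmap *p)
	{
		int c;

		pthread_mutex_lock(&p->lock);
		c = --p->ref_count;
		pthread_mutex_unlock(&p->lock);

		if (c == 0) {
			/* pmap_release() would free the segment/page tables here */
			printf("last reference dropped, freeing pmap\n");
			pthread_mutex_destroy(&p->lock);
			free(p);
		}
	}

	int
	main(void)
	{
		struct pmap *p = malloc(sizeof(*p));

		pthread_mutex_init(&p->lock, NULL);
		p->ref_count = 2;
		pmap_destroy(p);	/* count drops to 1, nothing happens */
		pmap_destroy(p);	/* count hits 0, pmap is freed */
		return (0);
	}
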
@@ -1727,9 +1617,10 @@ pmap_destroy(pmap_t p)
* is incremented. The function then returns.
*/
void
-pmap_reference(pmap_t p)
+pmap_reference(p)
+ pmap_t p;
{
- int s;
+ int s;
if (p != PMAP_NULL) {
PMAP_LOCK(p, s);
@@ -1757,21 +1648,10 @@ pmap_reference(pmap_t p)
* pmap_modify_list
*
* Calls:
- * CHECK_PAGE_ALIGN
- * SDTENT
- * SDT_VALID
- * SDT_NEXT
* pmap_pte
- * PDT_VALID
- * PMAP_MANAGED
- * LOCK_PVH
- * UNLOCK_PVH
- * PA_TO_PVH
- * CHECK_PV_LIST
- * free
+ * pool_put
* invalidate_pte
* flush_atc_entry
- * PHYS_TO_VM_PAGE
*
* Special Assumptions:
* The pmap must be locked.
@@ -1789,17 +1669,18 @@ pmap_reference(pmap_t p)
* panics). The PV list entry is unlinked from the list, and returned to
 * its pool.
*/
-
void
-pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
+pmap_remove_range(pmap, s, e)
+ pmap_t pmap;
+ vaddr_t s, e;
{
- int pfn;
pt_entry_t *pte;
pv_entry_t prev, cur;
pv_entry_t pvl;
- vm_offset_t pa, va;
+ paddr_t pa;
+ vaddr_t va;
unsigned users;
- pte_template_t opte;
+ pt_entry_t opte;
boolean_t kflush;
/*
@@ -1821,7 +1702,6 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
for (va = s; va < e; va += PAGE_SIZE) {
-
sdt_entry_t *sdt;
sdt = SDTENT(pmap,va);
@@ -1845,19 +1725,17 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
* Update statistics.
*/
pmap->stats.resident_count--;
- if (pte->wired)
+ if (pmap_pte_w(pte))
pmap->stats.wired_count--;
- pfn = pte->pfn;
- pa = ptoa(pfn);
+ pa = ptoa(PG_PFNUM(*pte));
if (PMAP_MANAGED(pa)) {
- LOCK_PVH(pa);
/*
* Remove the mapping from the pvlist for
* this physical page.
*/
- pvl = PA_TO_PVH(pa);
+ pvl = pa_to_pvh(pa);
CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
#ifdef DIAGNOSTIC
@@ -1866,7 +1744,6 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
#endif
if (pvl->va == va && pvl->pmap == pmap) {
-
/*
					 * Header is the pv_entry. Copy the next one
					 * to the header and free the next one (we can't
@@ -1898,8 +1775,6 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
}
CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
- UNLOCK_PVH(pa);
-
} /* if PMAP_MANAGED */
/*
@@ -1909,20 +1784,20 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
/*
-		 * Invalidate pte temporarily to avoid being written back
-		 * the modified bit and/or the reference bit by other cpu.
+		 * Invalidate the pte temporarily to avoid having the modified
+		 * bit and/or the reference bit written back by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
+ opte = invalidate_pte(pte);
flush_atc_entry(users, va, kflush);
- if (opte.pte.modified) {
+ if (opte & PG_M) {
if (PMAP_MANAGED(pa)) {
struct vm_page *pg;
- pg = PHYS_TO_VM_PAGE(opte.bits & ~PAGE_MASK);
+ pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
pg->flags &= ~PG_CLEAN;
/* keep track ourselves too */
- SET_ATTRIB(pa, 1);
+ *pa_to_attribute(pa) = TRUE;
}
}
@@ -1937,24 +1812,25 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
* It is assumed that start is properly rounded to the VM page size.
*
* Parameters:
- * pmap pointer to pmap structure
+ * map pointer to pmap structure
+ *	s		start of the virtual address range
+ *	e		end of the virtual address range
*
* Special Assumptions:
* Assumes not all entries must be valid in specified range.
*
* Calls:
- * CHECK_PAGE_ALIGN
- * PMAP_LOCK, PMAP_UNLOCK
* pmap_remove_range
- * panic
*
* After taking pmap read lock, pmap_remove_range is called to do the
* real work.
*/
void
-pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
+pmap_remove(map, s, e)
+ pmap_t map;
+ vaddr_t s, e;
{
- int spl;
+ int spl;
if (map == PMAP_NULL) {
return;
@@ -1992,15 +1868,9 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
* pmap_modify_list
*
* Calls:
- * PMAP_MANAGED
- * SPLVM, SPLX
- * PA_TO_PVH
- * CHECK_PV_LIST
* simple_lock
- * PDT_VALID
* pmap_pte
- * PHYS_TO_VM_PAGE
- * free
+ * pool_put
*
* If the page specified by the given address is not a managed page,
* this routine simply returns. Otherwise, the PV list associated with
@@ -2016,15 +1886,16 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
* will be null.
*/
void
-pmap_remove_all(vm_offset_t phys)
+pmap_remove_all(phys)
+ paddr_t phys;
{
pv_entry_t pvl, cur;
pt_entry_t *pte;
- vm_offset_t va;
+ vaddr_t va;
pmap_t pmap;
int spl;
unsigned users;
- pte_template_t opte;
+ pt_entry_t opte;
boolean_t kflush;
#ifdef DEBUG
int dbgcnt = 0;
@@ -2038,6 +1909,7 @@ pmap_remove_all(vm_offset_t phys)
#endif
return;
}
+
SPLVM(spl);
/*
* Walk down PV list, removing all mappings.
@@ -2047,9 +1919,8 @@ pmap_remove_all(vm_offset_t phys)
*/
remove_all_Retry:
- pvl = PA_TO_PVH(phys);
+ pvl = pa_to_pvh(phys);
CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
- LOCK_PVH(phys);
/*
* Loop for each entry on the pv list
@@ -2057,7 +1928,6 @@ remove_all_Retry:
while ((pmap = pvl->pmap) != PMAP_NULL) {
va = pvl->va;
if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
goto remove_all_Retry;
}
users = pmap->cpus_using;
@@ -2082,15 +1952,15 @@ remove_all_Retry:
}
if (!PDT_VALID(pte))
panic("pmap_remove_all: pte invalid");
- if (ptoa(pte->pfn) != phys)
+ if (ptoa(PG_PFNUM(*pte)) != phys)
panic("pmap_remove_all: pte doesn't point to page");
- if (pte->wired)
+ if (pmap_pte_w(pte))
panic("pmap_remove_all: removing a wired page");
pmap->stats.resident_count--;
if ((cur = pvl->next) != PV_ENTRY_NULL) {
- *pvl = *cur;
+ *pvl = *cur;
pool_put(&pvpool, cur);
} else
pvl->pmap = PMAP_NULL;
@@ -2100,18 +1970,18 @@ remove_all_Retry:
*
* Invalidate pte temporarily to avoid the modified
* bit and/or the reference bit being written back
- * by other cpu.
+ * by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
+ opte = invalidate_pte(pte);
flush_atc_entry(users, va, kflush);
- if (opte.pte.modified) {
+ if (opte & PG_M) {
struct vm_page *pg;
pg = PHYS_TO_VM_PAGE(phys);
pg->flags &= ~PG_CLEAN;
/* keep track ourselves too */
- SET_ATTRIB(phys, 1);
+ *pa_to_attribute(phys) = TRUE;
}
/*
@@ -2126,7 +1996,6 @@ remove_all_Retry:
}
CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
- UNLOCK_PVH(phys);
SPLX(spl);
} /* pmap_remove_all() */
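
The removal loop above never unlinks the list head, because the head is an element of pv_head_table rather than a heap object: when the first mapping goes away, its successor is copied over the header (*pvl = *cur) and the successor is freed; with no successor, the header is simply marked empty. A minimal sketch of that idiom, with the pmap/va pair collapsed to one integer:

	#include <stdio.h>
	#include <stdlib.h>

	struct pv_entry {
		struct pv_entry *next;
		int va;		/* stand-in for the pmap/va pair; 0 = empty */
	};

	int
	main(void)
	{
		struct pv_entry pvh = { NULL, 100 };	/* header holds 1st mapping */
		struct pv_entry *cur;
		int va;

		for (va = 200; va <= 300; va += 100) {	/* enter two more mappings */
			cur = malloc(sizeof(*cur));
			cur->va = va;
			cur->next = pvh.next;
			pvh.next = cur;
		}

		while (pvh.va != 0) {			/* the remove-all loop */
			printf("removing va %d\n", pvh.va);
			if ((cur = pvh.next) != NULL) {
				pvh = *cur;	/* copy successor over the header */
				free(cur);
			} else
				pvh.va = 0;	/* list is now empty */
		}
		return (0);
	}
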
@@ -2140,13 +2009,9 @@ remove_all_Retry:
* phys physical address of page to be read-protected.
*
* Calls:
- * SPLVM, SPLX
- * PA_TO_PVH
- * CHECK_PV_LIST
- * simple_lock, simple_unlock
- * panic
- * PDT_VALID
- * pmap_pte
+ * pa_to_pvh
+ * simple_lock, simple_unlock
+ * pmap_pte
*
* Special Assumptions:
 *	All mappings of the page are user-space mappings.
@@ -2156,13 +2021,13 @@ remove_all_Retry:
* read-only bit. The TLB is appropriately flushed.
*/
void
-pmap_copy_on_write(vm_offset_t phys)
+pmap_copy_on_write(phys)
+ paddr_t phys;
{
pv_entry_t pv_e;
pt_entry_t *pte;
int spl;
unsigned users;
- pte_template_t opte;
boolean_t kflush;
if (!PMAP_MANAGED(phys)) {
@@ -2176,16 +2041,16 @@ pmap_copy_on_write(vm_offset_t phys)
SPLVM(spl);
copy_on_write_Retry:
- pv_e = PA_TO_PVH(phys);
+ pv_e = pa_to_pvh(phys);
CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
- LOCK_PVH(phys);
if (pv_e->pmap == PMAP_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- goto out; /* no mappings */
+ SPLX(spl);
+ return; /* no mappings */
}
/*
@@ -2194,14 +2059,10 @@ copy_on_write_Retry:
*/
while (pv_e != PV_ENTRY_NULL) {
- pmap_t pmap;
- vm_offset_t va;
-
- pmap = pv_e->pmap;
- va = pv_e->va;
+ pmap_t pmap = pv_e->pmap;
+ vaddr_t va = pv_e->va;
if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
goto copy_on_write_Retry;
}
@@ -2220,7 +2081,7 @@ copy_on_write_Retry:
panic("pmap_copy_on_write: pte from pv_list not in map");
if (!PDT_VALID(pte))
panic("pmap_copy_on_write: invalid pte");
- if (ptoa(pte->pfn) != phys)
+ if (ptoa(PG_PFNUM(*pte)) != phys)
panic("pmap_copy_on_write: pte doesn't point to page");
/*
@@ -2228,20 +2089,16 @@ copy_on_write_Retry:
*
* Invalidate pte temporarily to avoid the modified
* bit and/or the reference bit being written back
- * by other cpu.
+ * by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = M88K_RO;
- ((pte_template_t *)pte)->bits = opte.bits;
+ *pte = invalidate_pte(pte) | PG_PROT;
flush_atc_entry(users, va, kflush);
simple_unlock(&pmap->lock);
pv_e = pv_e->next;
}
- CHECK_PV_LIST(phys, PA_TO_PVH(phys), "pmap_copy_on_write");
+ CHECK_PV_LIST(phys, pa_to_pvh(phys), "pmap_copy_on_write");
-out:
- UNLOCK_PVH(phys);
SPLX(spl);
} /* pmap_copy_on_write */
@@ -2272,15 +2129,15 @@ out:
* Otherwise, the PTE's protection attributes are adjusted as specified.
*/
void
-pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
+pmap_protect(pmap, s, e, prot)
+ pmap_t pmap;
+ vaddr_t s, e;
+ vm_prot_t prot;
{
- pte_template_t maprot;
- unsigned ap;
int spl;
- pt_entry_t *pte;
- vm_offset_t va;
+ pt_entry_t ap, *pte;
+ vaddr_t va;
unsigned users;
- pte_template_t opte;
boolean_t kflush;
if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
@@ -2295,8 +2152,7 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
panic("pmap_protect: start grater than end address");
#endif
- maprot.bits = m88k_protection(pmap, prot);
- ap = maprot.pte.prot;
+ ap = m88k_protection(pmap, prot) & PG_PROT;
PMAP_LOCK(pmap, spl);
@@ -2336,17 +2192,13 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
#endif
continue; /* no page mapping */
}
-#if 0
- printf("(pmap_protect :%x) pte good\n", curproc);
-#endif
+
/*
* Invalidate pte temporarily to avoid the
* modified bit and/or the reference bit being
- * written back by other cpu.
+ * written back by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = ap;
- ((pte_template_t *)pte)->bits = opte.bits;
+ *pte = (invalidate_pte(pte) & ~PG_PROT) | ap;
flush_atc_entry(users, va, kflush);
pte++;
}
@@ -2376,10 +2228,8 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
*
* Calls:
* pmap_pte
- * kmem_alloc
- * kmem_free
- * zalloc
- * free
+ * uvm_km_free
+ * uvm_km_zalloc
* pmap_extract
*
* Special Assumptions
@@ -2399,10 +2249,13 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
*
*/
void
-pmap_expand(pmap_t map, vm_offset_t v)
+pmap_expand(map, v)
+ pmap_t map;
+ vaddr_t v;
{
int i, spl;
- vm_offset_t pdt_vaddr, pdt_paddr;
+ vaddr_t pdt_vaddr;
+ paddr_t pdt_paddr;
sdt_entry_t *sdt;
pt_entry_t *pte;
@@ -2425,7 +2278,7 @@ pmap_expand(pmap_t map, vm_offset_t v)
#ifdef MVME188
if (brdtyp == BRD_188) {
/*
- * the page for page tables should be CACHE DISABLED on MVME188
+ * the pages for page tables should be CACHE DISABLED on MVME188
*/
pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
}
@@ -2444,7 +2297,7 @@ pmap_expand(pmap_t map, vm_offset_t v)
#ifdef DEBUG
if (pmap_con_dbg & CD_EXP)
- printf("(pmap_expand :%x) table has already allocated\n", curproc);
+ printf("(pmap_expand :%x) table has already been allocated\n", curproc);
#endif
return;
}
@@ -2465,8 +2318,8 @@ pmap_expand(pmap_t map, vm_offset_t v)
* page tables.
*/
for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
- ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
+ *((sdt_entry_t *)sdt) = pdt_paddr | SG_RW | SG_V;
+ *((sdt_entry_t *)(sdt + SDT_ENTRIES)) = pdt_vaddr | SG_RW | SG_V;
sdt++;
pdt_paddr += PDT_SIZE;
pdt_vaddr += PDT_SIZE;
@@ -2503,7 +2356,7 @@ pmap_expand(pmap_t map, vm_offset_t v)
* pmap_pte
* pmap_expand
* pmap_remove_range
- * free
+ * PT_FREE
*
* This routine starts off by calling pmap_pte to obtain a (virtual)
* pointer to the page table entry corresponding to given virtual
@@ -2542,19 +2395,20 @@ pmap_expand(pmap_t map, vm_offset_t v)
*
*/
int
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
- vm_prot_t prot,
- int flags)
+pmap_enter(pmap, va, pa, prot, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int flags;
{
- int ap;
int spl;
pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t old_pa;
- pte_template_t template;
+ pt_entry_t ap, *pte;
+ paddr_t old_pa;
+ pt_entry_t template;
pv_entry_t pvl;
unsigned users;
- pte_template_t opte;
int kflush;
boolean_t wired = (flags & PMAP_WIRED) != 0;
@@ -2609,7 +2463,7 @@ Retry:
* Special case if the physical page is already mapped
* at this address.
*/
- old_pa = ptoa(pte->pfn);
+ old_pa = ptoa(PG_PFNUM(*pte));
if (old_pa == pa) {
if (pmap == kernel_pmap) {
kflush = TRUE;
@@ -2621,31 +2475,31 @@ Retry:
* May be changing its wired attributes or protection
*/
- if (wired && !pte->wired)
+ if (wired && !(pmap_pte_w(pte)))
pmap->stats.wired_count++;
- else if (!wired && pte->wired)
+ else if (!wired && pmap_pte_w(pte))
pmap->stats.wired_count--;
if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_INH;
+ template = CACHE_INH | PG_V;
else
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_GLOBAL;
+ template = CACHE_GLOBAL | PG_V;
if (wired)
- template.pte.wired = 1;
+ template |= PG_W;
/*
		 * If the mapping is unchanged, we have nothing to do.
*/
- if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
- || (pte->prot != template.pte.prot)) {
+ if (!PDT_VALID(pte) || pmap_pte_w_chg(pte, template & PG_W) ||
+ (pmap_pte_prot_chg(pte, ap & PG_PROT))) {
/*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
+			 * Invalidate the pte temporarily to avoid having
+			 * the modified bit and/or the reference bit
+			 * written back by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
- template.pte.modified = opte.pte.modified;
- *pte++ = template.pte;
+ template |= (invalidate_pte(pte) & PG_M);
+ *pte++ = template | ap | trunc_page(pa);
flush_atc_entry(users, va, kflush);
}
@@ -2653,7 +2507,7 @@ Retry:
/*
* Remove old mapping from the PV list if necessary.
*/
- if (old_pa != (vm_offset_t)-1) {
+ if (old_pa != (paddr_t)-1) {
/*
* Invalidate the translation buffer,
* then remove the mapping.
@@ -2665,7 +2519,7 @@ Retry:
phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
PMAP_MANAGED(pa) ? 1 : 0);
printf("pte %x pfn %x valid %x\n",
- pte, pte->pfn, pte->dtype);
+ pte, PG_PFNUM(*pte), PDT_VALID(pte));
}
}
#endif
@@ -2688,8 +2542,7 @@ Retry:
		 * Enter the mapping in the PV list for this
* physical page.
*/
- LOCK_PVH(pa);
- pvl = PA_TO_PVH(pa);
+ pvl = pa_to_pvh(pa);
CHECK_PV_LIST (pa, pvl, "pmap_enter before");
if (pvl->pmap == PMAP_NULL) {
@@ -2718,7 +2571,6 @@ Retry:
* Add new pv_entry after header.
*/
if (pv_e == PV_ENTRY_NULL) {
- UNLOCK_PVH(pa);
pv_e = pool_get(&pvpool, PR_NOWAIT);
goto Retry;
}
@@ -2727,11 +2579,10 @@ Retry:
pv_e->next = pvl->next;
pvl->next = pv_e;
/*
- * Remeber that we used the pvlist entry.
+ * Remember that we used the pvlist entry.
*/
pv_e = PV_ENTRY_NULL;
}
- UNLOCK_PVH(pa);
}
/*
@@ -2742,14 +2593,18 @@ Retry:
pmap->stats.wired_count++;
if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_INH;
+ template = CACHE_INH | PG_V;
else
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_GLOBAL;
-
+ template = CACHE_GLOBAL | PG_V;
if (wired)
- template.pte.wired = 1;
+ template |= PG_W;
- *(int *)pte = template.bits;
+ if (flags & VM_PROT_WRITE)
+ template |= PG_U | PG_M;
+ else if (flags & VM_PROT_ALL)
+ template |= PG_U;
+
+ *pte = template | ap | trunc_page(pa);
} /* if ( pa == old_pa ) ... else */
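
The flags test in the new-mapping path above pre-loads the referenced/modified bits: a write access gets PG_U|PG_M up front and a read access gets PG_U, so the hardware does not have to fault again merely to record the first access. A sketch of that decision, assuming invented bit values:

	#include <stdio.h>
	#include <stdint.h>

	#define PG_V		0x01U	/* hypothetical bit values */
	#define PG_U		0x08U	/* referenced */
	#define PG_M		0x10U	/* modified */
	#define VM_PROT_READ	0x1
	#define VM_PROT_WRITE	0x2
	#define VM_PROT_ALL	(VM_PROT_READ | VM_PROT_WRITE)

	static uint32_t
	enter_template(int flags)
	{
		uint32_t template = PG_V;

		if (flags & VM_PROT_WRITE)
			template |= PG_U | PG_M;	/* write: both bits */
		else if (flags & VM_PROT_ALL)
			template |= PG_U;		/* read: referenced only */
		return (template);
	}

	int
	main(void)
	{
		printf("write fault -> %02x\n", enter_template(VM_PROT_WRITE));
		printf("read fault  -> %02x\n", enter_template(VM_PROT_READ));
		printf("no access   -> %02x\n", enter_template(0));
		return (0);
	}
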
@@ -2770,19 +2625,16 @@ Retry:
* pmap pointer to pmap structure
* v virtual address of page to be unwired
*
- * Extern/Global:
- * pte_per_vm_page
- *
* Calls:
- * PMAP_LOCK, PMAP_UNLOCK
* pmap_pte
- * panic
*
* Special Assumptions:
* The mapping must already exist in the pmap.
*/
void
-pmap_unwire(pmap_t map, vm_offset_t v)
+pmap_unwire(map, v)
+ pmap_t map;
+ vaddr_t v;
{
pt_entry_t *pte;
int spl;
@@ -2792,14 +2644,13 @@ pmap_unwire(pmap_t map, vm_offset_t v)
if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
panic("pmap_unwire: pte missing");
- if (pte->wired) {
+ if (pmap_pte_w(pte)) {
/* unwired mapping */
map->stats.wired_count--;
- pte->wired = 0;
+ *pte &= ~PG_W;
}
PMAP_UNLOCK(map, spl);
-
} /* pmap_unwire() */
/*
@@ -2829,12 +2680,16 @@ pmap_unwire(pmap_t map, vm_offset_t v)
* the PTE is returned.
*/
boolean_t
-pmap_extract(pmap_t pmap, vm_offset_t va, paddr_t *pap)
+pmap_extract(pmap, va, pap)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t *pap;
{
- pt_entry_t *pte;
- paddr_t pa;
- int i;
- int spl;
+ pt_entry_t *pte;
+ paddr_t pa;
+ int i;
+ int spl;
+ boolean_t rv = FALSE;
#ifdef DIAGNOSTIC
if (pmap == PMAP_NULL)
@@ -2845,32 +2700,29 @@ pmap_extract(pmap_t pmap, vm_offset_t va, paddr_t *pap)
* check BATC first
*/
if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used-1; i > 0; i--)
+ for (i = batc_used - 1; i > 0; i--)
if (batc_entry[i].lba == M88K_BTOBLK(va)) {
- *pap = (batc_entry[i].pba << BATC_BLKSHIFT) |
- (va & BATC_BLKMASK );
+ if (pap != NULL)
+ *pap = (batc_entry[i].pba << BATC_BLKSHIFT) |
+ (va & BATC_BLKMASK);
return (TRUE);
}
PMAP_LOCK(pmap, spl);
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
- goto fail;
- } else {
- if (PDT_VALID(pte))
- pa = ptoa(pte->pfn);
- else
- goto fail;
+ if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL) {
+ if (PDT_VALID(pte)) {
+ rv = TRUE;
+ if (pap != NULL) {
+ pa = ptoa(PG_PFNUM(*pte));
+ pa |= (va & PAGE_MASK); /* offset within page */
+ *pap = pa;
+ }
+ }
}
- pa |= (va & PAGE_MASK); /* offset within page */
- *pap = pa;
-
- PMAP_UNLOCK(pmap, spl);
- return (TRUE);
-fail:
PMAP_UNLOCK(pmap, spl);
- return (FALSE);
+ return (rv);
} /* pmap_extract() */
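
The address assembly in pmap_extract() has two parts: the frame number stored in the PTE, widened back to a byte address, and the page offset taken from the low bits of the VA. A standalone sketch with conventional 4KB masks standing in for the definitions in pte.h:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	((1U << PAGE_SHIFT) - 1)
	#define PG_FRAME	(~PAGE_MASK)

	#define PG_PFNUM(pte)	(((pte) & PG_FRAME) >> PAGE_SHIFT)
	#define ptoa(pfn)	((uint32_t)(pfn) << PAGE_SHIFT)

	int
	main(void)
	{
		uint32_t pte = 0x00456000U | 0x1U;	/* frame 0x456, valid bit */
		uint32_t va = 0x10002abcU;
		uint32_t pa;

		pa = ptoa(PG_PFNUM(pte)) | (va & PAGE_MASK);
		printf("va %08x -> pa %08x\n",		/* pa == 00456abc */
		    (unsigned)va, (unsigned)pa);
		return (0);
	}
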
/*
@@ -2894,10 +2746,11 @@ fail:
*/
/* ARGSUSED */
void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap, src_pmap;
+ vaddr_t dst_addr, src_addr;
+ vsize_t len;
{
-
}/* pmap_copy() */
/*
@@ -2913,9 +2766,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
* pmap pointer to pmap structure
*
* Calls:
- * CHECK_PMAP_CONSISTENCY
- * panic
- * PMAP_LOCK, PMAP_UNLOCK
* PT_FREE
* pmap_pte
* pmap_remove_range
@@ -2940,21 +2790,22 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
* module deals with a single table at a time.]
*/
void
-pmap_collect(pmap_t pmap)
+pmap_collect(pmap)
+ pmap_t pmap;
{
-
- vm_offset_t sdt_va; /* outer loop index */
- vm_offset_t sdt_vt; /* end of segment */
- sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
- sdt_entry_t *sdtp; /* ptr to index into segment table */
- sdt_entry_t *sdt; /* ptr to index into segment table */
- pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
- pt_entry_t *gdtp; /* ptr to index into a page table */
- boolean_t found_gdt_wired; /* flag indicating a wired page exists
+ vaddr_t sdt_va; /* outer loop index */
+ vaddr_t sdt_vt; /* end of segment */
+ sdt_entry_t *sdttbl; /* ptr to first entry in seg table */
+ sdt_entry_t *sdtp; /* ptr to index into segment table */
+ sdt_entry_t *sdt; /* ptr to index into segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ pt_entry_t *gdttblend; /* ptr to byte after last entry in
+ table group */
+ pt_entry_t *gdtp; /* ptr to index into a page table */
+ boolean_t found_gdt_wired; /* flag indicating a wired page exists
in a page table's address range */
- int spl;
- unsigned int i,j;
+ int spl;
+ unsigned int i, j;
#ifdef DIAGNOSTIC
if (pmap == PMAP_NULL)
@@ -2981,7 +2832,6 @@ pmap_collect(pmap_t pmap)
because of integer overflow/wraparound if VM_MAX_ADDRESS
is near 0xffffffff
*/
-
i = VM_MIN_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
j = VM_MAX_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
if (j < 1024)
@@ -3000,8 +2850,8 @@ pmap_collect(pmap_t pmap)
/* scan page maps for wired pages */
found_gdt_wired = FALSE;
- for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
- if (gdtp->wired) {
+ for (gdtp = gdttbl; gdtp < gdttblend; gdtp++) {
+ if (pmap_pte_w(gdtp)) {
found_gdt_wired = TRUE;
break;
}
@@ -3023,8 +2873,8 @@ pmap_collect(pmap_t pmap)
		 * we can safely deallocate the page map(s)
*/
for (sdt = sdtp; sdt < (sdtp + PDT_TABLE_GROUP_SIZE); sdt++) {
- ((sdt_entry_template_t *) sdt) -> bits = 0;
- ((sdt_entry_template_t *)(sdt+SDT_ENTRIES))->bits = 0;
+ *((sdt_entry_t *) sdt) = 0;
+ *((sdt_entry_t *)(sdt+SDT_ENTRIES)) = 0;
}
/*
@@ -3072,16 +2922,17 @@ pmap_collect(pmap_t pmap)
*
*/
void
-pmap_activate(struct proc *p)
+pmap_activate(p)
+ struct proc *p;
{
- apr_template_t apr_data;
+ apr_template_t apr_data;
#ifdef notyet
#ifdef OMRON_PMAP
- int n;
+ int n;
#endif
#endif
- pmap_t pmap = p->p_vmspace->vm_map.pmap;
- int cpu = cpu_number();
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+ int cpu = cpu_number();
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
@@ -3101,7 +2952,7 @@ pmap_activate(struct proc *p)
apr_data.field.ci = 0;
apr_data.field.te = 1;
#ifdef notyet
- #ifdef OMRON_PMAP
+#ifdef OMRON_PMAP
/*
* cmmu_pmap_activate will set the uapr and the batc entries,
* then flush the *USER* TLB. IF THE KERNEL WILL EVER CARE
@@ -3112,26 +2963,26 @@ pmap_activate(struct proc *p)
pmap->i_batc, pmap->d_batc);
for (n = 0; n < BATC_MAX; n++)
*(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
- #else
+#else
cmmu_set_uapr(apr_data.bits);
cmmu_flush_tlb(0, 0, -1);
- #endif
-#endif /* notyet */
+#endif
+#else
/*
* I am forcing it to not program the BATC at all. pmap.c module
* needs major, major cleanup. XXX nivas
*/
cmmu_set_uapr(apr_data.bits);
cmmu_flush_tlb(0, 0, -1);
+#endif /* notyet */
/*
- * Mark that this cpu is using the pmap.
+ * Mark that this cpu is using the pmap.
*/
SETBIT_CPUSET(cpu, &(pmap->cpus_using));
simple_unlock(&pmap->lock);
} else {
-
/*
* kernel_pmap must be always active.
*/
@@ -3158,13 +3009,13 @@ pmap_activate(struct proc *p)
*
*/
void
-pmap_deactivate(struct proc *p)
+pmap_deactivate(p)
+ struct proc *p;
{
- pmap_t pmap = p->p_vmspace->vm_map.pmap;
- int cpu = cpu_number();
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+ int cpu = cpu_number();
if (pmap != kernel_pmap) {
-
/*
* we expect the spl is already raised to sched level.
*/
@@ -3190,8 +3041,6 @@ pmap_deactivate(struct proc *p)
*
* Calls:
 *	m88k_protection
- * cmmu_sflush_page
- * bcopy
*
* Special Assumptions:
 *	no locking required
@@ -3201,48 +3050,41 @@ pmap_deactivate(struct proc *p)
* new mappings effective, and performs the copy.
*/
void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(src, dst)
+ paddr_t src, dst;
{
- vm_offset_t dstva, srcva;
- unsigned int spl;
- int aprot;
- pte_template_t template;
- pt_entry_t *dstpte, *srcpte;
- int cpu = cpu_number();
+ vaddr_t dstva, srcva;
+ int spl;
+ pt_entry_t template, *dstpte, *srcpte;
+ int cpu = cpu_number();
+
+ template = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) |
+ CACHE_GLOBAL | PG_V;
/*
* Map source physical address.
*/
- aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
-
- srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu << PAGE_SHIFT));
- dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu << PAGE_SHIFT));
+ srcva = (vaddr_t)(phys_map_vaddr1 + (cpu << PAGE_SHIFT));
+ dstva = (vaddr_t)(phys_map_vaddr2 + (cpu << PAGE_SHIFT));
srcpte = pmap_pte(kernel_pmap, srcva);
dstpte = pmap_pte(kernel_pmap, dstva);
- template.bits = trunc_page(src) | aprot |
- DT_VALID | CACHE_GLOBAL;
-
- /* do we need to write back dirty bits */
SPLVM(spl);
cmmu_flush_tlb(1, srcva, PAGE_SIZE);
- *srcpte = template.pte;
+ *srcpte = template | trunc_page(src);
/*
* Map destination physical address.
*/
- template.bits = trunc_page(dst) | aprot |
- CACHE_GLOBAL | DT_VALID;
cmmu_flush_tlb(1, dstva, PAGE_SIZE);
- *dstpte = template.pte;
+ *dstpte = template | trunc_page(dst);
SPLX(spl);
bcopy((void*)srcva, (void*)dstva, PAGE_SIZE);
/* flush source, dest out of cache? */
cmmu_flush_remote_data_cache(cpu, src, PAGE_SIZE);
cmmu_flush_remote_data_cache(cpu, dst, PAGE_SIZE);
-
} /* pmap_copy_page() */
/*
@@ -3259,13 +3101,8 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
* pmap_modify_list
*
* Calls:
- * PMAP_MANAGED
- * SPLVM, SPLX
- * PA_TO_PVH
- * CHECK_PV_LIST
- * simple_lock, simple_unlock
+ * pa_to_pvh
* pmap_pte
- * panic
*
* The modify_list entry corresponding to the
* page's frame index will be zeroed. The PV list will be traversed.
@@ -3274,16 +3111,16 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 * inspected bits were found on, a TLB flush will be performed.
*/
boolean_t
-pmap_clear_modify(struct vm_page *pg)
+pmap_clear_modify(pg)
+ struct vm_page *pg;
{
pv_entry_t pvl;
pv_entry_t pvep;
pt_entry_t *pte;
pmap_t pmap;
int spl;
- vm_offset_t va;
+ vaddr_t va;
unsigned users;
- pte_template_t opte;
boolean_t kflush;
boolean_t ret;
paddr_t phys = VM_PAGE_TO_PHYS(pg);
@@ -3298,19 +3135,17 @@ pmap_clear_modify(struct vm_page *pg)
SPLVM(spl);
clear_modify_Retry:
- pvl = PA_TO_PVH(phys);
+ pvl = pa_to_pvh(phys);
CHECK_PV_LIST(phys, pvl, "pmap_clear_modify");
- LOCK_PVH(phys);
/* update corresponding pmap_modify_list element */
- SET_ATTRIB(phys, 0);
+ *pa_to_attribute(phys) = FALSE;
if (pvl->pmap == PMAP_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- UNLOCK_PVH(phys);
SPLX(spl);
return (ret);
}
@@ -3320,7 +3155,6 @@ clear_modify_Retry:
pmap = pvep->pmap;
va = pvep->va;
if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
goto clear_modify_Retry;
}
users = pmap->cpus_using;
@@ -3336,17 +3170,14 @@ clear_modify_Retry:
/*
* Invalidate pte temporarily to avoid the modified bit
- * and/or the reference being written back by other cpu.
+		 * and/or the reference bit being written back by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
/* clear modified bit */
- opte.pte.modified = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
+ *pte = invalidate_pte(pte) & ~PG_M;
flush_atc_entry(users, va, kflush);
simple_unlock(&pmap->lock);
}
- UNLOCK_PVH(phys);
SPLX(spl);
return (ret);
@@ -3371,7 +3202,7 @@ clear_modify_Retry:
* simple_lock, simple_unlock
* SPLVM, SPLX
* PMAP_MANAGED
- * PA_TO_PVH
+ * pa_to_pvh
* pmap_pte
*
* If the entry in the modify list, corresponding to the given page,
@@ -3385,14 +3216,15 @@ clear_modify_Retry:
* immediately (doesn't need to walk remainder of list).
*/
boolean_t
-pmap_is_modified(struct vm_page *pg)
+pmap_is_modified(pg)
+ struct vm_page *pg;
{
- pv_entry_t pvl;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- boolean_t modified_flag;
- paddr_t phys = VM_PAGE_TO_PHYS(pg);
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ boolean_t modified_flag;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys))
@@ -3401,11 +3233,11 @@ pmap_is_modified(struct vm_page *pg)
SPLVM(spl);
- pvl = PA_TO_PVH(phys);
+ pvl = pa_to_pvh(phys);
CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
is_mod_Retry:
- if ((boolean_t)PA_TO_ATTRIB(phys)) {
+ if ((boolean_t)*pa_to_attribute(phys)) {
/* we've already cached a modify flag for this page,
no use looking further... */
#ifdef DEBUG
@@ -3415,17 +3247,15 @@ is_mod_Retry:
SPLX(spl);
return (TRUE);
}
- LOCK_PVH(phys);
if (pvl->pmap == PMAP_NULL) {
/* unmapped page - get info from page_modified array
maintained by pmap_remove_range/ pmap_remove_all */
- modified_flag = (boolean_t) PA_TO_ATTRIB(phys);
+ modified_flag = (boolean_t)*pa_to_attribute(phys);
#ifdef DEBUG
if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- UNLOCK_PVH(phys);
SPLX(spl);
return (modified_flag);
}
@@ -3434,7 +3264,6 @@ is_mod_Retry:
pvep = pvl;
while (pvep != PV_ENTRY_NULL) {
if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(phys);
goto is_mod_Retry;
}
@@ -3443,13 +3272,12 @@ is_mod_Retry:
printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
panic("pmap_is_modified: bad pv list entry");
}
- if (ptep->modified) {
+ if (pmap_pte_m(ptep)) {
simple_unlock(&pvep->pmap->lock);
#ifdef DEBUG
if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
#endif
- UNLOCK_PVH(phys);
SPLX(spl);
return (TRUE);
}
@@ -3457,10 +3285,8 @@ is_mod_Retry:
pvep = pvep->next;
}
- UNLOCK_PVH(phys);
SPLX(spl);
return (FALSE);
-
} /* pmap_is_modified() */
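
The check above is two-level: the per-page attribute byte caches modified state recorded when mappings were torn down, and only when it is clean does the code walk the live mappings testing PG_M, stopping at the first hit. A simplified single-threaded sketch (no locks, no retry loop), with a hypothetical bit value:

	#include <stdio.h>
	#include <stdint.h>

	#define PG_M	0x10U		/* modified bit (hypothetical value) */

	struct mapping {
		struct mapping *next;
		uint32_t pte;
	};

	static int
	is_modified(char attr, struct mapping *m)
	{
		if (attr)			/* cached result from a removal */
			return (1);
		for (; m != NULL; m = m->next)	/* otherwise ask every mapping */
			if (m->pte & PG_M)
				return (1);
		return (0);
	}

	int
	main(void)
	{
		struct mapping m2 = { NULL, 0x2000U | PG_M };
		struct mapping m1 = { &m2, 0x1000U };

		printf("modified: %d\n", is_modified(0, &m1));	/* 1, via m2 */
		printf("modified: %d\n", is_modified(0, NULL));	/* 0 */
		return (0);
	}
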
/*
@@ -3473,13 +3299,8 @@ is_mod_Retry:
* pg vm_page
*
* Calls:
- * PMAP_MANAGED
- * SPLVM, SPLX
- * PA_TO_PVH
- * CHECK_PV_LIST
- * simple_lock
+ * pa_to_pvh
* pmap_pte
- * panic
*
* Extern/Global:
* pv_head_array, pv lists
@@ -3489,16 +3310,16 @@ is_mod_Retry:
* were found on, a TLB flush will be performed.
*/
boolean_t
-pmap_clear_reference(struct vm_page *pg)
+pmap_clear_reference(pg)
+ struct vm_page *pg;
{
pv_entry_t pvl;
pv_entry_t pvep;
pt_entry_t *pte;
pmap_t pmap;
int spl;
- vm_offset_t va;
+ vaddr_t va;
unsigned users;
- pte_template_t opte;
boolean_t kflush;
paddr_t phys;
boolean_t ret;
@@ -3513,29 +3334,24 @@ pmap_clear_reference(struct vm_page *pg)
SPLVM(spl);
- clear_reference_Retry:
- LOCK_PVH(phys);
- pvl = PA_TO_PVH(phys);
+clear_reference_Retry:
+ pvl = pa_to_pvh(phys);
CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
-
if (pvl->pmap == PMAP_NULL) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
#endif
- UNLOCK_PVH(phys);
SPLX(spl);
return (ret);
}
	/* for each listed pmap, turn off the page referenced bit */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
+ for (pvep = pvl; pvep != PV_ENTRY_NULL; pvep = pvep->next) {
pmap = pvep->pmap;
va = pvep->va;
if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
goto clear_reference_Retry;
}
users = pmap->cpus_using;
@@ -3551,18 +3367,14 @@ pmap_clear_reference(struct vm_page *pg)
/*
* Invalidate pte temporarily to avoid the modified bit
- * and/or the reference being written back by other cpu.
+		 * and/or the reference bit being written back by any other cpu.
*/
- opte.bits = invalidate_pte(pte);
/* clear reference bit */
- opte.pte.pg_used = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
+ *pte = invalidate_pte(pte) & ~PG_U;
flush_atc_entry(users, va, kflush);
simple_unlock(&pmap->lock);
- pvep = pvep->next;
}
- UNLOCK_PVH(phys);
SPLX(spl);
return (ret);
@@ -3582,11 +3394,7 @@ pmap_clear_reference(struct vm_page *pg)
* pv_head_array, pv lists
*
* Calls:
- * PMAP_MANAGED
- * SPLVM
- * PA_TO_PVH
- * CHECK_PV_LIST
- * simple_lock
+ * pa_to_pvh
* pmap_pte
*
* This routine walks the PV list corresponding to the
@@ -3594,15 +3402,15 @@ pmap_clear_reference(struct vm_page *pg)
* examined. If a used bit is found on, the function returns TRUE
* immediately (doesn't need to walk remainder of list).
*/
-
boolean_t
-pmap_is_referenced(struct vm_page *pg)
+pmap_is_referenced(pg)
+ struct vm_page *pg;
{
- pv_entry_t pvl;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- paddr_t phys = VM_PAGE_TO_PHYS(pg);
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
#ifdef DIAGNOSTIC
if (!PMAP_MANAGED(phys))
@@ -3611,7 +3419,7 @@ pmap_is_referenced(struct vm_page *pg)
SPLVM(spl);
- pvl = PA_TO_PVH(phys);
+ pvl = pa_to_pvh(phys);
CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
is_ref_Retry:
@@ -3620,29 +3428,22 @@ is_ref_Retry:
return (FALSE);
}
- LOCK_PVH(phys);
-
/* for each listed pmap, check used bit for given page */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
+ for (pvep = pvl; pvep != PV_ENTRY_NULL; pvep = pvep->next) {
if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(phys);
goto is_ref_Retry;
}
ptep = pmap_pte(pvep->pmap, pvep->va);
if (ptep == PT_ENTRY_NULL)
panic("pmap_is_referenced: bad pv list entry.");
- if (ptep->pg_used) {
+ if (pmap_pte_u(ptep)) {
simple_unlock(&pvep->pmap->lock);
- UNLOCK_PVH(phys);
SPLX(spl);
return (TRUE);
}
simple_unlock(&pvep->pmap->lock);
- pvep = pvep->next;
}
- UNLOCK_PVH(phys);
SPLX(spl);
return (FALSE);
} /* pmap_is_referenced() */
@@ -3657,7 +3458,9 @@ is_ref_Retry:
* Lower the permission for all mappings to a given page.
*/
void
-pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+pmap_page_protect(pg, prot)
+ struct vm_page *pg;
+ vm_prot_t prot;
{
paddr_t phys = VM_PAGE_TO_PHYS(pg);
@@ -3675,161 +3478,6 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
}
-#ifdef FUTURE_MAYBE
-/*
- * Routine: PAGEMOVE
- *
- * Function:
- * Move pages from one kernel virtual address to another.
- *
- * Parameters:
- * from kernel virtual address of source
- * to kernel virtual address of destination
- * size size in bytes
- *
- * Calls:
- * PMAP_LOCK
- * PMAP_UNLOCK
- * LOCK_PVH
- * UNLOCK_PVH
- * CHECK_PV_LIST
- * pmap_pte
- * pmap_expand_kmap
- * cmmu_sflush
- *
- * Special Assumptions:
- * size must be a multiple of CLBYTES (?)
- */
-void
-pagemove(vm_offset_t from, vm_offset_t to, int size)
-{
- vm_offset_t pa;
- pt_entry_t *srcpte, *dstpte;
- pv_entry_t pvl;
- int spl;
- unsigned users;
- pte_template_t opte;
-
- PMAP_LOCK(kernel_pmap, spl);
-
- users = kernel_pmap->cpus_using;
-
- while (size > 0) {
- /*
- * check if the source addr is mapped
- */
- if ((srcpte = pmap_pte(kernel_pmap, from)) == PT_ENTRY_NULL) {
- panic("pagemove: source addr 0x%x not mapped", from);
- }
-
- /*
- *
- */
- if ((dstpte = pmap_pte(kernel_pmap, to)) == PT_ENTRY_NULL)
- if ((dstpte = pmap_expand_kmap(to,
- VM_PROT_READ | VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic("pagemove: Cannot allocate destination pte");
- /*
- *
- */
- if (dstpte->dtype == DT_VALID) {
- panic("pagemove: destination pte 0x%x (vaddr 0x%x)"
- " already valid", *((unsigned *)dstpte), to);
- }
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
- printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
- if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
- printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
-
-#endif /* DEBUG */
-
- /*
- * Update pv_list
- */
- pa = ptoa(srcpte->pfn);
- if (PMAP_MANAGED(pa)) {
- LOCK_PVH(pa);
- pvl = PA_TO_PVH(pa);
- CHECK_PV_LIST(pa, pvl, "pagemove");
- pvl->va = to;
- UNLOCK_PVH(pa);
- }
-
- /*
- * copy pte
- *
- * Invalidate pte temporarily to avoid the modified bit
- * and/or the reference being written back by other cpu.
- */
- opte.bits = invalidate_pte(srcpte);
- flush_atc_entry(users, from, TRUE);
- ((pte_template_t *)dstpte)->bits = opte.bits;
- from += PAGE_SIZE;
- to += PAGE_SIZE;
-
- size -= PAGE_SIZE;
- }
- PMAP_UNLOCK(kernel_pmap, spl);
-} /* pagemove */
-
-#endif /* FUTURE_MAYBE */
-
-void
-cache_flush_loop(int mode, vm_offset_t pa, int size)
-{
- int i;
- int ncpus;
- void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
-
- switch (mode) {
- default:
- panic("bad cache_flush_loop mode");
- return;
-
- case FLUSH_CACHE: /* All caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu->cmmu_flush_remote_cache_func;
- break;
-
- case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu->cmmu_flush_remote_inst_cache_func;
- break;
-
- case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu->cmmu_flush_remote_data_cache_func;
- break;
-
- case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
- ncpus = 1;
- cfunc = cmmu->cmmu_flush_remote_cache_func;
- break;
-
- case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
- ncpus = 1;
- cfunc = cmmu->cmmu_flush_remote_inst_cache_func;
- break;
-
- case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
- ncpus = 1;
- cfunc = cmmu->cmmu_flush_remote_data_cache_func;
- break;
- }
-
- if (ncpus == 1) {
- (*cfunc)(cpu_number(), pa, size);
- } else {
- for (i=0; i<max_cpus; i++) {
- if (cpu_sets[i]) {
- (*cfunc)(i, pa, size);
- }
- }
- }
-}
-
#ifdef DEBUG
/*
* DEBUGGING ROUTINES - check_pv_list and check_pmp_consistency are used
@@ -3867,15 +3515,17 @@ cache_flush_loop(int mode, vm_offset_t pa, int size)
* the pmap in question. If the returned physical address does not match
* that for the PV list being perused, the function panics.
*/
-
void
-check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
+check_pv_list(phys, pv_h, who)
+ paddr_t phys;
+ pv_entry_t pv_h;
+ char *who;
{
- pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t pa;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ paddr_t pa;
- if (pv_h != PA_TO_PVH(phys)) {
+ if (pv_h != pa_to_pvh(phys)) {
printf("check_pv_list: incorrect pv_h supplied.\n");
panic(who);
}
@@ -3889,7 +3539,7 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
if (pv_h->next != PV_ENTRY_NULL) {
printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
panic(who);
- } else return; /* proper empry lst */
+ } else return; /* proper empty list */
}
pv_e = pv_h;
@@ -3904,7 +3554,7 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
pa = 0;
else
- pa = ptoa(pte->pfn) | (pv_e->va & PAGE_MASK);
+ pa = ptoa(PG_PFNUM(*pte)) | (pv_e->va & PAGE_MASK);
if (pa != phys) {
printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
@@ -3913,7 +3563,6 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
pv_e = pv_e->next;
}
-
} /* check_pv_list() */
/*
@@ -3945,21 +3594,23 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
* searched for the corresponding pmap/va entry. If not found, or if
* duplicate PV list entries are found, the function panics.
*/
-
void
-check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
+check_map(map, s, e, who)
+ pmap_t map;
+ vaddr_t s, e;
+ char *who;
{
- vm_offset_t va,
- old_va,
- phys;
- pv_entry_t pv_h,
- pv_e,
- saved_pv_e;
- pt_entry_t *ptep;
- boolean_t found;
- int loopcnt;
- int bank;
- unsigned npages;
+ vaddr_t va,
+ old_va;
+ paddr_t phys;
+ pv_entry_t pv_h,
+ pv_e,
+ saved_pv_e;
+ pt_entry_t *ptep;
+ boolean_t found;
+ int loopcnt;
+ int bank;
+ unsigned npages;
/*
* for each page in the address space, check to see if there's
@@ -3992,7 +3643,7 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
if (!PDT_VALID(ptep))
continue; /* no page mapping */
- phys = ptoa(ptep->pfn); /* pick up phys addr */
+ phys = ptoa(PG_PFNUM(*ptep)); /* pick up phys addr */
if (!PMAP_MANAGED(phys))
continue; /* no PV list */
@@ -4009,13 +3660,12 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
npages += vm_physmem[bank].end - vm_physmem[bank].start;
if (map == kernel_pmap &&
- va < round_page((vm_offset_t)(pmap_modify_list + npages)))
+ va < round_page((vaddr_t)(pmap_modify_list + npages)))
continue;
- pv_h = PA_TO_PVH(phys);
+ pv_h = pa_to_pvh(phys);
found = FALSE;
if (pv_h->pmap != PMAP_NULL) {
-
loopcnt = 10000; /* loop limit */
pv_e = pv_h;
while (pv_e != PV_ENTRY_NULL) {
@@ -4081,23 +3731,22 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
 * PV manipulations had not been activated when these pages were allocated.
*
*/
-
void
-check_pmap_consistency(char *who)
+check_pmap_consistency(who)
+ char *who;
{
- pmap_t p;
- int i;
- vm_offset_t phys;
- pv_entry_t pv_h;
- int spl;
- int bank;
- unsigned npages;
+ pmap_t p;
+ int i;
+ paddr_t phys;
+ pv_entry_t pv_h;
+ int spl;
+ int bank;
+ unsigned npages;
if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
if (pv_head_table == PV_ENTRY_NULL) {
-
printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
return;
}
@@ -4120,7 +3769,7 @@ check_pmap_consistency(char *who)
	/* run through all managed pages, check pv_list for each one */
for (npages = 0, bank = 0; bank < vm_nphysseg; bank++) {
for (phys = ptoa(vm_physmem[bank].start); phys < ptoa(vm_physmem[bank].end); phys += PAGE_SIZE) {
- pv_h = PA_TO_PVH(phys);
+ pv_h = pa_to_pvh(phys);
check_pv_list(phys, pv_h, who);
}
}
@@ -4129,254 +3778,25 @@ check_pmap_consistency(char *who)
if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
printf("check_pmap consistency (%s :%x): done.\n",who, curproc);
-
} /* check_pmap_consistency() */
#endif /* DEBUG */
-/*
- * PMAP PRINT MACROS AND ROUTINES FOR DEBUGGING
- * These routines are called only from the debugger.
- */
-
-#define PRINT_SDT(p) \
- printf("%08x : ", \
- ((sdt_entry_template_t *)p)-> bits); \
- printf("table adress=0x%x, prot=%d, dtype=%d\n", \
- ptoa(p->table_addr), \
- p->prot, \
- p->dtype);
-
-#define PRINT_PDT(p) \
- printf("%08x : ", \
- ((pte_template_t *)p)-> bits); \
- printf("frame num=0x%x, prot=%d, dtype=%d, wired=%d, modified=%d, pg_used=%d\n", \
- p->pfn, \
- p->prot, \
- p->dtype, \
- p->wired, \
- p->modified, \
- p->pg_used);
-
void
-pmap_virtual_space(vm_offset_t *startp, vm_offset_t *endp)
+pmap_virtual_space(startp, endp)
+ vaddr_t *startp, *endp;
{
*startp = virtual_avail;
*endp = virtual_end;
}
-#ifdef USING_BATC
- #ifdef OMRON_PMAP
-/*
- * Set BATC
- */
-void
-pmap_set_batc(
- pmap_t pmap,
- boolean_t data,
- int i,
- vm_offset_t va,
- vm_offset_t pa,
- boolean_t super,
- boolean_t wt,
- boolean_t global,
- boolean_t ci,
- boolean_t wp,
- boolean_t valid)
-{
- register batc_template_t batctmp;
-
- if (i < 0 || i > (BATC_MAX - 1)) {
- panic("pmap_set_batc: illegal batc number");
- /* bad number */
- return;
- }
-
- batctmp.field.lba = va >> 19;
- batctmp.field.pba = pa >> 19;
- batctmp.field.sup = super;
- batctmp.field.wt = wt;
- batctmp.field.g = global;
- batctmp.field.ci = ci;
- batctmp.field.wp = wp;
- batctmp.field.v = valid;
-
- if (data) {
- pmap->d_batc[i].bits = batctmp.bits;
- } else {
- pmap->i_batc[i].bits = batctmp.bits;
- }
-}
-
-void
-use_batc(task_t task,
- boolean_t data, /* for data-cmmu ? */
- int i, /* batc number */
- vm_offset_t va, /* virtual address */
- vm_offset_t pa, /* physical address */
- boolean_t s, /* for super-mode ? */
- boolean_t wt, /* is writethrough */
- boolean_t g, /* is global ? */
- boolean_t ci, /* is cache inhibited ? */
- boolean_t wp, /* is write-protected ? */
- boolean_t v) /* is valid ? */
-{
- pmap_t pmap;
- pmap = vm_map_pmap(task->map);
- pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
-}
-
- #endif
-#endif /* USING_BATC */
-#ifdef FUTURE_MAYBE
-/*
- * Machine-level page attributes
- *
- * The only attribute that may be controlled right now is cacheability.
- *
- * Obviously these attributes will be used in a sparse
- * fashion, so we use a simple sorted list of address ranges
- * which possess the attribute.
- */
-
-/*
- * Destroy an attribute list.
- */
void
-pmap_destroy_ranges(pmap_range_t *ranges)
-{
- pmap_range_t this, next;
-
- this = *ranges;
- while (this != 0) {
- next = this->next;
- pmap_range_free(this);
- this = next;
- }
- *ranges = 0;
-}
-
-/*
- * Lookup an address in a sorted range list.
- */
-boolean_t
-pmap_range_lookup(pmap_range_t *ranges, vm_offset_t address)
-{
- pmap_range_t range;
-
- for (range = *ranges; range != 0; range = range->next) {
- if (address < range->start)
- return FALSE;
- if (address < range->end)
- return TRUE;
- }
- return FALSE;
-}
-
-/*
- * Add a range to a list.
- * The pmap must be locked.
- */
-void
-pmap_range_add(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
-{
- pmap_range_t range, *prev;
-
- /* look for the start address */
-
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start < range->start)
- break;
- if (start <= range->end)
- goto start_overlaps;
- }
-
- /* start address is not present */
-
- if ((range == 0) || (end < range->start)) {
- /* no overlap; allocate a new range */
-
- range = pmap_range_alloc();
- range->start = start;
- range->end = end;
- range->next = *prev;
- *prev = range;
- return;
- }
-
- /* extend existing range forward to start */
-
- range->start = start;
-
-start_overlaps:
- /* delete redundant ranges */
-
- while ((range->next != 0) && (range->next->start <= end)) {
- pmap_range_t old;
-
- old = range->next;
- range->next = old->next;
- range->end = old->end;
- pmap_range_free(old);
- }
-
- /* extend existing range backward to end */
-
- if (range->end < end)
- range->end = end;
-}
-
-/*
- * Remove a range from a list.
- * The pmap must be locked.
- */
-void
-pmap_range_remove(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
-{
- pmap_range_t range, *prev;
-
- /* look for start address */
-
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start <= range->start)
- break;
- if (start < range->end) {
- if (end < range->end) {
- pmap_range_t new;
- /* split this range */
- new = pmap_range_alloc();
- new->next = range->next;
- new->start = end;
- new->end = range->end;
-
- range->next = new;
- range->end = start;
- return;
- }
- /* truncate this range */
- range->end = start;
- }
- }
-
- /* start address is not in the middle of a range */
-
- while ((range != 0) && (range->end <= end)) {
- *prev = range->next;
- pmap_range_free(range);
- range = *prev;
- }
-
- if ((range != 0) && (range->start < end))
- range->start = end;
-}
-#endif /* FUTURE_MAYBE */
-
-void
-pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
{
- int ap;
int spl;
- pt_entry_t *pte;
- pte_template_t template;
+ pt_entry_t template, *pte;
unsigned users;
CHECK_PAGE_ALIGN (va, "pmap_kenter_pa - VA");
@@ -4391,7 +3811,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
PMAP_LOCK(kernel_pmap, spl);
users = kernel_pmap->cpus_using;
- ap = m88k_protection(kernel_pmap, prot);
+ template = m88k_protection(kernel_pmap, prot);
/*
* Expand pmap to include this pte.
@@ -4407,22 +3827,21 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
kernel_pmap->stats.resident_count++;
kernel_pmap->stats.wired_count++;
+ invalidate_pte(pte);
if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_INH;
+ template |= CACHE_INH | PG_V | PG_W;
else
- template.bits = DT_VALID | ap | trunc_page(pa) | CACHE_GLOBAL;
-
- template.pte.wired = 1;
-
- invalidate_pte(pte);
- *(int *)pte = template.bits;
+ template |= CACHE_GLOBAL | PG_V | PG_W;
+ *pte = template | trunc_page(pa);
flush_atc_entry(users, va, TRUE);
PMAP_UNLOCK(kernel_pmap, spl);
}
void
-pmap_kremove(vaddr_t va, vsize_t len)
+pmap_kremove(va, len)
+ vaddr_t va;
+ vsize_t len;
{
int spl;
unsigned users;
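
The pmap.c changes above all follow the pattern described in the commit
message: a pt_entry_t becomes a plain integer composed with constant PG_*
masks instead of a struct of bitfields. A minimal standalone sketch of that
style follows; the mask values here are illustrative assumptions only, the
real definitions live in sys/arch/mvme88k/include/pte.h.

/*
 * Sketch only -- the PG_* values below are assumed for illustration;
 * the real masks are defined in <machine/pte.h>.
 */
#include <stdio.h>

typedef unsigned int pt_entry_t;

#define PG_V		0x00000001	/* valid (assumed value) */
#define PG_W		0x00000002	/* wired (assumed value) */
#define PG_M		0x00000010	/* modified (assumed value) */
#define PG_FRAME	0xfffff000	/* frame mask, 4KB pages */
#define PG_PFNUM(pte)	(((pte) & PG_FRAME) >> 12)

int
main(void)
{
	unsigned int pa = 0x01234000;	/* some physical address */
	pt_entry_t pte;

	/*
	 * Compose a PTE the way the reworked pmap_kenter_pa() does:
	 * OR attribute masks into a template, then add the page frame,
	 * instead of assigning struct bitfield members one at a time.
	 */
	pte = PG_V | PG_W | (pa & PG_FRAME);

	/* attribute tests and updates are now plain mask arithmetic */
	if (!(pte & PG_M))
		pte |= PG_M;

	printf("pfn = 0x%x\n", PG_PFNUM(pte));
	return (0);
}
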
diff --git a/sys/arch/mvme88k/mvme88k/trap.c b/sys/arch/mvme88k/mvme88k/trap.c
index c96090f858a..f3b7ecf9242 100644
--- a/sys/arch/mvme88k/mvme88k/trap.c
+++ b/sys/arch/mvme88k/mvme88k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.32 2001/12/22 17:57:11 smurph Exp $ */
+/* $OpenBSD: trap.c,v 1.33 2001/12/24 04:12:40 miod Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -913,9 +913,9 @@ m88110_trap(unsigned type, struct m88100_saved_state *frame)
pte = pmap_pte(map->pmap, va);
if (pte == PT_ENTRY_NULL)
panic("NULL pte on write fault??");
- if (!pte->modified && !pte->prot) {
+ if (!(*pte & PG_M) && !(*pte & PG_RO)) {
/* Set modified bit and try the write again. */
- pte->modified = 1;
+ *pte |= PG_M;
return;
}
}
@@ -990,15 +990,15 @@ m88110_user_fault:
pte = pmap_pte(vm_map_pmap(map), va);
if (pte == PT_ENTRY_NULL)
panic("NULL pte on write fault??");
- if (!pte->modified && !pte->prot) {
+ if (!(*pte & PG_M) && !(*pte & PG_PROT)) {
/* Set modified bit and try the write again. */
- pte->modified = 1;
+ *pte |= PG_M;
/* invalidate ATCs to force table search */
set_dcmd(CMMU_DCMD_INV_UATC);
return;
} else {
/* This must be a real write protection fault */
- printf("Write protect???? mod = %d, wp = %d\n", pte->modified, pte->prot);
+ printf("Write protect???? mod = %d, wp = %d\n", !!(*pte & PG_M), !!(*pte & PG_PROT));
result = uvm_fault(map, va, 0, ftype);
}
}
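
The two trap.c hunks above carry the m88110's software modified-bit handling
over to the new masks: on a write fault, a PTE that is neither modified nor
write-protected simply gains PG_M and the access is retried; anything else is
treated as a real protection fault. A hedged sketch of that control flow,
with assumed mask values and an invented helper name (fixup_write_fault),
purely for illustration:

/*
 * Sketch only -- fixup_write_fault() is an invented name and the PG_*
 * values are assumptions; this mirrors the control flow of the m88110
 * write-fault hunks above, nothing more.
 */
#include <stdio.h>
#include <stddef.h>

typedef unsigned int pt_entry_t;

#define PG_M	0x00000010	/* modified (assumed value) */
#define PG_PROT	0x00000004	/* write-protect (assumed value) */

/* returns 1 if the fault was fixed up and the write may be retried */
static int
fixup_write_fault(pt_entry_t *pte)
{
	if (pte == NULL)
		return (0);	/* the kernel panics here instead */
	if (!(*pte & PG_M) && !(*pte & PG_PROT)) {
		*pte |= PG_M;	/* record the modification... */
		/*
		 * ...and, on the 88110, invalidate the user ATC so the
		 * hardware table walk picks up the updated descriptor.
		 */
		return (1);
	}
	return (0);		/* genuine write-protection fault */
}

int
main(void)
{
	pt_entry_t pte = 0;	/* clean, writable page */

	printf("retry: %d, PG_M now %s\n", fixup_write_fault(&pte),
	    (pte & PG_M) ? "set" : "clear");
	return (0);
}
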