author    Miod Vallat <miod@cvs.openbsd.org>    2001-12-20 19:02:30 +0000
committer Miod Vallat <miod@cvs.openbsd.org>    2001-12-20 19:02:30 +0000
commit    664a880e367bbf984dbe49922fdace0ca00a5d91 (patch)
tree      90d4583eb879ff0136c57ac9586aa724fb4f3add /sys/arch
parent    6743d0464c802e3119c4632471d95b000cfa0d89 (diff)
Temporarily revert the pmap_motorola changes, as they may account for
some problems as well. Requested by deraadt@
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/amiga/amiga/amiga_init.c | 12
-rw-r--r--  sys/arch/amiga/amiga/locore.s | 93
-rw-r--r--  sys/arch/amiga/amiga/pmap.c | 2586
-rw-r--r--  sys/arch/amiga/amiga/pmap_bootstrap.c | 248
-rw-r--r--  sys/arch/amiga/conf/files.amiga | 5
-rw-r--r--  sys/arch/amiga/include/cpu.h | 4
-rw-r--r--  sys/arch/amiga/include/param.h | 11
-rw-r--r--  sys/arch/amiga/include/pmap.h | 141
-rw-r--r--  sys/arch/amiga/include/pte.h | 84
-rw-r--r--  sys/arch/hp300/conf/files.hp300 | 4
-rw-r--r--  sys/arch/hp300/hp300/pmap.c (renamed from sys/arch/m68k/m68k/pmap_motorola.c) | 429
-rw-r--r--  sys/arch/hp300/hp300/pmap_bootstrap.c | 41
-rw-r--r--  sys/arch/hp300/include/cpu.h | 5
-rw-r--r--  sys/arch/hp300/include/param.h | 22
-rw-r--r--  sys/arch/hp300/include/pmap.h | 165
-rw-r--r--  sys/arch/hp300/include/pte.h | 159
-rw-r--r--  sys/arch/m68k/include/param.h | 26
-rw-r--r--  sys/arch/m68k/include/pmap_motorola.h | 160
-rw-r--r--  sys/arch/m68k/include/pte_motorola.h | 152
-rw-r--r--  sys/arch/mac68k/conf/files.mac68k | 4
-rw-r--r--  sys/arch/mac68k/include/param.h | 6
-rw-r--r--  sys/arch/mac68k/include/pmap.h | 188
-rw-r--r--  sys/arch/mac68k/include/pte.h | 162
-rw-r--r--  sys/arch/mac68k/mac68k/pmap.c | 2351
-rw-r--r--  sys/arch/mac68k/mac68k/pmap_bootstrap.c | 41
-rw-r--r--  sys/arch/mvme68k/conf/files.mvme68k | 4
-rw-r--r--  sys/arch/mvme68k/include/param.h | 21
-rw-r--r--  sys/arch/mvme68k/include/pmap.h | 158
-rw-r--r--  sys/arch/mvme68k/include/pte.h | 158
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap.c | 2403
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap_bootstrap.c | 60
31 files changed, 8904 insertions, 999 deletions
diff --git a/sys/arch/amiga/amiga/amiga_init.c b/sys/arch/amiga/amiga/amiga_init.c
index 0e9c0f56f2f..23fc823257a 100644
--- a/sys/arch/amiga/amiga/amiga_init.c
+++ b/sys/arch/amiga/amiga/amiga_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: amiga_init.c,v 1.23 2001/12/06 22:33:57 miod Exp $ */
+/* $OpenBSD: amiga_init.c,v 1.24 2001/12/20 19:02:23 miod Exp $ */
/* $NetBSD: amiga_init.c,v 1.56 1997/06/10 18:22:24 veego Exp $ */
/*
@@ -174,6 +174,8 @@ alloc_z2mem(amount)
*
*/
+int kernel_copyback = 1;
+
void
start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync)
int id;
@@ -567,12 +569,15 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync)
* recommended by Motorola; for the 68060 mandatory)
*/
if (RELOC(mmutype, int) <= MMU_68040) {
+
+ if (RELOC(kernel_copyback, int))
+ pg_proto |= PG_CCB;
+
/*
* ASSUME: segment table and statically allocated page tables
* of the kernel are contiguously allocated, start at
* Sysseg and end at the current value of vstart.
*/
- pg_proto |= PG_CCB;
for (; i<RELOC(Sysseg, u_int); i+= NBPG, pg_proto += NBPG)
*pg++ = pg_proto;
@@ -581,7 +586,8 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync)
*pg++ = pg_proto;
pg_proto = (pg_proto & ~PG_CI);
- pg_proto |= PG_CCB;
+ if (RELOC(kernel_copyback, int))
+ pg_proto |= PG_CCB;
}
#endif
/*
diff --git a/sys/arch/amiga/amiga/locore.s b/sys/arch/amiga/amiga/locore.s
index 9d1442dac5a..6b1da1e1365 100644
--- a/sys/arch/amiga/amiga/locore.s
+++ b/sys/arch/amiga/amiga/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.34 2001/12/06 21:13:28 millert Exp $ */
+/* $OpenBSD: locore.s,v 1.35 2001/12/20 19:02:24 miod Exp $ */
/* $NetBSD: locore.s,v 1.89 1997/07/17 16:22:54 is Exp $ */
/*
@@ -1288,7 +1288,7 @@ Lsw2:
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
- movl _caddr2_pte,a1@(PCB_CMAP2) | save temporary map PTE
+ movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE
#ifdef FPU_EMULATE
tstl _fputype | do we have any FPU?
jeq Lswnofpsave | no, don't save
@@ -1342,7 +1342,7 @@ Lswnofpsave:
lea tmpstk,sp | now goto a tmp stack for NMI
- movl a1@(PCB_CMAP2),_caddr2_pte | reload tmp map
+ movl a1@(PCB_CMAP2),_CMAP2 | reload tmp map
moveml a1@(PCB_REGS),#0xFCFC | and registers
movl a1@(PCB_USP),a0
movl a0,usp | and USP
@@ -1396,7 +1396,7 @@ ENTRY(savectx)
movl usp,a0 | grab USP
movl a0,a1@(PCB_USP) | and save it
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
- movl _caddr2_pte,a1@(PCB_CMAP2) | save temporary map PTE
+ movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE
#ifdef FPU_EMULATE
tstl _fputype
jeq Lsavedone
@@ -1431,12 +1431,95 @@ Lsavedone:
rts
/*
+ * Copy 1 relocation unit (NBPG bytes)
+ * from user virtual address to physical address
+ */
+ENTRY(copyseg)
+ movl _curpcb,a1 | current pcb
+ movl #Lcpydone,a1@(PCB_ONFAULT) | where to return to on a fault
+ movl sp@(8),d0 | destination page number
+ moveq #PGSHIFT,d1
+ lsll d1,d0 | convert to address
+ orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable
+ movl _CMAP2,a0
+ movl _CADDR2,sp@- | destination kernel VA
+ movl d0,a0@ | load in page table
+ jbsr _TBIS | invalidate any old mapping
+ addql #4,sp
+ movl _CADDR2,a1 | destination addr
+ movl sp@(4),a0 | source addr
+ movl #NBPG/4-1,d0 | count
+Lcpyloop:
+ movsl a0@+,d1 | read longword
+ movl d1,a1@+ | write longword
+ dbf d0,Lcpyloop | continue until done
+Lcpydone:
+ movl _curpcb,a1 | current pcb
+ clrl a1@(PCB_ONFAULT) | clear error catch
+ rts
+
+/*
+ * Copy 1 relocation unit (NBPG bytes)
+ * from physical address to physical address
+ */
+ENTRY(physcopyseg)
+ movl sp@(4),d0 | source page number
+ moveq #PGSHIFT,d1
+ lsll d1,d0 | convert to address
+ orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable
+ movl _CMAP1,a0
+ movl d0,a0@ | load in page table
+ movl _CADDR1,sp@- | destination kernel VA
+ jbsr _TBIS | invalidate any old mapping
+ addql #4,sp
+
+ movl sp@(8),d0 | destination page number
+ moveq #PGSHIFT,d1
+ lsll d1,d0 | convert to address
+ orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable
+ movl _CMAP2,a0
+ movl d0,a0@ | load in page table
+ movl _CADDR2,sp@- | destination kernel VA
+ jbsr _TBIS | invalidate any old mapping
+ addql #4,sp
+
+ movl _CADDR1,a0 | source addr
+ movl _CADDR2,a1 | destination addr
+ movl #NBPG/4-1,d0 | count
+Lpcpy:
+ movl a0@+,a1@+ | copy longword
+ dbf d0,Lpcpy | continue until done
+ rts
+
+/*
+ * zero out physical memory
+ * specified in relocation units (NBPG bytes)
+ */
+ENTRY(clearseg)
+ movl sp@(4),d0 | destination page number
+ moveq #PGSHIFT,d1
+ lsll d1,d0 | convert to address
+ orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable
+ movl _CMAP1,a0
+ movl _CADDR1,sp@- | destination kernel VA
+ movl d0,a0@ | load in page map
+ jbsr _TBIS | invalidate any old mapping
+ addql #4,sp
+ movl _CADDR1,a1 | destination addr
+ movl #NBPG/4-1,d0 | count
+/* simple clear loop is fastest on 68020 */
+Lclrloop:
+ clrl a1@+ | clear a longword
+ dbf d0,Lclrloop | continue until done
+ rts
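+
+/*
+ * (pmap_zero_page() and pmap_copy_page() in pmap.c call clearseg and
+ * physcopyseg respectively, passing page frame numbers rather than
+ * byte addresses.)
+ */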
+
+/*
* Invalidate entire TLB.
*/
ENTRY(TBIA)
__TBIA:
cmpl #MMU_68040,_mmutype
- jle Ltbia040
+ jeq Ltbia040
pflusha | flush entire TLB
tstl _mmutype
jpl Lmc68851a | 68851 implies no d-cache
diff --git a/sys/arch/amiga/amiga/pmap.c b/sys/arch/amiga/amiga/pmap.c
new file mode 100644
index 00000000000..49a25744307
--- /dev/null
+++ b/sys/arch/amiga/amiga/pmap.c
@@ -0,0 +1,2586 @@
+/* $OpenBSD: pmap.c,v 1.45 2001/12/20 19:02:24 miod Exp $ */
+/* $NetBSD: pmap.c,v 1.68 1999/06/19 19:44:09 is Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 7.5 (Berkeley) 5/10/91
+ */
+
+/*
+ * AMIGA physical map management code.
+ * For 68020/68030 machines with a 68851 PMMU, or the 68030 on-chip MMU
+ * Don't even pay lip service to multiprocessor support.
+ *
+ * This will only work for PAGE_SIZE == NBPG
+ * right now because of the assumed one-to-one relationship of PT
+ * pages to STEs.
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduce-protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/user.h>
+#include <uvm/uvm.h>
+#include <machine/pte.h>
+#include <machine/cpu.h>
+#include <machine/vmparam.h>
+#include <amiga/amiga/memlist.h>
+/*
+ * Allocate various and sundry SYSMAPs used in the days of old VM
+ * and not yet converted. XXX.
+ */
+
+#ifdef DEBUG
+struct kpt_stats {
+ int collectscans;
+ int collectpages;
+ int kpttotal;
+ int kptinuse;
+ int kptmaxuse;
+};
+struct enter_stats {
+ int kernel; /* entering kernel mapping */
+ int user; /* entering user mapping */
+ int ptpneeded; /* needed to allocate a PT page */
+ int pwchange; /* no mapping change, just wiring or protection */
+ int wchange; /* no mapping change, just wiring */
+ int mchange; /* was mapped but mapping to different page */
+ int managed; /* a managed page */
+ int firstpv; /* first mapping for this PA */
+ int secondpv; /* second mapping for this PA */
+ int ci; /* cache inhibited */
+ int unmanaged; /* not a managed page */
+ int flushes; /* cache flushes */
+};
+struct remove_stats {
+ int calls;
+ int removes;
+ int pvfirst;
+ int pvsearch;
+ int ptinvalid;
+ int uflushes;
+ int sflushes;
+};
+
+struct remove_stats remove_stats;
+struct enter_stats enter_stats;
+struct kpt_stats kpt_stats;
+
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_CACHE 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_SEGTAB 0x0400
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
+
+static void pmap_check_wiring __P((char *, vaddr_t));
+static void pmap_pvdump __P((paddr_t));
+#endif
+
+/*
+ * Get STEs and PTEs for user/kernel address space
+ */
+#if defined(M68040) || defined(M68060)
+#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> pmap_ishift]))
+#define pmap_ste1(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
+/* XXX assumes physically contiguous ST pages (if more than one) */
+#define pmap_ste2(m, v) \
+ (&((m)->pm_stab[(u_int *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \
+ - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
+#define pmap_ste_v(m, v) \
+ (mmutype == MMU_68040 \
+ ? ((*pmap_ste1(m, v) & SG_V) && \
+ (*pmap_ste2(m, v) & SG_V)) \
+ : (*pmap_ste(m, v) & SG_V))
+#else /* defined(M68040) || defined(M68060) */
+#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
+#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
+#endif /* defined(M68040) || defined(M68060) */
+
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
+
+#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME)
+
+#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W)
+#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI)
+#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M)
+#define pmap_pte_u(pte) (*(u_int *)(pte) & PG_U)
+#define pmap_pte_prot(pte) (*(u_int *)(pte) & PG_PROT)
+#define pmap_pte_v(pte) (*(u_int *)(pte) & PG_V)
+
+#define pmap_pte_set_w(pte, v) \
+ do { if (v) *(u_int *)(pte) |= PG_W; else *(u_int *)(pte) &= ~PG_W; \
+ } while (0)
+#define pmap_pte_set_prot(pte, v) \
+ do { if (v) *(u_int *)(pte) |= PG_PROT; else *(u_int *)(pte) &= ~PG_PROT; \
+ } while (0)
+#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
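+
+/*
+ * Taken together, these accessors implement the software page table
+ * walk used throughout this file; a VA-to-PA lookup (pmap_extract()
+ * below is the real version) essentially boils down to:
+ *
+ *	if (pmap_ste_v(pm, va))
+ *		pa = pmap_pte_pa(pmap_pte(pm, va)) | (va & ~PG_FRAME);
+ */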
+
+#define active_pmap(pm) \
+ ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to a vax protection code.
+ */
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
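+
+/*
+ * The table itself is filled in by amiga_protection_init(); roughly,
+ * any combination including VM_PROT_WRITE maps to PG_RW and anything
+ * merely readable/executable maps to PG_RO.
+ */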
+
+/*
+ * Kernel page table page management.
+ *
+ * One additional page of KPT allows for 16 MB of virtual buffer cache.
+ * A GENERIC kernel allocates this for 2 MB of real buffer cache,
+ * which in turn is allocated for 38 MB of RAM.
+ * We add one per 16 MB of RAM to allow for tuning the machine-independent
+ * options.
+ */
+#ifndef NKPTADDSHIFT
+#define NKPTADDSHIFT 24
+#endif
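+
+/*
+ * E.g. with the default NKPTADDSHIFT of 24, a 64MB machine adds
+ * 64MB >> 24 == 4 KPT pages on top of the per-process allowance
+ * computed in pmap_init() below.
+ */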
+
+struct kpt_page {
+ struct kpt_page *kpt_next; /* link on either used or free list */
+ vaddr_t kpt_va; /* always valid kernel VA */
+ paddr_t kpt_pa; /* PA of this page (for speed) */
+};
+struct kpt_page *kpt_free_list, *kpt_used_list;
+struct kpt_page *kpt_pages;
+
+/*
+ * Kernel segment/page table and page table map.
+ * The page table map gives us a level of indirection we need to dynamically
+ * expand the page table. It is essentially a copy of the segment table
+ * with PTEs instead of STEs. All are initialized in locore at boot time.
+ * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
+ * Segtabzero is an empty segment table which all processes share until they
+ * reference something.
+ */
+u_int *Sysseg, *Sysseg_pa;
+u_int *Sysmap, *Sysptmap;
+u_int *Segtabzero, *Segtabzeropa;
+vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
+
+struct pmap kernel_pmap_store;
+struct vm_map *pt_map;
+struct vm_map pt_map_store;
+
+vsize_t mem_size; /* memory size in bytes */
+vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int page_cnt; /* number of pages managed by the VM system */
+boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+char *pmap_attributes; /* reference and modify bits */
+TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+#if defined(M68040) || defined(M68060)
+static int pmap_ishift; /* segment table index shift */
+int protostfree; /* prototype (default) free ST map */
+#endif
+extern paddr_t msgbufpa; /* physical address of the msgbuf */
+
+u_long noncontig_enable;
+extern vaddr_t amiga_uptbase;
+
+extern paddr_t z2mem_start;
+
+extern vaddr_t reserve_dumppages __P((vaddr_t));
+
+boolean_t pmap_testbit __P((paddr_t, int));
+void pmap_enter_ptpage __P((pmap_t, vaddr_t));
+static void pmap_ptpage_addref __P((vaddr_t));
+static int pmap_ptpage_delref __P((vaddr_t));
+static void pmap_changebit __P((vaddr_t, int, boolean_t));
+struct pv_entry *pmap_alloc_pv __P((void));
+void pmap_free_pv __P((struct pv_entry *));
+void pmap_pinit __P((pmap_t));
+void pmap_release __P((pmap_t));
+static void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
+
+static void amiga_protection_init __P((void));
+void pmap_collect1 __P((pmap_t, paddr_t, paddr_t));
+
+/* pmap_remove_mapping flags */
+#define PRM_TFLUSH 0x01
+#define PRM_CFLUSH 0x02
+#define PRM_KEEPPTPAGE 0x04
+
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+caddr_t CADDR1, CADDR2, vmmap;
+u_int *CMAP1, *CMAP2, *vmpte, *msgbufmap;
+
+#define PAGE_IS_MANAGED(pa) (pmap_initialized \
+ && vm_physseg_find(atop((pa)), NULL) != -1)
+
+#define pa_to_pvh(pa) \
+({ \
+ int bank_, pg_; \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.pvent[pg_]; \
+})
+
+#define pa_to_attribute(pa) \
+({ \
+ int bank_, pg_; \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
+})
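+
+/*
+ * Usage sketch: the attribute byte for a managed page accumulates the
+ * PG_U/PG_M bits harvested from its PTEs as mappings are torn down,
+ * along the lines of
+ *
+ *	*pa_to_attribute(pa) |= bits & (PG_U|PG_M);
+ */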
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Map the kernel's code and data, and allocate the system page table.
+ *
+ * On the HP this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address 0 to the actual (physical)
+ * address of 0xFFxxxxxx.]
+ */
+void
+pmap_bootstrap(firstaddr, loadaddr)
+ paddr_t firstaddr;
+ paddr_t loadaddr;
+{
+ vaddr_t va;
+ u_int *pte;
+ int i;
+ struct boot_memseg *sp, *esp;
+ paddr_t fromads, toads;
+
+ fromads = firstaddr;
+ toads = maxmem << PGSHIFT;
+
+ uvmexp.pagesize = NBPG;
+ uvm_setpagesize();
+
+ /* XXX: allow for msgbuf */
+ toads -= m68k_round_page(MSGBUFSIZE);
+ msgbufpa = toads;
+ /*
+	 * The first segment of memory is always the one loadbsd chose
+	 * to load the kernel into.
+ */
+ uvm_page_physload(atop(fromads), atop(toads),
+ atop(fromads), atop(toads), VM_FREELIST_DEFAULT);
+
+ sp = memlist->m_seg;
+ esp = sp + memlist->m_nseg;
+ i = 1;
+ for (; noncontig_enable && sp < esp; sp++) {
+ if ((sp->ms_attrib & MEMF_FAST) == 0)
+ continue; /* skip if not FastMem */
+ if (firstaddr >= sp->ms_start &&
+ firstaddr < sp->ms_start + sp->ms_size)
+ continue; /* skip kernel segment */
+ if (sp->ms_size == 0)
+ continue; /* skip zero size segments */
+ fromads = sp->ms_start;
+ toads = sp->ms_start + sp->ms_size;
+#ifdef DEBUG_A4000
+ /*
+ * My A4000 doesn't seem to like Zorro II memory - this
+ * hack is to skip the motherboard memory and use the
+ * Zorro II memory. Only for trying to debug the problem.
+ * Michael L. Hitch
+ */
+ if (toads == 0x08000000)
+ continue; /* skip A4000 motherboard mem */
+#endif
+ /*
+ * Deal with Zorro II memory stolen for DMA bounce buffers.
+ * This needs to be handled better.
+ *
+ * XXX is: disabled. This is handled now in amiga_init.c
+ * by removing the stolen memory from the memlist.
+ *
+ * XXX is: enabled again, but check real size and position.
+	 * We check that z2mem_start is in this segment, and clamp the
+	 * segment's end to z2mem_start.
+ *
+ */
+ if ((fromads <= z2mem_start) && (toads > z2mem_start))
+ toads = z2mem_start;
+
+ uvm_page_physload(atop(fromads), atop(toads),
+ atop(fromads), atop(toads), (fromads & 0xff000000) ?
+ VM_FREELIST_DEFAULT : VM_FREELIST_ZORROII);
+ physmem += (toads - fromads) / NBPG;
+ ++i;
+ if (noncontig_enable == 1)
+ break; /* Only two segments enabled */
+ }
+
+ mem_size = physmem << PGSHIFT;
+ virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /*
+ * Initialize protection array.
+ */
+ amiga_protection_init();
+
+ /*
+ * Kernel page/segment table allocated in locore,
+ * just initialize pointers.
+ */
+ pmap_kernel()->pm_stpa = Sysseg_pa;
+ pmap_kernel()->pm_stab = Sysseg;
+ pmap_kernel()->pm_ptab = Sysmap;
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ pmap_ishift = SG4_SHIFT1;
+ pmap_kernel()->pm_stfree = protostfree;
+ } else
+ pmap_ishift = SG_ISHIFT;
+#endif
+
+ simple_lock_init(&pmap_kernel()->pm_lock);
+ pmap_kernel()->pm_count = 1;
+
+ /*
+ * Allocate all the submaps we need
+ */
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
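+/*
+ * i.e. SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to
+ *	CADDR1 = (caddr_t)va; va += 1*NBPG; CMAP1 = pte; pte += 1;
+ * pairing each kernel VA window with the PTE that maps it.
+ */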
+
+ va = virtual_avail;
+ pte = pmap_pte(pmap_kernel(), va);
+
+ SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
+ SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
+ SYSMAP(caddr_t ,vmpte ,vmmap ,1 )
+ SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,btoc(MSGBUFSIZE))
+
+ DCIS();
+ virtual_avail = reserve_dumppages(va);
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init()
+{
+ extern vaddr_t amigahwaddr;
+ extern u_int namigahwpg;
+ vaddr_t addr, addr2;
+ paddr_t paddr;
+ vsize_t s;
+ u_int npg;
+ struct pv_entry *pv;
+ char *attr;
+ int rv, bank;
+#if defined(M68060)
+ struct kpt_page *kptp;
+#endif
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_init()\n");
+#endif
+ /*
+	 * Now that the kernel map has been allocated, we can mark as
+	 * unavailable the regions which we have mapped in locore.
+	 * XXX in pmap_bootstrap() ???
+ */
+ addr = (vaddr_t) amigahwaddr;
+ if (uvm_map(kernel_map, &addr,
+ ptoa(namigahwpg),
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)))
+ goto bogons;
+ addr = (vaddr_t) Sysmap;
+ if (uvm_map(kernel_map, &addr, AMIGA_KPTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED))) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT) {
+ printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
+ Sysseg, Sysmap, Sysptmap);
+ printf(" vstart %lx, vend %lx\n", virtual_avail, virtual_end);
+ }
+#endif
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * initial segment table, pv_head_table and pmap_attributes.
+ */
+ for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
+ page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+#ifdef DEBUG
+ printf("pmap_init: %2d: %08lx - %08lx (%10d)\n", bank,
+ vm_physmem[bank].start << PGSHIFT,
+ vm_physmem[bank].end << PGSHIFT, page_cnt << PGSHIFT);
+#endif
+ }
+ s = AMIGA_STSIZE; /* Segtabzero */
+ s += page_cnt * sizeof(struct pv_entry); /* pv table */
+ s += page_cnt * sizeof(char); /* attribute table */
+
+ s = round_page(s);
+ addr = uvm_km_zalloc(kernel_map, s);
+ Segtabzero = (u_int *)addr;
+ pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+
+ addr += AMIGA_STSIZE;
+
+ pv_table = (pv_entry_t)addr;
+ addr += page_cnt * sizeof(struct pv_entry);
+
+ pmap_attributes = (char *)addr;
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes);
+#endif
+
+ /*
+ * Now that the pv and attribute tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_table;
+ attr = pmap_attributes;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npg = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += npg;
+ attr += npg;
+ }
+
+ /*
+ * Allocate physical memory for kernel PT pages and their management.
+	 * We need enough pages to map the page tables for each process
+ * plus some slop.
+ */
+ npg = howmany(((maxproc + 16) * AMIGA_UPTSIZE / NPTEPG), NBPG);
+#ifdef NKPTADD
+ npg += NKPTADD;
+#else
+ npg += mem_size >> NKPTADDSHIFT;
+#endif
+#if 1/*def DEBUG*/
+ printf("Maxproc %d, mem_size %ld MB: allocating %d KPT pages\n",
+ maxproc, mem_size>>20, npg);
+#endif
+ s = ptoa(npg) + round_page(npg * sizeof (struct kpt_page));
+
+ /*
+ * Verify that space will be allocated in region for which
+ * we already have kernel PT pages.
+ */
+ addr = 0;
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv || (addr + s) >= (vaddr_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ uvm_unmap(kernel_map, addr, addr + s);
+ /*
+ * Now allocate the space and link the pages together to
+ * form the KPT free list.
+ */
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+ s = ptoa(npg);
+ addr2 = addr + s;
+ kpt_pages = &((struct kpt_page *)addr2)[npg];
+ kpt_free_list = (struct kpt_page *)0;
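+	/*
+	 * Walk backwards from the end of the allocation so the free
+	 * list ends up ordered lowest page first, recording both the
+	 * VA and PA of each KPT page.
+	 */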
+ do {
+ addr2 -= NBPG;
+ (--kpt_pages)->kpt_next = kpt_free_list;
+ kpt_free_list = kpt_pages;
+ kpt_pages->kpt_va = addr2;
+ pmap_extract(pmap_kernel(), addr2, &kpt_pages->kpt_pa);
+ } while (addr != addr2);
+
+#ifdef DEBUG
+ kpt_stats.kpttotal = atop(s);
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: KPT: %ld pages from %lx to %lx\n", atop(s),
+ addr, addr + s);
+#endif
+
+ /*
+ * Allocate the segment table map and the page table map.
+ */
+ addr = amiga_uptbase;
+ if ((AMIGA_UPTMAXSIZE / AMIGA_UPTSIZE) < maxproc) {
+ s = AMIGA_UPTMAXSIZE;
+ /*
+ * XXX We don't want to hang when we run out of
+ * page tables, so we lower maxproc so that fork()
+ * will fail instead. Note that root could still raise
+ * this value via sysctl(2).
+ */
+ maxproc = AMIGA_UPTMAXSIZE / AMIGA_UPTSIZE;
+ } else
+ s = (maxproc * AMIGA_UPTSIZE);
+
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ TRUE, &pt_map_store);
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040)
+ protostfree = ~1 & ~(-1 << MAXUL2SIZE);
+#endif /* defined(M68040) || defined(M68060) */
+
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+ /*
+ * Now that this is done, mark the pages shared with the
+ * hardware page table search as non-CCB (actually, as CI).
+ *
+ * XXX Hm. Given that this is in the kernel map, can't we just
+ * use the va's?
+ */
+#ifdef M68060
+ if (machineid & AMIGA_68060) {
+ kptp = kpt_free_list;
+ while (kptp) {
+ pmap_changebit(kptp->kpt_pa, PG_CCB, 0);
+ pmap_changebit(kptp->kpt_pa, PG_CI, 1);
+ kptp = kptp->kpt_next;
+ }
+
+ paddr = (paddr_t)Segtabzeropa;
+ while (paddr < (paddr_t)Segtabzeropa + AMIGA_STSIZE) {
+ pmap_changebit(paddr, PG_CCB, 0);
+ pmap_changebit(paddr, PG_CI, 1);
+ paddr += NBPG;
+ }
+
+ DCIS();
+ }
+#endif
+}
+
+struct pv_entry *
+pmap_alloc_pv()
+{
+ struct pv_page *pvp;
+ struct pv_entry *pv;
+ int i;
+
+ if (pv_nfree == 0) {
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+ pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
+ for (i = NPVPPG - 2; i; i--, pv++)
+ pv->pv_next = pv + 1;
+ pv->pv_next = 0;
+ pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
+ TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ pv = &pvp->pvp_pv[0];
+ } else {
+ --pv_nfree;
+ pvp = pv_page_freelist.tqh_first;
+ if (--pvp->pvp_pgi.pgi_nfree == 0) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ }
+ pv = pvp->pvp_pgi.pgi_freelist;
+#ifdef DIAGNOSTIC
+ if (pv == 0)
+ panic("pmap_alloc_pv: pgi_nfree inconsistent");
+#endif
+ pvp->pvp_pgi.pgi_freelist = pv->pv_next;
+ }
+ return pv;
+}
+
+void
+pmap_free_pv(pv)
+ struct pv_entry *pv;
+{
+ struct pv_page *pvp;
+
+ pvp = (struct pv_page *)trunc_page((vaddr_t)pv);
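+	/*
+	 * Note the deliberate fall-through below: case 1 (the page was
+	 * full and now has one free entry) re-links the page on the
+	 * free list, then falls into the default case to chain the
+	 * entry itself; only a fully free page (case NPVPPG) is
+	 * returned to the VM system.
+	 */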
+ switch (++pvp->pvp_pgi.pgi_nfree) {
+ case 1:
+ TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ default:
+ pv->pv_next = pvp->pvp_pgi.pgi_freelist;
+ pvp->pvp_pgi.pgi_freelist = pv;
+ ++pv_nfree;
+ break;
+ case NPVPPG:
+ pv_nfree -= NPVPPG - 1;
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
+ break;
+ }
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vaddr_t
+pmap_map(virt, start, end, prot)
+ vaddr_t virt;
+ paddr_t start;
+ paddr_t end;
+ int prot;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_map(%lx, %lx, %lx, %x)\n", virt, start, end,
+ prot);
+#endif
+ while (start < end) {
+ pmap_enter(pmap_kernel(), virt, start, prot, 0);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return(virt);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ */
+struct pmap *
+pmap_create(void)
+{
+ struct pmap *pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+		printf("pmap_create()\n");
+#endif
+
+ pmap = (struct pmap *)malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ pmap_t pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ printf("pmap_pinit(%p)\n", pmap);
+#endif
+ /*
+ * No need to allocate page table space yet but we do need a
+ * valid segment table. Initially, we point everyone at the
+ * "null" segment table. On the first pmap_enter, a real
+ * segment table will be allocated.
+ */
+ pmap->pm_stab = Segtabzero;
+ pmap->pm_stpa = Segtabzeropa;
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040)
+ pmap->pm_stfree = protostfree;
+#endif
+ pmap->pm_count = 1;
+ simple_lock_init(&pmap->pm_lock);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ pmap_t pmap;
+{
+ int count;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_destroy(%p)\n", pmap);
+#endif
+ if (pmap == NULL)
+ return;
+
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ free((caddr_t)pmap, M_VMPMAP);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+ pmap_t pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_release(%p)\n", pmap);
+#endif
+#ifdef notdef /* DIAGNOSTIC */
+ /* count would be 0 from pmap_destroy... */
+ simple_lock(&pmap->pm_lock);
+ if (pmap->pm_count != 1)
+ panic("pmap_release count");
+#endif
+ if (pmap->pm_ptab)
+ uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
+ AMIGA_UPTSIZE);
+ if (pmap->pm_stab != Segtabzero)
+ uvm_km_free_wakeup(kernel_map, (vaddr_t)pmap->pm_stab,
+ AMIGA_STSIZE);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_reference(%p)\n", pmap);
+#endif
+ if (pmap != NULL) {
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+ }
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+{
+ paddr_t pa;
+ vaddr_t va;
+ u_int *pte;
+ int flags;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
+#endif
+
+ if (pmap == NULL)
+ return;
+
+#ifdef DEBUG
+ remove_stats.calls++;
+#endif
+ flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+ for (va = sva; va < eva; va += PAGE_SIZE) {
+ /*
+ * Weed out invalid mappings.
+ * Note: we assume that the segment table is always allocated.
+ */
+ if (!pmap_ste_v(pmap, va)) {
+ /* XXX: avoid address wrap around */
+ if (va >= m68k_trunc_seg((vaddr_t)-1))
+ break;
+ va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
+ continue;
+ }
+ pte = pmap_pte(pmap, va);
+ pa = pmap_pte_pa(pte);
+ if (pa == 0)
+ continue;
+ pmap_remove_mapping(pmap, va, pte, flags);
+ }
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+ paddr_t pa;
+ pv_entry_t pv;
+ int s;
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+ (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ printf("pmap_page_protect(%lx, %x)\n", pa, prot);
+#endif
+ if (!PAGE_IS_MANAGED(pa))
+ return;
+
+ switch (prot) {
+ case VM_PROT_ALL:
+ break;
+ /* copy_on_write */
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_changebit(pa, PG_RO, TRUE);
+ break;
+ /* remove_all */
+ default:
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ while (pv->pv_pmap != NULL) {
+ pt_entry_t *pte;
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+#ifdef DEBUG
+			if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
+			    pmap_pte_pa(pte) != pa) {
+				printf("pmap_page_protect: va %lx, pmap_ste_v %d pmap_pte_pa %08x/%lx\n",
+				    pv->pv_va, pmap_ste_v(pv->pv_pmap, pv->pv_va),
+				    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)), pa);
+				printf(" pvh %p pv %p pv_next %p\n",
+				    pa_to_pvh(pa), pv, pv->pv_next);
+				panic("pmap_page_protect: bad mapping");
+			}
+#endif
+ if (!pmap_pte_w(pte))
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ pte, PRM_TFLUSH|PRM_CFLUSH);
+ else {
+ pv = pv->pv_next;
+#ifdef DEBUG
+ if (pmapdebug & PDB_PARANOIA)
+ printf("%s wired mapping for %lx not removed\n",
+ "pmap_page_protect:", pa);
+#endif
+ if (pv == NULL)
+ break;
+ }
+ }
+ splx(s);
+ break;
+ }
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
+{
+ u_int *pte;
+ vaddr_t va;
+ boolean_t needtflush;
+ int isro;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
+ printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva,
+ prot);
+#endif
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ pte = pmap_pte(pmap, sva);
+ isro = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
+ needtflush = active_pmap(pmap);
+ for (va = sva; va < eva; va += PAGE_SIZE) {
+ /*
+ * Page table page is not allocated.
+ * Skip it, we don't want to force allocation
+ * of unnecessary PTE pages just to set the protection.
+ */
+ if (!pmap_ste_v(pmap, va)) {
+ /* XXX: avoid address wrap around */
+ if (va >= m68k_trunc_seg((vaddr_t)-1))
+ break;
+ va = m68k_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
+ pte = pmap_pte(pmap, va);
+ pte++;
+ continue;
+ }
+ /*
+ * skip if page not valid or protection is same
+ */
+ if (!pmap_pte_v(pte) || !pmap_pte_prot_chg(pte, isro)) {
+ pte++;
+ continue;
+ }
+#if defined(M68040) || defined(M68060)
+ /*
+ * Clear caches if making RO (see section
+ * "7.3 Cache Coherency" in the manual).
+ */
+ if (isro && mmutype == MMU_68040) {
+ paddr_t pa = pmap_pte_pa(pte);
+
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ pmap_pte_set_prot(pte, isro);
+ if (needtflush)
+ TBIS(va);
+ pte++;
+ }
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte cannot be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+extern int kernel_copyback;
+
+int
+pmap_enter(pmap, va, pa, prot, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int flags;
+{
+ u_int *pte;
+ int npte;
+ paddr_t opa;
+ boolean_t cacheable = TRUE;
+ boolean_t checkpv = TRUE;
+ boolean_t wired = (flags & PMAP_WIRED) != 0;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", pmap, va, pa,
+ prot, wired);
+#endif
+
+#ifdef DEBUG
+ if (pmap == pmap_kernel())
+ enter_stats.kernel++;
+ else
+ enter_stats.user++;
+#endif
+ /*
+ * For user mapping, allocate kernel VM resources if necessary.
+ */
+ if (pmap->pm_ptab == NULL)
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, AMIGA_UPTSIZE);
+
+ /*
+ * Segment table entry not valid, we need a new PT page
+ */
+ if (!pmap_ste_v(pmap, va))
+ pmap_enter_ptpage(pmap, va);
+
+ pte = pmap_pte(pmap, va);
+ opa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pte %p, *pte %x\n", pte, *(int *)pte);
+#endif
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+#ifdef DEBUG
+ enter_stats.pwchange++;
+#endif
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))){
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: wiring change -> %x\n", wired);
+#endif
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+#ifdef DEBUG
+ enter_stats.wchange++;
+#endif
+ }
+ /*
+ * Retain cache inhibition status
+ */
+ checkpv = FALSE;
+ if (pmap_pte_ci(pte))
+ cacheable = FALSE;
+ goto validate;
+ }
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: removing old mapping %lx\n", va);
+#endif
+ pmap_remove_mapping(pmap, va, pte,
+ PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
+#ifdef DEBUG
+ enter_stats.mchange++;
+#endif
+ }
+
+ /*
+ * If this is a new user mapping, increment the wiring count
+ * on this PT page. PT pages are wired down as long as there
+ * is a valid mapping in the page.
+ */
+ if (pmap != pmap_kernel())
+ pmap_ptpage_addref(trunc_page((vaddr_t)pte));
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+ if (PAGE_IS_MANAGED(pa)) {
+ pv_entry_t pv, npv;
+ int s;
+
+#ifdef DEBUG
+ enter_stats.managed++;
+#endif
+ pv = pa_to_pvh(pa);
+ s = splimp();
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pv at %p: %lx/%p/%p\n", pv, pv->pv_va,
+ pv->pv_pmap, pv->pv_next);
+#endif
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+#ifdef DEBUG
+ enter_stats.firstpv++;
+#endif
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ pv->pv_ptste = NULL;
+ pv->pv_ptpmap = NULL;
+ pv->pv_flags = 0;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+#ifdef DEBUG
+ for (npv = pv; npv; npv = npv->pv_next)
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ panic("pmap_enter: already in pv_tab");
+#endif
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ npv->pv_ptste = NULL;
+ npv->pv_ptpmap = NULL;
+ pv->pv_next = npv;
+#ifdef DEBUG
+ if (!npv->pv_next)
+ enter_stats.secondpv++;
+#endif
+ }
+ splx(s);
+ }
+ /*
+ * Assumption: if it is not part of our managed memory
+	 * then it must be device memory which may be volatile.
+ */
+ else if (pmap_initialized) {
+ checkpv = cacheable = FALSE;
+#ifdef DEBUG
+ enter_stats.unmanaged++;
+#endif
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Now validate mapping with desired protection/wiring.
+ * Assume uniform modified and referenced status for all
+ * AMIGA pages in a MACH page.
+ */
+#if defined(M68040) || defined(M68060)
+#if DEBUG
+ if (pmapdebug & 0x10000 && mmutype == MMU_68040 &&
+ pmap == pmap_kernel()) {
+ char *s;
+ if (va >= amiga_uptbase &&
+ va < (amiga_uptbase + AMIGA_UPTMAXSIZE))
+ s = "UPT";
+ else if (va >= (u_int)Sysmap &&
+ va < ((u_int)Sysmap + AMIGA_KPTSIZE))
+ s = "KPT";
+ else if (va >= (u_int)pmap->pm_stab &&
+ va < ((u_int)pmap->pm_stab + AMIGA_STSIZE))
+ s = "KST";
+ else if (curproc &&
+ va >= (u_int)curproc->p_vmspace->vm_map.pmap->pm_stab &&
+ va < ((u_int)curproc->p_vmspace->vm_map.pmap->pm_stab +
+ AMIGA_STSIZE))
+ s = "UST";
+ else
+ s = "other";
+		printf("pmap_enter: validating %s kernel page at %lx -> %lx\n",
+ s, va, pa);
+
+ }
+#endif
+ if (mmutype == MMU_68040 && pmap == pmap_kernel() &&
+ ((va >= amiga_uptbase && va < (amiga_uptbase + AMIGA_UPTMAXSIZE)) ||
+ (va >= (u_int)Sysmap && va < ((u_int)Sysmap + AMIGA_KPTSIZE))))
+ cacheable = FALSE; /* don't cache user page tables */
+
+	/* Don't cache if the process can't take it, e.g. SunOS executables. */
+ if (mmutype == MMU_68040 && pmap != pmap_kernel() &&
+ (curproc->p_md.md_flags & MDP_UNCACHE_WX) &&
+ (prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE))
+ checkpv = cacheable = FALSE;
+#endif
+ npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
+ npte |= (*(int *)pte & (PG_M|PG_U));
+ if (wired)
+ npte |= PG_W;
+ if (!checkpv && !cacheable)
+#if defined(M68060) && defined(NO_SLOW_CIRRUS)
+#if defined(M68040) || defined(M68030) || defined(M68020)
+ npte |= (cputype == CPU_68060 ? PG_CIN : PG_CI);
+#else
+ npte |= PG_CIN;
+#endif
+#else
+ npte |= PG_CI;
+#endif
+#if defined(M68040) || defined(M68060)
+ else if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW &&
+ (kernel_copyback || pmap != pmap_kernel()))
+ npte |= PG_CCB; /* cache copyback */
+#endif
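+	/*
+	 * In short: uncacheable mappings get PG_CI (PG_CIN on a 68060
+	 * built with NO_SLOW_CIRRUS), and writable 68040/68060 mappings
+	 * become copyback unless they are kernel mappings with
+	 * kernel_copyback disabled.
+	 */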
+ /*
+ * Remember if this was a wiring-only change.
+ * If so, we need not flush the TLB and caches.
+ */
+ wired = ((*(int *)pte ^ npte) == PG_W);
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040 && !wired) {
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: new pte value %x\n", npte);
+#endif
+ *(int *)pte++ = npte;
+ if (!wired && active_pmap(pmap))
+ TBIS(va);
+#ifdef DEBUG
+ if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
+ va -= PAGE_SIZE;
+ pmap_check_wiring("enter", trunc_page((vaddr_t)pmap_pte(pmap, va)));
+ }
+#endif
+
+ return (0);
+}
+
+/*
+ * Routine: pmap_unwire
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_unwire(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
+{
+ u_int *pte;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_unwire(%p, %lx)\n", pmap, va);
+#endif
+ if (pmap == NULL)
+ return;
+
+ pte = pmap_pte(pmap, va);
+#ifdef DEBUG
+ /*
+ * Page table page is not allocated.
+ * Should this ever happen? Ignore it for now,
+ * we don't want to force allocation of unnecessary PTE pages.
+ */
+ if (!pmap_ste_v(pmap, va)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid STE for %lx\n",
+ va);
+ return;
+ }
+ /*
+ * Page not valid. Should this ever happen?
+ * Just continue and change wiring anyway.
+ */
+ if (!pmap_pte_v(pte)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid PTE for %lx\n",
+ va);
+ }
+#endif
+ if (pmap_pte_w(pte)) {
+ pmap->pm_stats.wired_count--;
+ }
+ /*
+ * Wiring is not a hardware characteristic so there is no need
+ * to invalidate TLB.
+ */
+ pmap_pte_set_w(pte, 0);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+boolean_t
+pmap_extract(pmap, va, pap)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t *pap;
+{
+ paddr_t pa;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_extract(%p, %lx) -> ", pmap, va);
+#endif
+ if (pmap && pmap_ste_v(pmap, va))
+ pa = *(int *)pmap_pte(pmap, va);
+ else
+ return (FALSE);
+ *pap = (pa & PG_FRAME) | (va & ~PG_FRAME);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("%lx\n", *pap);
+#endif
+ return (TRUE);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vaddr_t dst_addr;
+ vsize_t len;
+ vaddr_t src_addr;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n", dst_pmap,
+ src_pmap, dst_addr, len, src_addr);
+#endif
+}
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(pmap)
+ pmap_t pmap;
+{
+ int bank, s;
+
+ if (pmap != pmap_kernel())
+ return;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_collect(%p)\n", pmap);
+ kpt_stats.collectscans++;
+#endif
+ s = splimp();
+
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+
+#ifdef notyet
+ /* Go compact and garbage-collect the pv_table. */
+ pmap_collect_pv();
+#endif
+ splx(s);
+}
+
+/*
+ * Routine: pmap_collect1()
+ *
+ * Function:
+ * Helper function for pmap_collect(). Do the actual
+ * garbage-collection of range of physical addresses.
+ */
+void
+pmap_collect1(pmap, startpa, endpa)
+ pmap_t pmap;
+ paddr_t startpa, endpa;
+{
+ paddr_t pa;
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ paddr_t kpa;
+#ifdef DEBUG
+ int *ste;
+ int opmapdebug = 0;
+#endif
+
+ for (pa = startpa; pa < endpa; pa += NBPG) {
+ struct kpt_page *kpt, **pkpt;
+
+ /*
+ * Locate physical pages which are being used as kernel
+ * page table pages.
+ */
+ pv = pa_to_pvh(pa);
+ if (pv->pv_pmap != pmap_kernel() ||
+ !(pv->pv_flags & PV_PTPAGE))
+ continue;
+ do {
+ if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
+ break;
+		} while ((pv = pv->pv_next) != NULL);
+ if (pv == NULL)
+ continue;
+#ifdef DEBUG
+ if (pv->pv_va < (vaddr_t)Sysmap ||
+ pv->pv_va >= (vaddr_t)Sysmap + AMIGA_KPTSIZE)
+ printf("collect: kernel PT VA out of range\n");
+ else
+ goto ok;
+ pmap_pvdump(pa);
+ continue;
+ok:
+#endif
+		pte = (pt_entry_t *)(pv->pv_va + NBPG);
+ while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
+ ;
+ if (pte >= (pt_entry_t *)pv->pv_va)
+ continue;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
+ printf(
+ "collect: freeing KPT page at %lx (ste %x@%p)\n",
+ pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
+ opmapdebug = pmapdebug;
+ pmapdebug |= PDB_PTPAGE;
+ }
+
+ ste = (int *)pv->pv_ptste;
+#endif
+ /*
+ * If all entries were invalid we can remove the page.
+ * We call pmap_remove to take care of invalidating ST
+ * and Sysptmap entries.
+ */
+ pmap_extract(pmap, pv->pv_va, &kpa);
+ pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
+ PRM_TFLUSH|PRM_CFLUSH);
+
+ /*
+ * Use the physical address to locate the original
+ * (kmem_alloc assigned) address for the page and put
+ * that page back on the free list.
+ */
+ for (pkpt = &kpt_used_list, kpt = *pkpt;
+ kpt != (struct kpt_page *)0;
+ pkpt = &kpt->kpt_next, kpt = *pkpt)
+ if (kpt->kpt_pa == kpa)
+ break;
+#ifdef DEBUG
+ if (kpt == (struct kpt_page *)0)
+ panic("pmap_collect: lost a KPT page");
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ printf("collect: %lx (%lx) to free list\n",
+ kpt->kpt_va, kpa);
+#endif
+ *pkpt = kpt->kpt_next;
+ kpt->kpt_next = kpt_free_list;
+ kpt_free_list = kpt;
+#ifdef DEBUG
+ kpt_stats.kptinuse--;
+ kpt_stats.collectpages++;
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ pmapdebug = opmapdebug;
+
+ if (*ste)
+ printf("collect: kernel STE at %p still valid (%x)\n",
+ ste, *ste);
+ ste =
+ (int *)&Sysptmap[(u_int *)ste-pmap_ste(pmap_kernel(), 0)];
+ if (*ste)
+ printf(
+ "collect: kernel PTmap at %p still valid (%x)\n",
+ ste, *ste);
+#endif
+ }
+}
+
+/*
+ * Mark that a processor is about to be used by a given pmap.
+ */
+void
+pmap_activate(p)
+ struct proc *p;
+{
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
+ printf("pmap_activate(%p)\n", p);
+#endif
+ PMAP_ACTIVATE(pmap, p == curproc);
+}
+
+/*
+ * Mark that a processor is no longer in use by a given pmap.
+ */
+void
+pmap_deactivate(p)
+ struct proc *p;
+{
+}
+
+/*
+ * pmap_zero_page zeros the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bzero to clear its contents, one machine dependent page
+ * at a time.
+ */
+void
+pmap_zero_page(phys)
+ paddr_t phys;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_zero_page(%lx)\n", phys);
+#endif
+ phys >>= PG_SHIFT;
+ clearseg(phys);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+void
+pmap_copy_page(src, dst)
+ paddr_t src, dst;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy_page(%lx, %lx)\n", src, dst);
+#endif
+ src >>= PG_SHIFT;
+ dst >>= PG_SHIFT;
+ physcopyseg(src, dst);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_modify(%lx)\n", pa);
+#endif
+ ret = pmap_is_modified(pg);
+
+ pmap_changebit(pa, PG_M, FALSE);
+
+ return (ret);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_reference(%lx)\n", pa);
+#endif
+ ret = pmap_is_referenced(pg);
+ pmap_changebit(pa, PG_U, FALSE);
+
+ return (ret);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_referenced(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_U);
+ printf("pmap_is_referenced(%lx) -> %c\n", pa, "FT"[rv]);
+ return (rv);
+ }
+#endif
+ return (pmap_testbit(pa, PG_U));
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_modified(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_M);
+ printf("pmap_is_modified(%lx) -> %c\n", pa, "FT"[rv]);
+ return (rv);
+ }
+#endif
+ return (pmap_testbit(pa, PG_M));
+}
+
+paddr_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return(m68k_ptob(ppn));
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/*
+ * pmap_remove_mapping:
+ *
+ * Invalidate a single page denoted by pmap/va.
+ *
+ * If (pte != NULL), it is the already computed PTE for the page.
+ *
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ *
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache
+ * information.
+ *
+ * If (flags & PRM_KEEPPTPAGE), we don't free the page table page
+ * if the reference drops to zero.
+ */
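+/*
+ * For example, pmap_remove() above passes PRM_TFLUSH only for active
+ * pmaps, pmap_page_protect() uses PRM_TFLUSH|PRM_CFLUSH, and
+ * pmap_enter() adds PRM_KEEPPTPAGE when replacing an existing mapping
+ * so the PT page survives for the new entry.
+ */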
+static void
+pmap_remove_mapping(pmap, va, pte, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ pt_entry_t *pte;
+ int flags;
+{
+ paddr_t pa;
+ struct pv_entry *pv, *npv;
+ pmap_t ptpmap;
+ st_entry_t *ste;
+ int s, bits;
+#if defined(M68040) || defined(M68060)
+ int i;
+#endif
+#ifdef DEBUG
+ pt_entry_t opte;
+#endif
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove_mapping(%p, %lx, %p, %x)\n",
+ pmap, va, pte, flags);
+#endif
+
+ /*
+ * PTE not provided, compute it from pmap and va.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ pte = pmap_pte(pmap, va);
+ if (*pte == PG_NV)
+ return;
+ }
+
+ pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ opte = *pte;
+#endif
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTE after saving the reference modify info.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_REMOVE)
+ printf ("remove: invalidating pte at %p\n", pte);
+#endif
+
+ bits = *pte & (PG_U|PG_M);
+ *pte = PG_NV;
+ if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+ TBIS(va);
+ /*
+ * For user mappings decrement the wiring count on
+ * the PT page.
+ */
+ if (pmap != pmap_kernel()) {
+ vaddr_t ptpva = trunc_page((vaddr_t)pte);
+ int refs = pmap_ptpage_delref(ptpva);
+#ifdef DEBUG
+ if (pmapdebug & PDB_WIRING)
+ pmap_check_wiring("remove", ptpva);
+#endif
+ /*
+ * If reference count drops to 1, and we're not instructed
+ * to keep it around, free the PT page.
+ *
+ * Note: refcnt == 1 comes from the fact that we allocate
+ * the page with uvm_fault_wire(), which initially wires
+ * the page. The first reference we actually add causes
+ * the refcnt to be 2.
+ */
+ if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+ struct pv_entry *pv;
+ paddr_t pa;
+
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
+#ifdef DIAGNOSTIC
+ if (PAGE_IS_MANAGED(pa) == 0)
+ panic("pmap_remove_mapping: unmanaged PT page");
+#endif
+ pv = pa_to_pvh(pa);
+#ifdef DIAGNOSTIC
+ if (pv->pv_ptste == NULL)
+ panic("pmap_remove_mapping: ptste == NULL");
+ if (pv->pv_pmap != pmap_kernel() ||
+ pv->pv_va != ptpva ||
+ pv->pv_next != NULL)
+ panic("pmap_remove_mapping: "
+ "bad PT page pmap %p, va 0x%lx, next %p",
+ pv->pv_pmap, pv->pv_va, pv->pv_next);
+#endif
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ NULL, PRM_TFLUSH|PRM_CFLUSH);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: PT page 0x%lx (0x%lx) freed\n",
+ ptpva, pa);
+#endif
+ }
+ }
+
+ /*
+ * If this isn't a managed page, we are all done.
+ */
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+ /*
+ * Otherwise remove it from the PV table
+ * (raise IPL since we may be called at interrupt time).
+ */
+ pv = pa_to_pvh(pa);
+ ste = ST_ENTRY_NULL;
+ s = splimp();
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ ste = pv->pv_ptste;
+ ptpmap = pv->pv_ptpmap;
+ npv = pv->pv_next;
+ if (npv) {
+ npv->pv_flags = pv->pv_flags;
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else
+ pv->pv_pmap = NULL;
+#ifdef DEBUG
+ remove_stats.pvfirst++;
+#endif
+ } else {
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+#ifdef DEBUG
+ remove_stats.pvsearch++;
+#endif
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ break;
+ pv = npv;
+ }
+#ifdef DEBUG
+ if (npv == NULL)
+ panic("pmap_remove: PA not in pv_tab");
+#endif
+ ste = npv->pv_ptste;
+ ptpmap = npv->pv_ptpmap;
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ pv = pa_to_pvh(pa);
+ }
+
+ /*
+ * If this was a PT page we must also remove the
+ * mapping from the associated segment table.
+ */
+ if (ste) {
+#ifdef DEBUG
+ remove_stats.ptinvalid++;
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: ste was %x@%p pte was %x@%p\n",
+ *ste, ste, opte, pmap_pte(pmap, va));
+#endif
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ /*
+ * On the 68040, the PT page contains NPTEPG/SG4_LEV3SIZE
+ * page tables, so we need to remove all the associated
+ * segment table entries
+ * (This may be incorrect: if a single page table is
+ * being removed, the whole page should not be
+ * removed.)
+ */
+ for (i = 0; i < NPTEPG / SG4_LEV3SIZE; ++i)
+ *ste++ = SG_NV;
+ ste -= NPTEPG / SG4_LEV3SIZE;
+#ifdef DEBUG
+ if (pmapdebug &(PDB_REMOVE|PDB_SEGTAB|0x10000))
+ printf("pmap_remove:PT at %lx removed\n", va);
+#endif
+ } else
+#endif /* defined(M68040) || defined(M68060) */
+ *ste = SG_NV;
+ /*
+ * If it was a user PT page, we decrement the
+ * reference count on the segment table as well,
+ * freeing it if it is now empty.
+ */
+ if (ptpmap != pmap_kernel()) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: stab %p, refcnt %d\n",
+ ptpmap->pm_stab,
+ ptpmap->pm_sref - 1);
+ if ((pmapdebug & PDB_PARANOIA) &&
+ ptpmap->pm_stab != (st_entry_t *)trunc_page((vaddr_t)ste))
+ panic("remove: bogus ste");
+#endif
+ if (--(ptpmap->pm_sref) == 0) {
+#ifdef DEBUG
+ if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: free stab %p\n",
+ ptpmap->pm_stab);
+#endif
+ uvm_km_free_wakeup(kernel_map,
+ (vaddr_t)ptpmap->pm_stab, AMIGA_STSIZE);
+ ptpmap->pm_stab = Segtabzero;
+ ptpmap->pm_stpa = Segtabzeropa;
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040)
+ ptpmap->pm_stfree = protostfree;
+#endif
+ /*
+ * XXX may have changed segment table
+ * pointer for current process so
+ * update now to reload hardware.
+ */
+ if (active_user_pmap(ptpmap))
+ PMAP_ACTIVATE(ptpmap, 1);
+ }
+#ifdef DEBUG
+ else if (ptpmap->pm_sref < 0)
+ panic("remove: sref < 0");
+#endif
+ }
+#if 0
+ /*
+ * XXX this should be unnecessary as we have been
+ * flushing individual mappings as we go.
+ */
+ if (ptpmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ pv->pv_flags &= ~PV_PTPAGE;
+ ptpmap->pm_ptpages--;
+ }
+ /*
+ * Update saved attributes for managed page
+ */
+ *pa_to_attribute(pa) |= bits;
+ splx(s);
+}
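The 68040 branch above invalidates NPTEPG/SG4_LEV3SIZE consecutive segment
table entries at once. A standalone sketch of that arithmetic, assuming the
amiga constants from pte.h (8K pages, 4-byte PTEs, 32-entry level 3 tables);
illustrative only, not part of this commit:

	#include <stdio.h>

	#define NBPG		8192		/* amiga page size, PGSHIFT == 13 */
	#define NPTEPG		(NBPG / 4)	/* 4-byte PTEs: 2048 per PT page */
	#define SG4_LEV3SIZE	32		/* entries per 68040 level 3 table */

	int
	main(void)
	{
		/* a PT page holds 2048/32 = 64 level 3 tables, so 64
		 * level 2 STEs point into it and must go away together */
		printf("%d STEs per PT page\n", NPTEPG / SG4_LEV3SIZE);
		return (0);
	}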
+
+/*
+ * pmap_ptpage_addref:
+ *
+ * Add a reference to the specified PT page.
+ */
+void
+pmap_ptpage_addref(ptpva)
+ vaddr_t ptpva;
+{
+ struct vm_page *m;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ m->wire_count++;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+}
+
+/*
+ * pmap_ptpage_delref:
+ *
+ * Delete a reference to the specified PT page.
+ */
+int
+pmap_ptpage_delref(ptpva)
+ vaddr_t ptpva;
+{
+ struct vm_page *m;
+ int rv;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ rv = --m->wire_count;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+ return (rv);
+}
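As the comment in pmap_remove_mapping() notes, a user PT page starts life
with wire_count 1 from uvm_fault_wire(), so a delref that returns 1 means no
real mappings remain. A minimal model of that convention; the ptpage_* names
are hypothetical stand-ins for the routines above:

	#include <assert.h>

	struct ptpage {
		int wire_count;		/* stands in for struct vm_page */
	};

	static void
	ptpage_addref(struct ptpage *m)
	{
		m->wire_count++;
	}

	static int
	ptpage_delref(struct ptpage *m)
	{
		return (--m->wire_count);
	}

	int
	main(void)
	{
		struct ptpage pt = { 1 };	/* initial wire from uvm_fault_wire() */

		ptpage_addref(&pt);		/* first mapping: count becomes 2 */
		/* removing the last mapping leaves only the initial wire,
		 * the "refs == 1, free the PT page" case above */
		assert(ptpage_delref(&pt) == 1);
		return (0);
	}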
+
+static void
+amiga_protection_init()
+{
+ int *kp, prot;
+
+ kp = protection_codes;
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = 0;
+ break;
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = PG_RO;
+ break;
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = PG_RW;
+ break;
+ }
+ }
+}
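A quick standalone check of the table amiga_protection_init() builds,
assuming the usual BSD VM_PROT encodings (READ 1, WRITE 2, EXECUTE 4): any
writable combination gets PG_RW, any other non-empty combination PG_RO.

	#include <stdio.h>

	#define VM_PROT_READ	0x01
	#define VM_PROT_WRITE	0x02
	#define VM_PROT_EXECUTE	0x04
	#define PG_RO		0x04
	#define PG_RW		0x00

	int
	main(void)
	{
		int codes[8], prot;

		for (prot = 0; prot < 8; prot++) {
			if (prot == 0)
				codes[prot] = 0;	/* no access at all */
			else if (prot & VM_PROT_WRITE)
				codes[prot] = PG_RW;
			else
				codes[prot] = PG_RO;	/* read and/or execute */
		}
		printf("R|X -> %d, R|W -> %d\n",
		    codes[VM_PROT_READ|VM_PROT_EXECUTE],
		    codes[VM_PROT_READ|VM_PROT_WRITE]);
		return (0);
	}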
+
+/* static */
+boolean_t
+pmap_testbit(pa, bit)
+ paddr_t pa;
+ int bit;
+{
+ pv_entry_t pv;
+ int *pte;
+ int s;
+
+ if (!PAGE_IS_MANAGED(pa))
+ return (FALSE);
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Check saved info first
+ */
+ if (*pa_to_attribute(pa) & bit) {
+ splx(s);
+ return (TRUE);
+ }
+ /*
+	 * Not found there; check the current mappings, returning
+	 * immediately if any of them has the bit set.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ pte = (int *)pmap_pte(pv->pv_pmap, pv->pv_va);
+ if (*pte & bit) {
+ splx(s);
+ return (TRUE);
+ }
+ }
+ }
+ splx(s);
+ return (FALSE);
+}
+
+static void
+pmap_changebit(pa, bit, setem)
+ paddr_t pa;
+ int bit;
+ boolean_t setem;
+{
+ pv_entry_t pv;
+ int *pte, npte;
+ vaddr_t va;
+ boolean_t firstpage;
+ int s;
+
+ firstpage = TRUE;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_BITS)
+ printf("pmap_changebit(%lx, %x, %s)\n", pa, bit,
+ setem ? "set" : "clear");
+#endif
+ if (!PAGE_IS_MANAGED(pa))
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Clear saved attributes (modify, reference)
+ */
+ if (!setem)
+ *pa_to_attribute(pa) &= ~bit;
+ /*
+	 * Loop over all current mappings, setting or clearing as appropriate.
+ * If setting RO do we need to clear the VAC?
+ */
+ if (pv->pv_pmap == NULL) {
+ splx(s);
+ return;
+ }
+ for (; pv; pv = pv->pv_next) {
+ va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (bit == PG_RO) {
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+ }
+
+ pte = (int *)pmap_pte(pv->pv_pmap, va);
+ if (setem)
+ npte = *pte | bit;
+ else
+ npte = *pte & ~bit;
+ if (*pte != npte) {
+ /*
+ * If we are changing caching status or
+ * protection make sure the caches are
+ * flushed (but only once).
+ */
+#if defined(M68040) || defined(M68060)
+ if (firstpage && mmutype == MMU_68040 &&
+ ((bit == PG_RO && setem) || (bit & PG_CMASK))) {
+ firstpage = FALSE;
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
+ if (active_pmap(pv->pv_pmap))
+ TBIS(va);
+ }
+ }
+ splx(s);
+}
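pmap_testbit() checks the saved attribute byte before walking the pv list
because pmap_remove_mapping() folds PG_U/PG_M into *pa_to_attribute() as
mappings go away. A toy model of that two-level lookup, with plain ints
standing in for the PTEs and the attribute array:

	#include <stdio.h>

	#define PG_U	0x08
	#define PG_M	0x10

	static int attr;		/* *pa_to_attribute(pa) */
	static int ptes[2];		/* live mappings of the page */

	static int
	testbit(int bit)
	{
		int i;

		if (attr & bit)			/* check saved info first */
			return (1);
		for (i = 0; i < 2; i++)		/* then current mappings */
			if (ptes[i] & bit)
				return (1);
		return (0);
	}

	int
	main(void)
	{
		ptes[1] |= PG_M;		/* MMU sets the modify bit */
		printf("modified: %d\n", testbit(PG_M));
		attr |= ptes[1] & (PG_U|PG_M);	/* remove_mapping saves it */
		ptes[1] = 0;			/* ... and the mapping is gone */
		printf("still modified: %d\n", testbit(PG_M));
		return (0);
	}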
+
+/* static */
+void
+pmap_enter_ptpage(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
+{
+ paddr_t ptpa;
+ pv_entry_t pv;
+#ifdef M68060
+ u_int stpa;
+#endif
+ u_int *ste;
+ int s;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
+ printf("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va);
+ enter_stats.ptpneeded++;
+#endif
+ /*
+ * Allocate a segment table if necessary. Note that it is allocated
+ * from kernel_map and not pt_map. This keeps user page tables
+ * aligned on segment boundaries in the kernel address space.
+ * The segment table is wired down. It will be freed whenever the
+ * reference count drops to zero.
+ */
+ if (pmap->pm_stab == Segtabzero) {
+ /* XXX Atari uses kernel_map here: */
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(kernel_map, AMIGA_STSIZE);
+ pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
+ (paddr_t *)&pmap->pm_stpa);
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+#if defined(M68060)
+ stpa = (u_int)pmap->pm_stpa;
+ if (machineid & AMIGA_68060) {
+ while (stpa < (u_int)pmap->pm_stpa +
+ AMIGA_STSIZE) {
+ pmap_changebit(stpa, PG_CCB, 0);
+ pmap_changebit(stpa, PG_CI, 1);
+ stpa += NBPG;
+ }
+ DCIS(); /* XXX */
+ }
+#endif
+ pmap->pm_stfree = protostfree;
+ }
+#endif
+ /*
+ * XXX may have changed segment table pointer for current
+ * process so update now to reload hardware.
+ */
+ if (active_user_pmap(pmap))
+ PMAP_ACTIVATE(pmap, 1);
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter_pt: pmap %p stab %p(%p)\n", pmap,
+ pmap->pm_stab, pmap->pm_stpa);
+#endif
+ }
+
+ ste = pmap_ste(pmap, va);
+
+#if defined(M68040) || defined(M68060)
+ /*
+ * Allocate level 2 descriptor block if necessary
+ */
+ if (mmutype == MMU_68040) {
+ if (*ste == SG_NV) {
+ int ix;
+ caddr_t addr;
+
+ ix = bmtol2(pmap->pm_stfree);
+ if (ix == -1)
+ panic("enter_pt: out of address space");
+ pmap->pm_stfree &= ~l2tobm(ix);
+ addr = (caddr_t)&pmap->pm_stab[ix * SG4_LEV2SIZE];
+ bzero(addr, SG4_LEV2SIZE * sizeof(st_entry_t));
+ addr = (caddr_t)&pmap->pm_stpa[ix * SG4_LEV2SIZE];
+ *ste = (u_int) addr | SG_RW | SG_U | SG_V;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter_pt: alloc ste2 %d(%p)\n", ix,
+ addr);
+#endif
+ }
+ ste = pmap_ste2(pmap, va);
+ /*
+ * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
+	 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
+ * (64) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
+ * PT page -- the unit of allocation. We set 'ste' to point
+ * to the first entry of that chunk which is validated in its
+ * entirety below.
+ */
+ ste = (u_int *)((int)ste & ~(NBPG / SG4_LEV3SIZE - 1));
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter_pt: ste2 %p (%p)\n", pmap_ste2(pmap, va),
+ ste);
+#endif
+ }
+#endif
+ va = trunc_page((vaddr_t)pmap_pte(pmap, va));
+
+ /*
+ * In the kernel we allocate a page from the kernel PT page
+ * free list and map it into the kernel page table map (via
+ * pmap_enter).
+ */
+ if (pmap == pmap_kernel()) {
+ struct kpt_page *kpt;
+
+ s = splimp();
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
+ /*
+ * No PT pages available.
+ * Try once to free up unused ones.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_COLLECT)
+ printf(
+ "enter_pt: no KPT pages, collecting...\n");
+#endif
+ pmap_collect(pmap_kernel());
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0)
+ panic("pmap_enter_ptpage: can't get KPT page");
+ }
+#ifdef DEBUG
+ if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
+ kpt_stats.kptmaxuse = kpt_stats.kptinuse;
+#endif
+ kpt_free_list = kpt->kpt_next;
+ kpt->kpt_next = kpt_used_list;
+ kpt_used_list = kpt;
+ ptpa = kpt->kpt_pa;
+ bzero((char *)kpt->kpt_va, NBPG);
+ pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT,
+ VM_PROT_DEFAULT|PMAP_WIRED);
+#if defined(M68060)
+ if (machineid & AMIGA_68060) {
+ pmap_changebit(ptpa, PG_CCB, 0);
+ pmap_changebit(ptpa, PG_CI, 1);
+ DCIS();
+ }
+#endif
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+ printf(
+ "enter_pt: add &Sysptmap[%d]: %x (KPT page %lx)\n",
+ ste - pmap_ste(pmap, 0),
+ *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
+ kpt->kpt_va);
+#endif
+ splx(s);
+ }
+ /*
+ * For user processes we just simulate a fault on that location
+ * letting the VM system allocate a zero-filled page.
+ *
+ * Note we use a wire-fault to keep the page off the paging
+ * queues. This sets our PT page's reference (wire) count to
+ * 1, which is what we use to check if the page can be freed.
+ * See pmap_remove_mapping().
+ */
+ else {
+ /*
+ * Count the segment table reference now so that we won't
+ * lose the segment table when low on memory.
+ */
+ pmap->pm_sref++;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+ printf("enter_pt: about to fault UPT pg at %lx\n", va);
+#endif
+ s = uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE);
+ if (s) {
+			printf("uvm_fault_wire(pt_map, 0x%lx, 0x%lx, RW) "
+ "-> %d\n", va, va + PAGE_SIZE, s);
+ panic("pmap_enter: uvm_fault_wire failed");
+ }
+ ptpa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
+#if 0 /* XXXX what is this? XXXX */
+ /*
+ * Mark the page clean now to avoid its pageout (and
+ * hence creation of a pager) between now and when it
+ * is wired; i.e. while it is on a paging queue.
+ */
+ PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
+#endif
+ }
+
+#ifdef M68060
+	if (machineid & AMIGA_68060) {
+ pmap_changebit(ptpa, PG_CCB, 0);
+ pmap_changebit(ptpa, PG_CI, 1);
+ DCIS();
+ }
+#endif
+ /*
+ * Locate the PV entry in the kernel for this PT page and
+ * record the STE address. This is so that we can invalidate
+ * the STE when we remove the mapping for the page.
+ */
+ pv = pa_to_pvh(ptpa);
+ s = splimp();
+ if (pv) {
+ pv->pv_flags |= PV_PTPAGE;
+ do {
+ if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
+ break;
+		} while ((pv = pv->pv_next) != NULL);
+ }
+#ifdef DEBUG
+ if (pv == NULL) {
+ printf("enter_pt: PV entry for PT page %lx not found\n", ptpa);
+ panic("pmap_enter_ptpage: PT page not entered");
+ }
+#endif
+ pv->pv_ptste = ste;
+ pv->pv_ptpmap = pmap;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+ printf("enter_pt: new PT page at PA %lx, ste at %p\n", ptpa,
+ ste);
+#endif
+
+ /*
+ * Map the new PT page into the segment table.
+ * Also increment the reference count on the segment table if this
+ * was a user page table page. Note that we don't use vm_map_pageable
+ * to keep the count like we do for PT pages; this is mostly because
+ * it would be difficult to identify ST pages in pmap_pageable to
+ * release them. We also avoid the overhead of vm_map_pageable.
+ */
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ u_int *este;
+
+ for (este = &ste[NPTEPG / SG4_LEV3SIZE]; ste < este; ++ste) {
+ *ste = ptpa | SG_U | SG_RW | SG_V;
+ ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
+ }
+ }
+ else
+#endif
+ *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
+ if (pmap != pmap_kernel()) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter_pt: stab %p refcnt %d\n", pmap->pm_stab,
+ pmap->pm_sref);
+#endif
+ }
+ /*
+ * Flush stale TLB info.
+ */
+ if (pmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+ pmap->pm_ptpages++;
+ splx(s);
+}
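For kernel PT pages, the code above just pops a kpt_page off kpt_free_list
and moves it to kpt_used_list, falling back to pmap_collect() when the free
list is empty. A self-contained sketch of that list handling, illustrative
only:

	#include <stdio.h>

	struct kpt_page {
		struct kpt_page	*kpt_next;
		unsigned long	kpt_va, kpt_pa;
	};

	static struct kpt_page *kpt_free_list, *kpt_used_list;

	static struct kpt_page *
	kpt_get(void)
	{
		struct kpt_page *kpt;

		if ((kpt = kpt_free_list) == NULL)
			return (NULL);		/* caller would try pmap_collect() */
		kpt_free_list = kpt->kpt_next;	/* pop from the free list */
		kpt->kpt_next = kpt_used_list;	/* push onto the used list */
		kpt_used_list = kpt;
		return (kpt);
	}

	int
	main(void)
	{
		struct kpt_page pg = { NULL, 0x80000, 0x40000 };

		kpt_free_list = &pg;
		printf("got KPT page va %#lx pa %#lx\n",
		    kpt_get()->kpt_va, kpt_used_list->kpt_pa);
		return (0);
	}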
+
+#ifdef DEBUG
+void
+pmap_pvdump(pa)
+ paddr_t pa;
+{
+ pv_entry_t pv;
+
+ printf("pa %lx", pa);
+ for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
+ printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
+ pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
+ pv->pv_flags);
+ printf("\n");
+}
+
+/*
+ * pmap_check_wiring:
+ *
+ * Count the number of valid mappings in the specified PT page,
+ * and ensure that it is consistent with the number of wirings
+ * to that page that the VM system has.
+ */
+void
+pmap_check_wiring(str, va)
+ char *str;
+ vaddr_t va;
+{
+ pt_entry_t *pte;
+ paddr_t pa;
+ struct vm_page *m;
+ int count;
+
+ if (!pmap_ste_v(pmap_kernel(), va) ||
+ !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
+ return;
+
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
+ m = PHYS_TO_VM_PAGE(pa);
+ if (m->wire_count < 1) {
+ printf("*%s*: 0x%lx: wire count %d\n", str, va, m->wire_count);
+ return;
+ }
+
+ count = 0;
+ for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
+ if (*pte)
+ count++;
+ if ((m->wire_count - 1) != count)
+ printf("*%s*: 0x%lx: w%d/a%d\n",
+ str, va, (m->wire_count-1), count);
+}
+#endif
+
+/*
+ * Routine: pmap_virtual_space
+ *
+ * Function:
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap. Called by
+ * vm_bootstrap_steal_memory().
+ */
+void
+pmap_virtual_space(vstartp, vendp)
+ vaddr_t *vstartp, *vendp;
+{
+
+ *vstartp = virtual_avail;
+ *vendp = virtual_end;
+}
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot,
+ VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
diff --git a/sys/arch/amiga/amiga/pmap_bootstrap.c b/sys/arch/amiga/amiga/pmap_bootstrap.c
deleted file mode 100644
index 03636122ba0..00000000000
--- a/sys/arch/amiga/amiga/pmap_bootstrap.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/* $OpenBSD: pmap_bootstrap.c,v 1.2 2001/12/14 21:44:02 miod Exp $ */
-/* $NetBSD: pmap.c,v 1.68 1999/06/19 19:44:09 is Exp $ */
-
-/*-
- * Copyright (c) 1999 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Jason R. Thorpe.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the NetBSD
- * Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 1991 Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)pmap.c 7.5 (Berkeley) 5/10/91
- */
-
-/*
- * AMIGA physical map management code, the bootstrap part.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-#include <sys/malloc.h>
-#include <sys/msgbuf.h>
-#include <sys/user.h>
-#include <uvm/uvm.h>
-#include <machine/cpu.h>
-#include <machine/vmparam.h>
-#include <amiga/amiga/memlist.h>
-
-extern paddr_t msgbufpa; /* physical address of the msgbuf */
-extern paddr_t z2mem_start;
-
-u_long noncontig_enable;
-caddr_t CADDR1, CADDR2, vmmap;
-u_int *Sysseg_pa;
-
-extern vm_offset_t virtual_avail, virtual_end;
-extern st_entry_t *Sysseg;
-#if defined(M68040) || defined(M68060)
-extern int protostfree;
-#endif
-
-extern vaddr_t reserve_dumppages __P((vaddr_t));
-
-/*
- * Bootstrap the system enough to run with virtual memory.
- * Map the kernel's code and data, and allocate the system page table.
- *
- * On the HP this is called after mapping has already been enabled
- * and just syncs the pmap module with what has already been done.
- * [We can't call it easily with mapping off since the kernel is not
- * mapped with PA == VA, hence we would have to relocate every address
- * from the linked base (virtual) address 0 to the actual (physical)
- * address of 0xFFxxxxxx.]
- */
-void
-pmap_bootstrap(firstaddr, loadaddr)
- paddr_t firstaddr;
- paddr_t loadaddr;
-{
- vaddr_t va;
- int i;
- struct boot_memseg *sp, *esp;
- paddr_t fromads, toads;
-
- fromads = firstaddr;
- toads = maxmem << PGSHIFT;
-
- uvmexp.pagesize = NBPG;
- uvm_setpagesize();
-
- /* XXX: allow for msgbuf */
- toads -= m68k_round_page(MSGBUFSIZE);
- msgbufpa = toads;
-
- /*
- * first segment of memory is always the one loadbsd found
- * for loading the kernel into.
- */
- uvm_page_physload(atop(fromads), atop(toads),
- atop(fromads), atop(toads), VM_FREELIST_DEFAULT);
-
- sp = memlist->m_seg;
- esp = sp + memlist->m_nseg;
- i = 1;
- for (; noncontig_enable && sp < esp; sp++) {
- if ((sp->ms_attrib & MEMF_FAST) == 0)
- continue; /* skip if not FastMem */
- if (firstaddr >= sp->ms_start &&
- firstaddr < sp->ms_start + sp->ms_size)
- continue; /* skip kernel segment */
- if (sp->ms_size == 0)
- continue; /* skip zero size segments */
- fromads = sp->ms_start;
- toads = sp->ms_start + sp->ms_size;
-#ifdef DEBUG_A4000
- /*
- * My A4000 doesn't seem to like Zorro II memory - this
- * hack is to skip the motherboard memory and use the
- * Zorro II memory. Only for trying to debug the problem.
- * Michael L. Hitch
- */
- if (toads == 0x08000000)
- continue; /* skip A4000 motherboard mem */
-#endif
- /*
- * Deal with Zorro II memory stolen for DMA bounce buffers.
- * This needs to be handled better.
- *
- * XXX is: disabled. This is handled now in amiga_init.c
- * by removing the stolen memory from the memlist.
- *
- * XXX is: enabled again, but check real size and position.
- * We check z2mem_start is in this segment, and set its end
- * to the z2mem_start.
- *
- */
- if ((fromads <= z2mem_start) && (toads > z2mem_start))
- toads = z2mem_start;
-
- uvm_page_physload(atop(fromads), atop(toads),
- atop(fromads), atop(toads), (fromads & 0xff000000) ?
- VM_FREELIST_DEFAULT : VM_FREELIST_ZORROII);
- physmem += (toads - fromads) / NBPG;
- ++i;
- if (noncontig_enable == 1)
- break; /* Only two segments enabled */
- }
-
- mem_size = physmem << PGSHIFT;
- virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
- virtual_end = VM_MAX_KERNEL_ADDRESS;
-
- /*
- * Kernel page/segment table allocated in locore,
- * just initialize pointers.
- */
- pmap_kernel()->pm_stab = Sysseg;
- pmap_kernel()->pm_ptab = Sysmap;
- simple_lock_init(&pmap_kernel()->pm_lock);
- pmap_kernel()->pm_count = 1;
- pmap_kernel()->pm_stpa = Sysseg_pa;
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
- pmap_kernel()->pm_stfree = protostfree;
- }
-#endif
-
- /*
- * Allocate all the submaps we need
- */
- va = virtual_avail;
-
- vmmap = (caddr_t)va;
- va += NBPG;
-
- msgbufp = (struct msgbuf *)va;
- va += btoc(MSGBUFSIZE);
-
- DCIS();
- virtual_avail = reserve_dumppages(va);
-}
-
-void
-pmap_init_md()
-{
- extern vaddr_t amigahwaddr;
- extern u_int namigahwpg;
- vaddr_t addr;
-
- /*
- * Now that kernel map has been allocated, we can mark as
- * unavailable regions which we have mapped in locore.
- * XXX in pmap_boostrap() ???
- */
- addr = (vaddr_t) amigahwaddr;
- if (uvm_map(kernel_map, &addr,
- ptoa(namigahwpg),
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)))
- panic("pmap_init: bogons in the VM system!\n");
-}
diff --git a/sys/arch/amiga/conf/files.amiga b/sys/arch/amiga/conf/files.amiga
index debb4f9e287..e99963f82e4 100644
--- a/sys/arch/amiga/conf/files.amiga
+++ b/sys/arch/amiga/conf/files.amiga
@@ -1,4 +1,4 @@
-# $OpenBSD: files.amiga,v 1.46 2001/11/30 23:19:27 miod Exp $
+# $OpenBSD: files.amiga,v 1.47 2001/12/20 19:02:26 miod Exp $
# $NetBSD: files.amiga,v 1.62 1997/08/27 19:32:47 is Exp $
@@ -359,8 +359,7 @@ file arch/amiga/amiga/disksubr.c
file arch/amiga/amiga/dkbad.c
file arch/amiga/amiga/machdep.c
file arch/amiga/amiga/mem.c
-file arch/m68k/m68k/pmap_motorola.c
-file arch/amiga/amiga/pmap_bootstrap.c
+file arch/amiga/amiga/pmap.c
file arch/amiga/amiga/sys_machdep.c
file arch/amiga/amiga/trap.c
file arch/amiga/amiga/vm_machdep.c
diff --git a/sys/arch/amiga/include/cpu.h b/sys/arch/amiga/include/cpu.h
index 1e711202f62..518399a7f95 100644
--- a/sys/arch/amiga/include/cpu.h
+++ b/sys/arch/amiga/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.16 2001/12/07 00:59:16 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.17 2001/12/20 19:02:26 miod Exp $ */
/* $NetBSD: cpu.h,v 1.36 1996/09/11 00:11:42 thorpej Exp $ */
/*
@@ -184,10 +184,12 @@ struct fpframe;
struct user;
struct pcb;
+void clearseg __P((vm_offset_t));
void doboot __P((void)) __attribute__((__noreturn__));
void loadustp __P((int));
void m68881_save __P((struct fpframe *));
void m68881_restore __P((struct fpframe *));
+void physcopyseg __P((vm_offset_t, vm_offset_t));
u_int probeva __P((u_int, u_int));
void proc_trampoline __P((void));
void savectx __P((struct pcb *));
diff --git a/sys/arch/amiga/include/param.h b/sys/arch/amiga/include/param.h
index c7bb979d23a..656303d5957 100644
--- a/sys/arch/amiga/include/param.h
+++ b/sys/arch/amiga/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.18 2001/12/05 01:57:14 provos Exp $ */
+/* $OpenBSD: param.h,v 1.19 2001/12/20 19:02:26 miod Exp $ */
/* $NetBSD: param.h,v 1.35 1997/07/10 08:22:36 veego Exp $ */
/*
@@ -60,6 +60,15 @@
#define KERNBASE 0x00000000 /* start of kernel virtual */
+#define SEGSHIFT 24 /* LOG2(NBSEG) [68030 value] */
+/* bytes/segment */
+/* (256 * (1 << PGSHIFT)) == (1 << SEGSHIFT) */
+#define NBSEG ((mmutype == MMU_68040) \
+ ? (32 * (1 << PGSHIFT)) : (256 * (1 << PGSHIFT)))
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+
+#define UPAGES 2 /* pages of u-area */
+
#include <m68k/param.h>
#define NPTEPG (NBPG/(sizeof (pt_entry_t)))
diff --git a/sys/arch/amiga/include/pmap.h b/sys/arch/amiga/include/pmap.h
index 7e40c742b75..a4fa87f48f6 100644
--- a/sys/arch/amiga/include/pmap.h
+++ b/sys/arch/amiga/include/pmap.h
@@ -1,13 +1,142 @@
-/* $OpenBSD: pmap.h,v 1.10 2001/11/30 23:20:09 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.11 2001/12/20 19:02:26 miod Exp $ */
+/* $NetBSD: pmap.h,v 1.17 1997/06/10 18:34:52 veego Exp $ */
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.h 7.6 (Berkeley) 5/10/91
+ */
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
-#include <m68k/pmap_motorola.h>
+/*
+ * Pmap stuff
+ */
+struct pmap {
+ pt_entry_t *pm_ptab; /* KVA of page table */
+ st_entry_t *pm_stab; /* KVA of segment table */
+ int pm_stfree; /* 040: free lev2 blocks */
+ u_int *pm_stpa; /* 040: ST phys addr */
+ short pm_sref; /* segment table ref count */
+ short pm_count; /* pmap reference count */
+ long pm_ptpages; /* more stats: PT pages */
+ struct simplelock pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+};
+
+typedef struct pmap *pmap_t;
+
+/*
+ * On the 040 we keep track of which level 2 blocks are already in use
+ * with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
+ * (block 31). For convenience, the level 1 table is considered to be
+ * block 0.
+ *
+ * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
+ * for the kernel and users. 16 implies only the initial "segment table"
+ * page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
+ * physically contiguous pages for the ST in pmap.c!
+ */
+#define MAXKL2SIZE 32
+#define MAXUL2SIZE 16
+#define l2tobm(n) (1 << (n))
+#define bmtol2(n) (ffs(n) - 1)
+
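A standalone sketch of how pmap_enter_ptpage() uses these two macros against
pm_stfree: bmtol2() finds the lowest free level 2 block (ffs() comes from
<strings.h>), and l2tobm() marks it busy. Block 0 is reserved for the level 1
table, as described above.

	#include <stdio.h>
	#include <strings.h>

	#define l2tobm(n)	(1 << (n))
	#define bmtol2(n)	(ffs(n) - 1)

	int
	main(void)
	{
		int stfree = ~l2tobm(0);	/* block 0 is the level 1 table */
		int ix;

		ix = bmtol2(stfree);		/* lowest free level 2 block */
		if (ix == -1)
			return (1);		/* out of address space */
		stfree &= ~l2tobm(ix);		/* mark it allocated */
		printf("allocated block %d, map now %#x\n",
		    ix, (unsigned int)stfree);
		return (0);
	}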
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmap, loadhw) \
+{ \
+ if ((loadhw)) \
+ loadustp(m68k_btop((pmap)->pm_stpa)); \
+}
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ u_int *pv_ptste; /* non-zero if VA maps a PT page */
+ struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
+ int pv_flags; /* flags */
+} *pv_entry_t;
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
+struct pv_page;
+
+struct pv_page_info {
+ TAILQ_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+
+/*
+ * This is basically:
+ * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+ */
+#define NPVPPG 340
+
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
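The 340 above can be checked from the comment's formula, assuming the 32-bit
m68k ABI (4-byte pointers and ints) and the amiga 8K page size; the *_x
mirror structs below are illustrative stand-ins, not the real declarations:

	#include <stdio.h>

	#define NBPG	8192			/* amiga: PGSHIFT == 13 */

	struct pv_entry_x {			/* mirrors struct pv_entry */
		void		*pv_next, *pv_pmap;
		unsigned long	pv_va;
		void		*pv_ptste, *pv_ptpmap;
		int		pv_flags;
	};

	struct pv_page_info_x {			/* mirrors struct pv_page_info */
		void		*tqe_next, **tqe_prev;	/* TAILQ_ENTRY expansion */
		void		*pgi_freelist;
		int		pgi_nfree;
	};

	int
	main(void)
	{
		/* prints 340 on an ILP32 target, matching NPVPPG */
		printf("%zu\n", (NBPG - sizeof(struct pv_page_info_x)) /
		    sizeof(struct pv_entry_x));
		return (0);
	}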
#ifdef _KERNEL
-void pmap_init_md __P((void));
-#define PMAP_INIT_MD() pmap_init_md()
-#endif
+pv_entry_t pv_table; /* array of entries, one per page */
+u_int *Sysmap;
+char *vmmap; /* map for mem, dumps, etc. */
+struct pmap kernel_pmap_store;
+
+#define pmap_kernel() (&kernel_pmap_store)
+#define active_pmap(pm) \
+ ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
+#define active_user_pmap(pm) \
+ (curproc && \
+ (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+#define pmap_update(pmap) /* nothing */
+
+#endif /* _KERNEL */
-#endif /* _MACHINE_PMAP_H_ */
+#endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/arch/amiga/include/pte.h b/sys/arch/amiga/include/pte.h
index 9a6d5d146a8..a5acfc91685 100644
--- a/sys/arch/amiga/include/pte.h
+++ b/sys/arch/amiga/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.4 2001/12/06 22:33:18 miod Exp $ */
+/* $OpenBSD: pte.h,v 1.5 2001/12/20 19:02:26 miod Exp $ */
/* $NetBSD: pte.h,v 1.14 1995/09/29 13:52:09 chopps Exp $ */
/*
@@ -45,12 +45,72 @@
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
-#include <m68k/pte_motorola.h>
+/*
+ * AMIGA hardware segment/page table entries
+ */
+
+struct pte {
+ u_int pte;
+};
+typedef u_int pt_entry_t; /* Mach page table entry */
+
+struct ste {
+ u_int ste;
+};
+typedef u_int st_entry_t; /* segment table entry */
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+#define ST_ENTRY_NULL ((st_entry_t *) 0)
+
+#define SG_V 0x00000002 /* segment is valid */
+#define SG_NV 0x00000000
+#define SG_PROT 0x00000004 /* access protection mask */
+#define SG_RO 0x00000004
+#define SG_RW 0x00000000
+#define SG_U 0x00000008 /* modified bit (68040) */
+#define SG_FRAME 0xffffe000
+#define SG_IMASK 0xff000000
+#define SG_ISHIFT 24
+#define SG_PMASK 0x00ffe000
+#define SG_PSHIFT 13
+
+/* 68040 additions */
+#define SG4_MASK1 0xfe000000 /* pointer table 1 index mask */
+#define SG4_SHIFT1 25
+#define SG4_MASK2 0x01fc0000 /* pointer table 2 index mask */
+#define SG4_SHIFT2 18
+#define SG4_MASK3 0x0003e000 /* page table index mask */
+#define SG4_SHIFT3 13
+#define SG4_ADDR1 0xfffffe00 /* pointer table address mask */
+#define SG4_ADDR2 0xffffff80 /* page table address mask */
+#define SG4_LEV1SIZE 128 /* entries in pointer table 1 */
+#define SG4_LEV2SIZE 128 /* entries in pointer table 2 */
+#define SG4_LEV3SIZE 32 /* entries in page table */
+
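The three mask/shift pairs above carve a 68040 virtual address into 7 + 7 + 5
index bits (hence the 128/128/32 table sizes) plus a 13-bit page offset. A
small decoder built from just these constants, illustrative only:

	#include <stdio.h>

	#define SG4_MASK1	0xfe000000
	#define SG4_SHIFT1	25
	#define SG4_MASK2	0x01fc0000
	#define SG4_SHIFT2	18
	#define SG4_MASK3	0x0003e000
	#define SG4_SHIFT3	13

	int
	main(void)
	{
		unsigned int va = 0x12345678;

		printf("lev1 %u, lev2 %u, lev3 %u, offset %#x\n",
		    (va & SG4_MASK1) >> SG4_SHIFT1,
		    (va & SG4_MASK2) >> SG4_SHIFT2,
		    (va & SG4_MASK3) >> SG4_SHIFT3,
		    va & 0x1fff);		/* PGSHIFT == 13 */
		return (0);
	}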
+#define PG_V 0x00000001
+#define PG_NV 0x00000000
+#define PG_PROT 0x00000004
+#define PG_U 0x00000008
+#define PG_M 0x00000010
+#define PG_W 0x00000100
+#define PG_RO 0x00000004
+#define PG_RW 0x00000000
+#define PG_FRAME 0xffffe000
+#define PG_CI 0x00000040
+#define PG_SHIFT 13
+#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
+
+/* 68040 additions */
+#define PG_CMASK 0x00000060 /* cache mode mask */
+#define PG_CWT 0x00000000 /* writethrough caching */
+#define PG_CCB 0x00000020 /* copyback caching */
+#define PG_CIS 0x00000040 /* cache inhibited serialized */
+#define PG_CIN 0x00000060 /* cache inhibited nonserialized */
+#define PG_SO 0x00000080 /* supervisor only */
#define AMIGA_040RTSIZE 512 /* root (level 1) table size */
#define AMIGA_040STSIZE 512 /* segment (level 2) table size */
#define AMIGA_040PTSIZE 128 /* page (level 3) table size */
-
#if 0
#define AMIGA_STSIZE 1024 /* segment table size */
#else
@@ -65,10 +125,9 @@
* AMIGA_MAX_KPTSIZE the most number of bytes for kpt pages
* AMIGA_MAX_PTSIZE the number of bytes to map everything
*/
-extern vaddr_t amiga_uptbase;
#define AMIGA_MAX_COREUPT 1024
#define AMIGA_UPTSIZE roundup(VM_MAXUSER_ADDRESS / NPTEPG, NBPG)
-#define AMIGA_UPTBASE amiga_uptbase
+#define AMIGA_UPTBASE 0x10000000
#define AMIGA_UPTMAXSIZE \
roundup((AMIGA_MAX_COREUPT * AMIGA_UPTSIZE), NBPG)
#define AMIGA_MAX_KPTSIZE \
@@ -77,10 +136,15 @@ extern vaddr_t amiga_uptbase;
roundup((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NPTEPG, NBPG)
#define AMIGA_MAX_PTSIZE roundup(0xffffffff / NPTEPG, NBPG)
-#define MACHINE_STSIZE AMIGA_STSIZE
-#define MACHINE_MAX_PTSIZE AMIGA_MAX_PTSIZE
-#define MACHINE_MAX_KPTSIZE AMIGA_MAX_KPTSIZE
-#define MACHINE_PTBASE AMIGA_UPTBASE
-#define MACHINE_PTMAXSIZE AMIGA_UPTMAXSIZE
+/*
+ * Kernel virtual address to page table entry and to physical address.
+ */
+#define kvtopte(va) \
+ (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
+#define ptetokv(pt) \
+ ((((u_int *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
+#define kvtophys(va) \
+ ((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
+
#endif /* !_MACHINE_PTE_H_ */
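An illustration of the kvtopte() arithmetic above, under the assumption that
VM_MIN_KERNEL_ADDRESS is 0 on the amiga (KERNBASE is 0) and PGSHIFT is 13: a
kernel VA becomes a Sysmap index by shifting out the page offset.

	#include <stdio.h>

	#define PGSHIFT			13	/* amiga 8K pages */
	#define VM_MIN_KERNEL_ADDRESS	0	/* assumed, matching KERNBASE */

	int
	main(void)
	{
		unsigned int va = 0x00246000;

		/* the index kvtopte() uses: &Sysmap[(va - min) >> PGSHIFT] */
		printf("Sysmap index %u\n",
		    (va - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT);
		return (0);
	}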
diff --git a/sys/arch/hp300/conf/files.hp300 b/sys/arch/hp300/conf/files.hp300
index 135a4fe1ef6..89dc04b6758 100644
--- a/sys/arch/hp300/conf/files.hp300
+++ b/sys/arch/hp300/conf/files.hp300
@@ -1,4 +1,4 @@
-# $OpenBSD: files.hp300,v 1.17 2001/11/30 20:58:14 miod Exp $
+# $OpenBSD: files.hp300,v 1.18 2001/12/20 19:02:27 miod Exp $
# $NetBSD: files.hp300,v 1.28 1997/05/12 08:23:28 thorpej Exp $
#
# hp300-specific configuration info
@@ -187,7 +187,7 @@ file arch/hp300/hp300/machdep.c
file arch/hp300/hp300/intr.c
file arch/hp300/hp300/leds.c useleds
file arch/hp300/hp300/mem.c
-file arch/m68k/m68k/pmap_motorola.c
+file arch/hp300/hp300/pmap.c
file arch/hp300/hp300/pmap_bootstrap.c
file arch/hp300/hp300/sys_machdep.c
file arch/hp300/hp300/trap.c
diff --git a/sys/arch/m68k/m68k/pmap_motorola.c b/sys/arch/hp300/hp300/pmap.c
index 06058c3d402..0fde6c99c88 100644
--- a/sys/arch/m68k/m68k/pmap_motorola.c
+++ b/sys/arch/hp300/hp300/pmap.c
@@ -1,6 +1,7 @@
-/* $OpenBSD: pmap_motorola.c,v 1.13 2001/12/16 23:06:04 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.38 2001/12/20 19:02:27 miod Exp $ */
+/* $NetBSD: pmap.c,v 1.80 1999/09/16 14:52:06 chs Exp $ */
-/*
+/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* All rights reserved.
*
@@ -37,37 +38,6 @@
*/
/*
- * Copyright (c) 1995 Theo de Raadt
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed under OpenBSD by
- * Theo de Raadt for Willowglen Singapore.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -107,14 +77,13 @@
*/
/*
- * m68k series physical map management code.
+ * HP9000/300 series physical map management code.
*
* Supports:
- * 68020 with HP MMU
- * 68020 with 68851 MMU
- * 68030 with on-chip MMU
- * 68040 with on-chip MMU
- * 68060 with on-chip MMU
+ * 68020 with HP MMU models 320, 350
+ * 68020 with 68851 MMU models 318, 319, 330
+ * 68030 with on-chip MMU models 340, 360, 370, 345, 375, 400
+ * 68040 with on-chip MMU models 380, 385, 425, 433
*
* Notes:
* Don't even pay lip service to multiprocessor support.
@@ -171,7 +140,6 @@
#include <machine/pte.h>
-/* #define UVM_PAGE_INLINE */
#include <uvm/uvm.h>
#include <machine/cpu.h>
@@ -192,25 +160,24 @@
#define PDB_PARANOIA 0x2000
#define PDB_WIRING 0x4000
#define PDB_PVDUMP 0x8000
-#define PDB_ALL 0xFFFF
int debugmap = 0;
int pmapdebug = PDB_PARANOIA;
#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
int dowriteback = 1; /* 68040: enable writeback caching */
int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
#endif
-#else
+#else /* ! DEBUG */
#define PMAP_DPRINTF(l, x) /* nothing */
-#endif /* DEBUG */
+#endif /* DEBUG */
/*
* Get STEs and PTEs for user/kernel address space
*/
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
#define pmap_ste1(m, v) \
(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
@@ -219,9 +186,9 @@ int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
#define pmap_ste(m, v) \
(&((m)->pm_stab[(vaddr_t)(v) \
- >> (mmutype <= MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
+ >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
#define pmap_ste_v(m, v) \
- (mmutype <= MMU_68040 \
+ (mmutype == MMU_68040 \
? ((*pmap_ste1(m, v) & SG_V) && \
(*pmap_ste2(m, v) & SG_V)) \
: (*pmap_ste(m, v) & SG_V))
@@ -248,9 +215,10 @@ int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
/*
* Given a map and a machine independent protection code,
- * convert to an m68k protection code.
+ * convert to an hp300 protection code.
*/
-#define pte_prot(p) ((p) & VM_PROT_WRITE ? PG_RW : PG_RO)
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
/*
* Kernel page table page management.
@@ -277,16 +245,11 @@ pt_entry_t *Sysmap, *Sysptmap;
st_entry_t *Segtabzero, *Segtabzeropa;
vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
-extern caddr_t CADDR1, CADDR2;
-
-pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
-pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
-
struct pmap kernel_pmap_store;
struct vm_map *st_map, *pt_map;
struct vm_map st_map_store, pt_map_store;
-paddr_t avail_start; /* PA of first available physical page */
+paddr_t avail_start; /* PA of first available physical page */
paddr_t avail_end; /* PA of last available physical page */
vsize_t mem_size; /* memory size in bytes */
vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
@@ -299,47 +262,29 @@ char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
-#if defined(M68K_MMU_HP)
-int pmap_aliasmask; /* separation at which VA aliasing is ok */
+#ifdef M68K_MMU_HP
+int pmap_aliasmask; /* separation at which VA aliasing ok */
#endif
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
int protostfree; /* prototype (default) free ST map */
#endif
-struct pool pmap_pmap_pool; /* memory pool for pmap structures */
-
-/*
- * Internal routines
- */
-struct pv_entry *pmap_alloc_pv __P((void));
-void pmap_free_pv __P((struct pv_entry *));
-void pmap_collect_pv __P((void));
-#ifdef COMPAT_HPUX
-int pmap_mapmulti __P((pmap_t, vaddr_t));
-#endif
-void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
-boolean_t pmap_testbit __P((paddr_t, int));
-void pmap_changebit __P((paddr_t, int, int));
-void pmap_enter_ptpage __P((pmap_t, vaddr_t));
-void pmap_ptpage_addref __P((vaddr_t));
-int pmap_ptpage_delref __P((vaddr_t));
-void pmap_collect1 __P((pmap_t, paddr_t, paddr_t));
-void pmap_pinit __P((pmap_t));
-void pmap_release __P((pmap_t));
+extern caddr_t CADDR1, CADDR2;
+pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
+pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
-#ifdef DEBUG
-void pmap_pvdump __P((paddr_t));
-void pmap_check_wiring __P((char *, vaddr_t));
-#endif
+struct pool pmap_pmap_pool; /* memory pool for pmap structures */
-/* pmap_remove_mapping flags */
-#define PRM_TFLUSH 1
-#define PRM_CFLUSH 2
-#define PRM_KEEPPTPAGE 4
+struct pv_entry *pmap_alloc_pv __P((void));
+void pmap_free_pv __P((struct pv_entry *));
+void pmap_collect_pv __P((void));
+#ifdef COMPAT_HPUX
+int pmap_mapmulti __P((pmap_t, vaddr_t));
+#endif /* COMPAT_HPUX */
-#define PAGE_IS_MANAGED(pa) \
- (pmap_initialized && IS_VM_PHYSADDR(pa))
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
#define pa_to_pvh(pa) \
({ \
@@ -358,6 +303,29 @@ void pmap_check_wiring __P((char *, vaddr_t));
})
/*
+ * Internal routines
+ */
+void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((paddr_t, int));
+void pmap_changebit __P((paddr_t, int, int));
+void pmap_enter_ptpage __P((pmap_t, vaddr_t));
+void pmap_ptpage_addref __P((vaddr_t));
+int pmap_ptpage_delref __P((vaddr_t));
+void pmap_collect1 __P((pmap_t, paddr_t, paddr_t));
+void pmap_pinit __P((pmap_t));
+void pmap_release __P((pmap_t));
+
+#ifdef DEBUG
+void pmap_pvdump __P((paddr_t));
+void pmap_check_wiring __P((char *, vaddr_t));
+#endif
+
+/* pmap_remove_mapping flags */
+#define PRM_TFLUSH 0x01
+#define PRM_CFLUSH 0x02
+#define PRM_KEEPPTPAGE 0x04
+
+/*
* pmap_virtual_space: [ INTERFACE ]
*
* Report the range of available kernel virtual address
@@ -409,9 +377,16 @@ pmap_init()
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in pmap_bootstrap().
*/
- PMAP_INIT_MD();
+ addr = (vaddr_t) intiobase;
+ if (uvm_map(kernel_map, &addr,
+ m68k_ptob(IIOMAPSIZE+EIOMAPSIZE),
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)))
+ goto bogons;
addr = (vaddr_t) Sysmap;
- if (uvm_map(kernel_map, &addr, MACHINE_MAX_PTSIZE,
+ if (uvm_map(kernel_map, &addr, HP_MAX_PTSIZE,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
@@ -421,6 +396,7 @@ pmap_init()
* portion of the kernel page table isn't big enough
* and we overran the page table map.
*/
+ bogons:
panic("pmap_init: bogons in the VM system!\n");
}
@@ -437,7 +413,7 @@ pmap_init()
*/
for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
- s = MACHINE_STSIZE; /* Segtabzero */
+ s = HP_STSIZE; /* Segtabzero */
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
@@ -446,21 +422,8 @@ pmap_init()
panic("pmap_init: can't allocate data structures");
Segtabzero = (st_entry_t *) addr;
- pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
-#ifdef M68060
- if (mmutype == MMU_68060) {
- for (addr2 = addr; addr2 < addr + MACHINE_STSIZE;
- addr2 += PAGE_SIZE) {
- pt_entry_t *pte;
-
- pte = pmap_pte(pmap_kernel(), addr2);
- *pte = (*pte | PG_CI) & ~PG_CCB;
- TBIS(addr2);
- }
- DCIS();
- }
-#endif
- addr += MACHINE_STSIZE;
+ pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+ addr += HP_STSIZE;
pv_table = (struct pv_entry *) addr;
addr += page_cnt * sizeof(struct pv_entry);
@@ -490,7 +453,7 @@ pmap_init()
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
*/
- npages = min(atop(MACHINE_MAX_KPTSIZE), maxproc+16);
+ npages = min(atop(HP_MAX_KPTSIZE), maxproc+16);
s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
/*
@@ -517,25 +480,12 @@ pmap_init()
kpt_pages = &((struct kpt_page *)addr2)[npages];
kpt_free_list = NULL;
do {
- addr2 -= PAGE_SIZE;
+ addr2 -= NBPG;
(--kpt_pages)->kpt_next = kpt_free_list;
kpt_free_list = kpt_pages;
kpt_pages->kpt_va = addr2;
pmap_extract(pmap_kernel(), addr2, &kpt_pages->kpt_pa);
-#ifdef M68060
- if (mmutype == MMU_68060) {
- pt_entry_t *pte;
-
- pte = pmap_pte(pmap_kernel(), addr2);
- *pte = (*pte | PG_CI) & ~PG_CCB;
- TBIS(addr2);
- }
-#endif
} while (addr != addr2);
-#ifdef M68060
- if (mmutype == MMU_68060)
- DCIS();
-#endif
PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
atop(s), addr, addr + s));
@@ -543,27 +493,27 @@ pmap_init()
/*
* Allocate the segment table map and the page table map.
*/
- s = maxproc * MACHINE_STSIZE;
+ s = maxproc * HP_STSIZE;
st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
&st_map_store);
- addr = MACHINE_PTBASE;
- if ((MACHINE_PTMAXSIZE / MACHINE_MAX_PTSIZE) < maxproc) {
- s = MACHINE_PTMAXSIZE;
+ addr = HP_PTBASE;
+ if ((HP_PTMAXSIZE / HP_MAX_PTSIZE) < maxproc) {
+ s = HP_PTMAXSIZE;
/*
* XXX We don't want to hang when we run out of
* page tables, so we lower maxproc so that fork()
* will fail instead. Note that root could still raise
* this value via sysctl(3).
*/
- maxproc = (MACHINE_PTMAXSIZE / MACHINE_MAX_PTSIZE);
+ maxproc = (HP_PTMAXSIZE / HP_MAX_PTSIZE);
} else
- s = (maxproc * MACHINE_MAX_PTSIZE);
- pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
+ s = (maxproc * HP_MAX_PTSIZE);
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
TRUE, &pt_map_store);
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
protostfree = ~l2tobm(0);
for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
protostfree &= ~l2tobm(rv);
@@ -595,7 +545,7 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
- pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, PAGE_SIZE);
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: uvm_km_zalloc() failed");
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
@@ -644,7 +594,7 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE);
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
break;
}
}
@@ -709,7 +659,7 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
- uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE);
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
}
}
@@ -736,8 +686,8 @@ pmap_map(va, spa, epa, prot)
while (spa < epa) {
pmap_enter(pmap_kernel(), va, spa, prot, 0);
- va += PAGE_SIZE;
- spa += PAGE_SIZE;
+ va += NBPG;
+ spa += NBPG;
}
pmap_update(pmap_kernel());
return (va);
@@ -787,8 +737,8 @@ pmap_pinit(pmap)
*/
pmap->pm_stab = Segtabzero;
pmap->pm_stpa = Segtabzeropa;
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040)
+#if defined(M68040)
+ if (mmutype == MMU_68040)
pmap->pm_stfree = protostfree;
#endif
pmap->pm_count = 1;
@@ -840,12 +790,11 @@ pmap_release(pmap)
if (pmap->pm_ptab) {
pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
- (vaddr_t)pmap->pm_ptab + MACHINE_MAX_PTSIZE);
- pmap_update(pmap_kernel());
+ (vaddr_t)pmap->pm_ptab + HP_MAX_PTSIZE);
uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
- (vaddr_t)pmap->pm_ptab + MACHINE_MAX_PTSIZE);
+ (vaddr_t)pmap->pm_ptab + HP_MAX_PTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
- MACHINE_MAX_PTSIZE);
+ HP_MAX_PTSIZE);
}
KASSERT(pmap->pm_stab == Segtabzero);
}
@@ -930,7 +879,7 @@ pmap_remove(pmap, sva, eva)
needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
while (sva < eva) {
- nssva = m68k_trunc_seg(sva) + NBSEG;
+ nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
if (nssva == 0 || nssva > eva)
nssva = eva;
@@ -975,7 +924,7 @@ pmap_remove(pmap, sva, eva)
firstpage = FALSE;
}
pte++;
- sva += PAGE_SIZE;
+ sva += NBPG;
}
}
/*
@@ -1054,9 +1003,11 @@ pmap_page_protect(pg, prot)
pte, PRM_TFLUSH|PRM_CFLUSH);
else {
pv = pv->pv_next;
- PMAP_DPRINTF(PDB_PARANOIA,
- ("%s wired mapping for %lx not removed\n",
- "pmap_page_protect:", pa));
+#ifdef DEBUG
+ if (pmapdebug & PDB_PARANOIA)
+ printf("%s wired mapping for %lx not removed\n",
+ "pmap_page_protect:", pa);
+#endif
if (pv == NULL)
break;
}
@@ -1090,11 +1041,11 @@ pmap_protect(pmap, sva, eva, prot)
return;
}
- isro = pte_prot(prot);
+ isro = pte_prot(pmap, prot);
needtflush = active_pmap(pmap);
firstpage = TRUE;
while (sva < eva) {
- nssva = m68k_trunc_seg(sva) + NBSEG;
+ nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
if (nssva == 0 || nssva > eva)
nssva = eva;
/*
@@ -1124,12 +1075,12 @@ pmap_protect(pmap, sva, eva, prot)
if (firstpage && pmap_aliasmask)
DCIS();
#endif
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
/*
* Clear caches if making RO (see section
* "7.3 Cache Coherency" in the manual).
*/
- if (isro && mmutype <= MMU_68040) {
+ if (isro && mmutype == MMU_68040) {
paddr_t pa = pmap_pte_pa(pte);
DCFP(pa);
@@ -1142,7 +1093,7 @@ pmap_protect(pmap, sva, eva, prot)
firstpage = FALSE;
}
pte++;
- sva += PAGE_SIZE;
+ sva += NBPG;
}
}
}
@@ -1194,7 +1145,7 @@ pmap_enter(pmap, va, pa, prot, flags)
*/
if (pmap->pm_ptab == NULL)
pmap->pm_ptab = (pt_entry_t *)
- uvm_km_valloc_wait(pt_map, MACHINE_MAX_PTSIZE);
+ uvm_km_valloc_wait(pt_map, HP_MAX_PTSIZE);
/*
* Segment table entry not valid, we need a new PT page
@@ -1360,7 +1311,7 @@ pmap_enter(pmap, va, pa, prot, flags)
}
/*
* Assumption: if it is not part of our managed memory
- * then it must be device memory which may be volatile.
+ * then it must be device memory which may be volatile.
*/
else if (pmap_initialized) {
checkpv = cacheable = FALSE;
@@ -1385,13 +1336,13 @@ validate:
/*
* Build the new PTE.
*/
- npte = pa | pte_prot(prot) | (*pte & (PG_M|PG_U)) | PG_V;
+ npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
if (wired)
npte |= PG_W;
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
/* Don't cache if process can't take it, like SunOS ones. */
- if (mmutype <= MMU_68040 && pmap != pmap_kernel() &&
+ if (mmutype == MMU_68040 && pmap != pmap_kernel() &&
(curproc->p_md.md_flags & MDP_UNCACHE_WX) &&
(prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE))
checkpv = cacheable = FALSE;
@@ -1399,8 +1350,8 @@ validate:
if (!checkpv && !cacheable)
npte |= PG_CI;
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
+#if defined(M68040)
+ if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
#ifdef DEBUG
if (dowriteback && (dokwriteback || pmap != pmap_kernel()))
#endif
@@ -1414,8 +1365,8 @@ validate:
* If so, we need not flush the TLB and caches.
*/
wired = ((*pte ^ npte) == PG_W);
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040 && !wired) {
+#if defined(M68040)
+ if (mmutype == MMU_68040 && !wired) {
DCFP(pa);
ICPP(pa);
}
@@ -1472,7 +1423,7 @@ pmap_kenter_pa(va, pa, prot)
splx(s);
}
- pa = trunc_page(pa);
+ pa = m68k_trunc_page(pa);
pte = pmap_pte(pmap, va);
PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
@@ -1489,15 +1440,15 @@ pmap_kenter_pa(va, pa, prot)
* Build the new PTE.
*/
- npte = pa | pte_prot(prot) | PG_V | PG_W;
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040 && (npte & (PG_PROT)) == PG_RW)
+ npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
+#if defined(M68040)
+ if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW)
npte |= PG_CCB;
#endif
PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
DCFP(pa);
ICPP(pa);
}
@@ -1523,7 +1474,7 @@ pmap_kremove(va, len)
firstpage = TRUE;
needcflush = FALSE;
while (sva < eva) {
- nssva = m68k_trunc_seg(sva) + NBSEG;
+ nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
if (nssva == 0 || nssva > eva)
nssva = eva;
@@ -1597,7 +1548,7 @@ pmap_kremove(va, len)
firstpage = FALSE;
}
pte++;
- sva += PAGE_SIZE;
+ sva += NBPG;
}
}
@@ -1692,15 +1643,15 @@ pmap_extract(pmap, va, pap)
{
boolean_t rv = FALSE;
paddr_t pa;
- pt_entry_t *pte;
+ u_int pte;
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_extract(%p, %lx) -> ", pmap, va));
if (pmap_ste_v(pmap, va)) {
- pte = pmap_pte(pmap, va);
- if (pmap_pte_v(pte)) {
- pa = pmap_pte_pa(pte) | (va & ~PG_FRAME);
+ pte = *(u_int *)pmap_pte(pmap, va);
+ if (pte) {
+ pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
if (pap != NULL)
*pap = pa;
rv = TRUE;
@@ -1810,7 +1761,7 @@ pmap_collect1(pmap, startpa, endpa)
int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
#endif
- for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
+ for (pa = startpa; pa < endpa; pa += NBPG) {
struct kpt_page *kpt, **pkpt;
/*
@@ -1828,7 +1779,7 @@ pmap_collect1(pmap, startpa, endpa)
continue;
#ifdef DEBUG
if (pv->pv_va < (vaddr_t)Sysmap ||
- pv->pv_va >= (vaddr_t)Sysmap + MACHINE_MAX_PTSIZE)
+ pv->pv_va >= (vaddr_t)Sysmap + HP_MAX_PTSIZE)
printf("collect: kernel PT VA out of range\n");
else
goto ok;
@@ -1836,7 +1787,7 @@ pmap_collect1(pmap, startpa, endpa)
continue;
ok:
#endif
- pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
+ pte = (pt_entry_t *)(pv->pv_va + NBPG);
while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
;
if (pte >= (pt_entry_t *)pv->pv_va)
@@ -1884,11 +1835,11 @@ ok:
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
pmapdebug = opmapdebug;
- if (!(*ste & SG_V))
+ if (*ste != SG_NV)
printf("collect: kernel STE at %p still valid (%x)\n",
ste, *ste);
ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
- if (!(*ste & SG_V))
+ if (*ste != SG_NV)
printf("collect: kernel PTmap at %p still valid (%x)\n",
ste, *ste);
#endif
@@ -1924,7 +1875,7 @@ pmap_zero_page(phys)
#endif
#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+ if (mmutype == MMU_68040) {
/*
* Set copyback caching on the page; this is required
* for cache consistency (since regular mappings are
@@ -1976,7 +1927,7 @@ pmap_copy_page(src, dst)
#endif
#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+ if (mmutype == MMU_68040) {
/*
* Set copyback caching on the pages; this is required
* for cache consistency (since regular mappings are
@@ -2155,7 +2106,7 @@ pmap_mapmulti(pmap, va)
#endif
bste = pmap_ste(pmap, HPMMBASEADDR(va));
ste = pmap_ste(pmap, va);
- if (!(*ste & SG_V) && (*bste & SG_V)) {
+ if (*ste == SG_NV && (*bste & SG_V)) {
*ste = *bste;
TBIAU();
return (0);
@@ -2381,8 +2332,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
("remove: ste was %x@%p pte was %x@%p\n",
*ste, ste, opte, pmap_pte(pmap, va)));
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
while (ste < este)
@@ -2415,17 +2366,16 @@ pmap_remove_mapping(pmap, va, pte, flags)
ptpmap->pm_stab));
pmap_remove(pmap_kernel(),
(vaddr_t)ptpmap->pm_stab,
- (vaddr_t)ptpmap->pm_stab + MACHINE_STSIZE);
- pmap_update(pmap_kernel());
+ (vaddr_t)ptpmap->pm_stab + HP_STSIZE);
uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
ptpmap->pm_stpa));
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab,
- MACHINE_STSIZE);
+ HP_STSIZE);
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040)
+#if defined(M68040)
+ if (mmutype == MMU_68040)
ptpmap->pm_stfree = protostfree;
#endif
@@ -2532,7 +2482,7 @@ pmap_changebit(pa, set, mask)
pt_entry_t *pte, npte;
vaddr_t va;
int s;
-#if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
+#if defined(M68K_MMU_HP) || defined(M68040)
boolean_t firstpage = TRUE;
#endif
@@ -2575,7 +2525,7 @@ pmap_changebit(pa, set, mask)
#endif
npte = (*pte | set) & mask;
if (*pte != npte) {
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
/*
* If we are changing caching status or
* protection make sure the caches are
@@ -2615,9 +2565,6 @@ pmap_enter_ptpage(pmap, va)
struct pv_entry *pv;
st_entry_t *ste;
int s;
-#if defined(M68040) || defined(M68060)
- paddr_t stpa;
-#endif
PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
@@ -2631,29 +2578,15 @@ pmap_enter_ptpage(pmap, va)
*/
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (st_entry_t *)
- uvm_km_zalloc(st_map, MACHINE_STSIZE);
+ uvm_km_zalloc(st_map, HP_STSIZE);
pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
-#ifdef DEBUG
- if (dowriteback && dokwriteback) {
-#endif
- stpa = (paddr_t)pmap->pm_stpa;
-#if defined(M68060)
- if (mmutype == MMU_68060) {
- while (stpa < (paddr_t)pmap->pm_stpa +
- MACHINE_STSIZE) {
- pmap_changebit(stpa, PG_CI, ~PG_CCB);
- stpa += PAGE_SIZE;
- }
- DCIS(); /* XXX */
- } else
-#endif
- pmap_changebit(stpa, 0, ~PG_CCB);
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
#ifdef DEBUG
- }
+ if (dowriteback && dokwriteback)
#endif
+ pmap_changebit((paddr_t)pmap->pm_stpa, 0, ~PG_CCB);
pmap->pm_stfree = protostfree;
}
#endif
@@ -2670,11 +2603,11 @@ pmap_enter_ptpage(pmap, va)
}
ste = pmap_ste(pmap, va);
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
/*
* Allocate level 2 descriptor block if necessary
*/
- if (mmutype <= MMU_68040) {
+ if (mmutype == MMU_68040) {
if (*ste == SG_NV) {
int ix;
caddr_t addr;
@@ -2695,12 +2628,12 @@ pmap_enter_ptpage(pmap, va)
/*
* Since a level 2 descriptor maps a block of SG4_LEV3SIZE
* level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
- * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
+ * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
* PT page--the unit of allocation. We set `ste' to point
* to the first entry of that chunk which is validated in its
* entirety below.
*/
- ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
+ ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
@@ -2725,20 +2658,16 @@ pmap_enter_ptpage(pmap, va)
PMAP_DPRINTF(PDB_COLLECT,
("enter: no KPT pages, collecting...\n"));
pmap_collect(pmap_kernel());
- if ((kpt = kpt_free_list) == NULL)
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0)
panic("pmap_enter_ptpage: can't get KPT page");
}
kpt_free_list = kpt->kpt_next;
kpt->kpt_next = kpt_used_list;
kpt_used_list = kpt;
ptpa = kpt->kpt_pa;
- bzero((caddr_t)kpt->kpt_va, PAGE_SIZE);
+ bzero((caddr_t)kpt->kpt_va, NBPG);
pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
-#if defined(M68060)
- if (mmutype == MMU_68060)
- pmap_changebit(ptpa, PG_CI, ~PG_CCB);
-#endif
pmap_update(pmap);
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
@@ -2775,9 +2704,9 @@ pmap_enter_ptpage(pmap, va)
pmap_enter(pmap_kernel(), va, ptpa,
VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
- pmap_update(pmap_kernel());
+ pmap_update(pmap);
}
-#if defined(M68040) || defined(M68060)
+#if defined(M68040)
/*
* Turn off copyback caching of page table pages,
* could get ugly otherwise.
@@ -2785,7 +2714,7 @@ pmap_enter_ptpage(pmap, va)
#ifdef DEBUG
if (dowriteback && dokwriteback)
#endif
- if (mmutype <= MMU_68040) {
+ if (mmutype == MMU_68040) {
#ifdef DEBUG
pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
@@ -2793,13 +2722,7 @@ pmap_enter_ptpage(pmap, va)
pmap == pmap_kernel() ? "Kernel" : "User",
va, ptpa, pte, *pte);
#endif
-#ifdef M68060
- if (mmutype == MMU_68060) {
- pmap_changebit(ptpa, PG_CI, ~PG_CCB);
- DCIS();
- } else
-#endif
- pmap_changebit(ptpa, 0, ~PG_CCB);
+ pmap_changebit(ptpa, 0, ~PG_CCB);
}
#endif
/*
@@ -2834,8 +2757,8 @@ pmap_enter_ptpage(pmap, va)
* it would be difficult to identify ST pages in pmap_pageable to
* release them. We also avoid the overhead of vm_map_pageable.
*/
-#if defined(M68040) || defined(M68060)
- if (mmutype <= MMU_68040) {
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
st_entry_t *este;
for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
@@ -2850,17 +2773,14 @@ pmap_enter_ptpage(pmap, va)
("enter: stab %p refcnt %d\n",
pmap->pm_stab, pmap->pm_sref));
}
-
-#if defined(M68060)
- if (mmutype == MMU_68060) {
- /*
- * Flush stale TLB info.
- */
- if (pmap == pmap_kernel())
- TBIAS();
- else
- TBIAU();
- }
+#if 0
+ /*
+ * Flush stale TLB info.
+ */
+ if (pmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
#endif
pmap->pm_ptpages++;
splx(s);
@@ -2957,8 +2877,7 @@ pmap_check_wiring(str, va)
}
count = 0;
- for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
- pte++)
+ for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
count++;
if ((pg->wire_count - 1) != count)
@@ -2966,21 +2885,3 @@ pmap_check_wiring(str, va)
str, va, (pg->wire_count - 1), count);
}
#endif /* DEBUG */
-
-/* XXX this should go out soon */
-#ifdef mac68k
-void
-mac68k_set_pte(va, pge)
- vaddr_t va;
- paddr_t pge;
-{
-extern vaddr_t tmp_vpages[];
- register pt_entry_t *pte;
-
- if (va != tmp_vpages[0])
- return;
-
- pte = pmap_pte(pmap_kernel(), va);
- *pte = (pt_entry_t) pge;
-}
-#endif
diff --git a/sys/arch/hp300/hp300/pmap_bootstrap.c b/sys/arch/hp300/hp300/pmap_bootstrap.c
index 9108859dcc3..b8418539f87 100644
--- a/sys/arch/hp300/hp300/pmap_bootstrap.c
+++ b/sys/arch/hp300/hp300/pmap_bootstrap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_bootstrap.c,v 1.13 2001/12/14 21:44:04 miod Exp $ */
+/* $OpenBSD: pmap_bootstrap.c,v 1.14 2001/12/20 19:02:27 miod Exp $ */
/* $NetBSD: pmap_bootstrap.c,v 1.13 1997/06/10 18:56:50 veego Exp $ */
/*
@@ -68,6 +68,7 @@ extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
+extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif
@@ -447,6 +448,25 @@ pmap_bootstrap(nextpa, firstpa)
#endif
/*
+ * Initialize protection array.
+ * XXX don't use a switch statement, it might produce an
+ * absolute "jmp" table.
+ */
+ {
+ int *kp;
+
+ kp = &RELOC(protection_codes, int);
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ }
+
+ /*
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
@@ -500,22 +520,3 @@ pmap_bootstrap(nextpa, firstpa)
RELOC(virtual_avail, vaddr_t) = va;
}
}
-
-void
-pmap_init_md()
-{
- vaddr_t addr;
-
- /*
- * mark as unavailable the regions which we have mapped in
- * pmap_bootstrap().
- */
- addr = (vaddr_t) intiobase;
- if (uvm_map(kernel_map, &addr,
- m68k_ptob(IIOMAPSIZE+EIOMAPSIZE),
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)))
- panic("pmap_init: bogons in the VM system!\n");
-}
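The protection_codes[] table restored in pmap_bootstrap() above has one slot for every combination of the VM_PROT_READ/WRITE/EXECUTE bits, which is why all eight entries are filled in even though they collapse to just PG_RO and PG_RW. A minimal sketch of the lookup this enables (the real pte_prot() definition lives in pmap.c and may differ in detail):

	extern int protection_codes[];

	/*
	 * pte_prot() becomes a plain table lookup keyed by the three
	 * VM_PROT_* bits -- no switch, hence no absolute jump table.
	 */
	#define pte_prot(m, p)	(protection_codes[(p) & 0x7])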
diff --git a/sys/arch/hp300/include/cpu.h b/sys/arch/hp300/include/cpu.h
index 06f7ecb1e0f..80ea3091c74 100644
--- a/sys/arch/hp300/include/cpu.h
+++ b/sys/arch/hp300/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.16 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.17 2001/12/20 19:02:27 miod Exp $ */
/* $NetBSD: cpu.h,v 1.28 1998/02/13 07:41:51 scottr Exp $ */
/*
@@ -184,6 +184,9 @@ int badaddr __P((caddr_t));
int badbaddr __P((caddr_t));
void dumpconf __P((void));
+/* pmap.c functions */
+vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
+
/* sys_machdep.c functions */
int cachectl __P((int, vaddr_t, int));
diff --git a/sys/arch/hp300/include/param.h b/sys/arch/hp300/include/param.h
index 85b39cad6b0..3f1974599c0 100644
--- a/sys/arch/hp300/include/param.h
+++ b/sys/arch/hp300/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.18 2001/12/05 01:57:14 provos Exp $ */
+/* $OpenBSD: param.h,v 1.19 2001/12/20 19:02:27 miod Exp $ */
/* $NetBSD: param.h,v 1.35 1997/07/10 08:22:38 veego Exp $ */
/*
@@ -65,6 +65,12 @@
#define KERNBASE 0x00000000 /* start of kernel virtual */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+#define NBSEG (1 << SEGSHIFT) /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+
+#define UPAGES 2 /* pages of u-area */
+
#include <m68k/param.h>
#define NPTEPG (NBPG/(sizeof (pt_entry_t)))
@@ -85,4 +91,18 @@
void _delay __P((u_int));
#endif /* _KERNEL && !_LOCORE */
+#ifdef COMPAT_HPUX
+/*
+ * Constants/macros for HPUX multiple mapping of user address space.
+ * Pages in the first 256Mb are mapped in at every 256Mb segment.
+ */
+#define HPMMMASK 0xF0000000
+#define ISHPMMADDR(v) \
+ ((curproc->p_md.md_flags & MDP_HPUXMMAP) && \
+ ((unsigned)(v) & HPMMMASK) && \
+ ((unsigned)(v) & HPMMMASK) != HPMMMASK)
+#define HPMMBASEADDR(v) \
+ ((unsigned)(v) & ~HPMMMASK)
+#endif
+
#endif /* !_MACHINE_PARAM_H_ */
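The COMPAT_HPUX macros moved into this header treat user space as sixteen 256MB replicas of the first segment. A worked example with a hypothetical address (values are illustrative only):

	unsigned va, base;

	va = 0x30001000;	/* top nibble 0x3: neither 0x0 nor 0xF,
				 * so ISHPMMADDR() can hold (given the
				 * MDP_HPUXMMAP flag is also set) */
	base = va & ~HPMMMASK;	/* HPMMBASEADDR(va) == 0x00001000 */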
diff --git a/sys/arch/hp300/include/pmap.h b/sys/arch/hp300/include/pmap.h
index 9df02e00f25..8cdb0e6286f 100644
--- a/sys/arch/hp300/include/pmap.h
+++ b/sys/arch/hp300/include/pmap.h
@@ -1,13 +1,162 @@
-/* $OpenBSD: pmap.h,v 1.11 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.12 2001/12/20 19:02:27 miod Exp $ */
+/* $NetBSD: pmap.h,v 1.13 1997/06/10 18:58:19 veego Exp $ */
-#ifndef _MACHINE_PMAP_H_
-#define _MACHINE_PMAP_H_
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.h 8.1 (Berkeley) 6/10/93
+ */
-#include <m68k/pmap_motorola.h>
+#ifndef _HP300_PMAP_H_
+#define _HP300_PMAP_H_
-#ifdef _KERNEL
-void pmap_init_md __P((void));
-#define PMAP_INIT_MD() pmap_init_md()
+#include <machine/cpu.h>
+#include <machine/pte.h>
+
+#if defined(M68040)
+#define HP_SEG_SIZE (mmutype == MMU_68040 ? 0x40000 : NBSEG)
+#else
+#define HP_SEG_SIZE NBSEG
+#endif
+
+#define hp300_trunc_seg(x) (((unsigned)(x)) & ~(HP_SEG_SIZE-1))
+#define hp300_round_seg(x) hp300_trunc_seg((unsigned)(x) + HP_SEG_SIZE-1)
+
+/*
+ * Pmap stuff
+ */
+struct pmap {
+ pt_entry_t *pm_ptab; /* KVA of page table */
+ st_entry_t *pm_stab; /* KVA of segment table */
+ int pm_stchanged; /* ST changed */
+ int pm_stfree; /* 040: free lev2 blocks */
+ st_entry_t *pm_stpa; /* 040: ST phys addr */
+ short pm_sref; /* segment table ref count */
+ short pm_count; /* pmap reference count */
+ struct simplelock pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_ptpages; /* more stats: PT pages */
+};
+
+typedef struct pmap *pmap_t;
+
+/*
+ * On the 040 we keep track of which level 2 blocks are already in use
+ * with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
+ * (block 31). For convenience, the level 1 table is considered to be
+ * block 0.
+ *
+ * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
+ * for the kernel and users. 8 implies only the initial "segment table"
+ * page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
+ * physically contiguous pages for the ST in pmap.c!
+ */
+#define MAXKL2SIZE 32
+#define MAXUL2SIZE 8
+#define l2tobm(n) (1 << (n))
+#define bmtol2(n) (ffs(n) - 1)
+
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmap, loadhw) \
+{ \
+ if ((loadhw)) \
+ loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa)); \
+}
+#define PMAP_DEACTIVATE(pmapp, pcbp)
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry, the list is pv_table.
+ */
+struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vaddr_t pv_va; /* virtual address for mapping */
+ st_entry_t *pv_ptste; /* non-zero if VA maps a PT page */
+ struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
+ int pv_flags; /* flags */
+};
+
+#define PV_CI 0x01 /* header: all entries are cache inhibited */
+#define PV_PTPAGE 0x02 /* header: entry maps a page table page */
+
+struct pv_page;
+
+struct pv_page_info {
+ TAILQ_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+
+/*
+ * This is basically:
+ * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+ */
+#define NPVPPG 170
+
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
+
+extern struct pmap kernel_pmap_store;
+
+#define pmap_kernel() (&kernel_pmap_store)
+#define active_pmap(pm) \
+ ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
+#define active_user_pmap(pm) \
+ (curproc && \
+ (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
+
+extern struct pv_entry *pv_table; /* array of entries, one per page */
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+
+#define pmap_update(pmap) /* empty */
+
+extern pt_entry_t *Sysmap;
+extern char *vmmap; /* map for mem, dumps, etc. */
+
+#ifdef M68K_MMU_HP
+void pmap_prefer __P((vaddr_t, vaddr_t *));
+#define PMAP_PREFER(foff, vap) pmap_prefer((foff), (vap))
#endif
-#endif /* _MACHINE_PMAP_H_ */
+#endif /* !_HP300_PMAP_H_ */
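A short sketch of how the pm_stfree bitmap and the l2tobm()/bmtol2() helpers above cooperate on the 68040 when a fresh level 2 block is needed (variable names and the panic message are illustrative; the real logic is in pmap_enter_ptpage()):

	int ix;
	st_entry_t *addr;

	ix = bmtol2(pmap->pm_stfree);	/* ffs(mask) - 1: lowest free block */
	if (ix == -1)
		panic("out of level 2 blocks");
	pmap->pm_stfree &= ~l2tobm(ix);	/* mark block `ix' in use */
	addr = &pmap->pm_stab[ix * SG4_LEV2SIZE];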
diff --git a/sys/arch/hp300/include/pte.h b/sys/arch/hp300/include/pte.h
index e359464ad33..e524b59b016 100644
--- a/sys/arch/hp300/include/pte.h
+++ b/sys/arch/hp300/include/pte.h
@@ -1,14 +1,153 @@
-/* $OpenBSD: pte.h,v 1.3 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: pte.h,v 1.4 2001/12/20 19:02:27 miod Exp $ */
+/* $NetBSD: pte.h,v 1.4 1994/10/26 07:26:40 cgd Exp $ */
-#ifndef _MACHINE_PTE_H_
-#define _MACHINE_PTE_H_
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: pte.h 1.13 92/01/20$
+ *
+ * @(#)pte.h 8.1 (Berkeley) 6/10/93
+ */
-#include <m68k/pte_motorola.h>
+#ifndef _HP300_PTE_H_
+#define _HP300_PTE_H_
-#define MACHINE_STSIZE M68K_STSIZE
-#define MACHINE_MAX_PTSIZE M68K_MAX_PTSIZE
-#define MACHINE_MAX_KPTSIZE M68K_MAX_KPTSIZE
-#define MACHINE_PTBASE M68K_PTBASE
-#define MACHINE_PTMAXSIZE M68K_PTMAXSIZE
+/*
+ * HP300 hardware segment/page table entries
+ */
-#endif /* _MACHINE_PTE_H_ */
+#if 0
+struct ste {
+ unsigned int sg_pfnum:20; /* page table frame number */
+ unsigned int :8; /* reserved at 0 */
+ unsigned int :1; /* reserved at 1 */
+ unsigned int sg_prot:1; /* write protect bit */
+ unsigned int sg_v:2; /* valid bits */
+};
+
+struct ste40 {
+ unsigned int sg_ptaddr:24; /* page table page addr */
+ unsigned int :4; /* reserved at 0 */
+ unsigned int sg_u:1; /* hardware modified (dirty) bit */
+ unsigned int sg_prot:1; /* write protect bit */
+ unsigned int sg_v:2; /* valid bits */
+};
+
+struct pte {
+ unsigned int pg_pfnum:20; /* page frame number or 0 */
+ unsigned int :3;
+ unsigned int pg_w:1; /* is wired */
+ unsigned int :1; /* reserved at zero */
+ unsigned int pg_ci:1; /* cache inhibit bit */
+ unsigned int :1; /* reserved at zero */
+ unsigned int pg_m:1; /* hardware modified (dirty) bit */
+ unsigned int pg_u:1; /* hardware used (reference) bit */
+ unsigned int pg_prot:1; /* write protect bit */
+ unsigned int pg_v:2; /* valid bit */
+};
+#endif
+
+typedef int st_entry_t; /* segment table entry */
+typedef int pt_entry_t; /* Mach page table entry */
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+#define ST_ENTRY_NULL ((st_entry_t *) 0)
+
+#define SG_V 0x00000002 /* segment is valid */
+#define SG_NV 0x00000000
+#define SG_PROT 0x00000004 /* access protection mask */
+#define SG_RO 0x00000004
+#define SG_RW 0x00000000
+#define SG_U 0x00000008 /* modified bit (68040) */
+#define SG_FRAME 0xfffff000
+#define SG_IMASK 0xffc00000
+#define SG_ISHIFT 22
+#define SG_PMASK 0x003ff000
+#define SG_PSHIFT 12
+
+/* 68040 additions */
+#define SG4_MASK1 0xfe000000
+#define SG4_SHIFT1 25
+#define SG4_MASK2 0x01fc0000
+#define SG4_SHIFT2 18
+#define SG4_MASK3 0x0003f000
+#define SG4_SHIFT3 12
+#define SG4_ADDR1 0xfffffe00
+#define SG4_ADDR2 0xffffff00
+#define SG4_LEV1SIZE 128
+#define SG4_LEV2SIZE 128
+#define SG4_LEV3SIZE 64
+
+#define PG_V 0x00000001
+#define PG_NV 0x00000000
+#define PG_PROT 0x00000004
+#define PG_U 0x00000008
+#define PG_M 0x00000010
+#define PG_W 0x00000100
+#define PG_RO 0x00000004
+#define PG_RW 0x00000000
+#define PG_FRAME 0xfffff000
+#define PG_CI 0x00000040
+#define PG_SHIFT 12
+#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
+
+/* 68040 additions */
+#define PG_CMASK 0x00000060 /* cache mode mask */
+#define PG_CWT 0x00000000 /* writethrough caching */
+#define PG_CCB 0x00000020 /* copyback caching */
+#define PG_CIS 0x00000040 /* cache inhibited serialized */
+#define PG_CIN 0x00000060 /* cache inhibited nonserialized */
+#define PG_SO 0x00000080 /* supervisor only */
+
+#define HP_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
+ /* user process segment table size */
+#define HP_MAX_PTSIZE 0x400000 /* max size of UPT */
+#define HP_MAX_KPTSIZE 0x100000 /* max memory to allocate to KPT */
+#define HP_PTBASE 0x10000000 /* UPT map base address */
+#define HP_PTMAXSIZE 0x70000000 /* UPT map maximum size */
+
+/*
+ * Kernel virtual address to page table entry and to physical address.
+ */
+#define kvtopte(va) \
+ (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
+#define ptetokv(pt) \
+ ((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
+#define kvtophys(va) \
+ ((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
+
+#endif /* !_HP300_PTE_H_ */
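Note that kvtophys() as defined dereferences pg_pfnum, a field of the #if 0'd struct pte, while kvtopte() yields a plain pt_entry_t pointer. The integer equivalent of the translation, spelled out with PG_FRAME (sketch only; `va' is any kernel virtual address):

	pt_entry_t pte;
	paddr_t pa;

	pte = Sysmap[((unsigned)va - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT];
	pa = (paddr_t)((pte & PG_FRAME) | ((unsigned)va & PGOFSET));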
diff --git a/sys/arch/m68k/include/param.h b/sys/arch/m68k/include/param.h
index f948a1bcebe..7578b263e02 100644
--- a/sys/arch/m68k/include/param.h
+++ b/sys/arch/m68k/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.5 2001/11/30 20:57:52 miod Exp $ */
+/* $OpenBSD: param.h,v 1.6 2001/12/20 19:02:28 miod Exp $ */
/* $NetBSD: param.h,v 1.2 1997/06/10 18:21:23 veego Exp $ */
/*
@@ -81,16 +81,6 @@
#define SSIZE 1 /* initial stack size/NBPG */
#define SINCR 1 /* increment of stack/NBPG */
-#ifndef SEGSHIFT
-#if defined(M68040) || defined(M68060)
-#define SEGSHIFT ((mmutype <= MMU_68040) ? 18 : (34 - PGSHIFT))
-#else
-#define SEGSHIFT (34 - PGSHIFT)
-#endif
-#define NBSEG (1 << SEGSHIFT)
-#define SEGOFSET (NBSEG - 1)
-#endif
-
/* mac68k use 3 pages of u-area */
#ifndef UPAGES
#define UPAGES 2 /* pages of u-area */
@@ -150,18 +140,4 @@
#define m68k_btop(x) ((unsigned)(x) >> PGSHIFT)
#define m68k_ptob(x) ((unsigned)(x) << PGSHIFT)
-#ifdef COMPAT_HPUX
-/*
- * Constants/macros for HPUX multiple mapping of user address space.
- * Pages in the first 256Mb are mapped in at every 256Mb segment.
- */
-#define HPMMMASK 0xF0000000
-#define ISHPMMADDR(v) \
- ((curproc->p_md.md_flags & MDP_HPUXMMAP) && \
- ((unsigned)(v) & HPMMMASK) && \
- ((unsigned)(v) & HPMMMASK) != HPMMMASK)
-#define HPMMBASEADDR(v) \
- ((unsigned)(v) & ~HPMMMASK)
-#endif /* COMPAT_HPUX */
-
#endif /* !_M68K_PARAM_H_ */
diff --git a/sys/arch/m68k/include/pmap_motorola.h b/sys/arch/m68k/include/pmap_motorola.h
deleted file mode 100644
index 028c4f61faa..00000000000
--- a/sys/arch/m68k/include/pmap_motorola.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* $OpenBSD: pmap_motorola.h,v 1.2 2001/12/05 00:11:51 millert Exp $ */
-
-/*
- * Copyright (c) 1987 Carnegie-Mellon University
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)pmap.h 8.1 (Berkeley) 6/10/93
- */
-
-#ifndef _PMAP_MOTOROLA_H_
-#define _PMAP_MOTOROLA_H_
-
-#include <machine/cpu.h>
-#include <machine/pte.h>
-
-/*
- * Pmap stuff
- */
-struct pmap {
- pt_entry_t *pm_ptab; /* KVA of page table */
- st_entry_t *pm_stab; /* KVA of segment table */
- int pm_stfree; /* 040: free lev2 blocks */
- st_entry_t *pm_stpa; /* 040: ST phys addr */
- short pm_sref; /* segment table ref count */
- short pm_count; /* pmap reference count */
- struct simplelock pm_lock; /* lock on pmap */
- struct pmap_statistics pm_stats; /* pmap statistics */
- long pm_ptpages; /* more stats: PT pages */
-};
-
-typedef struct pmap *pmap_t;
-
-/*
- * On the 040 we keep track of which level 2 blocks are already in use
- * with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
- * (block 31). For convenience, the level 1 table is considered to be
- * block 0.
- *
- * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed.
- * for the kernel and users. 8 implies only the initial "segment table"
- * page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
- * physically contiguous pages for the ST in pmap.c!
- */
-#define MAXKL2SIZE 32
-#define MAXUL2SIZE 8
-#define l2tobm(n) (1 << (n))
-#define bmtol2(n) (ffs(n) - 1)
-
-/*
- * Macros for speed
- */
-#define PMAP_ACTIVATE(pmap, loadhw) \
-{ \
- if ((loadhw)) \
- loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa)); \
-}
-
-/*
- * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page. An entry is a pv_entry, the list is pv_table.
- */
-struct pv_entry {
- struct pv_entry *pv_next; /* next pv_entry */
- struct pmap *pv_pmap; /* pmap where mapping lies */
- vaddr_t pv_va; /* virtual address for mapping */
- st_entry_t *pv_ptste; /* non-zero if VA maps a PT page */
- struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
- int pv_flags; /* flags */
-};
-
-#define PV_CI 0x01 /* header: all entries are cache inhibited */
-#define PV_PTPAGE 0x02 /* header: entry maps a page table page */
-
-struct pv_page;
-
-struct pv_page_info {
- TAILQ_ENTRY(pv_page) pgi_list;
- struct pv_entry *pgi_freelist;
- int pgi_nfree;
-};
-
-/*
- * This is basically:
- * ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
- */
-#if PAGE_SHIFT == 13
-#define NPVPPG 340
-#elif PAGE_SHIFT == 12
-#define NPVPPG 170
-#endif
-
-struct pv_page {
- struct pv_page_info pvp_pgi;
- struct pv_entry pvp_pv[NPVPPG];
-};
-
-#ifdef _KERNEL
-
-extern struct pmap kernel_pmap_store;
-
-#define pmap_kernel() (&kernel_pmap_store)
-#define active_pmap(pm) \
- ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
-#define active_user_pmap(pm) \
- (curproc && \
- (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
-
-extern struct pv_entry *pv_table; /* array of entries, one per page */
-
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
-#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
-
-#define pmap_update(pmap) /* nothing (yet) */
-
-extern pt_entry_t *Sysmap;
-extern char *vmmap; /* map for mem, dumps, etc. */
-
-#ifdef M68K_MMU_HP
-void pmap_prefer __P((vaddr_t, vaddr_t *));
-#define PMAP_PREFER(foff, vap) pmap_prefer((foff), (vap))
-#endif
-
-vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
-
-#endif /* _KERNEL */
-
-#endif /* !_PMAP_MOTOROLA_H_ */
diff --git a/sys/arch/m68k/include/pte_motorola.h b/sys/arch/m68k/include/pte_motorola.h
deleted file mode 100644
index a250eaa82e1..00000000000
--- a/sys/arch/m68k/include/pte_motorola.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* $OpenBSD: pte_motorola.h,v 1.1 2001/11/30 20:54:50 miod Exp $ */
-
-/*
- * Copyright (c) 1988 University of Utah.
- * Copyright (c) 1982, 1986, 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: Utah $Hdr: pte.h 1.13 92/01/20$
- *
- * @(#)pte.h 8.1 (Berkeley) 6/10/93
- */
-
-#ifndef _PTE_MOTOROLA_H_
-#define _PTE_MOTOROLA_H_
-
-/*
- * m68k hardware with motorola MMU segment/page table entries
- */
-
-#if 0
-struct ste {
- unsigned int sg_pfnum:20; /* page table frame number */
- unsigned int :8; /* reserved at 0 */
- unsigned int :1; /* reserved at 1 */
- unsigned int sg_prot:1; /* write protect bit */
- unsigned int sg_v:2; /* valid bits */
-};
-
-struct ste40 {
- unsigned int sg_ptaddr:24; /* page table page addr */
- unsigned int :4; /* reserved at 0 */
- unsigned int sg_u; /* hardware modified (dirty) bit */
- unsigned int sg_prot:1; /* write protect bit */
- unsigned int sg_v:2; /* valid bits */
-};
-
-struct pte {
- unsigned int pg_pfnum:20; /* page frame number or 0 */
- unsigned int :3;
- unsigned int pg_w:1; /* is wired */
- unsigned int :1; /* reserved at zero */
- unsigned int pg_ci:1; /* cache inhibit bit */
- unsigned int :1; /* reserved at zero */
- unsigned int pg_m:1; /* hardware modified (dirty) bit */
- unsigned int pg_u:1; /* hardware used (reference) bit */
- unsigned int pg_prot:1; /* write protect bit */
- unsigned int pg_v:2; /* valid bit */
-};
-#endif
-
-typedef int st_entry_t; /* segment table entry */
-typedef int pt_entry_t; /* Mach page table entry */
-
-#define PT_ENTRY_NULL ((pt_entry_t *) 0)
-#define ST_ENTRY_NULL ((st_entry_t *) 0)
-
-#define SG_V 0x00000002 /* segment is valid */
-#define SG_NV 0x00000000
-#define SG_PROT 0x00000004 /* access protection mask */
-#define SG_RO 0x00000004
-#define SG_RW 0x00000000
-#define SG_U 0x00000008 /* modified bit (68040) */
-#define SG_FRAME 0xfffff000
-#define SG_IMASK 0xffc00000
-#define SG_ISHIFT 22
-#define SG_PMASK 0x003ff000
-#define SG_PSHIFT 12
-
-/* 68040 additions */
-#define SG4_MASK1 0xfe000000
-#define SG4_SHIFT1 25
-#define SG4_MASK2 0x01fc0000
-#define SG4_SHIFT2 18
-#define SG4_MASK3 0x0003f000
-#define SG4_SHIFT3 12
-#define SG4_ADDR1 0xfffffe00
-#define SG4_ADDR2 0xffffff00
-#define SG4_LEV1SIZE 128
-#define SG4_LEV2SIZE 128
-#define SG4_LEV3SIZE 64
-
-#define PG_V 0x00000001
-#define PG_NV 0x00000000
-#define PG_PROT 0x00000004
-#define PG_U 0x00000008
-#define PG_M 0x00000010
-#define PG_W 0x00000100
-#define PG_RO 0x00000004
-#define PG_RW 0x00000000
-#define PG_FRAME 0xfffff000
-#define PG_CI 0x00000040
-#define PG_SHIFT 12
-#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
-
-/* 68040 additions */
-#define PG_CMASK 0x00000060 /* cache mode mask */
-#define PG_CWT 0x00000000 /* writethrough caching */
-#define PG_CCB 0x00000020 /* copyback caching */
-#define PG_CIS 0x00000040 /* cache inhibited serialized */
-#define PG_CIN 0x00000060 /* cache inhibited nonserialized */
-#define PG_SO 0x00000080 /* supervisor only */
-
-#define M68K_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
- /* user process segment table size */
-#define M68K_MAX_PTSIZE 0x400000 /* max size of UPT */
-#define M68K_MAX_KPTSIZE 0x100000 /* max memory to allocate to KPT */
-#define M68K_PTBASE 0x10000000 /* UPT map base address */
-#define M68K_PTMAXSIZE 0x70000000 /* UPT map maximum size */
-
-/*
- * Kernel virtual address to page table entry and to physical address.
- */
-#define kvtopte(va) \
- (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
-#define ptetokv(pt) \
- ((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
-#define kvtophys(va) \
- ((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
-
-#endif /* !_PTE_MOTOROLA_H_ */
diff --git a/sys/arch/mac68k/conf/files.mac68k b/sys/arch/mac68k/conf/files.mac68k
index 07b6f6dfa94..18baa464216 100644
--- a/sys/arch/mac68k/conf/files.mac68k
+++ b/sys/arch/mac68k/conf/files.mac68k
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mac68k,v 1.25 2001/11/30 20:58:18 miod Exp $
+# $OpenBSD: files.mac68k,v 1.26 2001/12/20 19:02:28 miod Exp $
# $NetBSD: files.mac68k,v 1.61 1997/03/01 20:22:16 scottr Exp $
# mac68k-specific configuration info
@@ -114,7 +114,7 @@ file arch/mac68k/mac68k/macrom.c
file arch/mac68k/mac68k/macromasm.s
file arch/mac68k/mac68k/mainbus.c
file arch/mac68k/mac68k/mem.c
-file arch/m68k/m68k/pmap_motorola.c
+file arch/mac68k/mac68k/pmap.c
file arch/mac68k/mac68k/pmap_bootstrap.c
file arch/mac68k/mac68k/pram.c
file arch/mac68k/mac68k/pramasm.s
diff --git a/sys/arch/mac68k/include/param.h b/sys/arch/mac68k/include/param.h
index 50f6fcfcd50..c53102b7acd 100644
--- a/sys/arch/mac68k/include/param.h
+++ b/sys/arch/mac68k/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.15 2001/12/05 01:57:15 provos Exp $ */
+/* $OpenBSD: param.h,v 1.16 2001/12/20 19:02:28 miod Exp $ */
/* $NetBSD: param.h,v 1.28 1997/03/01 06:57:45 scottr Exp $ */
/*
@@ -98,6 +98,10 @@
#define KERNBASE 0x00000000 /* start of kernel virtual */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+#define NBSEG (1 << SEGSHIFT) /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+
#define UPAGES 3 /* pages of u-area */
#include <m68k/param.h>
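For reference, SEGSHIFT == 22 makes NBSEG 4MB, which is exactly the span one 4KB page of PTEs covers: NPTEPG == NBPG / sizeof(pt_entry_t) == 4096 / 4 == 1024 entries, and 1024 * NBPG == 4MB. Truncating an address to its segment is then a single mask:

	vaddr_t segbase;

	segbase = va & ~SEGOFSET;	/* first address of va's segment */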
diff --git a/sys/arch/mac68k/include/pmap.h b/sys/arch/mac68k/include/pmap.h
index a0ae0f2011d..c190143ed8e 100644
--- a/sys/arch/mac68k/include/pmap.h
+++ b/sys/arch/mac68k/include/pmap.h
@@ -1,16 +1,194 @@
-/* $OpenBSD: pmap.h,v 1.14 2001/12/02 02:01:52 millert Exp $ */
+/* $OpenBSD: pmap.h,v 1.15 2001/12/20 19:02:28 miod Exp $ */
+/* $NetBSD: pmap.h,v 1.26 1999/07/21 03:18:21 briggs Exp $ */
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1993 Allen K. Briggs, Chris P. Caputo,
+ * Michael L. Finch, Bradley A. Grantham, and
+ * Lawrence A. Kesteloot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Alice Group.
+ * 4. The names of the Alice Group or any of its members may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE ALICE GROUP ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE ALICE GROUP BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * @(#)pmap.h 7.6 (Berkeley) 5/10/91
+ */
#ifndef _MAC68K_PMAP_H_
#define _MAC68K_PMAP_H_
-#include <m68k/pmap_motorola.h>
+#include <machine/cpu.h>
+#include <machine/pte.h>
+
+#if defined(M68040)
+#define MAC_SEG_SIZE (mmutype == MMU_68040 ? 0x40000 : NBSEG)
+#else
+#define MAC_SEG_SIZE NBSEG
+#endif
+
+#define mac68k_trunc_seg(x) (((unsigned)(x)) & ~(MAC_SEG_SIZE-1))
+#define mac68k_round_seg(x) mac68k_trunc_seg((unsigned)(x)+MAC_SEG_SIZE-1)
+
+/*
+ * Pmap stuff
+ */
+struct pmap {
+ pt_entry_t *pm_ptab; /* KVA of page table */
+ st_entry_t *pm_stab; /* KVA of segment table */
+ int pm_stfree; /* 040: free lev2 blocks */
+ st_entry_t *pm_stpa; /* 040: ST phys addr */
+ short pm_sref; /* segment table ref count */
+ short pm_count; /* pmap reference count */
+ struct simplelock pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_ptpages; /* more stats: PT pages */
+};
+
+typedef struct pmap *pmap_t;
+
+/*
+ * On the 040, we keep track of which level 2 blocks are already in use
+ * with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
+ * (block 31). For convenience, the level 1 table is considered to be
+ * block 0.
+ *
+ * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
+ * for the kernel and users. 8 implies only the initial "segment table"
+ * page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
+ * physically contiguous pages for the ST in pmap.c!
+ */
+#define MAXKL2SIZE 32
+#define MAXUL2SIZE 8
+#define l2tobm(n) (1 << (n))
+#define bmtol2(n) (ffs(n) - 1)
+
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmap, loadhw) \
+{ \
+ if ((loadhw)) \
+ loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa)); \
+}
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vaddr_t pv_va; /* virtual address for mapping */
+ st_entry_t *pv_ptste; /* non-zero if VA maps a PT page */
+ struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
+ int pv_flags; /* flags */
+} *pv_entry_t;
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
+struct pv_page;
+
+struct pv_page_info {
+ TAILQ_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+
+/*
+ * This is basically:
+ * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+ */
+#define NPVPPG 170
+
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
#ifdef _KERNEL
+extern struct pmap kernel_pmap_store;
+
+#define pmap_kernel() (&kernel_pmap_store)
+#define active_pmap(pm) \
+ ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
+#define active_user_pmap(pm) \
+ (curproc && \
+ (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
+
+extern struct pv_entry *pv_table; /* array of entries, one per page */
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+#define pmap_update(pmap) /* nothing */
-void mac68k_set_pte __P((vaddr_t va, paddr_t pge));
+extern pt_entry_t *Sysmap;
+extern char *vmmap; /* map for mem, dumps, etc. */
-void pmap_init_md __P((void));
-#define PMAP_INIT_MD() pmap_init_md()
+/* pmap.c */
+vm_offset_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
+void mac68k_set_pte __P((vm_offset_t va, vm_offset_t pge));
#endif /* _KERNEL */
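A hedged sketch of how the pv_table chains declared above are consumed by the pmap_changebit()/pmap_page_protect() style routines; pa_index() here stands in for the physical-address-to-slot conversion done in pmap.c:

	struct pv_entry *pv;

	for (pv = &pv_table[pa_index(pa)]; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == NULL)
			continue;	/* header of an unmapped page */
		/* pv->pv_pmap and pv->pv_va name one mapping of the page */
	}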
diff --git a/sys/arch/mac68k/include/pte.h b/sys/arch/mac68k/include/pte.h
index bf2f20714e4..e15eaaf41d6 100644
--- a/sys/arch/mac68k/include/pte.h
+++ b/sys/arch/mac68k/include/pte.h
@@ -1,14 +1,156 @@
-/* $OpenBSD: pte.h,v 1.5 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: pte.h,v 1.6 2001/12/20 19:02:28 miod Exp $ */
+/* $NetBSD: pte.h,v 1.9 1996/05/05 06:17:58 briggs Exp $ */
-#ifndef _MACHINE_PTE_H_
-#define _MACHINE_PTE_H_
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1993 Allen K. Briggs, Chris P. Caputo,
+ * Michael L. Finch, Bradley A. Grantham, and
+ * Lawrence A. Kesteloot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Alice Group.
+ * 4. The names of the Alice Group or any of its members may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE ALICE GROUP ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE ALICE GROUP BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * from: Utah $Hdr: pte.h 1.11 89/09/03$
+ *
+ * @(#)pte.h 7.3 (Berkeley) 5/8/91
+ */
-#include <m68k/pte_motorola.h>
+#ifndef _MAC68K_PTE_H_
+#define _MAC68K_PTE_H_
-#define MACHINE_STSIZE M68K_STSIZE
-#define MACHINE_MAX_PTSIZE M68K_MAX_PTSIZE
-#define MACHINE_MAX_KPTSIZE M68K_MAX_KPTSIZE
-#define MACHINE_PTBASE M68K_PTBASE
-#define MACHINE_PTMAXSIZE M68K_PTMAXSIZE
+/*
+ * Mac hardware segment/page table entries
+ */
-#endif /* _MACHINE_PTE_H_ */
+typedef int st_entry_t; /* segment table entry */
+typedef int pt_entry_t; /* Mach page table entry */
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+#define ST_ENTRY_NULL ((st_entry_t *) 0)
+
+#define SG_V 0x00000002 /* segment is valid */
+#define SG_NV 0x00000000
+#define SG_PROT 0x00000004 /* access protection mask */
+#define SG_RO 0x00000004
+#define SG_RW 0x00000000
+#define SG_U 0x00000008 /* modified bit (68040) */
+#define SG_FRAME 0xfffff000
+#define SG_IMASK 0xffc00000
+#define SG_ISHIFT 22
+#define SG_PMASK 0x003ff000
+#define SG_PSHIFT 12
+
+/* 68040 additions */
+#define SG4_MASK1 0xfe000000
+#define SG4_SHIFT1 25
+#define SG4_MASK2 0x01fc0000
+#define SG4_SHIFT2 18
+#define SG4_MASK3 0x0003f000
+#define SG4_SHIFT3 12
+#define SG4_ADDR1 0xfffffe00
+#define SG4_ADDR2 0xffffff00
+#define SG4_LEV1SIZE 128
+#define SG4_LEV2SIZE 128
+#define SG4_LEV3SIZE 64
+
+#define PG_V 0x00000001
+#define PG_NV 0x00000000
+#define PG_PROT 0x00000004
+#define PG_U 0x00000008
+#define PG_M 0x00000010
+#define PG_W 0x00000100
+#define PG_RO 0x00000004
+#define PG_RW 0x00000000
+#define PG_FRAME 0xfffff000
+#define PG_CI 0x00000040
+#define PG_SHIFT 12
+#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
+
+/* 68040 additions */
+#define PG_CMASK 0x00000060 /* cache mode mask */
+#define PG_CWT 0x00000000 /* writethrough caching */
+#define PG_CCB 0x00000020 /* copyback caching */
+#define PG_CIS 0x00000040 /* cache inhibited serialized */
+#define PG_CIN 0x00000060 /* cache inhibited nonserialized */
+#define PG_SO 0x00000080 /* supervisor only */
+
+#define MAC_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
+ /* user process segment table size */
+#define MAC_MAX_PTSIZE 0x400000 /* max size of UPT */
+#define MAC_MAX_KPTSIZE 0x100000 /* max memory to allocate to KPT */
+#define MAC_PTBASE 0x10000000 /* UPT map base address */
+#define MAC_PTMAXSIZE 0x70000000 /* UPT map maximum size */
+
+/*
+ * Kernel virtual address to page table entry and to physical address.
+ */
+#define kvtopte(va) \
+ (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
+#define ptetokv(pt) \
+ ((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
+#define kvtophys(va) \
+ ((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
+
+#endif /* _MAC68K_PTE_H_ */
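Worth noting from the defines above: PG_CI (0x40) is bit-identical to the 68040's PG_CIS encoding within PG_CMASK, so code that simply sets PG_CI gets "cache inhibited, serialized" on the 040 and plain cache inhibit on the earlier MMUs, with no special casing. Illustrative fragment:

	pt_entry_t npte;

	npte = (pa & PG_FRAME) | PG_V;
	npte = (npte & ~PG_CMASK) | PG_CI;	/* CIS on 040, CI elsewhere */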
diff --git a/sys/arch/mac68k/mac68k/pmap.c b/sys/arch/mac68k/mac68k/pmap.c
new file mode 100644
index 00000000000..6ff46799375
--- /dev/null
+++ b/sys/arch/mac68k/mac68k/pmap.c
@@ -0,0 +1,2351 @@
+/* $OpenBSD: pmap.c,v 1.37 2001/12/20 19:02:28 miod Exp $ */
+/* $NetBSD: pmap.c,v 1.55 1999/04/22 04:24:53 chs Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 8.6 (Berkeley) 5/27/94
+ */
+
+/*
+ * Derived from HP9000/300 series physical map management code.
+ *
+ * Supports:
+ * 68020 with 68851 MMU Mac II
+ * 68030 with on-chip MMU IIcx, etc.
+ * 68040 with on-chip MMU Quadras, etc.
+ *
+ * Notes:
+ * Don't even pay lip service to multiprocessor support.
+ *
+ * We assume TLB entries don't have process tags (except for the
+ * supervisor/user distinction) so we only invalidate TLB entries
+ * when changing mappings for the current (or kernel) pmap. This is
+ * technically not true for the 68851 but we flush the TLB on every
+ * context switch, so it effectively winds up that way.
+ *
+ * Bitwise and/or operations are significantly faster than bitfield
+ * references so we use them when accessing STE/PTEs in the pmap_pte_*
+ * macros. Note also that the two are not always equivalent; e.g.:
+ * (*pte & PG_PROT)[4] != pte->pg_prot[1]
+ * and a couple of routines that deal with protection and wiring take
+ * some shortcuts that assume the and/or definitions.
+ *
+ * This implementation will only work for PAGE_SIZE == NBPG
+ * (i.e. 4096 bytes).
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidation or protection-reduction
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+#include <sys/pool.h>
+
+#include <machine/pte.h>
+
+#include <uvm/uvm.h>
+
+#include <machine/cpu.h>
+
+#ifdef DEBUG
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_CACHE 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_SEGTAB 0x0400
+#define PDB_MULTIMAP 0x0800
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
+
+#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
+
+#if defined(M68040)
+int dowriteback = 1; /* 68040: enable writeback caching */
+int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
+#endif
+#else /* ! DEBUG */
+#define PMAP_DPRINTF(l, x) /* nothing */
+#endif /* DEBUG */
+
+/*
+ * Get STEs and PTEs for user/kernel address space
+ */
+#if defined(M68040)
+#define pmap_ste1(m, v) \
+ (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
+/* XXX assumes physically contiguous ST pages (if more than one) */
+#define pmap_ste2(m, v) \
+ (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
+ - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
+#define pmap_ste(m, v) \
+ (&((m)->pm_stab[(vaddr_t)(v) \
+ >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
+#define pmap_ste_v(m, v) \
+ (mmutype == MMU_68040 \
+ ? ((*pmap_ste1(m, v) & SG_V) && \
+ (*pmap_ste2(m, v) & SG_V)) \
+ : (*pmap_ste(m, v) & SG_V))
+#else
+#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
+#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
+#endif
+
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
+#define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
+#define pmap_pte_w(pte) (*(pte) & PG_W)
+#define pmap_pte_ci(pte) (*(pte) & PG_CI)
+#define pmap_pte_m(pte) (*(pte) & PG_M)
+#define pmap_pte_u(pte) (*(pte) & PG_U)
+#define pmap_pte_prot(pte) (*(pte) & PG_PROT)
+#define pmap_pte_v(pte) (*(pte) & PG_V)
+
+#define pmap_pte_set_w(pte, v) \
+ if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
+#define pmap_pte_set_prot(pte, v) \
+ if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
+#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
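+
+/*
+ * Illustrative sketch (not compiled): typical use of the accessor
+ * macros above.  The function name is hypothetical; the real callers
+ * are the pmap_* routines below.
+ */
+#if 0
+static void
+example_wire_page(pmap_t pmap, vaddr_t va)
+{
+	pt_entry_t *pte = pmap_pte(pmap, va);
+
+	/* only touch valid PTEs whose wired bit would actually change */
+	if (pmap_pte_v(pte) && pmap_pte_w_chg(pte, PG_W)) {
+		pmap_pte_set_w(pte, PG_W);
+		pmap->pm_stats.wired_count++;
+	}
+}
+#endif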
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to a m68k protection code.
+ */
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
+
+/*
+ * Kernel page table page management.
+ */
+struct kpt_page {
+ struct kpt_page *kpt_next; /* link on either used or free list */
+ vaddr_t kpt_va; /* always valid kernel VA */
+ paddr_t kpt_pa; /* PA of this page (for speed) */
+};
+struct kpt_page *kpt_free_list, *kpt_used_list;
+struct kpt_page *kpt_pages;
+
+/*
+ * Kernel segment/page table and page table map.
+ * The page table map gives us a level of indirection we need to dynamically
+ * expand the page table. It is essentially a copy of the segment table
+ * with PTEs instead of STEs. All are initialized in locore at boot time.
+ * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
+ * Segtabzero is an empty segment table which all processes share until
+ * they reference something.
+ */
+st_entry_t *Sysseg;
+pt_entry_t *Sysmap, *Sysptmap;
+st_entry_t *Segtabzero, *Segtabzeropa;
+vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
+
+struct pmap kernel_pmap_store;
+struct vm_map *st_map, *pt_map;
+struct vm_map st_map_store, pt_map_store;
+
+paddr_t avail_start; /* PA of first available physical page */
+paddr_t avail_end; /* PA of last available physical page */
+vsize_t mem_size; /* memory size in bytes */
+vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int page_cnt; /* number of pages managed by VM system */
+
+boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+struct pv_entry *pv_table;
+char *pmap_attributes; /* reference and modify bits */
+TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+
+/* vidlen is defined in pmap_bootstrap.c */
+extern int vidlen;
+#define VIDMAPSIZE btoc(vidlen)
+
+#if defined(M68040)
+int protostfree; /* prototype (default) free ST map */
+#endif
+
+extern caddr_t CADDR1, CADDR2;
+
+pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
+pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
+
+struct pool pmap_pmap_pool; /* memory pool for pmap structures */
+
+struct pv_entry *pmap_alloc_pv __P((void));
+void pmap_free_pv __P((struct pv_entry *));
+void pmap_collect_pv __P((void));
+
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
+
+#define pa_to_pvh(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.pvent[pg_]; \
+})
+
+#define pa_to_attribute(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
+})
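+
+/*
+ * Illustrative sketch (not compiled): the lookup performed by the two
+ * statement-expression macros above, written out as a function.  The
+ * name is hypothetical.
+ */
+#if 0
+static struct pv_entry *
+example_pa_to_pvh(paddr_t pa)
+{
+	int bank, pg;
+
+	bank = vm_physseg_find(atop(pa), &pg);
+	if (bank == -1)			/* not managed memory */
+		return (NULL);
+	return (&vm_physmem[bank].pmseg.pvent[pg]);
+}
+#endif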
+
+/*
+ * Internal routines
+ */
+void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((paddr_t, int));
+void pmap_changebit __P((paddr_t, int, int));
+void pmap_enter_ptpage __P((pmap_t, vaddr_t));
+void pmap_collect1 __P((pmap_t, paddr_t, vaddr_t));
+void pmap_pinit __P((pmap_t));
+void pmap_release __P((pmap_t));
+
+#ifdef DEBUG
+void pmap_pvdump __P((paddr_t));
+void pmap_check_wiring __P((char *, vaddr_t));
+#endif
+
+/* pmap_remove_mapping flags */
+#define PRM_TFLUSH 1
+#define PRM_CFLUSH 2
+
+/*
+ * pmap_virtual_space: [ INTERFACE ]
+ *
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap.
+ *
+ * This is only an interface function if we do not use
+ * pmap_steal_memory()!
+ *
+ * Note: no locking is necessary in this function.
+ */
+void
+pmap_virtual_space(vstartp, vendp)
+ vaddr_t *vstartp, *vendp;
+{
+
+ *vstartp = virtual_avail;
+ *vendp = virtual_end;
+}
+
+/*
+ * pmap_init: [ INTERFACE ]
+ *
+ * Initialize the pmap module. Called by vm_init(), to initialize any
+ * structures that the pmap system needs to map virtual memory.
+ *
+ * Note: no locking is necessary in this function.
+ */
+void
+pmap_init()
+{
+ vaddr_t addr, addr2;
+ vsize_t s;
+ struct pv_entry *pv;
+ char *attr;
+ int rv;
+ int npages;
+ int bank;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
+
+ /*
+ * Before we do anything else, initialize the PTE pointers
+ * used by pmap_zero_page() and pmap_copy_page().
+ */
+ caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
+ caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
+
+ /*
+ * Now that kernel map has been allocated, we can mark as
+ * unavailable regions which we have mapped in pmap_bootstrap().
+ */
+ addr = (vaddr_t)IOBase;
+ if (uvm_map(kernel_map, &addr,
+ m68k_ptob(IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE),
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)))
+ goto bogons;
+ addr = (vaddr_t)Sysmap;
+ if (uvm_map(kernel_map, &addr, MAC_MAX_PTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED))) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+ bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+
+ PMAP_DPRINTF(PDB_INIT,
+ ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
+ Sysseg, Sysmap, Sysptmap));
+ PMAP_DPRINTF(PDB_INIT,
+ (" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
+ avail_start, avail_end, virtual_avail, virtual_end));
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * initial segment table, pv_head_table and pmap_attributes.
+ */
+ for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
+ page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+ s = MAC_STSIZE; /* Segtabzero */
+ s += page_cnt * sizeof(struct pv_entry); /* pv table */
+ s += page_cnt * sizeof(char); /* attribute table */
+ s = round_page(s);
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: can't allocate data structures");
+
+ Segtabzero = (st_entry_t *)addr;
+ pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+ addr += MAC_STSIZE;
+
+ pv_table = (struct pv_entry *)addr;
+ addr += page_cnt * sizeof(struct pv_entry);
+
+ pmap_attributes = (char *)addr;
+
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes));
+
+ /*
+ * Now that the pv and attribute tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_table;
+ attr = pmap_attributes;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npages = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += npages;
+ attr += npages;
+ }
+
+ /*
+ * Allocate physical memory for kernel PT pages and their management.
+ * We need 1 PT page per possible task plus some slop.
+ */
+ npages = min(atop(MAC_MAX_KPTSIZE), maxproc+16);
+ s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
+
+ /*
+ * Verify that space will be allocated in region for which
+ * we already have kernel PT pages.
+ */
+ addr = 0;
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv || (addr + s) >= (vaddr_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ uvm_unmap(kernel_map, addr, addr + s);
+
+ /*
+ * Now allocate the space and link the pages together to
+ * form the KPT free list.
+ */
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+ s = ptoa(npages);
+ addr2 = addr + s;
+ kpt_pages = &((struct kpt_page *)addr2)[npages];
+ kpt_free_list = (struct kpt_page *)0;
+ do {
+ addr2 -= NBPG;
+ (--kpt_pages)->kpt_next = kpt_free_list;
+ kpt_free_list = kpt_pages;
+ kpt_pages->kpt_va = addr2;
+ pmap_extract(pmap_kernel(), addr2, &kpt_pages->kpt_pa);
+ } while (addr != addr2);
+
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
+ atop(s), addr, addr + s));
+
+ /*
+ * Allocate the segment table map and the page table map.
+ */
+ s = maxproc * MAC_STSIZE;
+ st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+ FALSE, &st_map_store);
+
+ addr = MAC_PTBASE;
+ if ((MAC_PTMAXSIZE / MAC_MAX_PTSIZE) < maxproc) {
+ s = MAC_PTMAXSIZE;
+ /*
+ * XXX We don't want to hang when we run out of
+ * page tables, so we lower maxproc so that fork()
+ * will fail instead. Note that root could still raise
+ * this value via sysctl(2).
+ */
+ maxproc = (MAC_PTMAXSIZE / MAC_MAX_PTSIZE);
+ } else
+ s = (maxproc * MAC_MAX_PTSIZE);
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+ TRUE, &pt_map_store);
+
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
+ protostfree = ~l2tobm(0);
+ for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
+ protostfree &= ~l2tobm(rv);
+ }
+#endif
+
+ /*
+ * Initialize the pmap pools.
+ */
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+/*
+ * pmap_alloc_pv:
+ *
+ * Allocate a pv_entry.
+ */
+struct pv_entry *
+pmap_alloc_pv()
+{
+ struct pv_page *pvp;
+ struct pv_entry *pv;
+ int i;
+
+ if (pv_nfree == 0) {
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+ pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
+ for (i = NPVPPG - 2; i; i--, pv++)
+ pv->pv_next = pv + 1;
+ pv->pv_next = 0;
+ pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
+ TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ pv = &pvp->pvp_pv[0];
+ } else {
+ --pv_nfree;
+ pvp = pv_page_freelist.tqh_first;
+ if (--pvp->pvp_pgi.pgi_nfree == 0) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ }
+ pv = pvp->pvp_pgi.pgi_freelist;
+#ifdef DIAGNOSTIC
+ if (pv == 0)
+ panic("pmap_alloc_pv: pgi_nfree inconsistent");
+#endif
+ pvp->pvp_pgi.pgi_freelist = pv->pv_next;
+ }
+ return pv;
+}
+
+/*
+ * pmap_free_pv:
+ *
+ * Free a pv_entry.
+ */
+void
+pmap_free_pv(pv)
+ struct pv_entry *pv;
+{
+ struct pv_page *pvp;
+
+ pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
+ switch (++pvp->pvp_pgi.pgi_nfree) {
+ case 1:
+ TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
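+		/* FALLTHROUGH: also link the freed pv onto this page's free list */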
+ default:
+ pv->pv_next = pvp->pvp_pgi.pgi_freelist;
+ pvp->pvp_pgi.pgi_freelist = pv;
+ ++pv_nfree;
+ break;
+ case NPVPPG:
+ pv_nfree -= NPVPPG - 1;
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
+ break;
+ }
+}
+
+/*
+ * pmap_collect_pv:
+ *
+ * Perform compaction on the PV list, called via pmap_collect().
+ */
+void
+pmap_collect_pv()
+{
+ struct pv_page_list pv_page_collectlist;
+ struct pv_page *pvp, *npvp;
+ struct pv_entry *ph, *ppv, *pv, *npv;
+ int s;
+
+ TAILQ_INIT(&pv_page_collectlist);
+
+ for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
+ if (pv_nfree < NPVPPG)
+ break;
+ npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+ if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
+ pvp_pgi.pgi_list);
+ pv_nfree -= NPVPPG;
+ pvp->pvp_pgi.pgi_nfree = -1;
+ }
+ }
+
+ if (pv_page_collectlist.tqh_first == 0)
+ return;
+
+ for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
+ if (ph->pv_pmap == 0)
+ continue;
+ s = splimp();
+ for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
+ pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
+ if (pvp->pvp_pgi.pgi_nfree == -1) {
+ pvp = pv_page_freelist.tqh_first;
+ if (--pvp->pvp_pgi.pgi_nfree == 0) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp,
+ pvp_pgi.pgi_list);
+ }
+ npv = pvp->pvp_pgi.pgi_freelist;
+#ifdef DIAGNOSTIC
+ if (npv == 0)
+ panic("pmap_collect_pv: pgi_nfree inconsistent");
+#endif
+ pvp->pvp_pgi.pgi_freelist = npv->pv_next;
+ *npv = *pv;
+ ppv->pv_next = npv;
+ ppv = npv;
+ } else
+ ppv = pv;
+ }
+ splx(s);
+ }
+
+ for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
+ npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
+ }
+}
+
+/*
+ * pmap_map:
+ *
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ *
+ * Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
+ */
+vaddr_t
+pmap_map(va, spa, epa, prot)
+ vaddr_t va;
+ paddr_t spa, epa;
+ int prot;
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
+
+ while (spa < epa) {
+ pmap_enter(pmap_kernel(), va, spa, prot, 0);
+ va += NBPG;
+ spa += NBPG;
+ }
+ return (va);
+}
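+
+/*
+ * Illustrative sketch (not compiled): a pmap_map() call covering a
+ * hypothetical 4-page device register range; both addresses are made
+ * up.  The return value is the first VA past the new mapping.
+ */
+#if 0
+	vaddr_t va;
+
+	va = pmap_map(virtual_avail, 0x50f00000, 0x50f00000 + 4 * NBPG,
+	    VM_PROT_READ | VM_PROT_WRITE);
+#endif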
+
+/*
+ * pmap_create: [ INTERFACE ]
+ *
+ * Create and return a physical map.
+ *
+ * Note: no locking is necessary in this function.
+ */
+struct pmap *
+pmap_create(void)
+{
+ struct pmap *pmap;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
+ ("pmap_create(%lx)\n", size));
+
+ pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
+
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * pmap_pinit:
+ *
+ * Initialize a preallocated and zeroed pmap structure.
+ *
+ * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
+ */
+void
+pmap_pinit(pmap)
+ struct pmap *pmap;
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
+ ("pmap_pinit(%p)\n", pmap));
+
+ /*
+ * No need to allocate page table space yet but we do need a
+ * valid segment table. Initially, we point everyone at the
+ * "null" segment table. On the first pmap_enter, a real
+ * segment table will be allocated.
+ */
+ pmap->pm_stab = Segtabzero;
+ pmap->pm_stpa = Segtabzeropa;
+#if defined(M68040)
+ if (mmutype == MMU_68040)
+ pmap->pm_stfree = protostfree;
+#endif
+ pmap->pm_count = 1;
+ simple_lock_init(&pmap->pm_lock);
+}
+
+/*
+ * pmap_destroy: [ INTERFACE ]
+ *
+ * Drop the reference count on the specified pmap, releasing
+ * all resources if the reference count drops to zero.
+ */
+void
+pmap_destroy(pmap)
+ pmap_t pmap;
+{
+ int count;
+
+ if (pmap == NULL)
+ return;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
+
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ pool_put(&pmap_pmap_pool, pmap);
+ }
+}
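+
+/*
+ * Illustrative sketch (not compiled): the reference-count life cycle
+ * implemented by pmap_create()/pmap_reference()/pmap_destroy().
+ */
+#if 0
+	struct pmap *pm;
+
+	pm = pmap_create();	/* count 1, shares Segtabzero */
+	pmap_reference(pm);	/* count 2 */
+	pmap_destroy(pm);	/* count 1, pmap still alive */
+	pmap_destroy(pm);	/* count 0: pmap_release() + pool_put() */
+#endif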
+
+/*
+ * pmap_release:
+ *
+ * Release the resources held by a pmap.
+ *
+ * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
+ */
+void
+pmap_release(pmap)
+ struct pmap *pmap;
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
+
+#ifdef notdef /* DIAGNOSTIC */
+ /* count would be 0 from pmap_destroy... */
+ simple_lock(&pmap->pm_lock);
+ if (pmap->pm_count != 1)
+ panic("pmap_release count");
+#endif
+
+ if (pmap->pm_ptab)
+ uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
+ MAC_MAX_PTSIZE);
+ if (pmap->pm_stab != Segtabzero)
+ uvm_km_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
+ MAC_STSIZE);
+}
+
+/*
+ * pmap_reference: [ INTERFACE ]
+ *
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+
+ if (pmap == NULL)
+ return;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
+
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+}
+
+/*
+ * pmap_activate: [ INTERFACE ]
+ *
+ * Activate the pmap used by the specified process. This includes
+ * reloading the MMU context if it is the current process, and
+ * marking the pmap in use by the processor.
+ *
+ * Note: we may only use spin locks here, since we are called
+ * by a critical section in cpu_switch()!
+ */
+void
+pmap_activate(p)
+ struct proc *p;
+{
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
+ ("pmap_activate(%p)\n", p));
+
+ PMAP_ACTIVATE(pmap, p == curproc);
+}
+
+/*
+ * pmap_deactivate: [ INTERFACE ]
+ *
+ * Mark that the pmap used by the specified process is no longer
+ * in use by the processor.
+ *
+ * The comment above pmap_activate() wrt. locking applies here,
+ * as well.
+ */
+void
+pmap_deactivate(p)
+ struct proc *p;
+{
+
+ /* No action necessary in this pmap implementation. */
+}
+
+/*
+ * pmap_remove: [ INTERFACE ]
+ *
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+{
+ vaddr_t nssva;
+ pt_entry_t *pte;
+ boolean_t firstpage, needcflush;
+ int flags;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
+
+ if (pmap == NULL)
+ return;
+
+ firstpage = TRUE;
+ needcflush = FALSE;
+ flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+ while (sva < eva) {
+ nssva = mac68k_trunc_seg(sva) + MAC_SEG_SIZE;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!pmap_ste_v(pmap, sva)) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Invalidate every valid mapping within this segment.
+ */
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (pmap_pte_v(pte)) {
+ pmap_remove_mapping(pmap, sva, pte, flags);
+ firstpage = FALSE;
+ }
+ pte++;
+ sva += NBPG;
+ }
+ }
+ /*
+ * Didn't do anything, no need for cache flushes
+ */
+ if (firstpage)
+ return;
+}
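+
+/*
+ * Illustrative sketch (not compiled): the segment-at-a-time walk used
+ * by pmap_remove() above and pmap_protect() below.  One invalid STE
+ * lets us skip a whole segment instead of testing every PTE in it.
+ */
+#if 0
+	while (sva < eva) {
+		nssva = mac68k_trunc_seg(sva) + MAC_SEG_SIZE;
+		if (nssva == 0 || nssva > eva)	/* wrapped or past end */
+			nssva = eva;
+		if (!pmap_ste_v(pmap, sva)) {	/* unallocated segment */
+			sva = nssva;
+			continue;
+		}
+		/* ... per-page work on [sva, nssva) ... */
+	}
+#endif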
+
+/*
+ * pmap_page_protect: [ INTERFACE ]
+ *
+ * Lower the permission for all mappings to a given page to
+ * the permissions specified.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+ paddr_t pa;
+ struct pv_entry *pv;
+ int s;
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+ (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ printf("pmap_page_protect(%lx, %x)\n", pa, prot);
+#endif
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+
+ switch (prot) {
+ case VM_PROT_READ|VM_PROT_WRITE:
+ case VM_PROT_ALL:
+ return;
+ /* copy_on_write */
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_changebit(pa, PG_RO, ~0);
+ return;
+ /* remove_all */
+ default:
+ break;
+ }
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ while (pv->pv_pmap != NULL) {
+ pt_entry_t *pte;
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+#ifdef DEBUG
+ if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
+ pmap_pte_pa(pte) != pa)
+ panic("pmap_page_protect: bad mapping");
+#endif
+ if (!pmap_pte_w(pte))
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ pte, PRM_TFLUSH|PRM_CFLUSH);
+ else {
+ pv = pv->pv_next;
+#ifdef DEBUG
+ if (pmapdebug & PDB_PARANOIA)
+ printf("%s wired mapping for %lx not removed\n",
+ "pmap_page_protect:", pa);
+#endif
+ if (pv == NULL)
+ break;
+ }
+ }
+ splx(s);
+}
+
+/*
+ * pmap_protect: [ INTERFACE ]
+ *
+ * Set the physical protection on the specified range of this map
+ * as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
+{
+ vaddr_t nssva;
+ pt_entry_t *pte;
+ boolean_t firstpage, needtflush;
+ int isro;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
+ ("pmap_protect(%p, %lx, %lx, %x)\n",
+ pmap, sva, eva, prot));
+
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ isro = pte_prot(pmap, prot);
+ needtflush = active_pmap(pmap);
+ firstpage = TRUE;
+ while (sva < eva) {
+ nssva = mac68k_trunc_seg(sva) + MAC_SEG_SIZE;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!pmap_ste_v(pmap, sva)) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Change protection on mapping if it is valid and doesn't
+ * already have the correct protection.
+ */
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
+#if defined(M68040)
+ /*
+ * Clear caches if making RO (see section
+ * "7.3 Cache Coherency" in the manual).
+ */
+ if (isro && mmutype == MMU_68040) {
+ paddr_t pa = pmap_pte_pa(pte);
+
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ pmap_pte_set_prot(pte, isro);
+ if (needtflush)
+ TBIS(sva);
+ firstpage = FALSE;
+ }
+ pte++;
+ sva += NBPG;
+ }
+ }
+}
+
+void
+mac68k_set_pte(va, pge)
+ vm_offset_t va;
+ vm_offset_t pge;
+{
+	extern vm_offset_t tmp_vpages[];
+ register pt_entry_t *pte;
+
+ if (va != tmp_vpages[0])
+ return;
+
+ pte = pmap_pte(pmap_kernel(), va);
+ *pte = (pt_entry_t) pge;
+}
+
+/*
+ * pmap_enter: [ INTERFACE ]
+ *
+ * Insert the given physical page (pa) at
+ * the specified virtual address (va) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte cannot be reclaimed.
+ *
+ * Note: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+int
+pmap_enter(pmap, va, pa, prot, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int flags;
+{
+ pt_entry_t *pte;
+ int npte;
+ paddr_t opa;
+ boolean_t cacheable = TRUE;
+ boolean_t checkpv = TRUE;
+ boolean_t wired = (flags & PMAP_WIRED) != 0;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
+ pmap, va, pa, prot, wired));
+
+#ifdef DIAGNOSTIC
+ /*
+ * pmap_enter() should never be used for CADDR1 and CADDR2.
+ */
+ if (pmap == pmap_kernel() &&
+ (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
+ panic("pmap_enter: used for CADDR1 or CADDR2");
+#endif
+
+ /*
+ * For user mapping, allocate kernel VM resources if necessary.
+ */
+ if (pmap->pm_ptab == NULL)
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, MAC_MAX_PTSIZE);
+
+ /*
+ * Segment table entry not valid, we need a new PT page
+ */
+ if (!pmap_ste_v(pmap, va))
+ pmap_enter_ptpage(pmap, va);
+
+ pa = m68k_trunc_page(pa);
+ pte = pmap_pte(pmap, va);
+ opa = pmap_pte_pa(pte);
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: wiring change -> %x\n", wired));
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+ }
+ /*
+ * Retain cache inhibition status
+ */
+ checkpv = FALSE;
+ if (pmap_pte_ci(pte))
+ cacheable = FALSE;
+ goto validate;
+ }
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: removing old mapping %lx\n", va));
+ pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
+ }
+
+ /*
+ * If this is a new user mapping, increment the wiring count
+ * on this PT page. PT pages are wired down as long as there
+ * is a valid mapping in the page.
+ */
+ if (pmap != pmap_kernel())
+ (void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
+ round_page((vaddr_t)(pte+1)), FALSE, FALSE);
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+ if (PAGE_IS_MANAGED(pa)) {
+ struct pv_entry *pv, *npv;
+ int s;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: pv at %p: %lx/%p/%p\n",
+ pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ pv->pv_ptste = NULL;
+ pv->pv_ptpmap = NULL;
+ pv->pv_flags = 0;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+#ifdef DEBUG
+ for (npv = pv; npv; npv = npv->pv_next)
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ panic("pmap_enter: already in pv_tab");
+#endif
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ npv->pv_ptste = NULL;
+ npv->pv_ptpmap = NULL;
+ npv->pv_flags = 0;
+ pv->pv_next = npv;
+ }
+
+ /*
+ * Speed pmap_is_referenced() or pmap_is_modified() based
+ * on the hint provided in access_type.
+ */
+#ifdef DIAGNOSTIC
+ if ((flags & VM_PROT_ALL) & ~prot)
+ panic("pmap_enter: access_type exceeds prot");
+#endif
+ if (flags & VM_PROT_WRITE)
+ *pa_to_attribute(pa) |= (PG_U|PG_M);
+ else if (flags & VM_PROT_ALL)
+ *pa_to_attribute(pa) |= PG_U;
+ splx(s);
+ }
+ /*
+ * Assumption: if it is not part of our managed memory
+ * then it must be device memory which may be volatile.
+ */
+ else if (pmap_initialized) {
+ checkpv = cacheable = FALSE;
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Build the new PTE.
+ */
+ npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
+ if (wired)
+ npte |= PG_W;
+
+	/* Don't cache W+X pages if the process can't take it (e.g. SunOS binaries). */
+ if (mmutype == MMU_68040 && pmap != pmap_kernel() &&
+ (curproc->p_md.md_flags & MDP_UNCACHE_WX) &&
+ (prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE))
+ checkpv = cacheable = FALSE;
+
+ if (!checkpv && !cacheable)
+ npte |= PG_CI;
+#if defined(M68040)
+ if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
+#ifdef DEBUG
+ if (dowriteback && (dokwriteback || pmap != pmap_kernel()))
+#endif
+ npte |= PG_CCB;
+#endif
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
+
+ /*
+ * Remember if this was a wiring-only change.
+ * If so, we need not flush the TLB and caches.
+ */
+ wired = ((*pte ^ npte) == PG_W);
+#if defined(M68040)
+ if (mmutype == MMU_68040 && !wired) {
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
+ if (!wired && active_pmap(pmap))
+ TBIS(va);
+#ifdef DEBUG
+ if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
+ pmap_check_wiring("enter", trunc_page((vaddr_t)pmap_pte(pmap, va)));
+#endif
+
+ return (0);
+}
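+
+/*
+ * Illustrative sketch (not compiled): entering a wired kernel mapping
+ * through pmap_enter(); compare pmap_kenter_pa() at the end of this
+ * file.
+ */
+#if 0
+	if (pmap_enter(pmap_kernel(), va, pa,
+	    VM_PROT_READ | VM_PROT_WRITE,
+	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED) != 0)
+		panic("example: pmap_enter failed");
+#endif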
+
+/*
+ * pmap_unwire: [ INTERFACE ]
+ *
+ * Change the wiring attribute for a map/virtual-address pair.
+ *
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_unwire(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
+{
+ pt_entry_t *pte;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_unwire(%p, %lx)\n", pmap, va));
+
+ if (pmap == NULL)
+ return;
+
+ pte = pmap_pte(pmap, va);
+#ifdef DEBUG
+ /*
+ * Page table page is not allocated.
+ * Should this ever happen? Ignore it for now,
+ * we don't want to force allocation of unnecessary PTE pages.
+ */
+ if (!pmap_ste_v(pmap, va)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid STE for %lx\n", va);
+ return;
+ }
+ /*
+ * Page not valid. Should this ever happen?
+ * Just continue and change wiring anyway.
+ */
+ if (!pmap_pte_v(pte)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid PTE for %lx\n", va);
+ }
+#endif
+ /*
+	 * If the wiring actually changed (always?), clear the wired bit
+	 * and update the wire count.  Note that wiring is not a hardware
+ * characteristic so there is no need to invalidate the TLB.
+ */
+ if (pmap_pte_w_chg(pte, 0)) {
+ pmap_pte_set_w(pte, 0);
+ pmap->pm_stats.wired_count--;
+ }
+}
+
+/*
+ * pmap_extract: [ INTERFACE ]
+ *
+ * Extract the physical address associated with the given
+ * pmap/virtual address pair.
+ */
+boolean_t
+pmap_extract(pmap, va, pap)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t *pap;
+{
+ paddr_t pa;
+
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_extract(%p, %lx) -> ", pmap, va));
+
+ if (pmap && pmap_ste_v(pmap, va))
+ pa = *pmap_pte(pmap, va);
+ else
+ return (FALSE);
+
+ pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("%lx\n", pa));
+
+ *pap = pa;
+ return (TRUE);
+}
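+
+/*
+ * Illustrative sketch (not compiled): looking up the physical address
+ * behind a kernel virtual address with pmap_extract().
+ */
+#if 0
+	paddr_t pa;
+
+	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
+		printf("no mapping at va %lx\n", va);
+#endif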
+
+/*
+ * pmap_copy: [ INTERFACE ]
+ *
+ * Copy the mapping range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vaddr_t dst_addr;
+ vsize_t len;
+ vaddr_t src_addr;
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
+ dst_pmap, src_pmap, dst_addr, len, src_addr));
+}
+
+/*
+ * pmap_collect: [ INTERFACE ]
+ *
+ * Garbage collects the physical map system for pages which are no
+ * longer used. Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but others may be
+ * collected.
+ *
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(pmap)
+ pmap_t pmap;
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
+
+ if (pmap == pmap_kernel()) {
+ int bank, s;
+
+ /*
+ * XXX This is very bogus. We should handle kernel PT
+ * XXX pages much differently.
+ */
+
+ s = splimp();
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+ splx(s);
+ } else {
+ /*
+ * This process is about to be swapped out; free all of
+ * the PT pages by removing the physical mappings for its
+ * entire address space. Note: pmap_remove() performs
+ * all necessary locking.
+ */
+ pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+ }
+
+#ifdef notyet
+ /* Go compact and garbage-collect the pv_table. */
+ pmap_collect_pv();
+#endif
+}
+
+/*
+ * pmap_collect1:
+ *
+ * Garbage-collect KPT pages. Helper for the above (bogus)
+ * pmap_collect().
+ *
+ * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
+ * WAY OF HANDLING PT PAGES!
+ */
+void
+pmap_collect1(pmap, startpa, endpa)
+ pmap_t pmap;
+ paddr_t startpa, endpa;
+{
+ paddr_t pa;
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ paddr_t kpa;
+#ifdef DEBUG
+ st_entry_t *ste;
+ int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
+#endif
+
+ for (pa = startpa; pa < endpa; pa += NBPG) {
+ struct kpt_page *kpt, **pkpt;
+
+ /*
+ * Locate physical pages which are being used as kernel
+ * page table pages.
+ */
+ pv = pa_to_pvh(pa);
+ if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
+ continue;
+ do {
+ if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
+ break;
+ } while ((pv = pv->pv_next));
+ if (pv == NULL)
+ continue;
+#ifdef DEBUG
+ if (pv->pv_va < (vaddr_t)Sysmap ||
+ pv->pv_va >= (vaddr_t)Sysmap + MAC_MAX_PTSIZE)
+ printf("collect: kernel PT VA out of range\n");
+ else
+ goto ok;
+ pmap_pvdump(pa);
+ continue;
+ok:
+#endif
+ pte = (pt_entry_t *)(pv->pv_va + NBPG);
+ while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
+ ;
+ if (pte >= (pt_entry_t *)pv->pv_va)
+ continue;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
+ printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
+ pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
+ opmapdebug = pmapdebug;
+ pmapdebug |= PDB_PTPAGE;
+ }
+
+ ste = pv->pv_ptste;
+#endif
+ /*
+ * If all entries were invalid we can remove the page.
+	 * We call pmap_remove_mapping to take care of invalidating
+ * ST and Sysptmap entries.
+ */
+ pmap_extract(pmap, pv->pv_va, &kpa);
+ pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
+ PRM_TFLUSH|PRM_CFLUSH);
+ /*
+ * Use the physical address to locate the original
+ * (kmem_alloc assigned) address for the page and put
+ * that page back on the free list.
+ */
+ for (pkpt = &kpt_used_list, kpt = *pkpt;
+ kpt != (struct kpt_page *)0;
+ pkpt = &kpt->kpt_next, kpt = *pkpt)
+ if (kpt->kpt_pa == kpa)
+ break;
+#ifdef DEBUG
+ if (kpt == (struct kpt_page *)0)
+ panic("pmap_collect: lost a KPT page");
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ printf("collect: %lx (%lx) to free list\n",
+ kpt->kpt_va, kpa);
+#endif
+ *pkpt = kpt->kpt_next;
+ kpt->kpt_next = kpt_free_list;
+ kpt_free_list = kpt;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ pmapdebug = opmapdebug;
+
+ if (*ste != SG_NV)
+ printf("collect: kernel STE at %p still valid (%x)\n",
+ ste, *ste);
+ ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
+ if (*ste != SG_NV)
+ printf("collect: kernel PTmap at %p still valid (%x)\n",
+ ste, *ste);
+#endif
+ }
+}
+
+/*
+ * pmap_zero_page: [ INTERFACE ]
+ *
+ * Zero the specified (machine independent) page by mapping the page
+ * into virtual memory and using zeropage() to clear its contents, one
+ * machine dependent page at a time.
+ *
+ * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
+ * (Actually, we go to splimp(), and since we don't
+ * support multiple processors, this is sufficient.)
+ */
+void
+pmap_zero_page(phys)
+ paddr_t phys;
+{
+ int s, npte;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
+
+ npte = phys | PG_V;
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ /*
+ * Set copyback caching on the page; this is required
+ * for cache consistency (since regular mappings are
+ * copyback as well).
+ */
+ npte |= PG_CCB;
+ }
+#endif
+
+ s = splimp();
+
+ *caddr1_pte = npte;
+ TBIS((vaddr_t)CADDR1);
+
+ zeropage(CADDR1);
+
+#ifdef DEBUG
+ *caddr1_pte = PG_NV;
+ TBIS((vaddr_t)CADDR1);
+#endif
+
+ splx(s);
+}
+
+/*
+ * pmap_copy_page: [ INTERFACE ]
+ *
+ * Copy the specified (machine independent) page by mapping the page
+ * into virtual memory and using copypage() to copy it, one machine
+ * dependent page at a time.
+ *
+ * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
+ * (Actually, we go to splimp(), and since we don't
+ * support multiple processors, this is sufficient.)
+ */
+void
+pmap_copy_page(src, dst)
+ paddr_t src, dst;
+{
+ int s, npte1, npte2;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
+
+ npte1 = src | PG_RO | PG_V;
+ npte2 = dst | PG_V;
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ /*
+ * Set copyback caching on the pages; this is required
+ * for cache consistency (since regular mappings are
+ * copyback as well).
+ */
+ npte1 |= PG_CCB;
+ npte2 |= PG_CCB;
+ }
+#endif
+
+ s = splimp();
+
+ *caddr1_pte = npte1;
+ TBIS((vaddr_t)CADDR1);
+
+ *caddr2_pte = npte2;
+ TBIS((vaddr_t)CADDR2);
+
+ copypage(CADDR1, CADDR2);
+
+#ifdef DEBUG
+ *caddr1_pte = PG_NV;
+ TBIS((vaddr_t)CADDR1);
+
+ *caddr2_pte = PG_NV;
+ TBIS((vaddr_t)CADDR2);
+#endif
+
+ splx(s);
+}
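+
+/*
+ * Illustrative sketch (not compiled): the temporary-window technique
+ * used by pmap_zero_page() and pmap_copy_page() above, reduced to its
+ * skeleton.
+ */
+#if 0
+	int s;
+
+	s = splimp();			/* uniprocessor: blocking interrupts is the only lock */
+	*caddr1_pte = pa | PG_V;	/* aim the window at the frame */
+	TBIS((vaddr_t)CADDR1);		/* flush the stale TLB entry */
+	/* ... operate on the page through CADDR1 ... */
+	splx(s);
+#endif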
+
+/*
+ * pmap_clear_modify: [ INTERFACE ]
+ *
+ * Clear the modify bits on the specified physical page.
+ */
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_modified(pg);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
+
+ pmap_changebit(pa, 0, ~PG_M);
+
+ return (ret);
+}
+
+/*
+ * pmap_clear_reference: [ INTERFACE ]
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_referenced(pg);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa));
+
+ pmap_changebit(pa, 0, ~PG_U);
+
+ return (ret);
+}
+
+/*
+ * pmap_is_referenced: [ INTERFACE ]
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+boolean_t
+pmap_is_referenced(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_U);
+ printf("pmap_is_referenced(%lx) -> %c\n", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_U));
+}
+
+/*
+ * pmap_is_modified: [ INTERFACE ]
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+boolean_t
+pmap_is_modified(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_M);
+ printf("pmap_is_modified(%lx) -> %c\n", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_M));
+}
+
+/*
+ * pmap_phys_address: [ INTERFACE ]
+ *
+ * Return the physical address corresponding to the specified
+ * cookie. Used by the device pager to decode a device driver's
+ * mmap entry point return value.
+ *
+ * Note: no locking is necessary in this function.
+ */
+paddr_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return(m68k_ptob(ppn));
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/*
+ * pmap_remove_mapping:
+ *
+ * Invalidate a single page denoted by pmap/va.
+ *
+ * If (pte != NULL), it is the already computed PTE for the page.
+ *
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ *
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache
+ * information.
+ */
+/* static */
+void
+pmap_remove_mapping(pmap, va, pte, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ pt_entry_t *pte;
+ int flags;
+{
+ paddr_t pa;
+ struct pv_entry *pv, *npv;
+ pmap_t ptpmap;
+ st_entry_t *ste;
+ int s, bits;
+#ifdef DEBUG
+ pt_entry_t opte;
+#endif
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
+ pmap, va, pte, flags));
+
+ /*
+ * PTE not provided, compute it from pmap and va.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ pte = pmap_pte(pmap, va);
+ if (*pte == PG_NV)
+ return;
+ }
+
+ pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ opte = *pte;
+#endif
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTE after saving the reference modify info.
+ */
+ PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
+ bits = *pte & (PG_U|PG_M);
+ *pte = PG_NV;
+ if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+ TBIS(va);
+ /*
+ * For user mappings decrement the wiring count on
+ * the PT page. We do this after the PTE has been
+	 * invalidated because uvm_map_pageable() winds up
+	 * clearing the modify bit for the
+ * PT page.
+ */
+ if (pmap != pmap_kernel()) {
+ (void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
+ round_page((vaddr_t)(pte+1)), TRUE, FALSE);
+#ifdef DEBUG
+ if (pmapdebug & PDB_WIRING)
+ pmap_check_wiring("remove", (vaddr_t)trunc_page(pte));
+#endif
+ }
+ /*
+ * If this isn't a managed page, we are all done.
+ */
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+ /*
+ * Otherwise remove it from the PV table
+ * (raise IPL since we may be called at interrupt time).
+ */
+ pv = pa_to_pvh(pa);
+ ste = ST_ENTRY_NULL;
+ s = splimp();
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ ste = pv->pv_ptste;
+ ptpmap = pv->pv_ptpmap;
+ npv = pv->pv_next;
+ if (npv) {
+ npv->pv_flags = pv->pv_flags;
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else
+ pv->pv_pmap = NULL;
+ } else {
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ break;
+ pv = npv;
+ }
+#ifdef DEBUG
+ if (npv == NULL)
+ panic("pmap_remove: PA not in pv_tab");
+#endif
+ ste = npv->pv_ptste;
+ ptpmap = npv->pv_ptpmap;
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ pv = pa_to_pvh(pa);
+ }
+
+ /*
+ * If this was a PT page we must also remove the
+ * mapping from the associated segment table.
+ */
+ if (ste) {
+ PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
+ ("remove: ste was %x@%p pte was %x@%p\n",
+ *ste, ste, opte, pmap_pte(pmap, va)));
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
+ st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
+
+ while (ste < este)
+ *ste++ = SG_NV;
+#ifdef DEBUG
+ ste -= NPTEPG/SG4_LEV3SIZE;
+#endif
+ } else
+#endif
+ *ste = SG_NV;
+ /*
+ * If it was a user PT page, we decrement the
+ * reference count on the segment table as well,
+ * freeing it if it is now empty.
+ */
+ if (ptpmap != pmap_kernel()) {
+ PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
+ ("remove: stab %p, refcnt %d\n",
+ ptpmap->pm_stab, ptpmap->pm_sref - 1));
+#ifdef DEBUG
+ if ((pmapdebug & PDB_PARANOIA) &&
+ ptpmap->pm_stab != (st_entry_t *)trunc_page((vaddr_t)ste))
+ panic("remove: bogus ste");
+#endif
+ if (--(ptpmap->pm_sref) == 0) {
+ PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
+ ("remove: free stab %p\n",
+ ptpmap->pm_stab));
+ uvm_km_free_wakeup(st_map,
+ (vaddr_t)ptpmap->pm_stab, MAC_STSIZE);
+ ptpmap->pm_stab = Segtabzero;
+ ptpmap->pm_stpa = Segtabzeropa;
+#if defined(M68040)
+ if (mmutype == MMU_68040)
+ ptpmap->pm_stfree = protostfree;
+#endif
+ /*
+ * XXX may have changed segment table
+ * pointer for current process so
+ * update now to reload hardware.
+ */
+ if (active_user_pmap(ptpmap))
+ PMAP_ACTIVATE(ptpmap, 1);
+ }
+#ifdef DEBUG
+ else if (ptpmap->pm_sref < 0)
+ panic("remove: sref < 0");
+#endif
+ }
+#if 0
+ /*
+ * XXX this should be unnecessary as we have been
+ * flushing individual mappings as we go.
+ */
+ if (ptpmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ pv->pv_flags &= ~PV_PTPAGE;
+ ptpmap->pm_ptpages--;
+ }
+ /*
+ * Update saved attributes for managed page
+ */
+ *pa_to_attribute(pa) |= bits;
+ splx(s);
+}
+
+/*
+ * pmap_testbit:
+ *
+ * Test the modified/referenced bits of a physical page.
+ */
+/* static */
+boolean_t
+pmap_testbit(pa, bit)
+ paddr_t pa;
+ int bit;
+{
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ int s;
+
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return (FALSE);
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Check saved info first
+ */
+ if (*pa_to_attribute(pa) & bit) {
+ splx(s);
+ return(TRUE);
+ }
+
+ /*
+ * Not found. Check current mappings, returning immediately if
+ * found. Cache a hit to speed future lookups.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ if (*pte & bit) {
+ *pa_to_attribute(pa) |= bit;
+ splx(s);
+ return(TRUE);
+ }
+ }
+ }
+ splx(s);
+ return(FALSE);
+}
+
+/*
+ * pmap_changebit:
+ *
+ * Change the modified/referenced bits, or other PTE bits,
+ * for a physical page.
+ */
+/* static */
+void
+pmap_changebit(pa, set, mask)
+ paddr_t pa;
+ int set, mask;
+{
+ struct pv_entry *pv;
+ pt_entry_t *pte, npte;
+ vaddr_t va;
+ int s;
+#if defined(M68040)
+ boolean_t firstpage = TRUE;
+#endif
+
+ PMAP_DPRINTF(PDB_BITS,
+ ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
+
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+
+ /*
+ * Clear saved attributes (modify, reference)
+ */
+ *pa_to_attribute(pa) &= mask;
+
+ /*
+	 * Loop over all current mappings, setting/clearing as appropriate.
+ * If setting RO do we need to clear the VAC?
+ */
+ if (pv->pv_pmap != NULL) {
+#ifdef DEBUG
+ int toflush = 0;
+#endif
+ for (; pv; pv = pv->pv_next) {
+#ifdef DEBUG
+ toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
+#endif
+ va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (set == PG_RO) {
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+ }
+
+ pte = pmap_pte(pv->pv_pmap, va);
+ npte = (*pte | set) & mask;
+ if (*pte != npte) {
+#if defined(M68040)
+ /*
+ * If we are changing caching status or
+ * protection make sure the caches are
+ * flushed (but only once).
+ */
+ if (firstpage && (mmutype == MMU_68040) &&
+ ((set == PG_RO) ||
+ (set & PG_CMASK) ||
+ (mask & PG_CMASK) == 0)) {
+ firstpage = FALSE;
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
+ if (active_pmap(pv->pv_pmap))
+ TBIS(va);
+ }
+ }
+ }
+ splx(s);
+}
+
+/*
+ * pmap_enter_ptpage:
+ *
+ * Allocate and map a PT page for the specified pmap/va pair.
+ */
+/* static */
+void
+pmap_enter_ptpage(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
+{
+ paddr_t ptpa;
+ struct pv_entry *pv;
+ st_entry_t *ste;
+ int s;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
+ ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
+
+ /*
+ * Allocate a segment table if necessary. Note that it is allocated
+ * from a private map and not pt_map. This keeps user page tables
+ * aligned on segment boundaries in the kernel address space.
+ * The segment table is wired down. It will be freed whenever the
+ * reference count drops to zero.
+ */
+ if (pmap->pm_stab == Segtabzero) {
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(st_map, MAC_STSIZE);
+ pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
+ (paddr_t *)&pmap->pm_stpa);
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
+#ifdef DEBUG
+ if (dowriteback && dokwriteback)
+#endif
+ pmap_changebit((paddr_t)pmap->pm_stpa, 0, ~PG_CCB);
+ pmap->pm_stfree = protostfree;
+ }
+#endif
+ /*
+ * XXX may have changed segment table pointer for current
+ * process so update now to reload hardware.
+ */
+ if (active_user_pmap(pmap))
+ PMAP_ACTIVATE(pmap, 1);
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: pmap %p stab %p(%p)\n",
+ pmap, pmap->pm_stab, pmap->pm_stpa));
+ }
+
+ ste = pmap_ste(pmap, va);
+#if defined(M68040)
+ /*
+ * Allocate level 2 descriptor block if necessary
+ */
+ if (mmutype == MMU_68040) {
+ if (*ste == SG_NV) {
+ int ix;
+ caddr_t addr;
+
+ ix = bmtol2(pmap->pm_stfree);
+ if (ix == -1)
+ panic("enter: out of address space"); /* XXX */
+ pmap->pm_stfree &= ~l2tobm(ix);
+ addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
+ bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
+ addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
+ *ste = (u_int)addr | SG_RW | SG_U | SG_V;
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: alloc ste2 %d(%p)\n", ix, addr));
+ }
+ ste = pmap_ste2(pmap, va);
+ /*
+ * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
+ * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
+ * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
+ * PT page--the unit of allocation. We set `ste' to point
+ * to the first entry of that chunk which is validated in its
+ * entirety below.
+ */
+ ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
+ }
+#endif
+ va = trunc_page((vaddr_t)pmap_pte(pmap, va));
+
+ /*
+ * In the kernel we allocate a page from the kernel PT page
+ * free list and map it into the kernel page table map (via
+ * pmap_enter).
+ */
+ if (pmap == pmap_kernel()) {
+ struct kpt_page *kpt;
+
+ s = splimp();
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
+ /*
+ * No PT pages available.
+ * Try once to free up unused ones.
+ */
+ PMAP_DPRINTF(PDB_COLLECT,
+ ("enter: no KPT pages, collecting...\n"));
+ pmap_collect(pmap_kernel());
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0)
+ panic("pmap_enter_ptpage: can't get KPT page");
+ }
+ kpt_free_list = kpt->kpt_next;
+ kpt->kpt_next = kpt_used_list;
+ kpt_used_list = kpt;
+ ptpa = kpt->kpt_pa;
+ bzero((caddr_t)kpt->kpt_va, NBPG);
+ pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT,
+ VM_PROT_DEFAULT|PMAP_WIRED);
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
+ int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
+
+ printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
+ ix, Sysptmap[ix], kpt->kpt_va);
+ }
+#endif
+ splx(s);
+ }
+ /*
+ * For user processes we just simulate a fault on that location
+ * letting the VM system allocate a zero-filled page.
+ */
+ else {
+ /*
+ * Count the segment table reference now so that we won't
+ * lose the segment table when low on memory.
+ */
+ pmap->pm_sref++;
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
+ ("enter: about to fault UPT pg at %lx\n", va));
+ s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE);
+ if (s) {
+ printf("uvm_fault(pt_map, 0x%lx, 0, RW) -> %d\n",
+ va, s);
+ panic("pmap_enter: uvm_fault failed");
+ }
+ pmap_extract(pmap_kernel(), va, &ptpa);
+ }
+#if defined(M68040)
+ /*
+ * Turn off copyback caching of page table pages,
+ * could get ugly otherwise.
+ */
+#ifdef DEBUG
+ if (dowriteback && dokwriteback)
+#endif
+ if (mmutype == MMU_68040) {
+#ifdef DEBUG
+ pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
+ if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
+ printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
+ pmap == pmap_kernel() ? "Kernel" : "User",
+ va, ptpa, pte, *pte);
+#endif
+ pmap_changebit(ptpa, 0, ~PG_CCB);
+ }
+#endif
+ /*
+ * Locate the PV entry in the kernel for this PT page and
+ * record the STE address. This is so that we can invalidate
+ * the STE when we remove the mapping for the page.
+ */
+ pv = pa_to_pvh(ptpa);
+ s = splimp();
+ if (pv) {
+ pv->pv_flags |= PV_PTPAGE;
+ do {
+ if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
+ break;
+ } while ((pv = pv->pv_next));
+ }
+#ifdef DEBUG
+ if (pv == NULL)
+ panic("pmap_enter_ptpage: PT page not entered");
+#endif
+ pv->pv_ptste = ste;
+ pv->pv_ptpmap = pmap;
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
+ ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
+
+ /*
+ * Map the new PT page into the segment table.
+ * Also increment the reference count on the segment table if this
+ * was a user page table page. Note that we don't use vm_map_pageable
+	 * to keep the count like we do for PT pages; this is mostly because
+ * it would be difficult to identify ST pages in pmap_pageable to
+ * release them. We also avoid the overhead of vm_map_pageable.
+ */
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
+ st_entry_t *este;
+
+ for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
+ *ste = ptpa | SG_U | SG_RW | SG_V;
+ ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
+ }
+ } else
+#endif
+ *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
+ if (pmap != pmap_kernel()) {
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: stab %p refcnt %d\n",
+ pmap->pm_stab, pmap->pm_sref));
+ }
+#if 0
+ /*
+ * Flush stale TLB info.
+ */
+ if (pmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ pmap->pm_ptpages++;
+ splx(s);
+}
+
+#ifdef DEBUG
+/*
+ * pmap_pvdump:
+ *
+ * Dump the contents of the PV list for the specified physical page.
+ */
+/* static */
+void
+pmap_pvdump(pa)
+ paddr_t pa;
+{
+ struct pv_entry *pv;
+
+ printf("pa %lx", pa);
+ for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
+ printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
+ pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
+ pv->pv_flags);
+ printf("\n");
+}
+
+/*
+ * pmap_check_wiring:
+ *
+ * Count the number of valid mappings in the specified PT page,
+ * and ensure that it is consistent with the number of wirings
+ * to that page that the VM system has.
+ */
+/* static */
+void
+pmap_check_wiring(str, va)
+ char *str;
+ vaddr_t va;
+{
+ struct vm_map_entry *entry;
+ int count;
+ pt_entry_t *pte;
+
+ va = trunc_page(va);
+ if (!pmap_ste_v(pmap_kernel(), va) ||
+ !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
+ return;
+
+ if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
+ printf("wired_check: entry for %lx not found\n", va);
+ return;
+ }
+ count = 0;
+ for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
+ if (*pte)
+ count++;
+ if (entry->wired_count != count)
+ printf("*%s*: %lx: w%d/a%d\n",
+ str, va, entry->wired_count, count);
+}
+#endif /* DEBUG */
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ pmap_enter(pmap_kernel(), va, pa, prot, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
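+
+/*
+ * Illustrative sketch (not compiled): pairing pmap_kenter_pa() with
+ * pmap_kremove() for a temporary kernel mapping.
+ */
+#if 0
+	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+	/* ... use the mapping at va ... */
+	pmap_kremove(va, PAGE_SIZE);
+#endif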
diff --git a/sys/arch/mac68k/mac68k/pmap_bootstrap.c b/sys/arch/mac68k/mac68k/pmap_bootstrap.c
index f3a45f84fca..54781633ec6 100644
--- a/sys/arch/mac68k/mac68k/pmap_bootstrap.c
+++ b/sys/arch/mac68k/mac68k/pmap_bootstrap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_bootstrap.c,v 1.17 2001/12/14 21:44:05 miod Exp $ */
+/* $OpenBSD: pmap_bootstrap.c,v 1.18 2001/12/20 19:02:28 miod Exp $ */
/* $NetBSD: pmap_bootstrap.c,v 1.50 1999/04/07 06:14:33 scottr Exp $ */
/*
@@ -71,6 +71,7 @@ extern paddr_t avail_start;
extern paddr_t avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
+extern int protection_codes[];
/*
* These are used to map the RAM:
@@ -484,6 +485,25 @@ pmap_bootstrap(nextpa, firstpa)
virtual_end = VM_MAX_KERNEL_ADDRESS;
/*
+ * Initialize protection array.
+ * XXX don't use a switch statement; it might produce an
+ * absolute "jmp" table.
+ */
+ {
+ int *kp;
+
+ kp = (int *) &protection_codes;
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ }
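+
+	/*
+	 * Illustrative note: pte_prot() in pmap.c simply indexes this
+	 * table, so e.g. pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE)
+	 * evaluates to PG_RW.
+	 */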
+
+ /*
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
@@ -622,22 +642,3 @@ bootstrap_mac68k(tc)
videoaddr = newvideoaddr;
}
-
-void
-pmap_init_md()
-{
- vaddr_t addr;
-
- /*
- * Mark as unavailable the regions which we have mapped in
- * pmap_bootstrap().
- */
- addr = (vaddr_t)IOBase;
- if (uvm_map(kernel_map, &addr,
- m68k_ptob(IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE),
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)))
- panic("pmap_init: bogons in the VM system!\n");
-}
diff --git a/sys/arch/mvme68k/conf/files.mvme68k b/sys/arch/mvme68k/conf/files.mvme68k
index b6b9c9f89ed..e8801595ab7 100644
--- a/sys/arch/mvme68k/conf/files.mvme68k
+++ b/sys/arch/mvme68k/conf/files.mvme68k
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mvme68k,v 1.20 2001/11/30 20:58:18 miod Exp $
+# $OpenBSD: files.mvme68k,v 1.21 2001/12/20 19:02:29 miod Exp $
# config file for mvme68k
@@ -115,7 +115,7 @@ file arch/mvme68k/mvme68k/disksubr.c
file arch/mvme68k/mvme68k/dkbad.c
file arch/mvme68k/mvme68k/machdep.c
file arch/mvme68k/mvme68k/mem.c
-file arch/m68k/m68k/pmap_motorola.c
+file arch/mvme68k/mvme68k/pmap.c
file arch/mvme68k/mvme68k/pmap_bootstrap.c
file arch/mvme68k/mvme68k/sys_machdep.c
file arch/mvme68k/mvme68k/trap.c
diff --git a/sys/arch/mvme68k/include/param.h b/sys/arch/mvme68k/include/param.h
index 55c75df112e..b3b64736ec6 100644
--- a/sys/arch/mvme68k/include/param.h
+++ b/sys/arch/mvme68k/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.15 2001/12/07 21:49:15 art Exp $ */
+/* $OpenBSD: param.h,v 1.16 2001/12/20 19:02:29 miod Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -57,6 +57,12 @@
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (PAGE_SIZE - 1)
+#define NPTEPG (NBPG/(sizeof (pt_entry_t)))
+
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+#define NBSEG (1 << SEGSHIFT) /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
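+
+/*
+ * A worked example of the constants above, assuming the usual NBPG of
+ * 4096 and a 4-byte pt_entry_t: NPTEPG == 1024, and one segment spans
+ * NBSEG == 1 << 22 == 4Mb, i.e. exactly NPTEPG pages, so a single page
+ * of PTEs maps a whole segment.
+ */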
+
#define KERNBASE 0x00000000 /* start of kernel virtual */
#define KERNTEXTOFF 0x00010000 /* start of kernel text */
@@ -86,4 +92,17 @@
#define DELAY(n) delay(n)
#endif
+#ifdef COMPAT_HPUX
+/*
+ * Constants/macros for HPUX multiple mapping of user address space.
+ * Pages in the first 256Mb are mapped in at every 256Mb segment.
+ */
+#define HPMMMASK 0xF0000000
+#define ISHPMMADDR(v) \
+ ((curproc->p_md.md_flags & MDP_HPUXMMAP) && \
+ ((unsigned)(v) & HPMMMASK) && \
+ ((unsigned)(v) & HPMMMASK) != HPMMMASK)
+#define HPMMBASEADDR(v) \
+ ((unsigned)(v) & ~HPMMMASK)
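+
+/*
+ * Worked example of the macros above (illustration only): for
+ * v == 0x30001000, (v & HPMMMASK) == 0x30000000, which is neither 0
+ * nor HPMMMASK, so ISHPMMADDR(v) holds whenever MDP_HPUXMMAP is set,
+ * and HPMMBASEADDR(v) == 0x00001000, the alias of v within the first
+ * 256Mb.
+ */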
+#endif
#endif /* _MACHINE_PARAM_H_ */
diff --git a/sys/arch/mvme68k/include/pmap.h b/sys/arch/mvme68k/include/pmap.h
index 309e6bdd51c..2e873a725f9 100644
--- a/sys/arch/mvme68k/include/pmap.h
+++ b/sys/arch/mvme68k/include/pmap.h
@@ -1,13 +1,155 @@
-/* $OpenBSD: pmap.h,v 1.9 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.10 2001/12/20 19:02:29 miod Exp $ */
-#ifndef _MACHINE_PMAP_H_
-#define _MACHINE_PMAP_H_
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.h 8.1 (Berkeley) 6/10/93
+ */
-#include <m68k/pmap_motorola.h>
+#ifndef _MVME68K_PMAP_H_
+#define _MVME68K_PMAP_H_
-#ifdef _KERNEL
-void pmap_init_md __P((void));
-#define PMAP_INIT_MD() pmap_init_md()
+#include <machine/pte.h>
+
+#if defined(M68040)
+#define M68K_SEG_SIZE (mmutype <= MMU_68040 ? 0x40000 : NBSEG)
+#else
+#define M68K_SEG_SIZE NBSEG
#endif
-#endif /* _MACHINE_PMAP_H_ */
+/*
+ * Pmap stuff
+ */
+struct pmap {
+ pt_entry_t *pm_ptab; /* KVA of page table */
+ st_entry_t *pm_stab; /* KVA of segment table */
+ int pm_stfree; /* 040: free lev2 blocks */
+ st_entry_t *pm_stpa; /* 040: ST phys addr */
+ short pm_sref; /* segment table ref count */
+ short pm_count; /* pmap reference count */
+ struct simplelock pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_ptpages; /* more stats: PT pages */
+};
+
+typedef struct pmap *pmap_t;
+
+/*
+ * On the 040 we keep track of which level 2 blocks are already in use
+ * with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
+ * (block 31). For convenience, the level 1 table is considered to be
+ * block 0.
+ *
+ * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
+ * for the kernel and users. 8 implies only the initial "segment table"
+ * page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
+ * physically contiguous pages for the ST in pmap.c!
+ */
+#define MAXKL2SIZE 32
+#define MAXUL2SIZE 8
+#define l2tobm(n) (1 << (n))
+#define bmtol2(n) (ffs(n) - 1)
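+
+#if 0
+/*
+ * Illustrative sketch only: how the pm_stfree bitmap described above
+ * can be used.  A free level 2 block is found via bmtol2() (i.e.
+ * ffs()) and claimed by clearing its bit; freeing a block sets its
+ * bit again.  example_st_alloc is a made-up name.
+ */
+static int
+example_st_alloc(pmap)
+	struct pmap *pmap;
+{
+	int blk;
+
+	if (pmap->pm_stfree == 0)
+		return (-1);			/* no level 2 blocks left */
+	blk = bmtol2(pmap->pm_stfree);		/* lowest set bit */
+	pmap->pm_stfree &= ~l2tobm(blk);	/* mark it in use */
+	return (blk);
+}
+#endif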
+
+/*
+ * Macros for speed
+ */
+#define PMAP_ACTIVATE(pmap, loadhw) \
+{ \
+ if ((loadhw)) \
+ loadustp(m68k_btop((vm_offset_t)(pmap)->pm_stpa)); \
+}
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry, the list is pv_table.
+ */
+struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vm_offset_t pv_va; /* virtual address for mapping */
+ st_entry_t *pv_ptste; /* non-zero if VA maps a PT page */
+ struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
+ int pv_flags; /* flags */
+};
+
+#define PV_CI 0x01 /* header: all entries are cache inhibited */
+#define PV_PTPAGE 0x02 /* header: entry maps a page table page */
+
+struct pv_page;
+
+struct pv_page_info {
+ TAILQ_ENTRY(pv_page) pgi_list;
+ struct pv_entry *pgi_freelist;
+ int pgi_nfree;
+};
+
+/*
+ * This is basically:
+ * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+ */
+#define NPVPPG 170
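+
+/*
+ * Sanity check of the value above, assuming NBPG == 4096 and 32-bit
+ * pointers: sizeof(struct pv_page_info) == 16 and
+ * sizeof(struct pv_entry) == 24, so (4096 - 16) / 24 == 170.
+ */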
+
+struct pv_page {
+ struct pv_page_info pvp_pgi;
+ struct pv_entry pvp_pv[NPVPPG];
+};
+
+#ifdef _KERNEL
+
+extern struct pmap kernel_pmap_store;
+
+#define pmap_kernel() (&kernel_pmap_store)
+#define active_pmap(pm) \
+ ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
+#define active_user_pmap(pm) \
+ (curproc && \
+ (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
+
+extern struct pv_entry *pv_table; /* array of entries, one per page */
+
+#define pmap_page_index(pa) atop(pa - vm_first_phys)
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+#define pmap_update(pmap) /* nothing */
+
+extern pt_entry_t *Sysmap;
+extern char *vmmap; /* map for mem, dumps, etc. */
+#endif /* _KERNEL */
+
+#endif /* !_MVME68K_PMAP_H_ */
diff --git a/sys/arch/mvme68k/include/pte.h b/sys/arch/mvme68k/include/pte.h
index bf2f20714e4..78e9deb44c8 100644
--- a/sys/arch/mvme68k/include/pte.h
+++ b/sys/arch/mvme68k/include/pte.h
@@ -1,14 +1,152 @@
-/* $OpenBSD: pte.h,v 1.5 2001/11/30 20:58:18 miod Exp $ */
+/* $OpenBSD: pte.h,v 1.6 2001/12/20 19:02:29 miod Exp $ */
-#ifndef _MACHINE_PTE_H_
-#define _MACHINE_PTE_H_
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: pte.h 1.13 92/01/20$
+ *
+ * @(#)pte.h 8.1 (Berkeley) 6/10/93
+ */
-#include <m68k/pte_motorola.h>
+#ifndef _MVME68K_PTE_H_
+#define _MVME68K_PTE_H_
-#define MACHINE_STSIZE M68K_STSIZE
-#define MACHINE_MAX_PTSIZE M68K_MAX_PTSIZE
-#define MACHINE_MAX_KPTSIZE M68K_MAX_KPTSIZE
-#define MACHINE_PTBASE M68K_PTBASE
-#define MACHINE_PTMAXSIZE M68K_PTMAXSIZE
+/*
+ * m68k hardware segment/page table entries
+ */
-#endif /* _MACHINE_PTE_H_ */
+#if 0
+struct ste {
+ unsigned int sg_pfnum:20; /* page table frame number */
+ unsigned int :8; /* reserved at 0 */
+ unsigned int :1; /* reserved at 1 */
+ unsigned int sg_prot:1; /* write protect bit */
+ unsigned int sg_v:2; /* valid bits */
+};
+
+struct ste40 {
+ unsigned int sg_ptaddr:24; /* page table page addr */
+ unsigned int :4; /* reserved at 0 */
+	unsigned int sg_u:1;		/* hardware modified (dirty) bit */
+ unsigned int sg_prot:1; /* write protect bit */
+ unsigned int sg_v:2; /* valid bits */
+};
+
+struct pte {
+ unsigned int pg_pfnum:20; /* page frame number or 0 */
+ unsigned int :3;
+ unsigned int pg_w:1; /* is wired */
+ unsigned int :1; /* reserved at zero */
+ unsigned int pg_ci:1; /* cache inhibit bit */
+ unsigned int :1; /* reserved at zero */
+ unsigned int pg_m:1; /* hardware modified (dirty) bit */
+ unsigned int pg_u:1; /* hardware used (reference) bit */
+ unsigned int pg_prot:1; /* write protect bit */
+ unsigned int pg_v:2; /* valid bit */
+};
+#endif
+
+typedef int st_entry_t; /* segment table entry */
+typedef int pt_entry_t; /* Mach page table entry */
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+#define ST_ENTRY_NULL ((st_entry_t *) 0)
+
+#define SG_V 0x00000002 /* segment is valid */
+#define SG_NV 0x00000000
+#define SG_PROT 0x00000004 /* access protection mask */
+#define SG_RO 0x00000004
+#define SG_RW 0x00000000
+#define SG_U 0x00000008 /* modified bit (68040) */
+#define SG_FRAME 0xfffff000
+#define SG_IMASK 0xffc00000
+#define SG_ISHIFT 22
+#define SG_PMASK 0x003ff000
+#define SG_PSHIFT 12
+
+/* 68040 additions */
+#define SG4_MASK1 0xfe000000
+#define SG4_SHIFT1 25
+#define SG4_MASK2 0x01fc0000
+#define SG4_SHIFT2 18
+#define SG4_MASK3 0x0003f000
+#define SG4_SHIFT3 12
+#define SG4_ADDR1 0xfffffe00
+#define SG4_ADDR2 0xffffff00
+#define SG4_LEV1SIZE 128
+#define SG4_LEV2SIZE 128
+#define SG4_LEV3SIZE 64
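+
+#if 0
+/*
+ * Illustrative decomposition of a 68040 virtual address with the
+ * masks above: 7 bits of level 1 index, 7 bits of level 2 index,
+ * 6 bits of level 3 index and 12 bits of page offset
+ * (128 * 128 * 64 * 4096 == 4Gb).  example_sg4_split is a made-up
+ * name.
+ */
+static void
+example_sg4_split(va, l1, l2, l3)
+	unsigned int va;
+	int *l1, *l2, *l3;
+{
+	*l1 = (va & SG4_MASK1) >> SG4_SHIFT1;	/* 0..SG4_LEV1SIZE-1 */
+	*l2 = (va & SG4_MASK2) >> SG4_SHIFT2;	/* 0..SG4_LEV2SIZE-1 */
+	*l3 = (va & SG4_MASK3) >> SG4_SHIFT3;	/* 0..SG4_LEV3SIZE-1 */
+}
+#endif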
+
+#define PG_V 0x00000001
+#define PG_NV 0x00000000
+#define PG_PROT 0x00000004
+#define PG_U 0x00000008
+#define PG_M 0x00000010
+#define PG_W 0x00000100
+#define PG_RO 0x00000004
+#define PG_RW 0x00000000
+#define PG_FRAME 0xfffff000
+#define PG_CI 0x00000040
+#define PG_SHIFT 12
+#define PG_PFNUM(x) (((x) & PG_FRAME) >> PG_SHIFT)
+
+/* 68040 additions */
+#define PG_CMASK 0x00000060 /* cache mode mask */
+#define PG_CWT 0x00000000 /* writethrough caching */
+#define PG_CCB 0x00000020 /* copyback caching */
+#define PG_CIS 0x00000040 /* cache inhibited serialized */
+#define PG_CIN 0x00000060 /* cache inhibited nonserialized */
+#define PG_SO 0x00000080 /* supervisor only */
+
+#define M68K_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
+ /* user process segment table size */
+#define M68K_MAX_PTSIZE 0x400000 /* max size of UPT */
+#define M68K_MAX_KPTSIZE 0x100000 /* max memory to allocate to KPT */
+#define M68K_PTBASE 0x10000000 /* UPT map base address */
+#define M68K_PTMAXSIZE 0x70000000 /* UPT map maximum size */
+
+/*
+ * Kernel virtual address to page table entry and to physical address.
+ */
+#define kvtopte(va) \
+ (&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
+#define ptetokv(pt) \
+ ((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
+#define kvtophys(va) \
+	((PG_PFNUM(*kvtopte(va)) << PGSHIFT) | ((int)(va) & PGOFSET))
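+
+/*
+ * Illustration only: kvtopte() and ptetokv() are inverses, so for a
+ * page-aligned kernel va in range, ptetokv(kvtopte(va)) == va, and
+ * kvtophys() combines the page frame from the PTE with the offset
+ * bits of the va.
+ */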
+
+#endif /* !_MVME68K_PTE_H_ */
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
new file mode 100644
index 00000000000..7ef277f8f3e
--- /dev/null
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -0,0 +1,2403 @@
+/* $OpenBSD: pmap.c,v 1.37 2001/12/20 19:02:29 miod Exp $ */
+
+/*
+ * Copyright (c) 1995 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Theo de Raadt for Willowglen Singapore.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 8.6 (Berkeley) 5/27/94
+ */
+
+/*
+ * m68k series physical map management code.
+ *
+ * Supports:
+ *	XXX 68020 with 68851 MMU
+ * 68030 with on-chip MMU
+ * 68040 with on-chip MMU
+ * 68060 with on-chip MMU
+ *
+ * Notes:
+ * Don't even pay lip service to multiprocessor support.
+ *
+ * We assume TLB entries don't have process tags (except for the
+ * supervisor/user distinction) so we only invalidate TLB entries
+ * when changing mappings for the current (or kernel) pmap. This is
+ *	technically not true for the 68851 but we flush the TLB on every
+ * context switch, so it effectively winds up that way.
+ *
+ * Bitwise and/or operations are significantly faster than bitfield
+ * references so we use them when accessing STE/PTEs in the pmap_pte_*
+ * macros. Note also that the two are not always equivalent; e.g.:
+ * (*pte & PG_PROT) [4] != pte->pg_prot [1]
+ * and a couple of routines that deal with protection and wiring take
+ * some shortcuts that assume the and/or definitions.
+ *
+ * This implementation will only work for PAGE_SIZE == NBPG
+ * (i.e. 4096 bytes).
+ */
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidation or protection
+ * reduction operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+
+#include <machine/pte.h>
+
+#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
+
+#include <machine/cpu.h>
+
+#ifdef DEBUG
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_CACHE 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_SEGTAB 0x0400
+#define PDB_MULTIMAP 0x0800
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+#define PDB_ALL 0xFFFF
+
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
+
+#if defined(M68040) || defined(M68060)
+int dowriteback = 1; /* 68040: enable writeback caching */
+int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
+#endif
+#endif
+
+/*
+ * Get STEs and PTEs for user/kernel address space
+ */
+#if defined(M68040) || defined(M68060)
+#define pmap_ste1(m, v) \
+ (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
+/* XXX assumes physically contiguous ST pages (if more than one) */
+#define pmap_ste2(m, v) \
+ (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
+ - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
+#define pmap_ste(m, v) \
+ (&((m)->pm_stab[(vm_offset_t)(v) \
+ >> (mmutype <= MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
+#define pmap_ste_v(m, v) \
+ (mmutype <= MMU_68040 \
+ ? ((*pmap_ste1(m, v) & SG_V) && \
+ (*pmap_ste2(m, v) & SG_V)) \
+ : (*pmap_ste(m, v) & SG_V))
+#else
+#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
+#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
+#endif
+
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
+#define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
+#define pmap_pte_w(pte) (*(pte) & PG_W)
+#define pmap_pte_ci(pte) (*(pte) & PG_CI)
+#define pmap_pte_m(pte) (*(pte) & PG_M)
+#define pmap_pte_u(pte) (*(pte) & PG_U)
+#define pmap_pte_prot(pte) (*(pte) & PG_PROT)
+#define pmap_pte_v(pte) (*(pte) & PG_V)
+
+#define pmap_pte_set_w(pte, v) \
+ if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
+#define pmap_pte_set_prot(pte, v) \
+ if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
+#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
+#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
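+
+#if 0
+/*
+ * Illustrative sketch only: using the accessors above to wire down
+ * the PTE for va in pmap if it is valid and not yet wired.
+ * example_wire is a made-up name.
+ */
+static void
+example_wire(pmap, va)
+	pmap_t pmap;
+	vm_offset_t va;
+{
+	pt_entry_t *pte = pmap_pte(pmap, va);
+
+	if (pmap_pte_v(pte) && pmap_pte_w_chg(pte, PG_W)) {
+		pmap_pte_set_w(pte, 1);
+		pmap->pm_stats.wired_count++;
+	}
+}
+#endif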
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to an m68k protection code.
+ */
+#define pte_prot(m, p) (protection_codes[p])
+int protection_codes[8];
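+
+/*
+ * Illustration only: with protection_codes[] filled in during
+ * bootstrap, the lookup collapses any VM_PROT_* combination into one
+ * of the two hardware protections, e.g.
+ *	pte_prot(pmap_kernel(), VM_PROT_READ|VM_PROT_EXECUTE) == PG_RO
+ *	pte_prot(pmap_kernel(), VM_PROT_READ|VM_PROT_WRITE) == PG_RW
+ */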
+
+/*
+ * Kernel page table page management.
+ */
+struct kpt_page {
+ struct kpt_page *kpt_next; /* link on either used or free list */
+ vm_offset_t kpt_va; /* always valid kernel VA */
+ vm_offset_t kpt_pa; /* PA of this page (for speed) */
+};
+struct kpt_page *kpt_free_list, *kpt_used_list;
+struct kpt_page *kpt_pages;
+
+/*
+ * Kernel segment/page table and page table map.
+ * The page table map gives us a level of indirection we need to dynamically
+ * expand the page table. It is essentially a copy of the segment table
+ * with PTEs instead of STEs. All are initialized in locore at boot time.
+ * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
+ * Segtabzero is an empty segment table which all processes share until they
+ * reference something.
+ */
+st_entry_t *Sysseg;
+pt_entry_t *Sysmap, *Sysptmap;
+st_entry_t *Segtabzero, *Segtabzeropa;
+vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
+
+struct pmap kernel_pmap_store;
+struct vm_map *st_map, *pt_map;
+struct vm_map st_map_store, pt_map_store;
+
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
+vm_size_t mem_size; /* memory size in bytes */
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int page_cnt; /* number of pages managed by VM system */
+
+boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+struct pv_entry *pv_table;
+char *pmap_attributes; /* reference and modify bits */
+TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
+int pv_nfree;
+
+#if defined(M68040) || defined(M68060)
+int protostfree; /* prototype (default) free ST map */
+#endif
+
+/*
+ * Internal routines
+ */
+void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((vm_offset_t, int));
+void pmap_changebit __P((vm_offset_t, int, boolean_t));
+void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+void pmap_ptpage_addref __P((vaddr_t));
+int pmap_ptpage_delref __P((vaddr_t));
+void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t));
+void pmap_pinit __P((struct pmap *));
+void pmap_release __P((struct pmap *));
+
+#ifdef DEBUG
+void pmap_pvdump __P((vm_offset_t));
+void pmap_check_wiring __P((char *, vm_offset_t));
+#endif
+
+/* pmap_remove_mapping flags */
+#define PRM_TFLUSH 1
+#define PRM_CFLUSH 2
+#define PRM_KEEPPTPAGE 4
+
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
+
+#define pa_to_pvh(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.pvent[pg_]; \
+})
+
+#define pa_to_attribute(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
+})
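+
+#if 0
+/*
+ * Illustrative sketch only: walking the PV list of a managed
+ * physical page with the helpers above.  A header entry with a NULL
+ * pv_pmap means the page currently has no mappings.  example_pv_walk
+ * is a made-up name.
+ */
+static void
+example_pv_walk(pa)
+	vm_offset_t pa;
+{
+	struct pv_entry *pv;
+
+	if (!PAGE_IS_MANAGED(pa))
+		return;
+	for (pv = pa_to_pvh(pa); pv != NULL && pv->pv_pmap != NULL;
+	    pv = pv->pv_next)
+		printf("pa %lx mapped at va %lx in pmap %p\n",
+		    pa, pv->pv_va, pv->pv_pmap);
+}
+#endif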
+
+/*
+ * Routine: pmap_virtual_space
+ *
+ * Function:
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap. Called by
+ * vm_bootstrap_steal_memory().
+ */
+void
+pmap_virtual_space(vstartp, vendp)
+ vm_offset_t *vstartp, *vendp;
+{
+
+ *vstartp = virtual_avail;
+ *vendp = virtual_end;
+}
+
+/*
+ * Routine: pmap_init
+ *
+ * Function:
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init()
+{
+ vm_offset_t addr, addr2;
+ vm_size_t s;
+ int rv;
+ int npages;
+ struct pv_entry *pv;
+ char *attr;
+ int bank;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_init()\n");
+#endif
+ /*
+ * Now that kernel map has been allocated, we can mark as
+ * unavailable regions which we have mapped in pmap_bootstrap().
+ */
+ addr = (vaddr_t) intiobase;
+ if (uvm_map(kernel_map, &addr,
+ m68k_ptob(iiomapsize+EIOMAPSIZE),
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)))
+ goto bogons;
+ addr = (vaddr_t) Sysmap;
+ if (uvm_map(kernel_map, &addr, M68K_MAX_PTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED))) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+ bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT) {
+ printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
+ Sysseg, Sysmap, Sysptmap);
+ printf(" pstart %x, pend %x, vstart %x, vend %x\n",
+ avail_start, avail_end, virtual_avail, virtual_end);
+ }
+#endif
+
+ /*
+ * Allocate memory for random pmap data structures. Includes the
+ * initial segment table, pv_head_table and pmap_attributes.
+ */
+ for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
+ page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+ s = M68K_STSIZE; /* Segtabzero */
+ s += page_cnt * sizeof(struct pv_entry); /* pv table */
+ s += page_cnt * sizeof(char); /* attribute table */
+ s = round_page(s);
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: can't allocate data structures");
+
+ Segtabzero = (st_entry_t *) addr;
+ pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
+#ifdef M68060
+ if (mmutype == MMU_68060) {
+ addr2 = addr;
+ while (addr2 < addr + M68K_STSIZE) {
+ pmap_changebit(addr2, PG_CCB, 0);
+ pmap_changebit(addr2, PG_CI, 1);
+ addr2 += NBPG;
+ }
+ DCIS();
+ }
+#endif
+ addr += M68K_STSIZE;
+
+ pv_table = (struct pv_entry *) addr;
+ addr += page_cnt * sizeof(struct pv_entry);
+
+ pmap_attributes = (char *) addr;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes);
+#endif
+
+ /*
+ * Now that the pv and attribute tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_table;
+ attr = pmap_attributes;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npages = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += npages;
+ attr += npages;
+ }
+
+ /*
+ * Allocate physical memory for kernel PT pages and their management.
+ * We need 1 PT page per possible task plus some slop.
+ */
+ npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
+ s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
+
+ /*
+ * Verify that space will be allocated in region for which
+ * we already have kernel PT pages.
+ */
+ addr = 0;
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv || (addr + s) >= (vaddr_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ uvm_unmap(kernel_map, addr, addr + s);
+
+ /*
+ * Now allocate the space and link the pages together to
+ * form the KPT free list.
+ */
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+ s = ptoa(npages);
+ addr2 = addr + s;
+ kpt_pages = &((struct kpt_page *)addr2)[npages];
+ kpt_free_list = (struct kpt_page *) 0;
+ do {
+ addr2 -= NBPG;
+ (--kpt_pages)->kpt_next = kpt_free_list;
+ kpt_free_list = kpt_pages;
+ kpt_pages->kpt_va = addr2;
+ pmap_extract(pmap_kernel(), addr2, &kpt_pages->kpt_pa);
+#ifdef M68060
+ if (mmutype == MMU_68060) {
+ pmap_changebit(kpt_pages->kpt_pa, PG_CCB, 0);
+ pmap_changebit(kpt_pages->kpt_pa, PG_CI, 1);
+ DCIS();
+ }
+#endif
+ } while (addr != addr2);
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: KPT: %d pages from %x to %x\n",
+ atop(s), addr, addr + s);
+#endif
+
+ /*
+ * Allocate the segment table map
+ */
+ s = maxproc * M68K_STSIZE;
+ st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
+ &st_map_store);
+
+ /*
+ * Slightly modified version of kmem_suballoc() to get page table
+ * map where we want it.
+ */
+ addr = M68K_PTBASE;
+ if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
+ s = M68K_PTMAXSIZE;
+ /*
+ * XXX We don't want to hang when we run out of
+ * page tables, so we lower maxproc so that fork()
+ * will fail instead. Note that root could still raise
+ * this value via sysctl(2).
+ */
+ maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
+ } else
+ s = (maxproc * M68K_MAX_PTSIZE);
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ TRUE, &pt_map_store);
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040) {
+ protostfree = ~l2tobm(0);
+ for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
+ protostfree &= ~l2tobm(rv);
+ }
+#endif
+
+ /*
+ * Now it is safe to enable pv_table recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+struct pv_entry *
+pmap_alloc_pv()
+{
+ struct pv_page *pvp;
+ struct pv_entry *pv;
+ int i;
+
+ if (pv_nfree == 0) {
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+ pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
+ for (i = NPVPPG - 2; i; i--, pv++)
+ pv->pv_next = pv + 1;
+ pv->pv_next = 0;
+ pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
+ TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ pv = &pvp->pvp_pv[0];
+ } else {
+ --pv_nfree;
+ pvp = pv_page_freelist.tqh_first;
+ if (--pvp->pvp_pgi.pgi_nfree == 0) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ }
+ pv = pvp->pvp_pgi.pgi_freelist;
+#ifdef DIAGNOSTIC
+ if (pv == 0)
+ panic("pmap_alloc_pv: pgi_nfree inconsistent");
+#endif
+ pvp->pvp_pgi.pgi_freelist = pv->pv_next;
+ }
+ return pv;
+}
+
+void
+pmap_free_pv(pv)
+ struct pv_entry *pv;
+{
+ register struct pv_page *pvp;
+ register int i;
+
+ pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
+ switch (++pvp->pvp_pgi.pgi_nfree) {
+ case 1:
+ TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
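+		/* FALLTHROUGH */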
+ default:
+ pv->pv_next = pvp->pvp_pgi.pgi_freelist;
+ pvp->pvp_pgi.pgi_freelist = pv;
+ ++pv_nfree;
+ break;
+ case NPVPPG:
+ pv_nfree -= NPVPPG - 1;
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+ break;
+ }
+}
+
+void
+pmap_collect_pv()
+{
+ struct pv_page_list pv_page_collectlist;
+ struct pv_page *pvp, *npvp;
+ struct pv_entry *ph, *ppv, *pv, *npv;
+ int s;
+
+ TAILQ_INIT(&pv_page_collectlist);
+
+ for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
+ if (pv_nfree < NPVPPG)
+ break;
+ npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+ if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp, pvp_pgi.pgi_list);
+ pv_nfree -= pvp->pvp_pgi.pgi_nfree;
+ pvp->pvp_pgi.pgi_nfree = -1;
+ }
+ }
+
+ if (pv_page_collectlist.tqh_first == 0)
+ return;
+
+ for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
+ if (ph->pv_pmap == 0)
+ continue;
+ s = splimp();
+ for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
+ pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
+ if (pvp->pvp_pgi.pgi_nfree == -1) {
+ pvp = pv_page_freelist.tqh_first;
+ if (--pvp->pvp_pgi.pgi_nfree == 0) {
+ TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ }
+ npv = pvp->pvp_pgi.pgi_freelist;
+#ifdef DIAGNOSTIC
+ if (npv == 0)
+ panic("pmap_collect_pv: pgi_nfree inconsistent");
+#endif
+ pvp->pvp_pgi.pgi_freelist = npv->pv_next;
+ *npv = *pv;
+ ppv->pv_next = npv;
+ ppv = npv;
+ } else
+ ppv = pv;
+ }
+ splx(s);
+ }
+
+ for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
+ npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+ }
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * For now, VM is already on, we only need to map the
+ * specified memory.
+ */
+vm_offset_t
+pmap_map(va, spa, epa, prot)
+ vm_offset_t va, spa, epa;
+ int prot;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_map(%x, %x, %x, %x)\n", va, spa, epa, prot);
+#endif
+
+ while (spa < epa) {
+ pmap_enter(pmap_kernel(), va, spa, prot, prot);
+ va += NBPG;
+ spa += NBPG;
+ }
+ return (va);
+}
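+
+#if 0
+/*
+ * Illustrative use of pmap_map() (not part of this change): map one
+ * page of a hypothetical device at physical address dpa into the
+ * kernel, read/write.  The return value is the first unused va past
+ * the new mapping.  example_map_device is a made-up name.
+ */
+static vm_offset_t
+example_map_device(kva, dpa)
+	vm_offset_t kva, dpa;
+{
+	return (pmap_map(kva, dpa, dpa + NBPG,
+	    VM_PROT_READ|VM_PROT_WRITE));
+}
+#endif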
+
+/*
+ * Create and return a physical map.
+ *
+ * The returned map is an actual physical map and may be
+ * referenced by the hardware.
+ */
+struct pmap *
+pmap_create(void)
+{
+ struct pmap *pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ printf("pmap_create(%x)\n", size);
+#endif
+
+ pmap = (struct pmap *) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ register struct pmap *pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ printf("pmap_pinit(%x)\n", pmap);
+#endif
+
+ /*
+ * No need to allocate page table space yet but we do need a
+ * valid segment table. Initially, we point everyone at the
+ * "null" segment table. On the first pmap_enter, a real
+ * segment table will be allocated.
+ */
+ pmap->pm_stab = Segtabzero;
+ pmap->pm_stpa = Segtabzeropa;
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040)
+ pmap->pm_stfree = protostfree;
+#endif
+ pmap->pm_count = 1;
+ simple_lock_init(&pmap->pm_lock);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ register pmap_t pmap;
+{
+ int count;
+
+ if (pmap == NULL)
+ return;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_destroy(%x)\n", pmap);
+#endif
+
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ free((caddr_t)pmap, M_VMPMAP);
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+ register struct pmap *pmap;
+{
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_release(%x)\n", pmap);
+#endif
+
+#ifdef notdef /* DIAGNOSTIC */
+ /* count would be 0 from pmap_destroy... */
+ simple_lock(&pmap->pm_lock);
+ if (pmap->pm_count != 1)
+ panic("pmap_release count");
+#endif
+
+ if (pmap->pm_ptab)
+ uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
+ M68K_MAX_PTSIZE);
+ if (pmap->pm_stab != Segtabzero)
+ uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+ M68K_STSIZE);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+
+ if (pmap == NULL)
+ return;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_reference(%x)\n", pmap);
+#endif
+
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+}
+
+/*
+ * Mark that a processor is about to be used by a given pmap.
+ */
+void
+pmap_activate(p)
+ struct proc *p;
+{
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
+ printf("pmap_activate(%p)\n", p);
+#endif
+
+ PMAP_ACTIVATE(pmap, p == curproc);
+}
+
+void
+pmap_deactivate(p)
+ struct proc *p;
+{
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ register pmap_t pmap;
+ register vm_offset_t sva, eva;
+{
+ register vm_offset_t nssva;
+ register pt_entry_t *pte;
+ boolean_t firstpage, needcflush;
+ int flags;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
+#endif
+
+ if (pmap == NULL)
+ return;
+
+ firstpage = TRUE;
+ needcflush = FALSE;
+ flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
+ while (sva < eva) {
+ nssva = m68k_trunc_seg(sva) + M68K_SEG_SIZE;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!pmap_ste_v(pmap, sva)) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Invalidate every valid mapping within this segment.
+ */
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (pmap_pte_v(pte)) {
+ pmap_remove_mapping(pmap, sva, pte, flags);
+ firstpage = FALSE;
+ }
+ pte++;
+ sva += NBPG;
+ }
+ }
+ /*
+ * Didn't do anything, no need for cache flushes
+ */
+ if (firstpage)
+ return;
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ struct pv_entry *pv;
+ int s;
+
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ printf("pmap_page_protect(%x, %x)\n", pa, prot);
+#endif
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+
+ switch (prot) {
+ case VM_PROT_READ|VM_PROT_WRITE:
+ case VM_PROT_ALL:
+ return;
+ /* copy_on_write */
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_changebit(pa, PG_RO, TRUE);
+ return;
+ /* remove_all */
+ default:
+ break;
+ }
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ while (pv->pv_pmap != NULL) {
+ register pt_entry_t *pte;
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+#ifdef DEBUG
+ if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
+ pmap_pte_pa(pte) != pa)
+ panic("pmap_page_protect: bad mapping");
+#endif
+ if (!pmap_pte_w(pte))
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ pte, PRM_TFLUSH|PRM_CFLUSH);
+ else {
+ pv = pv->pv_next;
+#ifdef DEBUG
+ if (pmapdebug & PDB_PARANOIA)
+ printf("%s wired mapping for %x not removed\n",
+ "pmap_page_protect:", pa);
+#endif
+ }
+ }
+ splx(s);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ register pmap_t pmap;
+ register vm_offset_t sva, eva;
+ vm_prot_t prot;
+{
+ register vm_offset_t nssva;
+ register pt_entry_t *pte;
+ boolean_t firstpage, needtflush;
+ int isro;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
+ printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
+#endif
+
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ isro = pte_prot(pmap, prot);
+ needtflush = active_pmap(pmap);
+ firstpage = TRUE;
+ while (sva < eva) {
+ nssva = m68k_trunc_seg(sva) + M68K_SEG_SIZE;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!pmap_ste_v(pmap, sva)) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Change protection on mapping if it is valid and doesn't
+ * already have the correct protection.
+ */
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
+#if defined(M68040) || defined(M68060)
+ /*
+ * Clear caches if making RO (see section
+ * "7.3 Cache Coherency" in the manual).
+ */
+ if (isro && mmutype <= MMU_68040) {
+ vm_offset_t pa = pmap_pte_pa(pte);
+
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ pmap_pte_set_prot(pte, isro);
+ if (needtflush)
+ TBIS(sva);
+ firstpage = FALSE;
+ }
+ pte++;
+ sva += NBPG;
+ }
+ }
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte cannot be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+
+int
+pmap_enter(pmap, va, pa, prot, flags)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int flags;
+{
+ register pt_entry_t *pte;
+ register int npte;
+ vm_offset_t opa;
+ boolean_t cacheable = TRUE;
+ boolean_t checkpv = TRUE;
+ boolean_t wired = (flags & PMAP_WIRED) != 0;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_enter(%x, %x, %x, %x, %x)\n",
+ pmap, va, pa, prot, flags);
+#endif
+
+ /*
+ * For user mapping, allocate kernel VM resources if necessary.
+ */
+ if (pmap->pm_ptab == NULL)
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, M68K_MAX_PTSIZE);
+
+ /*
+ * Segment table entry not valid, we need a new PT page
+ */
+ if (!pmap_ste_v(pmap, va))
+ pmap_enter_ptpage(pmap, va);
+
+ pa = m68k_trunc_page(pa);
+ pte = pmap_pte(pmap, va);
+ opa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pte %x, *pte %x\n", pte, *pte);
+#endif
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (opa == pa) {
+ /*
+ * Wiring change, just update stats.
+ * We don't worry about wiring PT pages as they remain
+ * resident as long as there are valid mappings in them.
+ * Hence, if a user page is wired, the PT page will be also.
+ */
+ if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: wiring change -> %x\n", wired);
+#endif
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ else
+ pmap->pm_stats.wired_count--;
+ }
+ /*
+ * Retain cache inhibition status
+ */
+ checkpv = FALSE;
+ if (pmap_pte_ci(pte))
+ cacheable = FALSE;
+ goto validate;
+ }
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: removing old mapping %x\n", va);
+#endif
+ pmap_remove_mapping(pmap, va, pte,
+ PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
+ }
+
+ /*
+ * If this is a new user mapping, increment the wiring count
+ * on this PT page. PT pages are wired down as long as there
+ * is a valid mapping in the page.
+ */
+ if (pmap != pmap_kernel()) {
+ pmap_ptpage_addref(trunc_page((vaddr_t)pte));
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory
+ * Note that we raise IPL while manipulating pv_table
+ * since pmap_enter can be called at interrupt time.
+ */
+ if (PAGE_IS_MANAGED(pa)) {
+ register struct pv_entry *pv, *npv;
+ int s;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: pv at %x: %x/%x/%x\n",
+ pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
+#endif
+ /*
+ * No entries yet, use header as the first entry
+ */
+ if (pv->pv_pmap == NULL) {
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ pv->pv_ptste = NULL;
+ pv->pv_ptpmap = NULL;
+ pv->pv_flags = 0;
+ }
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ */
+ else {
+#ifdef DEBUG
+ for (npv = pv; npv; npv = npv->pv_next)
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ panic("pmap_enter: already in pv_tab");
+#endif
+ npv = pmap_alloc_pv();
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ npv->pv_ptste = NULL;
+ npv->pv_ptpmap = NULL;
+ npv->pv_flags = 0;
+ pv->pv_next = npv;
+ }
+ splx(s);
+ }
+ /*
+ * Assumption: if it is not part of our managed memory
+	 * then it must be device memory which may be volatile.
+ */
+ else if (pmap_initialized) {
+ checkpv = cacheable = FALSE;
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Build the new PTE.
+ */
+ npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
+ if (wired)
+ npte |= PG_W;
+
+ /* Don't cache if process can't take it, like SunOS ones. */
+ if (mmutype <= MMU_68040 && pmap != pmap_kernel() &&
+ (curproc->p_md.md_flags & MDP_UNCACHE_WX) &&
+ (prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE))
+ checkpv = cacheable = FALSE;
+
+ if (!checkpv && !cacheable)
+ npte |= PG_CI;
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
+#ifdef DEBUG
+ if (dowriteback && (dokwriteback || pmap != pmap_kernel()))
+#endif
+ npte |= PG_CCB;
+#endif
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("enter: new pte value %x\n", npte);
+#endif
+ /*
+ * Remember if this was a wiring-only change.
+ * If so, we need not flush the TLB and caches.
+ */
+ wired = ((*pte ^ npte) == PG_W);
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040 && !wired) {
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
+ if (!wired && active_pmap(pmap))
+ TBIS(va);
+#ifdef DEBUG
+ if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
+ pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
+#endif
+
+ return (0);
+}
+
+/*
+ * Routine: pmap_unwire
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_unwire(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
+{
+ pt_entry_t *pte;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_unwire(%x, %x)\n", pmap, va);
+#endif
+ if (pmap == NULL)
+ return;
+
+ pte = pmap_pte(pmap, va);
+#ifdef DEBUG
+ /*
+ * Page table page is not allocated.
+ * Should this ever happen? Ignore it for now,
+ * we don't want to force allocation of unnecessary PTE pages.
+ */
+ if (!pmap_ste_v(pmap, va)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid STE for %x\n", va);
+ return;
+ }
+ /*
+ * Page not valid. Should this ever happen?
+ * Just continue and change wiring anyway.
+ */
+ if (!pmap_pte_v(pte)) {
+ if (pmapdebug & PDB_PARANOIA)
+ printf("pmap_unwire: invalid PTE for %x\n", va);
+ }
+#endif
+ /*
+	 * If wiring actually changed (always?) clear the wire bit and
+ * update the wire count. Note that wiring is not a hardware
+ * characteristic so there is no need to invalidate the TLB.
+ */
+ if (pmap_pte_w_chg(pte, 0)) {
+ pmap_pte_set_w(pte, 0);
+ pmap->pm_stats.wired_count--;
+ }
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+boolean_t
+pmap_extract(pmap, va, pap)
+ pmap_t pmap;
+ vm_offset_t va;
+ paddr_t *pap;
+{
+ paddr_t pa;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_extract(%x, %x) -> ", pmap, va);
+#endif
+	if (pmap && pmap_ste_v(pmap, va))
+		pa = *pmap_pte(pmap, va);
+	else
+		return (FALSE);
+	if ((pa & PG_V) == 0)
+		return (FALSE);
+ *pap = (pa & PG_FRAME) | (va & ~PG_FRAME);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("%x\n", *pap);
+#endif
+ return (TRUE);
+}
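+
+#if 0
+/*
+ * Illustration only: the usual calling pattern for pmap_extract().
+ * example_extract is a made-up name.
+ */
+static void
+example_extract(va)
+	vaddr_t va;
+{
+	paddr_t pa;
+
+	if (pmap_extract(pmap_kernel(), va, &pa))
+		printf("va %lx -> pa %lx\n", va, pa);
+	else
+		printf("va %lx not mapped\n", va);
+}
+#endif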
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vm_offset_t dst_addr;
+ vm_size_t len;
+ vm_offset_t src_addr;
+{
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy(%x, %x, %x, %x, %x)\n",
+ dst_pmap, src_pmap, dst_addr, len, src_addr);
+#endif
+}
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(pmap)
+ pmap_t pmap;
+{
+ int bank, s;
+
+ if (pmap != pmap_kernel())
+ return;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_collect(%p)\n", pmap);
+#endif
+ s = splimp();
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+ splx(s);
+
+#ifdef notyet
+ /* Go compact and garbage-collect the pv_table. */
+ pmap_collect_pv();
+#endif
+}
+
+/*
+ * Routine: pmap_collect1()
+ *
+ * Function:
+ * Helper function for pmap_collect(). Do the actual
+ * garbage-collection of range of physical addresses.
+ */
+void
+pmap_collect1(pmap, startpa, endpa)
+ pmap_t pmap;
+ vm_offset_t startpa, endpa;
+{
+ vm_offset_t pa;
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ vm_offset_t kpa;
+#ifdef DEBUG
+ st_entry_t *ste;
+ int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
+#endif
+
+ for (pa = startpa; pa < endpa; pa += NBPG) {
+ struct kpt_page *kpt, **pkpt;
+
+ /*
+ * Locate physical pages which are being used as kernel
+ * page table pages.
+ */
+ pv = pa_to_pvh(pa);
+ if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
+ continue;
+ do {
+ if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
+ break;
+ } while ((pv = pv->pv_next));
+ if (pv == NULL)
+ continue;
+#ifdef DEBUG1
+ if (pv->pv_va < (vm_offset_t)Sysmap ||
+	    pv->pv_va >= (vm_offset_t)Sysmap + M68K_MAX_PTSIZE)
+ printf("collect: kernel PT VA out of range\n");
+ else
+ goto ok;
+ pmap_pvdump(pa);
+ continue;
+ok:
+#endif
+ pte = (pt_entry_t *)(pv->pv_va + NBPG);
+ while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
+ ;
+ if (pte >= (pt_entry_t *)pv->pv_va)
+ continue;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
+ printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
+ pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
+ opmapdebug = pmapdebug;
+ pmapdebug |= PDB_PTPAGE;
+ }
+
+ ste = pv->pv_ptste;
+#endif
+ /*
+ * If all entries were invalid we can remove the page.
+	 * We call pmap_remove_mapping to take care of invalidating
+ * ST and Sysptmap entries.
+ */
+ pmap_extract(pmap, pv->pv_va, &kpa);
+ pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
+ PRM_TFLUSH|PRM_CFLUSH);
+ /*
+ * Use the physical address to locate the original
+ * (kmem_alloc assigned) address for the page and put
+ * that page back on the free list.
+ */
+ for (pkpt = &kpt_used_list, kpt = *pkpt;
+ kpt != (struct kpt_page *)0;
+ pkpt = &kpt->kpt_next, kpt = *pkpt)
+ if (kpt->kpt_pa == kpa)
+ break;
+#ifdef DEBUG
+ if (kpt == (struct kpt_page *)0)
+ panic("pmap_collect: lost a KPT page");
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ printf("collect: %lx (%lx) to free list\n",
+ kpt->kpt_va, kpa);
+#endif
+ *pkpt = kpt->kpt_next;
+ kpt->kpt_next = kpt_free_list;
+ kpt_free_list = kpt;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
+ pmapdebug = opmapdebug;
+
+ if (*ste != SG_NV)
+ printf("collect: kernel STE at %p still valid (%x)\n",
+ ste, *ste);
+ ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
+ if (*ste != SG_NV)
+ printf("collect: kernel PTmap at %p still valid (%x)\n",
+ ste, *ste);
+#endif
+ }
+}
+
+/*
+ * pmap_zero_page zeros the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bzero to clear its contents, one machine dependent page
+ * at a time.
+ *
+ * XXX this is a bad implementation for virtual cache machines
+ * (320/350) because pmap_enter doesn't cache-inhibit the temporary
+ * kernel mapping and we wind up with data cached for that KVA.
+ * It is probably a win for physical cache machines (370/380)
+ * as the cache loading is not wasted.
+ */
+void
+pmap_zero_page(phys)
+ vm_offset_t phys;
+{
+ register vm_offset_t kva;
+ extern caddr_t CADDR1;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_zero_page(%x)\n", phys);
+#endif
+ kva = (vm_offset_t) CADDR1;
+	pmap_enter(pmap_kernel(), kva, phys, VM_PROT_READ|VM_PROT_WRITE,
+	    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ zeropage((caddr_t)kva);
+ pmap_remove_mapping(pmap_kernel(), kva, PT_ENTRY_NULL,
+ PRM_TFLUSH|PRM_CFLUSH);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ *
+ * XXX this is a bad implementation for virtual cache machines
+ * (320/350) because pmap_enter doesn't cache-inhibit the temporary
+ * kernel mapping and we wind up with data cached for that KVA.
+ * It is probably a win for physical cache machines (370/380)
+ * as the cache loading is not wasted.
+ */
+void
+pmap_copy_page(src, dst)
+ vm_offset_t src, dst;
+{
+ register vm_offset_t skva, dkva;
+ extern caddr_t CADDR1, CADDR2;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy_page(%x, %x)\n", src, dst);
+#endif
+ skva = (vm_offset_t) CADDR1;
+ dkva = (vm_offset_t) CADDR2;
+	pmap_enter(pmap_kernel(), skva, src, VM_PROT_READ,
+	    VM_PROT_READ|PMAP_WIRED);
+	pmap_enter(pmap_kernel(), dkva, dst, VM_PROT_READ|VM_PROT_WRITE,
+	    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ copypage((caddr_t)skva, (caddr_t)dkva);
+ /* CADDR1 and CADDR2 are virtually contiguous */
+ pmap_remove(pmap_kernel(), skva, skva + (2 * NBPG));
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_modified(pg);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_modify(%x)\n", pa);
+#endif
+ pmap_changebit(pa, PG_M, FALSE);
+
+ return (ret);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t ret;
+
+ ret = pmap_is_referenced(pg);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_reference(%x)\n", pa);
+#endif
+ pmap_changebit(pa, PG_U, FALSE);
+
+ return (ret);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_referenced(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_U);
+ printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_U));
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t
+pmap_is_modified(struct vm_page *pg)
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ boolean_t rv = pmap_testbit(pa, PG_M);
+ printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
+ return(rv);
+ }
+#endif
+ return(pmap_testbit(pa, PG_M));
+}
+
+vm_offset_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+ return(m68k_ptob(ppn));
+}
+
+#ifdef COMPAT_HPUX
+/*
+ * 'PUX hack for dealing with the so called multi-mapped address space.
+ * The first 256mb is mapped in at every 256mb region from 0x10000000
+ * up to 0xF0000000. This provides 15 aliased regions, i.e. 4 bits of
+ * tag information.
+ *
+ * We implement this at the segment table level, the machine independent
+ * VM knows nothing about it.
+ */
+int
+pmap_mapmulti(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
+{
+ st_entry_t *ste, *bste;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_MULTIMAP) {
+ ste = pmap_ste(pmap, HPMMBASEADDR(va));
+ printf("pmap_mapmulti(%x, %x): bste %x(%x)",
+ pmap, va, ste, *ste);
+ ste = pmap_ste(pmap, va);
+ printf(" ste %x(%x)\n", ste, *ste);
+ }
+#endif
+ bste = pmap_ste(pmap, HPMMBASEADDR(va));
+ ste = pmap_ste(pmap, va);
+ if (*ste == SG_NV && (*bste & SG_V)) {
+ *ste = *bste;
+ TBIAU();
+ return (0);
+ }
+ return (EFAULT);
+}
+#endif
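+
+/*
+ * Illustrative sketch, not part of this pmap: with the layout described
+ * above, the alias tag lives in the top four address bits, and
+ * HPMMBASEADDR() is assumed to strip it, e.g. for va == 0x30001000:
+ */
+#if 0
+	tag = (va >> 28) & 0xf;		/* 3; usable tags are 1-15 */
+	base = HPMMBASEADDR(va);	/* 0x00001000, in the base 256mb */
+#endif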
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/*
+ * Invalidate a single page denoted by pmap/va.
+ * If (pte != NULL), it is the already computed PTE for the page.
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ * If (flags & PRM_KEEPPTPAGE), we don't free the page table page if the
+ * reference drops to zero.
+ */
+/* static */
+void
+pmap_remove_mapping(pmap, va, pte, flags)
+ register pmap_t pmap;
+ register vm_offset_t va;
+ register pt_entry_t *pte;
+ int flags;
+{
+ register vm_offset_t pa;
+ register struct pv_entry *pv, *npv;
+ pmap_t ptpmap;
+ st_entry_t *ste;
+ int s, bits;
+#ifdef DEBUG
+ pt_entry_t opte;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
+ pmap, va, pte, flags);
+#endif
+
+ /*
+ * PTE not provided, compute it from pmap and va.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ pte = pmap_pte(pmap, va);
+ if (*pte == PG_NV)
+ return;
+ }
+ pa = pmap_pte_pa(pte);
+#ifdef DEBUG
+ opte = *pte;
+#endif
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTE after saving the reference modify info.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_REMOVE)
+ printf("remove: invalidating pte at %x\n", pte);
+#endif
+ bits = *pte & (PG_U|PG_M);
+ *pte = PG_NV;
+ if ((flags & PRM_TFLUSH) && active_pmap(pmap))
+ TBIS(va);
+ /*
+ * For user mappings decrement the wiring count on
+ * the PT page. We do this after the PTE has been
+ * invalidated because vm_map_pageable winds up in
+ * pmap_pageable which clears the modify bit for the
+ * PT page.
+ */
+ if (pmap != pmap_kernel()) {
+ vaddr_t ptpva = trunc_page((vaddr_t)pte);
+ int refs = pmap_ptpage_delref(ptpva);
+
+ /*
+ * If reference count drops to 1, and we're not instructed
+ * to keep it around, free the PT page.
+ *
+ * Note: refcnt == 1 comes from the fact that we allocate
+ * the page with uvm_fault_wire(), which initially wires
+ * the page. The first reference we actually add causes
+ * the refcnt to be 2.
+ */
+ if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+ struct pv_entry *pv;
+ paddr_t pa;
+
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
+#ifdef DIAGNOSTIC
+ if (PAGE_IS_MANAGED(pa) == 0)
+ panic("pmap_remove_mapping: unmanaged PT page");
+#endif
+ pv = pa_to_pvh(pa);
+#ifdef DIAGNOSTIC
+ if (pv->pv_ptste == NULL)
+ panic("pmap_remove_mapping: ptste == NULL");
+ if (pv->pv_pmap != pmap_kernel() ||
+ pv->pv_va != ptpva ||
+ pv->pv_next != NULL)
+ panic("pmap_remove_mapping: "
+ "bad PT page pmap %p, va 0x%lx, next %p",
+ pv->pv_pmap, pv->pv_va, pv->pv_next);
+#endif
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ NULL, PRM_TFLUSH|PRM_CFLUSH);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: PT page 0x%lx (0x%lx) freed\n",
+				    ptpva, pa);
+#endif
+ }
+#ifdef DEBUG
+ if (pmapdebug & PDB_WIRING)
+			pmap_check_wiring("remove", trunc_page((vaddr_t)pte));
+#endif
+ }
+ /*
+ * If this isn't a managed page, we are all done.
+ */
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+ /*
+ * Otherwise remove it from the PV table
+ * (raise IPL since we may be called at interrupt time).
+ */
+ pv = pa_to_pvh(pa);
+ ste = ST_ENTRY_NULL;
+ s = splimp();
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ ste = pv->pv_ptste;
+ ptpmap = pv->pv_ptpmap;
+ npv = pv->pv_next;
+ if (npv) {
+ npv->pv_flags = pv->pv_flags;
+ *pv = *npv;
+ pmap_free_pv(npv);
+ } else
+ pv->pv_pmap = NULL;
+ } else {
+ for (npv = pv->pv_next; npv; npv = npv->pv_next) {
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ break;
+ pv = npv;
+ }
+#ifdef DEBUG
+ if (npv == NULL)
+ panic("pmap_remove: PA not in pv_tab");
+#endif
+ ste = npv->pv_ptste;
+ ptpmap = npv->pv_ptpmap;
+ pv->pv_next = npv->pv_next;
+ pmap_free_pv(npv);
+ pv = pa_to_pvh(pa);
+ }
+ /*
+ * If this was a PT page we must also remove the
+ * mapping from the associated segment table.
+ */
+ if (ste) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: ste was %x@%x pte was %x@%x\n",
+ *ste, ste, opte, pmap_pte(pmap, va));
+#endif
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040) {
+ st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
+
+ while (ste < este)
+ *ste++ = SG_NV;
+#ifdef DEBUG
+ ste -= NPTEPG/SG4_LEV3SIZE;
+#endif
+ } else
+#endif
+ *ste = SG_NV;
+ /*
+ * If it was a user PT page, we decrement the
+ * reference count on the segment table as well,
+ * freeing it if it is now empty.
+ */
+ if (ptpmap != pmap_kernel()) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: stab %x, refcnt %d\n",
+ ptpmap->pm_stab, ptpmap->pm_sref - 1);
+ if ((pmapdebug & PDB_PARANOIA) &&
+ ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
+ panic("remove: bogus ste");
+#endif
+ if (--(ptpmap->pm_sref) == 0) {
+#ifdef DEBUG
+ if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
+ printf("remove: free stab %x\n",
+ ptpmap->pm_stab);
+#endif
+ uvm_km_free_wakeup(st_map,
+ (vm_offset_t)ptpmap->pm_stab,
+ M68K_STSIZE);
+ ptpmap->pm_stab = Segtabzero;
+ ptpmap->pm_stpa = Segtabzeropa;
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040)
+ ptpmap->pm_stfree = protostfree;
+#endif
+ /*
+ * XXX may have changed segment table
+ * pointer for current process so
+ * update now to reload hardware.
+ */
+ if (active_user_pmap(ptpmap))
+ PMAP_ACTIVATE(ptpmap, 1);
+ }
+#ifdef DEBUG
+ else if (ptpmap->pm_sref < 0)
+ panic("remove: sref < 0");
+#endif
+ }
+#if 0
+ /*
+ * XXX this should be unnecessary as we have been
+ * flushing individual mappings as we go.
+ */
+ if (ptpmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ pv->pv_flags &= ~PV_PTPAGE;
+ ptpmap->pm_ptpages--;
+ }
+ /*
+ * Update saved attributes for managed page
+ */
+ *pa_to_attribute(pa) |= bits;
+ splx(s);
+}
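+
+/*
+ * Illustrative sketch, not part of this pmap: removal paths such as
+ * pmap_remove() invalidate one page at a time through the routine
+ * above, typically with both TLB and cache flushing requested:
+ */
+#if 0
+	pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
+#endif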
+
+/* static */
+boolean_t
+pmap_testbit(pa, bit)
+ register vm_offset_t pa;
+ int bit;
+{
+ register struct pv_entry *pv;
+ register pt_entry_t *pte;
+ int s;
+
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return(FALSE);
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Check saved info first
+ */
+ if (*pa_to_attribute(pa) & bit) {
+ splx(s);
+ return(TRUE);
+ }
+ /*
+ * Not found, check current mappings returning
+ * immediately if found.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ if (*pte & bit) {
+ splx(s);
+ return(TRUE);
+ }
+ }
+ }
+ splx(s);
+ return(FALSE);
+}
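+
+/*
+ * The saved-attribute check in pmap_testbit() pairs with
+ * pmap_remove_mapping(), which ORs a dying mapping's PG_U/PG_M bits
+ * into *pa_to_attribute(pa), so reference/modify history survives
+ * removal of the mapping itself.
+ */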
+
+/* static */
+void
+pmap_changebit(pa, bit, setem)
+ register vm_offset_t pa;
+ int bit;
+ boolean_t setem;
+{
+ register struct pv_entry *pv;
+ register pt_entry_t *pte, npte;
+ vm_offset_t va;
+ int s;
+ boolean_t firstpage = TRUE;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_BITS)
+ printf("pmap_changebit(%x, %x, %s)\n",
+ pa, bit, setem ? "set" : "clear");
+#endif
+ if (PAGE_IS_MANAGED(pa) == 0)
+ return;
+
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Clear saved attributes (modify, reference)
+ */
+ if (!setem)
+ *pa_to_attribute(pa) &= ~bit;
+ /*
+	 * Loop over all current mappings, setting/clearing as appropriate.
+ * If setting RO do we need to clear the VAC?
+ */
+ if (pv->pv_pmap != NULL) {
+#ifdef DEBUG
+ int toflush = 0;
+#endif
+ for (; pv; pv = pv->pv_next) {
+#ifdef DEBUG
+ toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
+#endif
+ va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (bit == PG_RO) {
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+ }
+
+ pte = pmap_pte(pv->pv_pmap, va);
+ if (setem)
+ npte = *pte | bit;
+ else
+ npte = *pte & ~bit;
+ if (*pte != npte) {
+#if defined(M68040) || defined(M68060)
+ /*
+ * If we are changing caching status or
+ * protection make sure the caches are
+ * flushed (but only once).
+ */
+ if (firstpage && mmutype <= MMU_68040 &&
+				    ((bit == PG_RO && setem) ||
+ (bit & PG_CMASK))) {
+ firstpage = FALSE;
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
+ if (active_pmap(pv->pv_pmap))
+ TBIS(va);
+ }
+ }
+ }
+ splx(s);
+}
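+
+/*
+ * Illustrative sketch, not part of this pmap: the ref/mod interfaces
+ * above reduce to pmap_testbit()/pmap_changebit() calls; write
+ * protecting a managed page, for instance, is just:
+ */
+#if 0
+	pmap_changebit(pa, PG_RO, TRUE);	/* revoke write access */
+#endif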
+
+/* static */
+void
+pmap_enter_ptpage(pmap, va)
+ register pmap_t pmap;
+ register vm_offset_t va;
+{
+ vm_offset_t ptpa;
+ register struct pv_entry *pv;
+#ifdef M68060
+ u_int stpa;
+#endif
+ st_entry_t *ste;
+ int s;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
+ printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
+#endif
+ /*
+ * Allocate a segment table if necessary. Note that it is allocated
+ * from a private map and not pt_map. This keeps user page tables
+ * aligned on segment boundaries in the kernel address space.
+ * The segment table is wired down. It will be freed whenever the
+ * reference count drops to zero.
+ */
+ if (pmap->pm_stab == Segtabzero) {
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(st_map, M68K_STSIZE);
+ pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab,
+ (paddr_t *)&pmap->pm_stpa);
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040) {
+#ifdef DEBUG
+ if (dowriteback && dokwriteback)
+#endif
+#if defined(M68060)
+		if (mmutype == MMU_68060) {
+			stpa = (u_int)pmap->pm_stpa;
+			while (stpa < (u_int)pmap->pm_stpa +
+			    M68K_STSIZE) {
+				pmap_changebit(stpa, PG_CCB, 0);
+				pmap_changebit(stpa, PG_CI, 1);
+				stpa += NBPG;
+			}
+			DCIS();	/* XXX */
+		} else
+#endif
+ pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
+ pmap->pm_stfree = protostfree;
+ }
+#endif
+ /*
+ * XXX may have changed segment table pointer for current
+ * process so update now to reload hardware.
+ */
+ if (active_user_pmap(pmap))
+ PMAP_ACTIVATE(pmap, 1);
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter: pmap %x stab %x(%x)\n",
+ pmap, pmap->pm_stab, pmap->pm_stpa);
+#endif
+ }
+
+ ste = pmap_ste(pmap, va);
+#if defined(M68040) || defined(M68060)
+ /*
+ * Allocate level 2 descriptor block if necessary
+ */
+ if (mmutype <= MMU_68040) {
+ if (*ste == SG_NV) {
+ int ix;
+ caddr_t addr;
+
+ ix = bmtol2(pmap->pm_stfree);
+ if (ix == -1)
+ panic("enter: out of address space"); /* XXX */
+ pmap->pm_stfree &= ~l2tobm(ix);
+ addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
+ bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
+ addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
+ *ste = (u_int)addr | SG_RW | SG_U | SG_V;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter: alloc ste2 %d(%x)\n", ix, addr);
+#endif
+ }
+ ste = pmap_ste2(pmap, va);
+ /*
+ * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
+ * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
+ * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
+ * PT page--the unit of allocation. We set `ste' to point
+ * to the first entry of that chunk which is validated in its
+ * entirety below.
+ */
+ ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
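+		/*
+		 * For example, with NBPG == 4096 and SG4_LEV3SIZE == 64
+		 * (consistent with the 16 descriptors noted above), a
+		 * chunk is 16 * sizeof(st_entry_t) == 64 bytes, so the
+		 * mask is ~63 and `ste' is aligned to a 64-byte boundary.
+		 */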
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter: ste2 %x (%x)\n",
+ pmap_ste2(pmap, va), ste);
+#endif
+ }
+#endif
+ va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
+
+ /*
+ * In the kernel we allocate a page from the kernel PT page
+ * free list and map it into the kernel page table map (via
+ * pmap_enter).
+ */
+ if (pmap == pmap_kernel()) {
+ register struct kpt_page *kpt;
+
+ s = splimp();
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
+ /*
+ * No PT pages available.
+ * Try once to free up unused ones.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_COLLECT)
+ printf("enter: no KPT pages, collecting...\n");
+#endif
+ pmap_collect(pmap_kernel());
+ if ((kpt = kpt_free_list) == (struct kpt_page *)0)
+ panic("pmap_enter_ptpage: can't get KPT page");
+ }
+ kpt_free_list = kpt->kpt_next;
+ kpt->kpt_next = kpt_used_list;
+ kpt_used_list = kpt;
+ ptpa = kpt->kpt_pa;
+ bzero((caddr_t)kpt->kpt_va, NBPG);
+		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT,
+		    VM_PROT_DEFAULT|PMAP_WIRED);
+#if defined(M68060)
+ if (mmutype == MMU_68060) {
+ pmap_changebit(ptpa, PG_CCB, 0);
+ pmap_changebit(ptpa, PG_CI, 1);
+ DCIS();
+ }
+#endif
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
+ int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
+
+ printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
+ ix, Sysptmap[ix], kpt->kpt_va);
+ }
+#endif
+ splx(s);
+ }
+ /*
+ * For user processes we just simulate a fault on that location
+ * letting the VM system allocate a zero-filled page.
+ */
+ else {
+ /*
+ * Count the segment table reference now so that we won't
+ * lose the segment table when low on memory.
+ */
+ pmap->pm_sref++;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+ printf("enter: about to fault UPT pg at %x\n", va);
+#endif
+ if (uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE))
+ panic("pmap_enter: uvm_fault failed");
+ pmap_extract(pmap_kernel(), va, &ptpa);
+ }
+#if defined(M68040) || defined(M68060)
+ /*
+ * Turn off copyback caching of page table pages,
+ * could get ugly otherwise.
+ */
+#ifdef DEBUG
+ if (dowriteback && dokwriteback)
+#endif
+ if (mmutype <= MMU_68040) {
+ pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
+#ifdef DEBUG
+ if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
+ printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
+ pmap == pmap_kernel() ? "Kernel" : "User",
+ va, ptpa, pte, *pte);
+#endif
+ pmap_changebit(ptpa, PG_CCB, 0);
+#ifdef M68060
+ if (mmutype == MMU_68060) {
+ pmap_changebit(ptpa, PG_CI, 1);
+ DCIS();
+ }
+#endif
+ }
+#endif
+ /*
+ * Locate the PV entry in the kernel for this PT page and
+ * record the STE address. This is so that we can invalidate
+ * the STE when we remove the mapping for the page.
+ */
+ pv = pa_to_pvh(ptpa);
+ s = splimp();
+ if (pv) {
+ pv->pv_flags |= PV_PTPAGE;
+ do {
+ if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
+ break;
+		} while ((pv = pv->pv_next) != NULL);
+ }
+#ifdef DEBUG
+ if (pv == NULL) {
+ pv = pa_to_pvh(ptpa);
+ if (pv) {
+ printf("pv->pv_next = %x\n", pv->pv_next);
+ do {
+ printf("pmap %x va %x ptste %x ptpmap %x flags %x\n",
+ pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
+ pv->pv_flags);
+ if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
+ break;
+			} while ((pv = pv->pv_next) != NULL);
+ } else
+ printf("pv at ptpa is 0\n");
+ panic("pmap_enter_ptpage: PT page not entered");
+ }
+#endif
+ pv->pv_ptste = ste;
+ pv->pv_ptpmap = pmap;
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
+ printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
+#endif
+
+ /*
+ * Map the new PT page into the segment table.
+ * Also increment the reference count on the segment table if this
+ * was a user page table page. Note that we don't use vm_map_pageable
+ * to keep the count like we do for PT pages, this is mostly because
+ * it would be difficult to identify ST pages in pmap_pageable to
+ * release them. We also avoid the overhead of vm_map_pageable.
+ */
+#if defined(M68040) || defined(M68060)
+ if (mmutype <= MMU_68040) {
+ st_entry_t *este;
+
+ for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
+ *ste = ptpa | SG_U | SG_RW | SG_V;
+ ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
+ }
+ } else
+#endif
+ *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
+ if (pmap != pmap_kernel()) {
+#ifdef DEBUG
+ if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
+ printf("enter: stab %x refcnt %d\n",
+ pmap->pm_stab, pmap->pm_sref);
+#endif
+ }
+#if 0
+ /*
+ * Flush stale TLB info.
+ */
+ if (pmap == pmap_kernel())
+ TBIAS();
+ else
+ TBIAU();
+#endif
+ pmap->pm_ptpages++;
+ splx(s);
+}
+
+/*
+ * pmap_ptpage_addref:
+ *
+ * Add a reference to the specified PT page.
+ */
+void
+pmap_ptpage_addref(ptpva)
+ vaddr_t ptpva;
+{
+ struct vm_page *m;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ m->wire_count++;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+}
+
+/*
+ * pmap_ptpage_delref:
+ *
+ * Delete a reference to the specified PT page.
+ */
+int
+pmap_ptpage_delref(ptpva)
+ vaddr_t ptpva;
+{
+ struct vm_page *m;
+ int rv;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ rv = --m->wire_count;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+ return (rv);
+}
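+
+/*
+ * Illustrative sketch, not part of this pmap: callers are expected to
+ * pair these, taking a reference when a PTE in the page is filled and
+ * dropping it when the PTE is cleared; a count of 1 means only the
+ * initial uvm_fault_wire() reference remains:
+ */
+#if 0
+	pmap_ptpage_addref(trunc_page((vaddr_t)pte));
+	/* ... use the mapping ... */
+	if (pmap_ptpage_delref(trunc_page((vaddr_t)pte)) == 1)
+		/* PT page now reclaimable */;
+#endif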
+
+#ifdef DEBUG
+/* static */
+void
+pmap_pvdump(pa)
+ vm_offset_t pa;
+{
+ register struct pv_entry *pv;
+
+ printf("pa %x", pa);
+ for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
+ printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
+ pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
+ pv->pv_flags);
+ printf("\n");
+}
+
+/* static */
+void
+pmap_check_wiring(str, va)
+ char *str;
+ vm_offset_t va;
+{
+ struct vm_map_entry *entry;
+ register int count;
+ register pt_entry_t *pte;
+
+ va = trunc_page(va);
+ if (!pmap_ste_v(pmap_kernel(), va) ||
+ !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
+ return;
+ if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
+ printf("wired_check: entry for %lx not found\n", va);
+ return;
+ }
+ count = 0;
+ for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
+ if (*pte)
+ count++;
+ if (entry->wired_count != count)
+ printf("*%s*: %x: w%d/a%d\n",
+ str, va, entry->wired_count, count);
+}
+#endif
+
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+	pmap_enter(pmap_kernel(), va, pa, prot,
+	    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+}
+
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
+ pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ }
+}
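+
+/*
+ * Illustrative sketch, not part of this pmap (hypothetical, page-aligned
+ * kva/pa/size): kenter/kremove give drivers wired kernel mappings
+ * without per-page pmap_enter() boilerplate.
+ */
+#if 0
+	for (off = 0; off < size; off += PAGE_SIZE)
+		pmap_kenter_pa(kva + off, pa + off,
+		    VM_PROT_READ|VM_PROT_WRITE);
+	/* ... */
+	pmap_kremove(kva, size);
+#endif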
diff --git a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
index 1bdeb80b05c..1f30a142fa4 100644
--- a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_bootstrap.c,v 1.11 2001/12/14 21:44:05 miod Exp $ */
+/* $OpenBSD: pmap_bootstrap.c,v 1.12 2001/12/20 19:02:29 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -91,6 +91,7 @@ extern pt_entry_t *Sysptmap, *Sysmap;
extern int maxmem, physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern vm_size_t mem_size;
+extern int protection_codes[];
/*
* Special purpose kernel virtual addresses, used for mapping
@@ -399,6 +400,25 @@ register vm_offset_t firstpa;
RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
/*
+ * Initialize protection array.
+ * XXX don't use a switch statement, it might produce an
+ * absolute "jmp" table.
+ */
+ {
+ register int *kp;
+
+ kp = &RELOC(protection_codes, int);
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ }
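+
+	/*
+	 * Illustrative note: pmap_enter() can then turn a vm_prot_t
+	 * directly into PTE protection bits by indexing this array
+	 * (typically through a pte_prot() macro in the Motorola
+	 * pmaps), e.g.
+	 *
+	 *	npte = pa | pte_prot(pmap, prot) | PG_V;
+	 */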
+
+ /*
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
@@ -803,6 +823,25 @@ register vm_offset_t firstpa;
RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
/*
+ * Initialize protection array.
+ * XXX don't use a switch statement, it might produce an
+ * absolute "jmp" table.
+ */
+ {
+ register int *kp;
+
+ kp = &RELOC(protection_codes, int);
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
+ kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
+ kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
+ }
+
+ /*
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
@@ -856,22 +895,3 @@ register vm_offset_t firstpa;
RELOC(virtual_avail, vm_offset_t) = va;
}
}
-
-void
-pmap_init_md()
-{
- vaddr_t addr;
-
- /*
- * mark as unavailable the regions which we have mapped in
- * pmap_bootstrap().
- */
- addr = (vaddr_t) intiobase;
- if (uvm_map(kernel_map, &addr,
- m68k_ptob(iiomapsize+EIOMAPSIZE),
- NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
- UVM_INH_NONE, UVM_ADV_RANDOM,
- UVM_FLAG_FIXED)))
- panic("pmap_init: bogons in the VM system!\n");
-}