Diffstat (limited to 'sys/arch/hp300')
-rw-r--r--   sys/arch/hp300/conf/DISKLESS          |    4
-rw-r--r--   sys/arch/hp300/conf/GENERIC           |    4
-rw-r--r--   sys/arch/hp300/conf/RAMDISK           |    4
-rw-r--r--   sys/arch/hp300/dev/grf.c              |   28
-rw-r--r--   sys/arch/hp300/hp300/genassym.cf      |   17
-rw-r--r--   sys/arch/hp300/hp300/hpux_machdep.c   |   21
-rw-r--r--   sys/arch/hp300/hp300/intr.c           |   17
-rw-r--r--   sys/arch/hp300/hp300/locore.s         |  112
-rw-r--r--   sys/arch/hp300/hp300/machdep.c        |  172
-rw-r--r--   sys/arch/hp300/hp300/mem.c            |   29
-rw-r--r--   sys/arch/hp300/hp300/pmap.c           | 1822
-rw-r--r--   sys/arch/hp300/hp300/trap.c           |   48
-rw-r--r--   sys/arch/hp300/hp300/vm_machdep.c     |  136
-rw-r--r--   sys/arch/hp300/include/cpu.h          |   12
-rw-r--r--   sys/arch/hp300/include/pmap.h         |   26
-rw-r--r--   sys/arch/hp300/include/vmparam.h      |   48
16 files changed, 1482 insertions, 1018 deletions
diff --git a/sys/arch/hp300/conf/DISKLESS b/sys/arch/hp300/conf/DISKLESS
index 939a98b2b6e..20cf279c1bf 100644
--- a/sys/arch/hp300/conf/DISKLESS
+++ b/sys/arch/hp300/conf/DISKLESS
@@ -1,4 +1,4 @@
-# $OpenBSD: DISKLESS,v 1.17 1999/08/15 07:42:45 downsj Exp $
+# $OpenBSD: DISKLESS,v 1.18 2001/05/04 22:48:58 aaron Exp $
# $NetBSD: GENERIC,v 1.23 1997/01/31 06:12:57 thorpej Exp $
#
# Generic kernel - one size fits all.
@@ -24,6 +24,8 @@ option HP400
option HP425
option HP433
+option UVM # use the UVM virtual memory system
+
# Need to set locally
maxusers 8
diff --git a/sys/arch/hp300/conf/GENERIC b/sys/arch/hp300/conf/GENERIC
index 3af3172cc8a..a0e3ca36866 100644
--- a/sys/arch/hp300/conf/GENERIC
+++ b/sys/arch/hp300/conf/GENERIC
@@ -1,4 +1,4 @@
-# $OpenBSD: GENERIC,v 1.25 2001/03/08 03:37:17 itojun Exp $
+# $OpenBSD: GENERIC,v 1.26 2001/05/04 22:48:58 aaron Exp $
# $NetBSD: GENERIC,v 1.23 1997/01/31 06:12:57 thorpej Exp $
#
# Generic kernel - one size fits all.
@@ -24,6 +24,8 @@ option HP400
option HP425
option HP433
+option UVM # use the UVM virtual memory system
+
# Need to set locally
maxusers 32
diff --git a/sys/arch/hp300/conf/RAMDISK b/sys/arch/hp300/conf/RAMDISK
index 80aceb302ef..fdc16d32c19 100644
--- a/sys/arch/hp300/conf/RAMDISK
+++ b/sys/arch/hp300/conf/RAMDISK
@@ -1,4 +1,4 @@
-# $OpenBSD: RAMDISK,v 1.8 2000/10/31 16:52:50 millert Exp $
+# $OpenBSD: RAMDISK,v 1.9 2001/05/04 22:48:58 aaron Exp $
#
# Ram disk kernel.
#
@@ -20,6 +20,8 @@ option HP400
option HP425
option HP433
+option UVM # use the UVM virtual memory system
+
maxusers 8
option TIMEZONE=0 # time zone to adjust RTC time by
diff --git a/sys/arch/hp300/dev/grf.c b/sys/arch/hp300/dev/grf.c
index 81559ec8aff..1d0445c18c7 100644
--- a/sys/arch/hp300/dev/grf.c
+++ b/sys/arch/hp300/dev/grf.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: grf.c,v 1.8 2001/01/31 22:39:41 jason Exp $ */
-/* $NetBSD: grf.c,v 1.25 1997/04/02 22:37:30 scottr Exp $ */
+/* $OpenBSD: grf.c,v 1.9 2001/05/04 22:48:58 aaron Exp $ */
+/* $NetBSD: grf.c,v 1.30 1998/08/20 08:33:41 kleink Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -77,6 +77,10 @@ extern struct emul emul_hpux;
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
+
#include <miscfs/specfs/specdev.h>
#include "ite.h"
@@ -493,7 +497,6 @@ grflock(gp, block)
{
struct proc *p = curproc; /* XXX */
int error;
- extern char devioc[];
#ifdef DEBUG
if (grfdebug & GDB_LOCK)
@@ -638,9 +641,15 @@ grfmap(dev, addrp, p)
vn.v_type = VCHR; /* XXX */
vn.v_specinfo = &si; /* XXX */
vn.v_rdev = dev; /* XXX */
- error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)addrp,
- (vm_size_t)len, VM_PROT_ALL, VM_PROT_ALL,
+#if defined(UVM)
+ error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
+ (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
+ flags, (caddr_t)&vn, 0);
+#else
+ error = vm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
+ (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
flags, (caddr_t)&vn, 0);
+#endif
if (error == 0)
(void) (*gp->g_sw->gd_mode)(gp, GM_MAP, *addrp);
return(error);
@@ -654,7 +663,7 @@ grfunmap(dev, addr, p)
{
struct grf_softc *sc = grf_cd.cd_devs[GRFUNIT(dev)];
struct grf_data *gp = sc->sc_data;
- vm_size_t size;
+ vsize_t size;
int rv;
#ifdef DEBUG
@@ -665,7 +674,12 @@ grfunmap(dev, addr, p)
return(EINVAL); /* XXX: how do we deal with this? */
(void) (*gp->g_sw->gd_mode)(gp, GM_UNMAP, 0);
size = round_page(gp->g_display.gd_regsize + gp->g_display.gd_fbsize);
- rv = vm_deallocate(&p->p_vmspace->vm_map, (vm_offset_t)addr, size);
+#if defined(UVM)
+ rv = uvm_unmap(&p->p_vmspace->vm_map, (vaddr_t)addr,
+ (vaddr_t)addr + size);
+#else
+ rv = vm_deallocate(&p->p_vmspace->vm_map, (vaddr_t)addr, size);
+#endif
return(rv == KERN_SUCCESS ? 0 : EINVAL);
}
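
The grf.c hunks above show the conversion pattern used throughout this commit: each Mach-VM call is bracketed by #if defined(UVM) with its UVM equivalent, so the kernel builds with either VM system. A rough sketch of the device map/unmap path follows; it is not part of the commit, the wrapper names are made up for illustration, and the calls are copied from the hunks above.

/*
 * Illustrative sketch only: the conditional mapping calls as
 * grfmap()/grfunmap() now issue them.
 */
#include <sys/param.h>
#include <sys/proc.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif

static int
grf_map_sketch(struct proc *p, caddr_t *addrp, size_t len, int flags,
    caddr_t handle)
{
#if defined(UVM)
	return (uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL, flags, handle, 0));
#else
	return (vm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL, flags, handle, 0));
#endif
}

static int
grf_unmap_sketch(struct proc *p, vaddr_t addr, vsize_t size)
{
#if defined(UVM)
	/* uvm_unmap() takes a start/end range rather than start/size. */
	return (uvm_unmap(&p->p_vmspace->vm_map, addr, addr + size));
#else
	return (vm_deallocate(&p->p_vmspace->vm_map, addr, size));
#endif
}
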
diff --git a/sys/arch/hp300/hp300/genassym.cf b/sys/arch/hp300/hp300/genassym.cf
index cc2731611fb..bce69f533e0 100644
--- a/sys/arch/hp300/hp300/genassym.cf
+++ b/sys/arch/hp300/hp300/genassym.cf
@@ -1,5 +1,5 @@
-# $OpenBSD: genassym.cf,v 1.5 1997/11/06 19:42:33 millert Exp $
-# $NetBSD: genassym.cf,v 1.8 1997/05/13 18:01:01 gwr Exp $
+# $OpenBSD: genassym.cf,v 1.6 2001/05/04 22:48:59 aaron Exp $
+# $NetBSD: genassym.cf,v 1.11 1998/02/16 20:58:29 thorpej Exp $
#
# Copyright (c) 1982, 1990, 1993
@@ -47,6 +47,10 @@ include <sys/user.h>
include <vm/vm.h>
+ifdef UVM
+include <uvm/uvm_extern.h>
+endif
+
include <machine/hp300spu.h>
include <machine/cpu.h>
include <machine/psl.h>
@@ -151,12 +155,12 @@ define P_MD_REGS offsetof(struct proc, p_md.md_regs)
define SSLEEP SSLEEP
define SRUN SRUN
-# VM structure fields
-define VM_PMAP offsetof(struct vmspace, vm_map.pmap)
-define PM_STCHG offsetof(struct pmap, pm_stchanged)
-
# interrupt/fault metering
+ifdef UVM
+define UVMEXP_INTRS offsetof(struct uvmexp, intrs)
+else
define V_INTR offsetof(struct vmmeter, v_intr)
+endif
# PSL values (should just include psl.h?)
define PSL_S PSL_S
@@ -209,7 +213,6 @@ define SG_ISHIFT SG_ISHIFT
# pcb fields
define PCB_PS offsetof(struct pcb, pcb_ps)
-define PCB_USTP offsetof(struct pcb, pcb_ustp)
define PCB_USP offsetof(struct pcb, pcb_usp)
define PCB_REGS offsetof(struct pcb, pcb_regs)
define PCB_ONFAULT offsetof(struct pcb, pcb_onfault)
diff --git a/sys/arch/hp300/hp300/hpux_machdep.c b/sys/arch/hp300/hp300/hpux_machdep.c
index 4be537be0db..ed43734daed 100644
--- a/sys/arch/hp300/hp300/hpux_machdep.c
+++ b/sys/arch/hp300/hp300/hpux_machdep.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: hpux_machdep.c,v 1.7 1997/07/06 08:01:59 downsj Exp $ */
-/* $NetBSD: hpux_machdep.c,v 1.13 1997/04/27 21:38:57 thorpej Exp $ */
+/* $OpenBSD: hpux_machdep.c,v 1.8 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: hpux_machdep.c,v 1.19 1998/02/16 20:58:30 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Jason R. Thorpe. All rights reserved.
@@ -78,6 +78,10 @@
#include <vm/vm_param.h>
#include <vm/vm_map.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
#include <sys/syscallargs.h>
#include <compat/hpux/hpux.h>
@@ -414,8 +418,13 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
psp->ps_sigstk.ss_flags |= SS_ONSTACK;
} else
fp = (struct hpuxsigframe *)(frame->f_regs[SP] - fsize);
+#if defined(UVM)
+ if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
+ (void)uvm_grow(p, (unsigned)fp);
+#else
if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
(void)grow(p, (unsigned)fp);
+#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
@@ -423,7 +432,11 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
p->p_pid, sig, &oonstack, fp, &fp->hsf_sc, ft);
#endif
+#if defined(UVM)
+ if (uvm_useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
+#else
if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
+#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
printf("hpux_sendsig(%d): useracc failed on sig %d\n",
@@ -581,7 +594,11 @@ hpux_sys_sigreturn(p, v, retval)
* Fetch and test the HP-UX context structure.
* We grab it all at once for speed.
*/
+#if defined(UVM)
+ if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
+#else
if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
+#endif
copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
return (EINVAL);
scp = &tsigc;
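
hpux_machdep.c applies the same recipe to the signal path: stack growth and user-address validation go through uvm_grow() and uvm_useracc() when UVM is configured. The following condensed sketch assumes fp, fsize, and p as in hpux_sendsig(); the helper function itself is hypothetical, not a verbatim excerpt.

static int
frame_ok_sketch(struct proc *p, struct hpuxsigframe *fp, int fsize)
{
	/* Grow the user stack if the frame lands below its current limit. */
	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
#if defined(UVM)
		(void)uvm_grow(p, (unsigned)fp);
#else
		(void)grow(p, (unsigned)fp);
#endif

	/* The frame must be writable before anything is copied out to it. */
#if defined(UVM)
	return (uvm_useracc((caddr_t)fp, fsize, B_WRITE));
#else
	return (useracc((caddr_t)fp, fsize, B_WRITE));
#endif
}
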
diff --git a/sys/arch/hp300/hp300/intr.c b/sys/arch/hp300/hp300/intr.c
index 7cd9efa2fcf..4acf8a1b52c 100644
--- a/sys/arch/hp300/hp300/intr.c
+++ b/sys/arch/hp300/hp300/intr.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: intr.c,v 1.6 1999/12/08 06:50:15 itojun Exp $ */
-/* $NetBSD: intr.c,v 1.2 1997/05/01 16:24:26 thorpej Exp $ */
+/* $OpenBSD: intr.c,v 1.7 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: intr.c,v 1.5 1998/02/16 20:58:30 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -27,8 +27,8 @@
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
@@ -48,6 +48,11 @@
#include <sys/malloc.h>
#include <sys/vmmeter.h>
+#if defined(UVM)
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+#endif
+
#include <net/netisr.h>
#include "ppp.h"
#include "bridge.h"
@@ -258,7 +263,11 @@ intr_dispatch(evec)
ipl = vec - ISRLOC;
intrcnt[ipl]++;
+#if defined(UVM)
+ uvmexp.intrs++;
+#else
cnt.v_intr++;
+#endif
list = &isr_list[ipl];
if (list->lh_first == NULL) {
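
The interrupt-metering change in intr.c (and the matching assembly in locore.s below) is purely a counter swap: UVM keeps interrupt statistics in uvmexp.intrs rather than in the old struct vmmeter. Sketch of the C side, for illustration only:

#if defined(UVM)
	uvmexp.intrs++;		/* UVM's global statistics structure */
#else
	cnt.v_intr++;		/* struct vmmeter under the old VM */
#endif

The assembly equivalent uses the UVMEXP_INTRS offset that genassym.cf now emits, e.g. "addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS".
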
diff --git a/sys/arch/hp300/hp300/locore.s b/sys/arch/hp300/hp300/locore.s
index 5e0e831126c..9036ced9806 100644
--- a/sys/arch/hp300/hp300/locore.s
+++ b/sys/arch/hp300/hp300/locore.s
@@ -1,5 +1,5 @@
-/* $OpenBSD: locore.s,v 1.22 2000/06/05 11:02:57 art Exp $ */
-/* $NetBSD: locore.s,v 1.79 1997/09/12 08:41:55 mycroft Exp $ */
+/* $OpenBSD: locore.s,v 1.23 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: locore.s,v 1.91 1998/11/11 06:41:25 thorpej Exp $ */
/*
* Copyright (c) 1997 Theo de Raadt
@@ -494,7 +494,11 @@ Lehighcode:
Lenab1:
/* select the software page size now */
lea _ASM_LABEL(tmpstk),sp | temporary stack
+#if defined(UVM)
+ jbsr _C_LABEL(uvm_setpagesize) | select software page size
+#else
jbsr _C_LABEL(vm_set_page_size) | select software page size
+#endif
/* set kernel stack, user SP, and initial pcb */
movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
lea a1@(USPACE-4),sp | set kernel stack to end of area
@@ -523,8 +527,7 @@ Lenab2:
orl #MMU_CEN,a0@(MMUCMD) | turn on external cache
Lnocache0:
/* Final setup for call to main(). */
- jbsr _C_LABEL(intr_init) | initialize interrupt handlers
- jbsr _C_LABEL(hp300_calibrate_delay) | calibrate delay() loop
+ jbsr _C_LABEL(hp300_init)
/*
* Create a fake exception frame so that cpu_fork() can copy it.
@@ -543,10 +546,14 @@ Lnocache0:
PANIC("main() returned")
/* NOTREACHED */
+/*
+ * proc_trampoline: call function in register a2 with a3 as an arg
+ * and then rei.
+ */
GLOBAL(proc_trampoline)
- movl a3,sp@-
- jbsr a2@
- addql #4,sp
+ movl a3,sp@- | push function arg
+ jbsr a2@ | call function
+ addql #4,sp | pop arg
movl sp@(FR_SP),a0 | grab and load
movl a0,usp | user SP
moveml sp@+,#0x7FFF | restore most user regs
@@ -1000,7 +1007,11 @@ Lbrkpt3:
ENTRY_NOPROFILE(spurintr) /* level 0 */
addql #1,_C_LABEL(intrcnt)+0
+#if defined(UVM)
+ addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
+#else
addql #1,_C_LABEL(cnt)+V_INTR
+#endif
jra _ASM_LABEL(rei)
ENTRY_NOPROFILE(lev1intr) /* level 1: HIL XXX this needs to go away */
@@ -1008,7 +1019,11 @@ ENTRY_NOPROFILE(lev1intr) /* level 1: HIL XXX this needs to go away */
jbsr _C_LABEL(hilint)
INTERRUPT_RESTOREREG
addql #1,_C_LABEL(intrcnt)+4
+#if defined(UVM)
+ addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
+#else
addql #1,_C_LABEL(cnt)+V_INTR
+#endif
jra _ASM_LABEL(rei)
ENTRY_NOPROFILE(intrhand) /* levels 2 through 5 */
@@ -1079,7 +1094,11 @@ Lnoleds0:
addql #4,sp
CLKADDR(a0)
Lrecheck:
+#if defined(UVM)
+ addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS | chalk up another interrupt
+#else
addql #1,_C_LABEL(cnt)+V_INTR | chalk up another interrupt
+#endif
movb a0@(CLKSR),d0 | see if anything happened
jmi Lclkagain | while we were in hardclock/statintr
INTERRUPT_RESTOREREG
@@ -1328,64 +1347,19 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
- /* see if pmap_activate needs to be called; should remove this */
- movl a0@(P_VMSPACE),a0 | vmspace = p->p_vmspace
-#ifdef DIAGNOSTIC
- tstl a0 | map == VM_MAP_NULL?
- jeq Lbadsw | panic
-#endif
- movl a0@(VM_PMAP),a0 | pmap = vmspace->vm_map.pmap
- tstl a0@(PM_STCHG) | pmap->st_changed?
- jeq Lswnochg | no, skip
- pea a1@ | push pcb (at p_addr)
- pea a0@ | push pmap
- jbsr _C_LABEL(pmap_activate) | pmap_activate(pmap, pcb)
- addql #8,sp
+ /*
+ * Activate process's address space.
+ * XXX Should remember the last USTP value loaded, and call this
+ * XXX only if it has changed.
+ */
+ pea a0@ | push proc
+ jbsr _C_LABEL(pmap_activate) | pmap_activate(p)
+ addql #4,sp
movl _C_LABEL(curpcb),a1 | restore p_addr
-Lswnochg:
lea _ASM_LABEL(tmpstk),sp | now goto a tmp stack for NMI
-#if defined(M68040)
-#if defined(M68020) || defined(M68030)
- cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
- jne Lres1a | no, skip
-#endif
- .word 0xf518 | yes, pflusha
- movl a1@(PCB_USTP),d0 | get USTP
- moveq #PGSHIFT,d1
- lsll d1,d0 | convert to addr
- .long 0x4e7b0806 | movc d0,urp
- jra Lcxswdone
-Lres1a:
-#endif
- movl #CACHE_CLR,d0
- movc d0,cacr | invalidate cache(s)
-#if defined(M68K_MMU_MOTOROLA)
-#if defined(M68K_MMU_HP)
- tstl _C_LABEL(mmutype) | HP MMU?
- jeq Lhpmmu4 | yes, skip
-#endif
- pflusha | flush entire TLB
- movl a1@(PCB_USTP),d0 | get USTP
- moveq #PGSHIFT,d1
- lsll d1,d0 | convert to addr
- lea _C_LABEL(protorp),a0 | CRP prototype
- movl d0,a0@(4) | stash USTP
- pmove a0@,crp | load new user root pointer
- jra Lcxswdone | thats it
-Lhpmmu4:
-#endif
-#if defined(M68K_MMU_HP)
- MMUADDR(a0)
- movl a0@(MMUTBINVAL),d1 | invalidate TLB
- tstl _C_LABEL(ectype) | got external VAC?
- jle Lnocache1 | no, skip
- andl #~MMU_CEN,a0@(MMUCMD) | toggle cache enable
- orl #MMU_CEN,a0@(MMUCMD) | to clear data cache
-Lnocache1:
- movl a1@(PCB_USTP),a0@(MMUUSTP) | context switch
-#endif
Lcxswdone:
+
moveml a1@(PCB_REGS),#0xFCFC | and registers
movl a1@(PCB_USP),a0
movl a0,usp | and USP
@@ -1799,20 +1773,30 @@ ENTRY(loadustp)
#if defined(M68040)
cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
jne LmotommuC | no, skip
+ .word 0xf518 | yes, pflusha
.long 0x4e7b0806 | movc d0,urp
rts
LmotommuC:
#endif
+ pflusha | flush entire TLB
lea _C_LABEL(protorp),a0 | CRP prototype
movl d0,a0@(4) | stash USTP
pmove a0@,crp | load root pointer
- movl #DC_CLEAR,d0
- movc d0,cacr | invalidate on-chip d-cache
- rts | since pmove flushes TLB
+ movl #CACHE_CLR,d0
+ movc d0,cacr | invalidate cache(s)
+ rts
Lhpmmu9:
#endif
#if defined(M68K_MMU_HP)
+ movl #CACHE_CLR,d0
+ movc d0,cacr | invalidate cache(s)
MMUADDR(a0)
+ movl a0@(MMUTBINVAL),d1 | invalid TLB
+ tstl _C_LABEL(ectype) | have external VAC?
+ jle 1f
+ andl #~MMU_CEN,a0@(MMUCMD) | toggle cache enable
+ orl #MMU_CEN,a0@(MMUCMD) | to clear data cache
+1:
movl sp@(4),a0@(MMUUSTP) | load a new USTP
#endif
rts
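
The largest locore.s change moves the MMU context reload out of cpu_switch(): instead of testing pm_stchanged and loading the USTP inline, the switch path now always calls the C routine pmap_activate(), whose interface changes from (pmap, pcb) to a single proc pointer. In C terms the revised hooks declared by this commit's pmap.c look like the sketch below (prototypes only; see pmap.c further down for the real bodies).

/* Load the address space of the given process into the MMU. */
void	pmap_activate __P((struct proc *));
/* Mark the process's address space as no longer in use (no-op on hp300). */
void	pmap_deactivate __P((struct proc *));

The comment added to cpu_switch() records the optimization left undone: remembering the last USTP loaded and skipping the call when it has not changed.
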
diff --git a/sys/arch/hp300/hp300/machdep.c b/sys/arch/hp300/hp300/machdep.c
index a6b85a3c25d..074eefbacd0 100644
--- a/sys/arch/hp300/hp300/machdep.c
+++ b/sys/arch/hp300/hp300/machdep.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: machdep.c,v 1.40 2000/03/23 09:59:54 art Exp $ */
-/* $NetBSD: machdep.c,v 1.94 1997/06/12 15:46:29 mrg Exp $ */
+/* $OpenBSD: machdep.c,v 1.41 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: machdep.c,v 1.121 1999/03/26 23:41:29 mycroft Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -55,6 +55,7 @@
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
+#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/map.h>
#include <sys/mbuf.h>
@@ -104,6 +105,10 @@
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
#include "opt_useleds.h"
#include <arch/hp300/dev/hilreg.h>
@@ -116,8 +121,15 @@
/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE; /* from <machine/param.h> */
+#if defined(UVM)
+vm_map_t exec_map = NULL;
+vm_map_t mb_map = NULL;
+vm_map_t phys_map = NULL;
+#else
vm_map_t buffer_map;
-extern vm_offset_t avail_end;
+#endif
+
+extern paddr_t avail_start, avail_end;
/*
* Declare these as initialized data so we can patch them.
@@ -163,6 +175,7 @@ char *hexstr __P((int, int));
/* functions called from locore.s */
void dumpsys __P((void));
+void hp300_init __P((void));
void straytrap __P((int, u_short));
void nmihand __P((struct frame));
@@ -185,6 +198,31 @@ int conforced; /* console has been forced */
int cpuspeed; /* relative cpu speed; XXX skewed on 68040 */
int delay_divisor; /* delay constant */
+ /*
+ * Early initialization, before main() is called.
+ */
+void
+hp300_init()
+{
+ /*
+ * Tell the VM system about available physical memory. The
+ * hp300 only has one segment.
+ */
+#if defined(UVM)
+ uvm_page_physload(atop(avail_start), atop(avail_end),
+ atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
+#else
+ vm_page_physload(atop(avail_start), atop(avail_end),
+ atop(avail_start), atop(avail_end));
+#endif /* UVM */
+
+ /* Initialize the interrupt handlers. */
+ intr_init();
+
+ /* Calibrate the delay loop. */
+ hp300_calibrate_delay();
+}
+
/*
* Console initialization: called early on from main,
* before vm init or startup. Do enough configuration
@@ -233,8 +271,8 @@ cpu_startup()
unsigned i;
caddr_t v;
int base, residual;
- vm_offset_t minaddr, maxaddr;
- vm_size_t size;
+ vaddr_t minaddr, maxaddr;
+ vsize_t size;
#ifdef DEBUG
extern int pmapdebug;
int opmapdebug = pmapdebug;
@@ -247,9 +285,8 @@ cpu_startup()
* avail_end was pre-decremented in pmap_bootstrap to compensate.
*/
for (i = 0; i < btoc(MSGBUFSIZE); i++)
- pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
- avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE, TRUE,
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), (vaddr_t)msgbufp,
+ avail_end + i * NBPG, VM_PROT_ALL, TRUE, VM_PROT_ALL);
initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));
/*
@@ -263,28 +300,72 @@ cpu_startup()
* Find out how much space we need, allocate it,
* and the give everything true virtual addresses.
*/
- size = (vm_size_t)allocsys((caddr_t)0);
+ size = (vsize_t)allocsys((caddr_t)0);
+#if defined(UVM)
+ if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
+ panic("startup: no room for tables");
+#else
if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(size))) == 0)
panic("startup: no room for tables");
+#endif
if ((allocsys(v) - v) != size)
- panic("startup: talbe size inconsistency");
+ panic("startup: table size inconsistency");
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
- buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
+#if defined(UVM)
+ if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
+ panic("startup: cannot allocate VM for buffers");
+ minaddr = (vaddr_t)buffers;
+#else
+ buffer_map = kmem_suballoc(kernel_map, (vaddr_t *)&buffers,
&maxaddr, size, TRUE);
- minaddr = (vm_offset_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
+ minaddr = (vaddr_t)buffers;
+ if (vm_map_find(buffer_map, vm_object_allocate(size), (vaddr_t)0,
&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
+#endif /* UVM */
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
- vm_size_t curbufsize;
- vm_offset_t curbuf;
+#if defined(UVM)
+ vsize_t curbufsize;
+ vaddr_t curbuf;
+ struct vm_page *pg;
+
+ /*
+ * Each buffer has MAXBSIZE bytes of VM space allocated. Of
+ * that MAXBSIZE space, we allocate and map (base+1) pages
+ * for the first "residual" buffers, and then we allocate
+ * "base" pages for the rest.
+ */
+ curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
+ curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
+
+ while (curbufsize) {
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (pg == NULL)
+ panic("cpu_startup: not enough memory for "
+ "buffer cache");
+#if defined(PMAP_NEW)
+ pmap_kenter_pgs(curbuf, &pg, 1);
+#else
+ pmap_enter(kernel_map->pmap, curbuf,
+ VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
+ TRUE, VM_PROT_READ|VM_PROT_WRITE);
+#endif
+ curbuf += PAGE_SIZE;
+ curbufsize -= PAGE_SIZE;
+ }
+#else /* ! UVM */
+ vsize_t curbufsize;
+ vaddr_t curbuf;
/*
* First <residual> buffers get (base+1) physical pages
@@ -293,22 +374,35 @@ cpu_startup()
* The rest of each buffer occupies virtual space,
* but has no physical memory allocated for it.
*/
- curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
+ curbuf = (vaddr_t)buffers + i * MAXBSIZE;
curbufsize = CLBYTES * (i < residual ? base+1 : base);
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
+#endif /* UVM */
}
+
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
+#if defined(UVM)
+ exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16*NCARGS, TRUE, FALSE, NULL);
+#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE);
+#endif
+
/*
* Allocate a submap for physio
*/
+#if defined(UVM)
+ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, TRUE, FALSE, NULL);
+#else
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
+#endif
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
@@ -317,8 +411,14 @@ cpu_startup()
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
+#if defined(UVM)
+ mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
+#else
+ mb_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_MBUF_SIZE, FALSE);
+#endif
+
/*
* Initialize timeouts
*/
@@ -327,7 +427,11 @@ cpu_startup()
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
+#if defined(UVM)
+ printf("avail mem = %ld\n", ptoa(uvmexp.free));
+#else
printf("avail mem = %ld\n", ptoa(cnt.v_free_count));
+#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@@ -337,9 +441,15 @@ cpu_startup()
* XXX This is bogus; should just fix KERNBASE and
* XXX VM_MIN_KERNEL_ADDRESS, but not right now.
*/
+#if defined(UVM)
+ if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE)
+ != KERN_SUCCESS)
+ panic("can't mark page 0 off-limits");
+#else
if (vm_map_protect(kernel_map, 0, NBPG, VM_PROT_NONE, TRUE)
!= KERN_SUCCESS)
panic("can't mark page 0 off-limits");
+#endif
/*
* Tell the VM system that writing to kernel text isn't allowed.
@@ -348,9 +458,15 @@ cpu_startup()
* XXX Should be m68k_trunc_page(&kernel_text) instead
* XXX of NBPG.
*/
+#if defined(UVM)
+ if (uvm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
+ UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != KERN_SUCCESS)
+ panic("can't protect kernel text");
+#else
if (vm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
+#endif
/*
* Set up CPU-specific registers, cache, etc.
@@ -434,7 +550,9 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
+#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
+#endif
valloc(buf, struct buf, nbuf);
return (v);
}
@@ -845,7 +963,7 @@ dumpconf()
/*
* XXX include the final RAM page which is not included in physmem.
*/
- dumpsize = physmem + 1;
+ dumpsize = physmem;
#ifdef HP300_NEWKVM
/* hp300 only uses a single segment. */
@@ -878,7 +996,7 @@ dumpsys()
/* dump routine */
int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
int pg; /* page being dumped */
- vm_offset_t maddr; /* PA being dumped */
+ paddr_t maddr; /* PA being dumped */
int error; /* error code from (*dump)() */
#ifdef HP300_NEWKVM
kcore_seg_t *kseg_p;
@@ -902,12 +1020,16 @@ dumpsys()
if (dumpsize == 0)
return;
}
- if (dumplo < 0)
+ if (dumplo <= 0) {
+ printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
+ minor(dumpdev));
return;
+ }
dump = bdevsw[major(dumpdev)].d_dump;
blkno = dumplo;
- printf("\ndumping to dev 0x%x, offset %ld\n", dumpdev, dumplo);
+ printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
+ minor(dumpdev), dumplo);
#ifdef HP300_NEWKVM
kseg_p = (kcore_seg_t *)dump_hdr;
@@ -969,8 +1091,8 @@ dumpsys()
if (pg && (pg % NPGMB) == 0)
printf("%d ", pg / NPGMB);
#undef NPGMB
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, maddr,
- VM_PROT_READ, TRUE, 0);
+ pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
+ VM_PROT_READ, TRUE, VM_PROT_READ);
error = (*dump)(dumpdev, blkno, vmmap, NBPG);
switch (error) {
@@ -1267,7 +1389,7 @@ parityerrorfind()
looking = 1;
ecacheoff();
for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, ctob(pg),
+ pmap_enter(pmap_kernel(), (vaddr_t)vmmap, ctob(pg),
VM_PROT_READ, TRUE, VM_PROT_READ);
ip = (int *)vmmap;
for (o = 0; o < NBPG; o += sizeof(int))
@@ -1280,7 +1402,7 @@ parityerrorfind()
found = 0;
done:
looking = 0;
- pmap_remove(pmap_kernel(), (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
+ pmap_remove(pmap_kernel(), (vaddr_t)vmmap, (vaddr_t)&vmmap[NBPG]);
ecacheon();
splx(s);
return(found);
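
In machdep.c the UVM path replaces kmem_suballoc()/kmem_alloc() with uvm_km_suballoc()/uvm_km_zalloc() and maps the buffer cache page by page with uvm_pagealloc(). A trimmed sketch of the submap setup, assuming the minaddr/maxaddr locals of cpu_startup() and with error handling elided (the calls themselves are copied from the hunks above):

#if defined(UVM)
	/* Submap for exec arguments; bounds concurrent execs. */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, TRUE, FALSE, NULL);

	/* Submap for physio transfers. */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, TRUE, FALSE, NULL);

	/* Interrupt-safe submap for mbuf clusters. */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
#endif
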
diff --git a/sys/arch/hp300/hp300/mem.c b/sys/arch/hp300/hp300/mem.c
index 2b3173a335b..55fadfcfd16 100644
--- a/sys/arch/hp300/hp300/mem.c
+++ b/sys/arch/hp300/hp300/mem.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: mem.c,v 1.11 1999/12/14 18:24:02 downsj Exp $ */
-/* $NetBSD: mem.c,v 1.17 1997/06/10 18:51:31 veego Exp $ */
+/* $OpenBSD: mem.c,v 1.12 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: mem.c,v 1.25 1999/03/27 00:30:06 mycroft Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -56,6 +56,9 @@
#include <machine/cpu.h>
#include <vm/vm.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
extern u_int lowram;
extern char *extiobase;
@@ -102,11 +105,12 @@ mmrw(dev, uio, flags)
struct uio *uio;
int flags;
{
- vm_offset_t o, v;
+ vaddr_t o, v;
int c;
struct iovec *iov;
int error = 0;
static int physlock;
+ vm_prot_t prot;
if (minor(dev) == 0) {
/* lock against other uses of shared vmmap */
@@ -142,23 +146,30 @@ mmrw(dev, uio, flags)
goto unlock;
}
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
- trunc_page(v), uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE, TRUE, 0);
+ prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
+ VM_PROT_WRITE;
+ pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
+ trunc_page(v), prot, TRUE, prot);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
- pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
- (vm_offset_t)vmmap + NBPG);
+ pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
+ (vaddr_t)vmmap + NBPG);
continue;
/* minor device 1 is kernel memory */
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
+#if defined(UVM)
+ if (!uvm_kernacc((caddr_t)v, c,
+ uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
+ return (EFAULT);
+#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
+#endif
/*
* Don't allow reading intio or dio
@@ -203,7 +214,7 @@ mmrw(dev, uio, flags)
}
if (error)
break;
- iov->iov_base += c;
+ iov->iov_base = (caddr_t)iov->iov_base + c;
iov->iov_len -= c;
uio->uio_offset += c;
uio->uio_resid -= c;
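
mem.c picks up the same access-check substitution for /dev/kmem: kernel addresses are validated with uvm_kernacc() instead of kernacc() before uiomove() touches them. Simplified sketch, assuming v, c, and uio as in mmrw() (not a verbatim excerpt):

#if defined(UVM)
	if (!uvm_kernacc((caddr_t)v, c,
	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
		return (EFAULT);
#else
	if (!kernacc((caddr_t)v, c,
	    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
		return (EFAULT);
#endif

The /dev/mem path also passes an explicit access_type to pmap_enter() now, matching the protection it requests.
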
diff --git a/sys/arch/hp300/hp300/pmap.c b/sys/arch/hp300/hp300/pmap.c
index 86a5a153e56..bafa0095a47 100644
--- a/sys/arch/hp300/hp300/pmap.c
+++ b/sys/arch/hp300/hp300/pmap.c
@@ -1,5 +1,41 @@
-/* $OpenBSD: pmap.c,v 1.15 2001/04/06 23:54:47 millert Exp $ */
-/* $NetBSD: pmap.c,v 1.36 1997/06/10 18:52:23 veego Exp $ */
+/* $OpenBSD: pmap.c,v 1.16 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: pmap.c,v 1.75 1999/06/15 22:18:07 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1991, 1993
@@ -101,8 +137,8 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
-#include <sys/pool.h>
#include <sys/user.h>
+#include <sys/pool.h>
#include <machine/pte.h>
@@ -110,60 +146,13 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#include <machine/cpu.h>
-
-#ifdef PMAPSTATS
-struct {
- int collectscans;
- int collectpages;
- int kpttotal;
- int kptinuse;
- int kptmaxuse;
-} kpt_stats;
-struct {
- int kernel; /* entering kernel mapping */
- int user; /* entering user mapping */
- int ptpneeded; /* needed to allocate a PT page */
- int nochange; /* no change at all */
- int pwchange; /* no mapping change, just wiring or protection */
- int wchange; /* no mapping change, just wiring */
- int pchange; /* no mapping change, just protection */
- int mchange; /* was mapped but mapping to different page */
- int managed; /* a managed page */
- int firstpv; /* first mapping for this PA */
- int secondpv; /* second mapping for this PA */
- int ci; /* cache inhibited */
- int unmanaged; /* not a managed page */
- int flushes; /* cache flushes */
-} enter_stats;
-struct {
- int calls;
- int removes;
- int pvfirst;
- int pvsearch;
- int ptinvalid;
- int uflushes;
- int sflushes;
-} remove_stats;
-struct {
- int calls;
- int changed;
- int alreadyro;
- int alreadyrw;
-} protect_stats;
-struct chgstats {
- int setcalls;
- int sethits;
- int setmiss;
- int clrcalls;
- int clrhits;
- int clrmiss;
-} changebit_stats[16];
+#if defined(UVM)
+#include <uvm/uvm.h>
#endif
+#include <machine/cpu.h>
+
#ifdef DEBUG
-int debugmap = 0;
-int pmapdebug = 0x2000;
#define PDB_FOLLOW 0x0001
#define PDB_INIT 0x0002
#define PDB_ENTER 0x0004
@@ -180,34 +169,31 @@ int pmapdebug = 0x2000;
#define PDB_WIRING 0x4000
#define PDB_PVDUMP 0x8000
-#ifdef M68K_MMU_HP
-int pmapvacflush = 0;
-#define PVF_ENTER 0x01
-#define PVF_REMOVE 0x02
-#define PVF_PROTECT 0x04
-#define PVF_TOTAL 0x80
-#endif
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
+
+#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
#if defined(M68040)
int dowriteback = 1; /* 68040: enable writeback caching */
int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
#endif
-
-extern vm_offset_t pager_sva, pager_eva;
-#endif
+#else /* ! DEBUG */
+#define PMAP_DPRINTF(l, x) /* nothing */
+#endif /* DEBUG */
/*
* Get STEs and PTEs for user/kernel address space
*/
#if defined(M68040)
#define pmap_ste1(m, v) \
- (&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
+ (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
#define pmap_ste2(m, v) \
(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
#define pmap_ste(m, v) \
- (&((m)->pm_stab[(vm_offset_t)(v) \
+ (&((m)->pm_stab[(vaddr_t)(v) \
>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
#define pmap_ste_v(m, v) \
(mmutype == MMU_68040 \
@@ -215,11 +201,11 @@ extern vm_offset_t pager_sva, pager_eva;
(*pmap_ste2(m, v) & SG_V)) \
: (*pmap_ste(m, v) & SG_V))
#else
-#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
+#define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
#endif
-#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
+#define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
#define pmap_pte_w(pte) (*(pte) & PG_W)
#define pmap_pte_ci(pte) (*(pte) & PG_CI)
@@ -247,8 +233,8 @@ int protection_codes[8];
*/
struct kpt_page {
struct kpt_page *kpt_next; /* link on either used or free list */
- vm_offset_t kpt_va; /* always valid kernel VA */
- vm_offset_t kpt_pa; /* PA of this page (for speed) */
+ vaddr_t kpt_va; /* always valid kernel VA */
+ paddr_t kpt_pa; /* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;
@@ -265,19 +251,20 @@ struct kpt_page *kpt_pages;
st_entry_t *Sysseg;
pt_entry_t *Sysmap, *Sysptmap;
st_entry_t *Segtabzero, *Segtabzeropa;
-vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
+vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
struct pmap kernel_pmap_store;
vm_map_t st_map, pt_map;
+#if defined(UVM)
+struct vm_map st_map_store, pt_map_store;
+#endif
-vm_offset_t avail_start; /* PA of first available physical page */
-vm_offset_t avail_end; /* PA of last available physical page */
-vm_size_t mem_size; /* memory size in bytes */
-vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
-vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-vm_offset_t vm_first_phys; /* PA of first managed page */
-vm_offset_t vm_last_phys; /* PA just past last managed page */
-int npages;
+paddr_t avail_start; /* PA of first available physical page */
+paddr_t avail_end; /* PA of last available physical page */
+vsize_t mem_size; /* memory size in bytes */
+vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int page_cnt; /* number of pages managed by VM system */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
struct pv_entry *pv_table;
@@ -285,8 +272,6 @@ char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
-struct pool pmap_pmap_pool; /* pool that pmap structs are allocated from */
-
#ifdef M68K_MMU_HP
int pmap_aliasmask; /* seperation at which VA aliasing ok */
#endif
@@ -294,135 +279,209 @@ int pmap_aliasmask; /* seperation at which VA aliasing ok */
int protostfree; /* prototype (default) free ST map */
#endif
+extern caddr_t CADDR1, CADDR2;
+
+pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
+pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
+
+struct pool pmap_pmap_pool; /* memory pool for pmap structures */
+
struct pv_entry *pmap_alloc_pv __P((void));
void pmap_free_pv __P((struct pv_entry *));
void pmap_collect_pv __P((void));
-void pmap_activate __P((pmap_t, struct pcb *));
-void pmap_deactivate __P((pmap_t, struct pcb *));
#ifdef COMPAT_HPUX
-int pmap_mapmulti __P((pmap_t, vm_offset_t));
+int pmap_mapmulti __P((pmap_t, vaddr_t));
#endif /* COMPAT_HPUX */
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
+
+#define pa_to_pvh(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.pvent[pg_]; \
+})
+
+#define pa_to_attribute(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
+})
+
/*
* Internal routines
*/
-void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
-boolean_t pmap_testbit __P((vm_offset_t, int));
-void pmap_changebit __P((vm_offset_t, int, boolean_t));
-void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
+boolean_t pmap_testbit __P((paddr_t, int));
+void pmap_changebit __P((paddr_t, int, int));
+void pmap_enter_ptpage __P((pmap_t, vaddr_t));
+void pmap_ptpage_addref __P((vaddr_t));
+int pmap_ptpage_delref __P((vaddr_t));
+void pmap_collect1 __P((pmap_t, paddr_t, paddr_t));
+void pmap_pinit __P((pmap_t));
+void pmap_release __P((pmap_t));
#ifdef DEBUG
-void pmap_pvdump __P((vm_offset_t));
-void pmap_check_wiring __P((char *, vm_offset_t));
+void pmap_pvdump __P((paddr_t));
+void pmap_check_wiring __P((char *, vaddr_t));
#endif
/* pmap_remove_mapping flags */
-#define PRM_TFLUSH 1
-#define PRM_CFLUSH 2
+#define PRM_TFLUSH 0x01
+#define PRM_CFLUSH 0x02
+#define PRM_KEEPPTPAGE 0x04
/*
- * Bootstrap memory allocator. This function allows for early dynamic
- * memory allocation until the virtual memory system has been bootstrapped.
- * After that point, either kmem_alloc or malloc should be used. This
- * function works by stealing pages from the (to be) managed page pool,
- * stealing virtual address space, then mapping the pages and zeroing them.
- *
- * It should be used from pmap_bootstrap till vm_page_startup, afterwards
- * it cannot be used, and will generate a panic if tried. Note that this
- * memory will never be freed, and in essence it is wired down.
+ * pmap_virtual_space: [ INTERFACE ]
+ *
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap.
+ *
+ * This is only an interface function if we do not use
+ * pmap_steal_memory()!
+ *
+ * Note: no locking is necessary in this function.
*/
-void *
-pmap_bootstrap_alloc(size)
- int size;
+void
+pmap_virtual_space(vstartp, vendp)
+ vaddr_t *vstartp, *vendp;
{
- extern boolean_t vm_page_startup_initialized;
- vm_offset_t val;
-
- if (vm_page_startup_initialized)
- panic("pmap_bootstrap_alloc: called after startup initialized");
- size = round_page(size);
- val = virtual_avail;
-
- virtual_avail = pmap_map(virtual_avail, avail_start,
- avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
- avail_start += size;
- bzero ((caddr_t) val, size);
-
- pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
-
- return ((void *) val);
+ *vstartp = virtual_avail;
+ *vendp = virtual_end;
}
/*
- * Initialize the pmap module.
- * Called by vm_init, to initialize any structures that the pmap
- * system needs to map virtual memory.
+ * pmap_init: [ INTERFACE ]
+ *
+ * Initialize the pmap module. Called by vm_init(), to initialize any
+ * structures that the pmap system needs to map virtual memory.
+ *
+ * Note: no locking is necessary in this function.
*/
void
-pmap_init(phys_start, phys_end)
- vm_offset_t phys_start, phys_end;
+pmap_init()
{
- vm_offset_t addr, addr2;
- vm_size_t s;
+ vaddr_t addr, addr2;
+ vsize_t s;
+ struct pv_entry *pv;
+ char *attr;
int rv;
+ int npages;
+ int bank;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
+
+ /*
+ * Before we do anything else, initialize the PTE pointers
+ * used by pmap_zero_page() and pmap_copy_page().
+ */
+ caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
+ caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_init(%lx, %lx)\n", phys_start, phys_end);
-#endif
/*
* Now that kernel map has been allocated, we can mark as
- * unavailable regions which we have mapped in locore.
+ * unavailable regions which we have mapped in pmap_bootstrap().
*/
- addr = (vm_offset_t) intiobase;
- (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
+#if defined(UVM)
+ addr = (vaddr_t) intiobase;
+ if (uvm_map(kernel_map, &addr,
+ m68k_ptob(IIOMAPSIZE+EIOMAPSIZE),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS)
+ goto bogons;
+ addr = (vaddr_t) Sysmap;
+ if (uvm_map(kernel_map, &addr, HP_MAX_PTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+ bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+#else
+ addr = (vaddr_t) intiobase;
+ (void) vm_map_find(kernel_map, NULL, (vaddr_t) 0,
&addr, m68k_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
- if (addr != (vm_offset_t)intiobase)
+ if (addr != (vaddr_t)intiobase)
goto bogons;
- addr = (vm_offset_t) Sysmap;
+ addr = (vaddr_t) Sysmap;
vm_object_reference(kernel_object);
(void) vm_map_find(kernel_map, kernel_object, addr,
&addr, HP_MAX_PTSIZE, FALSE);
/*
* If this fails it is probably because the static portion of
* the kernel page table isn't big enough and we overran the
- * page table map. Need to adjust pmap_size() in hp300_init.c.
+ * page table map.
*/
- if (addr != (vm_offset_t)Sysmap)
+ if (addr != (vaddr_t)Sysmap)
bogons:
panic("pmap_init: bogons in the VM system!");
+#endif /* UVM */
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT) {
- printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
- Sysseg, Sysmap, Sysptmap);
- printf(" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
- avail_start, avail_end, virtual_avail, virtual_end);
- }
-#endif
+ PMAP_DPRINTF(PDB_INIT,
+ ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
+ Sysseg, Sysmap, Sysptmap));
+ PMAP_DPRINTF(PDB_INIT,
+ (" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
+ avail_start, avail_end, virtual_avail, virtual_end));
/*
* Allocate memory for random pmap data structures. Includes the
* initial segment table, pv_head_table and pmap_attributes.
*/
- npages = atop(phys_end - phys_start);
- s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npages + npages);
+ for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
+ page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+ s = HP_STSIZE; /* Segtabzero */
+ s += page_cnt * sizeof(struct pv_entry); /* pv table */
+ s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
- addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#if defined(UVM)
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: can't allocate data structures");
+#else
+ addr = kmem_alloc(kernel_map, s);
+#endif
+
Segtabzero = (st_entry_t *) addr;
Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
addr += HP_STSIZE;
+
pv_table = (struct pv_entry *) addr;
- addr += sizeof(struct pv_entry) * npages;
+ addr += page_cnt * sizeof(struct pv_entry);
+
pmap_attributes = (char *) addr;
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: %lx bytes: npages %x s0 %p(%p) tbl %p atr %p\n",
- s, npages, Segtabzero, Segtabzeropa,
- pv_table, pmap_attributes);
-#endif
+
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes));
+
+ /*
+ * Now that the pv and attribute tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_table;
+ attr = pmap_attributes;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npages = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += npages;
+ attr += npages;
+ }
/*
* Allocate physical memory for kernel PT pages and their management.
@@ -435,17 +494,35 @@ bogons:
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
*/
+#if defined(UVM)
+ addr = 0;
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv != KERN_SUCCESS || (addr + s) >= (vaddr_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ rv = uvm_unmap(kernel_map, addr, addr + s);
+ if (rv != KERN_SUCCESS)
+ panic("pmap_init: uvm_unmap failed");
+#else
addr = 0;
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
+ if (rv != KERN_SUCCESS || addr + s >= (vaddr_t)Sysmap)
panic("pmap_init: kernel PT too small");
vm_map_remove(kernel_map, addr, addr + s);
+#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
- addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#if defined(UVM)
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+#else
+ addr = kmem_alloc(kernel_map, s);
+#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -457,15 +534,33 @@ bogons:
kpt_pages->kpt_va = addr2;
kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
} while (addr != addr2);
-#ifdef PMAPSTATS
- kpt_stats.kpttotal = atop(s);
-#endif
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: KPT: %ld pages from %lx to %lx\n",
- atop(s), addr, addr + s);
-#endif
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
+ atop(s), addr, addr + s));
+
+#if defined(UVM)
+ /*
+ * Allocate the segment table map and the page table map.
+ */
+ s = maxproc * HP_STSIZE;
+ st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
+ &st_map_store);
+
+ addr = HP_PTBASE;
+ if ((HP_PTMAXSIZE / HP_MAX_PTSIZE) < maxproc) {
+ s = HP_PTMAXSIZE;
+ /*
+ * XXX We don't want to hang when we run out of
+ * page tables, so we lower maxproc so that fork()
+ * will fail instead. Note that root could still raise
+ * this value via sysctl(2).
+ */
+ maxproc = (HP_PTMAXSIZE / HP_MAX_PTSIZE);
+ } else
+ s = (maxproc * HP_MAX_PTSIZE);
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ TRUE, &pt_map_store);
+#else
/*
* Allocate the segment table map
*/
@@ -499,10 +594,10 @@ bogons:
rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
if (rv != KERN_SUCCESS)
panic("pmap_init: cannot map range to pt_map");
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2);
-#endif
+
+ PMAP_DPRINTF(PDB_INIT,
+ /* ( */ ("pmap_init: pt_map [%lx - %lx)\n", addr, addr2));
+#endif /* UVM */
#if defined(M68040)
if (mmutype == MMU_68040) {
@@ -513,13 +608,22 @@ bogons:
#endif
/*
+ * Initialize the pmap pools.
+ */
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+
+ /*
* Now it is safe to enable pv_table recording.
*/
- vm_first_phys = phys_start;
- vm_last_phys = phys_end;
pmap_initialized = TRUE;
}
+/*
+ * pmap_alloc_pv:
+ *
+ * Allocate a pv_entry.
+ */
struct pv_entry *
pmap_alloc_pv()
{
@@ -528,9 +632,15 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
+#if defined(UVM)
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
+#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -554,6 +664,11 @@ pmap_alloc_pv()
return pv;
}
+/*
+ * pmap_free_pv:
+ *
+ * Free a pv_entry.
+ */
void
pmap_free_pv(pv)
struct pv_entry *pv;
@@ -572,11 +687,20 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
+#else
+ kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
+#endif
break;
}
}
+/*
+ * pmap_collect_pv:
+ *
+ * Perform compaction on the PV list, called via pmap_collect().
+ */
void
pmap_collect_pv()
{
@@ -593,8 +717,9 @@ pmap_collect_pv()
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp, pvp_pgi.pgi_list);
- pv_nfree -= pvp->pvp_pgi.pgi_nfree;
+ TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
+ pvp_pgi.pgi_list);
+ pv_nfree -= NPVPPG;
pvp->pvp_pgi.pgi_nfree = -1;
}
}
@@ -602,7 +727,7 @@ pmap_collect_pv()
if (pv_page_collectlist.tqh_first == 0)
return;
- for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
+ for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
if (ph->pv_pmap == 0)
continue;
s = splimp();
@@ -611,7 +736,8 @@ pmap_collect_pv()
if (pvp->pvp_pgi.pgi_nfree == -1) {
pvp = pv_page_freelist.tqh_first;
if (--pvp->pvp_pgi.pgi_nfree == 0) {
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+ TAILQ_REMOVE(&pv_page_freelist, pvp,
+ pvp_pgi.pgi_list);
}
npv = pvp->pvp_pgi.pgi_freelist;
#ifdef DIAGNOSTIC
@@ -630,27 +756,34 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
+#else
+ kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
+#endif
}
}
/*
+ * pmap_map:
+ *
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
+ *
+ * Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
*/
-vm_offset_t
+vaddr_t
pmap_map(va, spa, epa, prot)
- vm_offset_t va, spa, epa;
+ vaddr_t va;
+ paddr_t spa, epa;
int prot;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
while (spa < epa) {
pmap_enter(pmap_kernel(), va, spa, prot, FALSE, 0);
@@ -661,27 +794,20 @@ pmap_map(va, spa, epa, prot)
}
/*
- * Create and return a physical map.
+ * pmap_create: [ INTERFACE ]
*
- * If the size specified for the map
- * is zero, the map is an actual physical
- * map, and may be referenced by the
- * hardware.
+ * Create and return a physical map.
*
- * If the size specified is non-zero,
- * the map will be used in software only, and
- * is bounded by that size.
+ * Note: no locking is necessary in this function.
*/
pmap_t
pmap_create(size)
- vm_size_t size;
+ vsize_t size;
{
pmap_t pmap;
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
- printf("pmap_create(%lx)\n", size);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
+ ("pmap_create(%lx)\n", size));
/*
* Software use map does not need a pmap
@@ -690,24 +816,26 @@ pmap_create(size)
return (NULL);
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
+
bzero(pmap, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
}
/*
- * Initialize a preallocated and zeroed pmap structure,
- * such as one in a vmspace structure.
+ * pmap_pinit:
+ *
+ * Initialize a preallocated and zeroed pmap structure.
+ *
+ * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
*/
void
pmap_pinit(pmap)
struct pmap *pmap;
{
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
- printf("pmap_pinit(%p)\n", pmap);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
+ ("pmap_pinit(%p)\n", pmap));
/*
* No need to allocate page table space yet but we do need a
@@ -721,15 +849,15 @@ pmap_pinit(pmap)
if (mmutype == MMU_68040)
pmap->pm_stfree = protostfree;
#endif
- pmap->pm_stchanged = TRUE;
pmap->pm_count = 1;
simple_lock_init(&pmap->pm_lock);
}
/*
- * Retire the given physical map from service.
- * Should only be called if the map contains
- * no valid mappings.
+ * pmap_destroy: [ INTERFACE ]
+ *
+ * Drop the reference count on the specified pmap, releasing
+ * all resources if the reference count drops to zero.
*/
void
pmap_destroy(pmap)
@@ -740,10 +868,7 @@ pmap_destroy(pmap)
if (pmap == NULL)
return;
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_destroy(%p)\n", pmap);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
count = --pmap->pm_count;
@@ -755,19 +880,18 @@ pmap_destroy(pmap)
}
/*
- * Release any resources held by the given physical map.
- * Called when a pmap initialized by pmap_pinit is being released.
- * Should only be called if the map contains no valid mappings.
+ * pmap_release:
+ *
+ * Release the sources held by a pmap.
+ *
+ * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
*/
void
pmap_release(pmap)
struct pmap *pmap;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_release(%p)\n", pmap);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
#ifdef notdef /* DIAGNOSTIC */
/* count would be 0 from pmap_destroy... */
@@ -777,14 +901,26 @@ pmap_release(pmap)
#endif
if (pmap->pm_ptab)
- kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
+#if defined(UVM)
+ uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
+ HP_MAX_PTSIZE);
+#else
+ kmem_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
HP_MAX_PTSIZE);
+#endif
if (pmap->pm_stab != Segtabzero)
- kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+#if defined(UVM)
+ uvm_km_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
+ HP_STSIZE);
+#else
+ kmem_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
HP_STSIZE);
+#endif
}
/*
+ * pmap_reference: [ INTERFACE ]
+ *
* Add a reference to the specified pmap.
*/
void
@@ -795,41 +931,55 @@ pmap_reference(pmap)
if (pmap == NULL)
return;
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_reference(%p)\n", pmap);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
pmap->pm_count++;
simple_unlock(&pmap->pm_lock);
}
+/*
+ * pmap_activate: [ INTERFACE ]
+ *
+ * Activate the pmap used by the specified process. This includes
+ * reloading the MMU context of the current process, and marking
+ * the pmap in use by the processor.
+ *
+ * Note: we may only use spin locks here, since we are called
+ * by a critical section in cpu_switch()!
+ */
void
-pmap_activate(pmap, pcb)
- pmap_t pmap;
- struct pcb *pcb;
+pmap_activate(p)
+ struct proc *p;
{
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
- if (pmap == NULL)
- return;
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
+ ("pmap_activate(%p)\n", p));
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
- printf("pmap_activate(%p, %p)\n", pmap, pcb);
-#endif
-
- PMAP_ACTIVATE(pmap, pcb, pmap == curproc->p_vmspace->vm_map.pmap);
+ PMAP_ACTIVATE(pmap, p == curproc);
}
+/*
+ * pmap_deactivate: [ INTERFACE ]
+ *
+ * Mark that the pmap used by the specified process is no longer
+ * in use by the processor.
+ *
+ * The comment above pmap_activate() wrt. locking applies here,
+ * as well.
+ */
void
-pmap_deactivate(pmap, pcb)
- pmap_t pmap;
- struct pcb *pcb;
+pmap_deactivate(p)
+ struct proc *p;
{
+
+ /* No action necessary in this pmap implementation. */
}
/*
+ * pmap_remove: [ INTERFACE ]
+ *
* Remove the given range of addresses from the specified map.
*
* It is assumed that the start and end are properly
@@ -838,24 +988,19 @@ pmap_deactivate(pmap, pcb)
void
pmap_remove(pmap, sva, eva)
pmap_t pmap;
- vm_offset_t sva, eva;
+ vaddr_t sva, eva;
{
- vm_offset_t nssva;
+ vaddr_t nssva;
pt_entry_t *pte;
boolean_t firstpage, needcflush;
int flags;
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
- printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- remove_stats.calls++;
-#endif
firstpage = TRUE;
needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
@@ -886,9 +1031,6 @@ pmap_remove(pmap, sva, eva)
*/
if (firstpage) {
DCIS();
-#ifdef PMAPSTATS
- remove_stats.sflushes++;
-#endif
}
/*
* Remember if we may need to
@@ -923,40 +1065,25 @@ pmap_remove(pmap, sva, eva)
*/
if (pmap_aliasmask && !active_user_pmap(pmap))
needcflush = FALSE;
-#ifdef DEBUG
- if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
- if (pmapvacflush & PVF_TOTAL)
- DCIA();
- else if (pmap == pmap_kernel())
- DCIS();
- else
- DCIU();
- } else
-#endif
if (needcflush) {
if (pmap == pmap_kernel()) {
DCIS();
-#ifdef PMAPSTATS
- remove_stats.sflushes++;
-#endif
} else {
DCIU();
-#ifdef PMAPSTATS
- remove_stats.uflushes++;
-#endif
}
}
#endif
}
/*
- * pmap_page_protect:
+ * pmap_page_protect: [ INTERFACE ]
*
- * Lower the permission for all mappings to a given page.
+ * Lower the permission for all mappings to a given page to
+ * the permissions specified.
*/
void
pmap_page_protect(pa, prot)
- vm_offset_t pa;
+ paddr_t pa;
vm_prot_t prot;
{
struct pv_entry *pv;
@@ -967,7 +1094,7 @@ pmap_page_protect(pa, prot)
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
switch (prot) {
@@ -977,7 +1104,7 @@ pmap_page_protect(pa, prot)
/* copy_on_write */
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
- pmap_changebit(pa, PG_RO, TRUE);
+ pmap_changebit(pa, PG_RO, ~0);
return;
/* remove_all */
default:
@@ -1004,37 +1131,37 @@ pmap_page_protect(pa, prot)
printf("%s wired mapping for %lx not removed\n",
"pmap_page_protect:", pa);
#endif
+ if (pv == NULL)
+ break;
}
}
splx(s);
}
/*
- * Set the physical protection on the
- * specified range of this map as requested.
+ * pmap_protect: [ INTERFACE ]
+ *
+ * Set the physical protection on the specified range of this map
+ * as requested.
*/
void
pmap_protect(pmap, sva, eva, prot)
- pmap_t pmap;
- vm_offset_t sva, eva;
- vm_prot_t prot;
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
{
- vm_offset_t nssva;
+ vaddr_t nssva;
pt_entry_t *pte;
boolean_t firstpage, needtflush;
int isro;
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
- printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
+ ("pmap_protect(%p, %lx, %lx, %x)\n",
+ pmap, sva, eva, prot));
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- protect_stats.calls++;
-#endif
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1082,7 +1209,7 @@ pmap_protect(pmap, sva, eva, prot)
* "7.3 Cache Coherency" in the manual).
*/
if (isro && mmutype == MMU_68040) {
- vm_offset_t pa = pmap_pte_pa(pte);
+ paddr_t pa = pmap_pte_pa(pte);
DCFP(pa);
ICPP(pa);
@@ -1091,82 +1218,70 @@ pmap_protect(pmap, sva, eva, prot)
pmap_pte_set_prot(pte, isro);
if (needtflush)
TBIS(sva);
-#ifdef PMAPSTATS
- protect_stats.changed++;
-#endif
firstpage = FALSE;
}
-#ifdef PMAPSTATS
- else if (pmap_pte_v(pte)) {
- if (isro)
- protect_stats.alreadyro++;
- else
- protect_stats.alreadyrw++;
- }
-#endif
pte++;
sva += NBPG;
}
}
-#if defined(M68K_MMU_HP) && defined(DEBUG)
- if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
- if (pmapvacflush & PVF_TOTAL)
- DCIA();
- else if (pmap == pmap_kernel())
- DCIS();
- else
- DCIU();
- }
-#endif
}
/*
- * Insert the given physical page (p) at
- * the specified virtual address (v) in the
+ * pmap_enter: [ INTERFACE ]
+ *
+ * Insert the given physical page (pa) at
+ * the specified virtual address (va) in the
* target physical map with the protection requested.
*
* If specified, the page will be wired down, meaning
- * that the related pte can not be reclaimed.
+ * that the related pte cannot be reclaimed.
*
- * NB: This is the only routine which MAY NOT lazy-evaluate
+ * Note: This is the only routine which MAY NOT lazy-evaluate
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap, va, pa, prot, wired, access_type)
pmap_t pmap;
- vm_offset_t va;
- vm_offset_t pa;
+ vaddr_t va;
+ paddr_t pa;
vm_prot_t prot;
boolean_t wired;
vm_prot_t access_type;
{
pt_entry_t *pte;
int npte;
- vm_offset_t opa;
+ paddr_t opa;
boolean_t cacheable = TRUE;
boolean_t checkpv = TRUE;
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
- pmap, va, pa, prot, wired);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
+ pmap, va, pa, prot, wired));
+
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- if (pmap == pmap_kernel())
- enter_stats.kernel++;
- else
- enter_stats.user++;
+#ifdef DIAGNOSTIC
+ /*
+ * pmap_enter() should never be used for CADDR1 and CADDR2.
+ */
+ if (pmap == pmap_kernel() &&
+ (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
+ panic("pmap_enter: used for CADDR1 or CADDR2");
#endif
+
/*
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
+#if defined(UVM)
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, HP_MAX_PTSIZE);
+#else
pmap->pm_ptab = (pt_entry_t *)
kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
+#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1177,18 +1292,13 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
pa = m68k_trunc_page(pa);
pte = pmap_pte(pmap, va);
opa = pmap_pte_pa(pte);
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: pte %p, *pte %x\n", pte, *pte);
-#endif
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
/*
* Mapping has not changed, must be protection or wiring change.
*/
if (opa == pa) {
-#ifdef PMAPSTATS
- enter_stats.pwchange++;
-#endif
/*
* Wiring change, just update stats.
* We don't worry about wiring PT pages as they remain
@@ -1196,25 +1306,13 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* Hence, if a user page is wired, the PT page will be also.
*/
if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: wiring change -> %x\n", wired);
-#endif
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: wiring change -> %x\n", wired));
if (wired)
pmap->pm_stats.wired_count++;
else
pmap->pm_stats.wired_count--;
-#ifdef PMAPSTATS
- if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
- enter_stats.wchange++;
-#endif
}
-#ifdef PMAPSTATS
- else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
- enter_stats.pchange++;
- else
- enter_stats.nochange++;
-#endif
/*
* Retain cache inhibition status
*/
@@ -1229,14 +1327,10 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* handle validating new mapping.
*/
if (opa) {
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: removing old mapping %lx\n", va);
-#endif
- pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
-#ifdef PMAPSTATS
- enter_stats.mchange++;
-#endif
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: removing old mapping %lx\n", va));
+ pmap_remove_mapping(pmap, va, pte,
+ PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
}
/*
@@ -1245,35 +1339,26 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel())
- (void) vm_map_pageable(pt_map, trunc_page(pte),
- round_page(pte+1), FALSE);
+ pmap_ptpage_addref(trunc_page(pte));
/*
* Enter on the PV list if part of our managed memory
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
*/
- if (pa >= vm_first_phys && pa < vm_last_phys) {
+ if (PAGE_IS_MANAGED(pa)) {
struct pv_entry *pv, *npv;
int s;
-#ifdef PMAPSTATS
- enter_stats.managed++;
-#endif
pv = pa_to_pvh(pa);
s = splimp();
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: pv at %p: %lx/%p/%p\n",
- pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
-#endif
+ PMAP_DPRINTF(PDB_ENTER,
+ ("enter: pv at %p: %lx/%p/%p\n",
+ pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
/*
* No entries yet, use header as the first entry
*/
if (pv->pv_pmap == NULL) {
-#ifdef PMAPSTATS
- enter_stats.firstpv++;
-#endif
pv->pv_va = va;
pv->pv_pmap = pmap;
pv->pv_next = NULL;
@@ -1299,10 +1384,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
npv->pv_ptpmap = NULL;
npv->pv_flags = 0;
pv->pv_next = npv;
-#ifdef PMAPSTATS
- if (!npv->pv_next)
- enter_stats.secondpv++;
-#endif
#ifdef M68K_MMU_HP
/*
* Since there is another logical mapping for the
@@ -1328,11 +1409,9 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
*/
if (pmap_aliasmask) {
if (pv->pv_flags & PV_CI) {
-#ifdef DEBUG
- if (pmapdebug & PDB_CACHE)
- printf("enter: pa %lx already CI'ed\n",
- pa);
-#endif
+ PMAP_DPRINTF(PDB_CACHE,
+ ("enter: pa %lx already CI'ed\n",
+ pa));
checkpv = cacheable = FALSE;
} else if (npv->pv_next ||
((pmap == pv->pv_pmap ||
@@ -1340,20 +1419,29 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
pv->pv_pmap == pmap_kernel()) &&
((pv->pv_va & pmap_aliasmask) !=
(va & pmap_aliasmask)))) {
-#ifdef DEBUG
- if (pmapdebug & PDB_CACHE)
- printf("enter: pa %lx CI'ing all\n",
- pa);
-#endif
+ PMAP_DPRINTF(PDB_CACHE,
+ ("enter: pa %lx CI'ing all\n",
+ pa));
cacheable = FALSE;
pv->pv_flags |= PV_CI;
-#ifdef PMAPSTATS
- enter_stats.ci++;
-#endif
}
}
#endif
}
+
+ /*
+ * Speed up pmap_is_referenced() or pmap_is_modified() based
+ * on the hint provided in access_type.
+ */
+#ifdef DIAGNOSTIC
+ if (access_type & ~prot)
+ panic("pmap_enter: access_type exceeds prot");
+#endif
+ if (access_type & VM_PROT_WRITE)
+ *pa_to_attribute(pa) |= (PG_U|PG_M);
+ else if (access_type & VM_PROT_ALL)
+ *pa_to_attribute(pa) |= PG_U;
+
splx(s);
}
/*
@@ -1362,9 +1450,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
*/
else if (pmap_initialized) {
checkpv = cacheable = FALSE;
-#ifdef PMAPSTATS
- enter_stats.unmanaged++;
-#endif
}
/*
@@ -1407,10 +1492,9 @@ validate:
#endif
npte |= PG_CCB;
#endif
-#ifdef DEBUG
- if (pmapdebug & PDB_ENTER)
- printf("enter: new pte value %x\n", npte);
-#endif
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
+
/*
* Remember if this was a wiring-only change.
* If so, we need not flush the TLB and caches.
@@ -1434,53 +1518,39 @@ validate:
* external VAC.
*/
if (checkpv && !cacheable) {
- pmap_changebit(pa, PG_CI, TRUE);
+ pmap_changebit(pa, PG_CI, ~0);
DCIA();
-#ifdef PMAPSTATS
- enter_stats.flushes++;
-#endif
#ifdef DEBUG
if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
(PDB_CACHE|PDB_PVDUMP))
pmap_pvdump(pa);
#endif
}
-#ifdef DEBUG
- else if (pmapvacflush & PVF_ENTER) {
- if (pmapvacflush & PVF_TOTAL)
- DCIA();
- else if (pmap == pmap_kernel())
- DCIS();
- else
- DCIU();
- }
-#endif
#endif
#ifdef DEBUG
if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
- pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
+ pmap_check_wiring("enter", trunc_page(pte));
#endif
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * pmap_change_wiring: [ INTERFACE ]
+ *
+ * Change the wiring attribute for a map/virtual-address pair.
+ *
+ * The mapping must already exist in the pmap.
*/
void
pmap_change_wiring(pmap, va, wired)
- pmap_t pmap;
- vm_offset_t va;
+ pmap_t pmap;
+ vaddr_t va;
boolean_t wired;
{
pt_entry_t *pte;
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_change_wiring(%p, %lx, %x)\n", pmap, va, wired);
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_change_wiring(%p, %lx, %x)\n", pmap, va, wired));
+
if (pmap == NULL)
return;
@@ -1520,110 +1590,141 @@ pmap_change_wiring(pmap, va, wired)
}
/*
- * Routine: pmap_extract
- * Function:
- * Extract the physical page address associated
- * with the given map/virtual_address pair.
+ * pmap_extract: [ INTERFACE ]
+ *
+ * Extract the physical address associated with the given
+ * pmap/virtual address pair.
*/
-
-vm_offset_t
+paddr_t
pmap_extract(pmap, va)
pmap_t pmap;
- vm_offset_t va;
+ vaddr_t va;
{
- vm_offset_t pa;
+ paddr_t pa;
+
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_extract(%p, %lx) -> ", pmap, va));
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_extract(%p, %lx) -> ", pmap, va);
-#endif
pa = 0;
if (pmap && pmap_ste_v(pmap, va))
pa = *pmap_pte(pmap, va);
if (pa)
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("%lx\n", pa);
-#endif
- return(pa);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("%lx\n", pa));
+
+ return (pa);
}
/*
- * Copy the range specified by src_addr/len
+ * pmap_copy: [ INTERFACE ]
+ *
+ * Copy the mapping range specified by src_addr/len
* from the source map to the range dst_addr/len
* in the destination map.
*
* This routine is only advisory and need not do anything.
*/
-void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+void
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
pmap_t dst_pmap;
pmap_t src_pmap;
- vm_offset_t dst_addr;
- vm_size_t len;
- vm_offset_t src_addr;
+ vaddr_t dst_addr;
+ vsize_t len;
+ vaddr_t src_addr;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
- dst_pmap, src_pmap, dst_addr, len, src_addr);
-#endif
+
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
+ dst_pmap, src_pmap, dst_addr, len, src_addr));
}
/*
+ * pmap_update:
+ *
* Require that all active physical maps contain no
- * incorrect entries NOW. [This update includes
- * forcing updates of any address map caching.]
+ * incorrect entries NOW, by processing any deferred
+ * map operations.
+ */
+void
+pmap_update()
+{
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_update()\n"));
+
+ TBIA(); /* XXX should not be here. */
+}
+
+/*
+ * pmap_collect: [ INTERFACE ]
*
- * Generally used to insure that a thread about
- * to run will see a semantically correct world.
+ * Garbage collects the physical map system for pages which are no
+ * longer used. Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but others may be
+ * collected.
+ *
+ * Called by the pageout daemon when pages are scarce.
*/
-void pmap_update()
+void
+pmap_collect(pmap)
+ pmap_t pmap;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_update()\n");
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
+
+ if (pmap == pmap_kernel()) {
+ int bank, s;
+
+ /*
+ * XXX This is very bogus. We should handle kernel PT
+ * XXX pages much differently.
+ */
+
+ s = splimp();
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+ splx(s);
+ } else {
+ /*
+ * This process is about to be swapped out; free all of
+ * the PT pages by removing the physical mappings for its
+ * entire address space. Note: pmap_remove() performs
+ * all necessary locking.
+ */
+ pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+ }
+
+#ifdef notyet
+ /* Go compact and garbage-collect the pv_table. */
+ pmap_collect_pv();
#endif
- TBIA();
}
/*
- * Routine: pmap_collect
- * Function:
- * Garbage collects the physical map system for
- * pages which are no longer used.
- * Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but
- * others may be collected.
- * Usage:
- * Called by the pageout daemon when pages are scarce.
+ * pmap_collect1:
+ *
+ * Garbage-collect KPT pages. Helper for the above (bogus)
+ * pmap_collect().
+ *
+ * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
+ * WAY OF HANDLING PT PAGES!
*/
void
-pmap_collect(pmap)
+pmap_collect1(pmap, startpa, endpa)
pmap_t pmap;
+ paddr_t startpa, endpa;
{
- vm_offset_t pa;
+ paddr_t pa;
struct pv_entry *pv;
pt_entry_t *pte;
- vm_offset_t kpa;
- int s;
-
+ paddr_t kpa;
#ifdef DEBUG
st_entry_t *ste;
int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
#endif
- if (pmap != pmap_kernel())
- return;
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_collect(%p)\n", pmap);
-#endif
-#ifdef PMAPSTATS
- kpt_stats.collectscans++;
-#endif
- s = splimp();
- for (pa = vm_first_phys; pa < vm_last_phys; pa += NBPG) {
+ for (pa = startpa; pa < endpa; pa += NBPG) {
struct kpt_page *kpt, **pkpt;
/*
@@ -1640,8 +1741,8 @@ pmap_collect(pmap)
if (pv == NULL)
continue;
#ifdef DEBUG
- if (pv->pv_va < (vm_offset_t)Sysmap ||
- pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
+ if (pv->pv_va < (vaddr_t)Sysmap ||
+ pv->pv_va >= (vaddr_t)Sysmap + HP_MAX_PTSIZE)
printf("collect: kernel PT VA out of range\n");
else
goto ok;
@@ -1693,10 +1794,6 @@ ok:
*pkpt = kpt->kpt_next;
kpt->kpt_next = kpt_free_list;
kpt_free_list = kpt;
-#ifdef PMAPSTATS
- kpt_stats.kptinuse--;
- kpt_stats.collectpages++;
-#endif
#ifdef DEBUG
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
pmapdebug = opmapdebug;
@@ -1710,191 +1807,192 @@ ok:
ste, *ste);
#endif
}
- splx(s);
}
/*
- * pmap_zero_page zeros the specified (machine independent)
- * page by mapping the page into virtual memory and using
- * bzero to clear its contents, one machine dependent page
- * at a time.
- *
- * XXX this is a bad implementation for virtual cache machines
- * (320/350) because pmap_enter doesn't cache-inhibit the temporary
- * kernel mapping and we wind up with data cached for that KVA.
- * It is probably a win for physical cache machines (370/380)
- * as the cache loading is not wasted.
+ * pmap_zero_page: [ INTERFACE ]
+ *
+ * Zero the specified (machine independent) page by mapping the page
+ * into virtual memory and using bzero to clear its contents, one
+ * machine dependent page at a time.
+ *
+ * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
+ * (Actually, we go to splimp(), and since we don't
+ * support multiple processors, this is sufficient.)
*/
void
pmap_zero_page(phys)
- vm_offset_t phys;
+ paddr_t phys;
{
- vm_offset_t kva;
- extern caddr_t CADDR1;
+ int s, npte;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
+
+ npte = phys | PG_V;
+#ifdef M68K_MMU_HP
+ if (pmap_aliasmask) {
+ /*
+ * Cache-inhibit the mapping on VAC machines, as we would
+ * be wasting the cache load.
+ */
+ npte |= PG_CI;
+ }
+#endif
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ /*
+ * Set copyback caching on the page; this is required
+ * for cache consistency (since regular mappings are
+ * copyback as well).
+ */
+ npte |= PG_CCB;
+ }
+#endif
+
+ s = splimp();
+
+ *caddr1_pte = npte;
+ TBIS((vaddr_t)CADDR1);
+
+ zeropage(CADDR1);
#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_zero_page(%lx)\n", phys);
-#endif
- kva = (vm_offset_t) CADDR1;
- pmap_enter(pmap_kernel(), kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE,
- VM_PROT_READ|VM_PROT_WRITE);
- zeropage((caddr_t)kva);
- pmap_remove_mapping(pmap_kernel(), kva, PT_ENTRY_NULL,
- PRM_TFLUSH|PRM_CFLUSH);
+ *caddr1_pte = PG_NV;
+ TBIS((vaddr_t)CADDR1);
+#endif
+
+ splx(s);
}
/*
- * pmap_copy_page copies the specified (machine independent)
- * page by mapping the page into virtual memory and using
- * bcopy to copy the page, one machine dependent page at a
- * time.
+ * pmap_copy_page: [ INTERFACE ]
*
+ * Copy the specified (machine independent) page by mapping the page
+ * into virtual memory and using bcopy to copy the page, one machine
+ * dependent page at a time.
*
- * XXX this is a bad implementation for virtual cache machines
- * (320/350) because pmap_enter doesn't cache-inhibit the temporary
- * kernel mapping and we wind up with data cached for that KVA.
- * It is probably a win for physical cache machines (370/380)
- * as the cache loading is not wasted.
+ * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
+ * (Actually, we go to splimp(), and since we don't
+ * support multiple processors, this is sufficient.)
*/
void
pmap_copy_page(src, dst)
- vm_offset_t src, dst;
+ paddr_t src, dst;
{
- vm_offset_t skva, dkva;
- extern caddr_t CADDR1, CADDR2;
+ int s, npte1, npte2;
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
+
+ npte1 = src | PG_RO | PG_V;
+ npte2 = dst | PG_V;
+#ifdef M68K_MMU_HP
+ if (pmap_aliasmask) {
+ /*
+ * Cache-inhibit the mapping on VAC machines, as we would
+ * be wasting the cache load.
+ */
+ npte1 |= PG_CI;
+ npte2 |= PG_CI;
+ }
+#endif
+
+#if defined(M68040) || defined(M68060)
+ if (mmutype == MMU_68040) {
+ /*
+ * Set copyback caching on the pages; this is required
+ * for cache consistency (since regular mappings are
+ * copyback as well).
+ */
+ npte1 |= PG_CCB;
+ npte2 |= PG_CCB;
+ }
+#endif
+
+ s = splimp();
+
+ *caddr1_pte = npte1;
+ TBIS((vaddr_t)CADDR1);
+
+ *caddr2_pte = npte2;
+ TBIS((vaddr_t)CADDR2);
+
+ copypage(CADDR1, CADDR2);
#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_copy_page(%lx, %lx)\n", src, dst);
-#endif
- skva = (vm_offset_t) CADDR1;
- dkva = (vm_offset_t) CADDR2;
- pmap_enter(pmap_kernel(), skva, src, VM_PROT_READ, TRUE, VM_PROT_READ);
- pmap_enter(pmap_kernel(), dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE,
- VM_PROT_READ|VM_PROT_WRITE);
- copypage((caddr_t)skva, (caddr_t)dkva);
- /* CADDR1 and CADDR2 are virtually contiguous */
- pmap_remove(pmap_kernel(), skva, skva + (2 * NBPG));
+ *caddr1_pte = PG_NV;
+ TBIS((vaddr_t)CADDR1);
+
+ *caddr2_pte = PG_NV;
+ TBIS((vaddr_t)CADDR2);
+#endif
+
+ splx(s);
}
/*
- * Routine: pmap_pageable
- * Function:
- * Make the specified pages (by pmap, offset)
- * pageable (or not) as requested.
- *
- * A page which is not pageable may not take
- * a fault; therefore, its page table entry
- * must remain valid for the duration.
- *
- * This routine is merely advisory; pmap_enter
- * will specify that these pages are to be wired
- * down (or not) as appropriate.
+ * pmap_pageable: [ INTERFACE ]
+ *
+ * Make the specified pages (by pmap, offset) pageable (or not) as
+ * requested.
+ *
+ * A page which is not pageable may not take a fault; therefore,
+ * its page table entry must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter() will specify that
+ * these pages are to be wired down (or not) as appropriate.
*/
void
pmap_pageable(pmap, sva, eva, pageable)
pmap_t pmap;
- vm_offset_t sva, eva;
+ vaddr_t sva, eva;
boolean_t pageable;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_pageable(%p, %lx, %lx, %x)\n",
- pmap, sva, eva, pageable);
-#endif
- /*
- * If we are making a PT page pageable then all valid
- * mappings must be gone from that page. Hence it should
- * be all zeros and there is no need to clean it.
- * Assumptions:
- * - we are called with only one page at a time
- * - PT pages have only one pv_table entry
- */
- if (pmap == pmap_kernel() && pageable && sva + NBPG == eva) {
- struct pv_entry *pv;
- vm_offset_t pa;
-#ifdef DEBUG
- if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
- printf("pmap_pageable(%p, %lx, %lx, %x)\n",
- pmap, sva, eva, pageable);
-#endif
- if (!pmap_ste_v(pmap, sva))
- return;
- pa = pmap_pte_pa(pmap_pte(pmap, sva));
- if (pa < vm_first_phys || pa >= vm_last_phys)
- return;
- pv = pa_to_pvh(pa);
- if (pv->pv_ptste == NULL)
- return;
-#ifdef DEBUG
- if (pv->pv_va != sva || pv->pv_next) {
- printf("pmap_pageable: bad PT page va %lx next %p\n",
- pv->pv_va, pv->pv_next);
- return;
- }
-#endif
- /*
- * Mark it unmodified to avoid pageout
- */
- pmap_changebit(pa, PG_M, FALSE);
-#ifdef DEBUG
- if ((PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0) {
- printf("pa %lx: flags=%x: not clean\n",
- pa, PHYS_TO_VM_PAGE(pa)->flags);
- PHYS_TO_VM_PAGE(pa)->flags |= PG_CLEAN;
- }
- if (pmapdebug & PDB_PTPAGE)
- printf("pmap_pageable: PT page %lx(%x) unmodified\n",
- sva, *pmap_pte(pmap, sva));
- if (pmapdebug & PDB_WIRING)
- pmap_check_wiring("pageable", sva);
-#endif
- }
+ PMAP_DPRINTF(PDB_FOLLOW,
+ ("pmap_pageable(%p, %lx, %lx, %x)\n",
+ pmap, sva, eva, pageable));
}
/*
+ * pmap_clear_modify: [ INTERFACE ]
+ *
* Clear the modify bits on the specified physical page.
*/
-
void
pmap_clear_modify(pa)
- vm_offset_t pa;
+ paddr_t pa;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_clear_modify(%lx)\n", pa);
-#endif
- pmap_changebit(pa, PG_M, FALSE);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
+
+ pmap_changebit(pa, 0, ~PG_M);
}
/*
- * pmap_clear_reference:
+ * pmap_clear_reference: [ INTERFACE ]
*
* Clear the reference bit on the specified physical page.
*/
-
-void pmap_clear_reference(pa)
- vm_offset_t pa;
+void
+pmap_clear_reference(pa)
+ paddr_t pa;
{
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_clear_reference(%lx)\n", pa);
-#endif
- pmap_changebit(pa, PG_U, FALSE);
+
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa));
+
+ pmap_changebit(pa, 0, ~PG_U);
}
/*
- * pmap_is_referenced:
+ * pmap_is_referenced: [ INTERFACE ]
*
* Return whether or not the specified physical page is referenced
* by any physical maps.
*/
-
boolean_t
pmap_is_referenced(pa)
- vm_offset_t pa;
+ paddr_t pa;
{
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
@@ -1907,15 +2005,14 @@ pmap_is_referenced(pa)
}
/*
- * pmap_is_modified:
+ * pmap_is_modified: [ INTERFACE ]
*
* Return whether or not the specified physical page is modified
* by any physical maps.
*/
-
boolean_t
pmap_is_modified(pa)
- vm_offset_t pa;
+ paddr_t pa;
{
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
@@ -1927,26 +2024,63 @@ pmap_is_modified(pa)
return(pmap_testbit(pa, PG_M));
}
-vm_offset_t
+/*
+ * pmap_phys_address: [ INTERFACE ]
+ *
+ * Return the physical address corresponding to the specified
+ * cookie. Used by the device pager to decode a device driver's
+ * mmap entry point return value.
+ *
+ * Note: no locking is necessary in this function.
+ */
+paddr_t
pmap_phys_address(ppn)
int ppn;
{
return(m68k_ptob(ppn));
}
+#ifdef M68K_MMU_HP
+/*
+ * pmap_prefer: [ INTERFACE ]
+ *
+ * Find the first virtual address >= *vap that does not
+ * cause a virtually-tagged cache alias problem.
+ */
+void
+pmap_prefer(foff, vap)
+ vaddr_t foff, *vap;
+{
+ vaddr_t va;
+ vsize_t d;
+
+#ifdef M68K_MMU_MOTOROLA
+ if (pmap_aliasmask)
+#endif
+ {
+ va = *vap;
+ d = foff - va;
+ d &= pmap_aliasmask;
+ *vap = va + d;
+ }
+}
+#endif /* M68K_MMU_HP */
+
#ifdef COMPAT_HPUX
/*
- * 'PUX hack for dealing with the so called multi-mapped address space.
- * The first 256mb is mapped in at every 256mb region from 0x10000000
- * up to 0xF0000000. This allows for 15 bits of tag information.
+ * pmap_mapmulti:
*
- * We implement this at the segment table level, the machine independent
- * VM knows nothing about it.
+ * 'PUX hack for dealing with the so called multi-mapped address space.
+ * The first 256mb is mapped in at every 256mb region from 0x10000000
+ * up to 0xF0000000. This allows for 15 bits of tag information.
+ *
+ * We implement this at the segment table level, the machine independent
+ * VM knows nothing about it.
*/
int
pmap_mapmulti(pmap, va)
pmap_t pmap;
- vm_offset_t va;
+ vaddr_t va;
{
st_entry_t *ste, *bste;
@@ -1968,39 +2102,48 @@ pmap_mapmulti(pmap, va)
}
return (KERN_INVALID_ADDRESS);
}
-#endif
+#endif /* COMPAT_HPUX */
/*
* Miscellaneous support routines follow
*/
/*
- * Invalidate a single page denoted by pmap/va.
- * If (pte != NULL), it is the already computed PTE for the page.
- * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
- * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ * pmap_remove_mapping:
+ *
+ * Invalidate a single page denoted by pmap/va.
+ *
+ * If (pte != NULL), it is the already computed PTE for the page.
+ *
+ * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
+ *
+ * If (flags & PRM_CFLUSH), we must flush/invalidate any cache
+ * information.
+ *
+ * If (flags & PRM_KEEPPTPAGE), we don't free the page table page
+ * if the reference drops to zero.
*/
/* static */
void
pmap_remove_mapping(pmap, va, pte, flags)
pmap_t pmap;
- vm_offset_t va;
+ vaddr_t va;
pt_entry_t *pte;
int flags;
{
- vm_offset_t pa;
+ paddr_t pa;
struct pv_entry *pv, *npv;
pmap_t ptpmap;
st_entry_t *ste;
int s, bits;
#ifdef DEBUG
pt_entry_t opte;
-
- if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
- printf("pmap_remove_mapping(%p, %lx, %p, %x)\n",
- pmap, va, pte, flags);
#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
+ pmap, va, pte, flags));
+
/*
* PTE not provided, compute it from pmap and va.
*/
@@ -2016,9 +2159,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
* state of any hardware maintained bits.
*/
DCIS();
-#ifdef PMAPSTATS
- remove_stats.sflushes++;
-#endif
/*
* If this is a non-CI user mapping for the current process,
* flush the VAC. Note that the kernel side was flushed
@@ -2026,9 +2166,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
*/
if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
DCIU();
-#ifdef PMAPSTATS
- remove_stats.uflushes++;
-#endif
}
}
#endif
@@ -2036,9 +2173,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
#ifdef DEBUG
opte = *pte;
#endif
-#ifdef PMAPSTATS
- remove_stats.removes++;
-#endif
/*
* Update statistics
*/
@@ -2049,33 +2183,63 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* Invalidate the PTE after saving the reference modify info.
*/
-#ifdef DEBUG
- if (pmapdebug & PDB_REMOVE)
- printf("remove: invalidating pte at %p\n", pte);
-#endif
+ PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
bits = *pte & (PG_U|PG_M);
*pte = PG_NV;
if ((flags & PRM_TFLUSH) && active_pmap(pmap))
TBIS(va);
/*
* For user mappings decrement the wiring count on
- * the PT page. We do this after the PTE has been
- * invalidated because vm_map_pageable winds up in
- * pmap_pageable which clears the modify bit for the
- * PT page.
+ * the PT page.
*/
if (pmap != pmap_kernel()) {
- (void) vm_map_pageable(pt_map, trunc_page(pte),
- round_page(pte+1), TRUE);
+ vaddr_t ptpva = trunc_page(pte);
+ int refs = pmap_ptpage_delref(ptpva);
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
- pmap_check_wiring("remove", trunc_page(pte));
+ pmap_check_wiring("remove", ptpva);
#endif
+ /*
+ * If reference count drops to 1, and we're not instructed
+ * to keep it around, free the PT page.
+ *
+ * Note: refcnt == 1 comes from the fact that we allocate
+ * the page with uvm_fault_wire(), which initially wires
+ * the page. The first reference we actually add causes
+ * the refcnt to be 2.
+ */
+ if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+ struct pv_entry *pv;
+ paddr_t pa;
+
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
+#ifdef DIAGNOSTIC
+ if (PAGE_IS_MANAGED(pa) == 0)
+ panic("pmap_remove_mapping: unmanaged PT page");
+#endif
+ pv = pa_to_pvh(pa);
+#ifdef DIAGNOSTIC
+ if (pv->pv_ptste == NULL)
+ panic("pmap_remove_mapping: ptste == NULL");
+ if (pv->pv_pmap != pmap_kernel() ||
+ pv->pv_va != ptpva ||
+ pv->pv_next != NULL)
+ panic("pmap_remove_mapping: "
+ "bad PT page pmap %p, va 0x%lx, next %p",
+ pv->pv_pmap, pv->pv_va, pv->pv_next);
+#endif
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ NULL, PRM_TFLUSH|PRM_CFLUSH);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+ PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
+ ("remove: PT page 0x%lx (0x%lx) freed\n",
+ ptpva, pa));
+ }
}
/*
* If this isn't a managed page, we are all done.
*/
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
/*
* Otherwise remove it from the PV table
@@ -2100,14 +2264,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap_free_pv(npv);
} else
pv->pv_pmap = NULL;
-#ifdef PMAPSTATS
- remove_stats.pvfirst++;
-#endif
} else {
for (npv = pv->pv_next; npv; npv = npv->pv_next) {
-#ifdef PMAPSTATS
- remove_stats.pvsearch++;
-#endif
if (pmap == npv->pv_pmap && va == npv->pv_va)
break;
pv = npv;
@@ -2128,12 +2286,10 @@ pmap_remove_mapping(pmap, va, pte, flags)
*/
if (pmap_aliasmask &&
pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
-#ifdef DEBUG
- if (pmapdebug & PDB_CACHE)
- printf("remove: clearing CI for pa %lx\n", pa);
-#endif
+ PMAP_DPRINTF(PDB_CACHE,
+ ("remove: clearing CI for pa %lx\n", pa));
pv->pv_flags &= ~PV_CI;
- pmap_changebit(pa, PG_CI, FALSE);
+ pmap_changebit(pa, 0, ~PG_CI);
#ifdef DEBUG
if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
(PDB_CACHE|PDB_PVDUMP))
@@ -2146,14 +2302,9 @@ pmap_remove_mapping(pmap, va, pte, flags)
* mapping from the associated segment table.
*/
if (ste) {
-#ifdef PMAPSTATS
- remove_stats.ptinvalid++;
-#endif
-#ifdef DEBUG
- if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
- printf("remove: ste was %x@%p pte was %x@%p\n",
- *ste, ste, opte, pmap_pte(pmap, va));
-#endif
+ PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
+ ("remove: ste was %x@%p pte was %x@%p\n",
+ *ste, ste, opte, pmap_pte(pmap, va)));
#if defined(M68040)
if (mmutype == MMU_68040) {
st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
@@ -2172,38 +2323,40 @@ pmap_remove_mapping(pmap, va, pte, flags)
* freeing it if it is now empty.
*/
if (ptpmap != pmap_kernel()) {
+ PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
+ ("remove: stab %p, refcnt %d\n",
+ ptpmap->pm_stab, ptpmap->pm_sref - 1));
#ifdef DEBUG
- if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
- printf("remove: stab %p, refcnt %d\n",
- ptpmap->pm_stab, ptpmap->pm_sref - 1);
if ((pmapdebug & PDB_PARANOIA) &&
ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
panic("remove: bogus ste");
#endif
if (--(ptpmap->pm_sref) == 0) {
-#ifdef DEBUG
- if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
- printf("remove: free stab %p\n",
- ptpmap->pm_stab);
-#endif
+ PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
+ ("remove: free stab %p\n",
+ ptpmap->pm_stab));
+#if defined(UVM)
+ uvm_km_free_wakeup(st_map,
+ (vaddr_t)ptpmap->pm_stab,
+ HP_STSIZE);
+#else
kmem_free_wakeup(st_map,
- (vm_offset_t)ptpmap->pm_stab,
+ (vaddr_t)ptpmap->pm_stab,
HP_STSIZE);
+#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040)
if (mmutype == MMU_68040)
ptpmap->pm_stfree = protostfree;
#endif
- ptpmap->pm_stchanged = TRUE;
/*
* XXX may have changed segment table
* pointer for current process so
* update now to reload hardware.
*/
if (active_user_pmap(ptpmap))
- PMAP_ACTIVATE(ptpmap,
- &curproc->p_addr->u_pcb, 1);
+ PMAP_ACTIVATE(ptpmap, 1);
}
#ifdef DEBUG
else if (ptpmap->pm_sref < 0)
@@ -2226,21 +2379,26 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* Update saved attributes for managed page
*/
- pmap_attributes[pmap_page_index(pa)] |= bits;
+ *pa_to_attribute(pa) |= bits;
splx(s);
}
+/*
+ * pmap_testbit:
+ *
+ * Test the modified/referenced bits of a physical page.
+ */
/* static */
boolean_t
pmap_testbit(pa, bit)
- vm_offset_t pa;
+ paddr_t pa;
int bit;
{
struct pv_entry *pv;
pt_entry_t *pte;
int s;
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return(FALSE);
pv = pa_to_pvh(pa);
@@ -2248,7 +2406,7 @@ pmap_testbit(pa, bit)
/*
* Check saved info first
*/
- if (pmap_attributes[pmap_page_index(pa)] & bit) {
+ if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
}
@@ -2260,13 +2418,14 @@ pmap_testbit(pa, bit)
DCIS();
#endif
/*
- * Not found, check current mappings returning
- * immediately if found.
+ * Not found. Check current mappings, returning immediately if
+ * found. Cache a hit to speed future lookups.
*/
if (pv->pv_pmap != NULL) {
for (; pv; pv = pv->pv_next) {
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
if (*pte & bit) {
+ *pa_to_attribute(pa) |= bit;
splx(s);
return(TRUE);
}
@@ -2276,46 +2435,40 @@ pmap_testbit(pa, bit)
return(FALSE);
}
+/*
+ * pmap_changebit:
+ *
+ * Change the modified/referenced bits, or other PTE bits,
+ * for a physical page.
+ */
/* static */
void
-pmap_changebit(pa, bit, setem)
- vm_offset_t pa;
- int bit;
- boolean_t setem;
+pmap_changebit(pa, set, mask)
+ paddr_t pa;
+ int set, mask;
{
struct pv_entry *pv;
pt_entry_t *pte, npte;
- vm_offset_t va;
+ vaddr_t va;
int s;
#if defined(M68K_MMU_HP) || defined(M68040)
boolean_t firstpage = TRUE;
#endif
-#ifdef PMAPSTATS
- struct chgstats *chgp;
-#endif
-#ifdef DEBUG
- if (pmapdebug & PDB_BITS)
- printf("pmap_changebit(%lx, %x, %s)\n",
- pa, bit, setem ? "set" : "clear");
-#endif
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ PMAP_DPRINTF(PDB_BITS,
+ ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
+
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
-#ifdef PMAPSTATS
- chgp = &changebit_stats[(bit>>2)-1];
- if (setem)
- chgp->setcalls++;
- else
- chgp->clrcalls++;
-#endif
pv = pa_to_pvh(pa);
s = splimp();
+
/*
* Clear saved attributes (modify, reference)
*/
- if (!setem)
- pmap_attributes[pmap_page_index(pa)] &= ~bit;
+ *pa_to_attribute(pa) &= mask;
+
/*
* Loop over all current mappings setting/clearing as appropos
* If setting RO do we need to clear the VAC?
@@ -2333,11 +2486,16 @@ pmap_changebit(pa, bit, setem)
/*
* XXX don't write protect pager mappings
*/
- if (bit == PG_RO) {
- extern vm_offset_t pager_sva, pager_eva;
+ if (set == PG_RO) {
+#if defined(UVM)
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+#else
+ extern vaddr_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
+#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@@ -2351,10 +2509,7 @@ pmap_changebit(pa, bit, setem)
DCIS();
}
#endif
- if (setem)
- npte = *pte | bit;
- else
- npte = *pte & ~bit;
+ npte = (*pte | set) & mask;
if (*pte != npte) {
#if defined(M68040)
/*
@@ -2362,9 +2517,10 @@ pmap_changebit(pa, bit, setem)
* protection make sure the caches are
* flushed (but only once).
*/
- if (firstpage && mmutype == MMU_68040 &&
- ((bit == PG_RO && setem) ||
- (bit & PG_CMASK))) {
+ if (firstpage && (mmutype == MMU_68040) &&
+ ((set == PG_RO) ||
+ (set & PG_CMASK) ||
+ (mask & PG_CMASK) == 0)) {
firstpage = FALSE;
DCFP(pa);
ICPP(pa);
@@ -2373,54 +2529,31 @@ pmap_changebit(pa, bit, setem)
*pte = npte;
if (active_pmap(pv->pv_pmap))
TBIS(va);
-#ifdef PMAPSTATS
- if (setem)
- chgp->sethits++;
- else
- chgp->clrhits++;
-#endif
- }
-#ifdef PMAPSTATS
- else {
- if (setem)
- chgp->setmiss++;
- else
- chgp->clrmiss++;
}
-#endif
}
-#if defined(M68K_MMU_HP) && defined(DEBUG)
- if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
- if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
- DCIA();
- else if (toflush == 2)
- DCIS();
- else
- DCIU();
- }
-#endif
}
splx(s);
}
+/*
+ * pmap_enter_ptpage:
+ *
+ * Allocate and map a PT page for the specified pmap/va pair.
+ */
/* static */
void
pmap_enter_ptpage(pmap, va)
pmap_t pmap;
- vm_offset_t va;
+ vaddr_t va;
{
- vm_offset_t ptpa;
+ paddr_t ptpa;
struct pv_entry *pv;
st_entry_t *ste;
int s;
-#ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
- printf("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va);
-#endif
-#ifdef PMAPSTATS
- enter_stats.ptpneeded++;
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
+ ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
+
/*
* Allocate a segment table if necessary. Note that it is allocated
* from a private map and not pt_map. This keeps user page tables
@@ -2429,31 +2562,34 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
+#if defined(UVM)
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(st_map, HP_STSIZE);
+#else
pmap->pm_stab = (st_entry_t *)
kmem_alloc(st_map, HP_STSIZE);
+#endif
pmap->pm_stpa = (st_entry_t *)
- pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab);
+ pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab);
#if defined(M68040)
if (mmutype == MMU_68040) {
#ifdef DEBUG
if (dowriteback && dokwriteback)
#endif
- pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
+ pmap_changebit((paddr_t)pmap->pm_stpa, 0, ~PG_CCB);
pmap->pm_stfree = protostfree;
}
#endif
- pmap->pm_stchanged = TRUE;
/*
* XXX may have changed segment table pointer for current
* process so update now to reload hardware.
*/
if (active_user_pmap(pmap))
- PMAP_ACTIVATE(pmap, &curproc->p_addr->u_pcb, 1);
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
- printf("enter: pmap %p stab %p(%p)\n",
- pmap, pmap->pm_stab, pmap->pm_stpa);
-#endif
+ PMAP_ACTIVATE(pmap, 1);
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: pmap %p stab %p(%p)\n",
+ pmap, pmap->pm_stab, pmap->pm_stpa));
}
ste = pmap_ste(pmap, va);
@@ -2474,10 +2610,9 @@ pmap_enter_ptpage(pmap, va)
bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
*ste = (u_int)addr | SG_RW | SG_U | SG_V;
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
- printf("enter: alloc ste2 %d(%p)\n", ix, addr);
-#endif
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: alloc ste2 %d(%p)\n", ix, addr));
}
ste = pmap_ste2(pmap, va);
/*
@@ -2489,14 +2624,12 @@ pmap_enter_ptpage(pmap, va)
* entirety below.
*/
ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
- printf("enter: ste2 %p (%p)\n",
- pmap_ste2(pmap, va), ste);
-#endif
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
}
#endif
- va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
+ va = trunc_page((vaddr_t)pmap_pte(pmap, va));
/*
* In the kernel we allocate a page from the kernel PT page
@@ -2512,25 +2645,19 @@ pmap_enter_ptpage(pmap, va)
* No PT pages available.
* Try once to free up unused ones.
*/
-#ifdef DEBUG
- if (pmapdebug & PDB_COLLECT)
- printf("enter: no KPT pages, collecting...\n");
-#endif
+ PMAP_DPRINTF(PDB_COLLECT,
+ ("enter: no KPT pages, collecting...\n"));
pmap_collect(pmap_kernel());
if ((kpt = kpt_free_list) == (struct kpt_page *)0)
panic("pmap_enter_ptpage: can't get KPT page");
}
-#ifdef PMAPSTATS
- if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
- kpt_stats.kptmaxuse = kpt_stats.kptinuse;
-#endif
kpt_free_list = kpt->kpt_next;
kpt->kpt_next = kpt_used_list;
kpt_used_list = kpt;
ptpa = kpt->kpt_pa;
bzero((caddr_t)kpt->kpt_va, NBPG);
pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE,
- VM_PROT_DEFAULT);
+ VM_PROT_DEFAULT);
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
@@ -2544,6 +2671,11 @@ pmap_enter_ptpage(pmap, va)
/*
* For user processes we just simulate a fault on that location
* letting the VM system allocate a zero-filled page.
+ *
+ * Note we use a wire-fault to keep the page off the paging
+ * queues. This sets our PT page's reference (wire) count to
+ * 1, which is what we use to check if the page can be freed.
+ * See pmap_remove_mapping().
*/
else {
/*
@@ -2551,25 +2683,29 @@ pmap_enter_ptpage(pmap, va)
* lose the segment table when low on memory.
*/
pmap->pm_sref++;
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
- printf("enter: about to fault UPT pg at %lx\n", va);
-#endif
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
+ ("enter: about to fault UPT pg at %lx\n", va));
+#if defined(UVM)
+ s = uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE);
+ if (s != KERN_SUCCESS) {
+ printf("uvm_fault_wire(pt_map, 0x%lx, 0x%lx, RW) "
+ "-> %d\n", va, va + PAGE_SIZE, s);
+ panic("pmap_enter: uvm_fault_wire failed");
+ }
+#else
s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
if (s != KERN_SUCCESS) {
printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s);
panic("pmap_enter: vm_fault failed");
}
- ptpa = pmap_extract(pmap_kernel(), va);
- /*
- * Mark the page clean now to avoid its pageout (and
- * hence creation of a pager) between now and when it
- * is wired; i.e. while it is on a paging queue.
- */
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
+#endif
+ ptpa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
+#if !defined(UVM)
#ifdef DEBUG
PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
#endif
+#endif
}
#if defined(M68040)
/*
@@ -2587,7 +2723,7 @@ pmap_enter_ptpage(pmap, va)
pmap == pmap_kernel() ? "Kernel" : "User",
va, ptpa, pte, *pte);
#endif
- pmap_changebit(ptpa, PG_CCB, 0);
+ pmap_changebit(ptpa, 0, ~PG_CCB);
}
#endif
/*
@@ -2610,10 +2746,9 @@ pmap_enter_ptpage(pmap, va)
#endif
pv->pv_ptste = ste;
pv->pv_ptpmap = pmap;
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
- printf("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste);
-#endif
+
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
+ ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
/*
* Map the new PT page into the segment table.
@@ -2635,11 +2770,9 @@ pmap_enter_ptpage(pmap, va)
#endif
*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
if (pmap != pmap_kernel()) {
-#ifdef DEBUG
- if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
- printf("enter: stab %p refcnt %d\n",
- pmap->pm_stab, pmap->pm_sref);
-#endif
+ PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
+ ("enter: stab %p refcnt %d\n",
+ pmap->pm_stab, pmap->pm_sref));
}
#if 0
/*
@@ -2654,11 +2787,52 @@ pmap_enter_ptpage(pmap, va)
splx(s);
}
+/*
+ * pmap_ptpage_addref:
+ *
+ * Add a reference to the specified PT page.
+ */
+void
+pmap_ptpage_addref(ptpva)
+ vaddr_t ptpva;
+{
+ vm_page_t m;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ m->wire_count++;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+}
+
+/*
+ * pmap_ptpage_delref:
+ *
+ * Delete a reference to the specified PT page.
+ */
+int
+pmap_ptpage_delref(ptpva)
+ vaddr_t ptpva;
+{
+ vm_page_t m;
+ int rv;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ rv = --m->wire_count;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+ return (rv);
+}
+
#ifdef DEBUG
+/*
+ * pmap_pvdump:
+ *
+ * Dump the contents of the PV list for the specified physical page.
+ */
/* static */
void
pmap_pvdump(pa)
- vm_offset_t pa;
+ paddr_t pa;
{
struct pv_entry *pv;
@@ -2670,31 +2844,41 @@ pmap_pvdump(pa)
printf("\n");
}
+/*
+ * pmap_check_wiring:
+ *
+ * Count the number of valid mappings in the specified PT page,
+ * and ensure that it is consistent with the number of wirings
+ * to that page that the VM system has.
+ */
/* static */
void
pmap_check_wiring(str, va)
char *str;
- vm_offset_t va;
+ vaddr_t va;
{
- vm_map_entry_t entry;
- int count;
pt_entry_t *pte;
+ paddr_t pa;
+ vm_page_t m;
+ int count;
- va = trunc_page(va);
if (!pmap_ste_v(pmap_kernel(), va) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;
- if (!vm_map_lookup_entry(pt_map, va, &entry)) {
- printf("wired_check: entry for %lx not found\n", va);
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
+ m = PHYS_TO_VM_PAGE(pa);
+ if (m->wire_count < 1) {
+ printf("*%s*: 0x%lx: wire count %d\n", str, va, m->wire_count);
return;
}
+
count = 0;
for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
count++;
- if (entry->wired_count != count)
- printf("*%s*: %lx: w%d/a%d\n",
- str, va, entry->wired_count, count);
+ if ((m->wire_count - 1) != count)
+ printf("*%s*: 0x%lx: w%d/a%d\n",
+ str, va, (m->wire_count - 1), count);
}
-#endif
+#endif /* DEBUG */
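
The pmap.c hunks above replace the old pmap_changebit(pa, bit, setem) boolean interface with a set/mask pair, so callers now write pmap_changebit(pa, PG_CI, ~0) to set a bit and pmap_changebit(pa, 0, ~PG_M) to clear one. The standalone sketch below models only the npte = (opte | set) & mask arithmetic; the PG_* values are placeholders for illustration, not the real hp300 PTE bit layout.

#include <stdio.h>

#define PG_RO	0x04	/* placeholder bit values, illustration only */
#define PG_M	0x10
#define PG_CI	0x40

/* new-style changebit: OR in 'set', then keep only the bits in 'mask' */
static unsigned int
changebit(unsigned int opte, unsigned int set, unsigned int mask)
{
	return (opte | set) & mask;
}

int
main(void)
{
	unsigned int pte = PG_M | PG_CI;

	pte = changebit(pte, PG_RO, ~0U);	/* old (PG_RO, TRUE)  */
	pte = changebit(pte, 0, ~PG_M);		/* old (PG_M, FALSE)  */
	printf("pte = %#x\n", pte);		/* PG_CI|PG_RO = 0x44 */
	return (0);
}
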
diff --git a/sys/arch/hp300/hp300/trap.c b/sys/arch/hp300/hp300/trap.c
index 7cd925eddab..df1278a074d 100644
--- a/sys/arch/hp300/hp300/trap.c
+++ b/sys/arch/hp300/hp300/trap.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: trap.c,v 1.17 2000/11/10 18:15:38 art Exp $ */
-/* $NetBSD: trap.c,v 1.55 1997/07/08 16:56:36 kleink Exp $ */
+/* $OpenBSD: trap.c,v 1.18 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: trap.c,v 1.57 1998/02/16 20:58:31 thorpej Exp $ */
/*
* Copyright (c) 1997 Theo de Raadt
@@ -100,6 +100,10 @@
#include <vm/vm.h>
#include <vm/pmap.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
#include <dev/cons.h>
#ifdef COMPAT_HPUX
@@ -125,6 +129,8 @@ void dumpwb __P((int, u_short, u_int, u_int));
static inline void userret __P((struct proc *p, struct frame *fp,
u_quad_t oticks, u_int faultaddr, int fromtrap));
+int astpending;
+
char *trap_type[] = {
"Bus error",
"Address error",
@@ -302,7 +308,11 @@ trap(type, code, v, frame)
int typ = 0;
union sigval sv;
+#if defined(UVM)
+ uvmexp.traps++;
+#else
cnt.v_trap++;
+#endif
p = curproc;
ucode = 0;
@@ -577,19 +587,31 @@ trap(type, code, v, frame)
if (ssir & SIR_NET) {
void netintr __P((void));
siroff(SIR_NET);
+#if defined(UVM)
+ uvmexp.softs++;
+#else
cnt.v_soft++;
+#endif
netintr();
}
if (ssir & SIR_CLOCK) {
siroff(SIR_CLOCK);
+#if defined(UVM)
+ uvmexp.softs++;
+#else
cnt.v_soft++;
+#endif
softclock();
}
/*
* If this was not an AST trap, we are all done.
*/
if (type != (T_ASTFLT|T_USER)) {
+#if defined(UVM)
+ uvmexp.traps--;
+#else
cnt.v_trap--;
+#endif
return;
}
spl0();
@@ -660,18 +682,31 @@ trap(type, code, v, frame)
rv = pmap_mapmulti(map->pmap, va);
if (rv != KERN_SUCCESS) {
bva = HPMMBASEADDR(va);
+#if defined(UVM)
+ rv = uvm_fault(map, bva, 0, ftype);
+#else
rv = vm_fault(map, bva, ftype, FALSE);
+#endif
if (rv == KERN_SUCCESS)
(void) pmap_mapmulti(map->pmap, va);
}
} else
#endif
+#if defined(UVM)
+ rv = uvm_fault(map, va, 0, ftype);
+#ifdef DEBUG
+ if (rv && MDB_ISPID(p->p_pid))
+ printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n",
+ map, va, ftype, rv);
+#endif
+#else /* ! UVM */
rv = vm_fault(map, va, ftype, FALSE);
#ifdef DEBUG
if (rv && MDB_ISPID(p->p_pid))
printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
map, va, ftype, rv);
#endif
+#endif /* UVM */
/*
* If this was a stack access we keep track of the maximum
* accessed stack size. Also, if vm_fault gets a protection
@@ -703,8 +738,13 @@ trap(type, code, v, frame)
if (type == T_MMUFLT) {
if (p->p_addr->u_pcb.pcb_onfault)
goto copyfault;
+#if defined(UVM)
+ printf("uvm_fault(%p, 0x%lx, 0, 0x%x\n) -> 0x%x\n",
+ map, va, ftype, rv);
+#else
printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
map, va, ftype, rv);
+#endif
printf(" type %x, code [mmu,,ssw]: %x\n",
type, code);
goto dopanic;
@@ -1044,7 +1084,11 @@ syscall(code, frame)
register_t args[8], rval[2];
u_quad_t sticks;
+#if defined(UVM)
+ uvmexp.syscalls++;
+#else
cnt.v_syscall++;
+#endif
if (!USERMODE(frame.f_sr))
panic("syscall");
p = curproc;
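
The trap.c changes above bump uvmexp counters when the kernel is built with option UVM and fall back to the old vmmeter cnt fields otherwise. Below is a minimal userland model of that split; the fake_* structures are stand-ins for the kernel's uvmexp and struct vmmeter and are not part of the patch.

#include <stdio.h>

#define UVM			/* comment out to exercise the vmmeter path */

struct fake_uvmexp { long traps, softs, syscalls; };
struct fake_vmmeter { long v_trap, v_soft, v_syscall; };

static struct fake_uvmexp uvmexp;
static struct fake_vmmeter cnt;

static void
count_trap(void)
{
#if defined(UVM)
	uvmexp.traps++;
#else
	cnt.v_trap++;
#endif
}

int
main(void)
{
	count_trap();
	printf("uvm traps %ld, vmmeter traps %ld\n", uvmexp.traps, cnt.v_trap);
	return (0);
}
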
diff --git a/sys/arch/hp300/hp300/vm_machdep.c b/sys/arch/hp300/hp300/vm_machdep.c
index bbfa8c655bf..d155f28f120 100644
--- a/sys/arch/hp300/hp300/vm_machdep.c
+++ b/sys/arch/hp300/hp300/vm_machdep.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_machdep.c,v 1.16 2000/06/08 22:25:18 niklas Exp $ */
-/* $NetBSD: vm_machdep.c,v 1.37 1997/05/26 00:27:43 thorpej Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.17 2001/05/04 22:48:59 aaron Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.47 1999/03/26 23:41:29 mycroft Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -62,6 +62,10 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the kernel stack and pcb, making the child
@@ -85,15 +89,20 @@ cpu_fork(p1, p2, stack, stacksize)
p2->p_md.md_flags = p1->p_md.md_flags;
- /* Sync curpcb (which is presumably p1's PCB) and copy it to p2. */
- savectx(curpcb);
+ /* Copy pcb from proc p1 to p2. */
+ if (p1 == curproc) {
+ /* Sync the PCB before we copy it. */
+ savectx(curpcb);
+ }
+#ifdef DIAGNOSTIC
+ else if (p1 != &proc0)
+ panic("cpu_fork: curproc");
+#endif
*pcb = p1->p_addr->u_pcb;
- PMAP_ACTIVATE(p2->p_vmspace->vm_map.pmap, pcb, 0);
-
/*
* Copy the trap frame, and arrange for the child to return directly
- * through return_to_user(). Note the inline cpu_set_kpc().
+ * through child_return(). Note the in-line cpu_set_kpc().
*/
tf = (struct trapframe *)((u_int)p2->p_addr + USPACE) - 1;
p2->p_md.md_regs = (int *)tf;
@@ -112,6 +121,14 @@ cpu_fork(p1, p2, stack, stacksize)
pcb->pcb_regs[11] = (int)sf; /* SSP */
}
+/*
+ * Arrange for in-kernel execution of a process to continue at the
+ * named pc, as if the code at that address were called as a function
+ * with the supplied argument.
+ *
+ * Note that it's assumed that when the named process returns, rei()
+ * should be invoked, to return to user code.
+ */
void
cpu_set_kpc(p, pc, arg)
struct proc *p;
@@ -136,7 +153,11 @@ cpu_exit(p)
{
(void) splimp();
+#if defined(UVM)
+ uvmexp.swtch++;
+#else
cnt.v_swtch++;
+#endif
switch_exit(p);
/* NOTREACHED */
}
@@ -159,7 +180,7 @@ cpu_coredump(p, vp, cred, chdr)
struct coreseg cseg;
int error;
- CORE_SETMAGIC(*chdr, COREMAGIC, MID_M68K, 0);
+ CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
chdr->c_hdrsize = ALIGN(sizeof(*chdr));
chdr->c_seghdrsize = ALIGN(sizeof(cseg));
chdr->c_cpusize = sizeof(md_core);
@@ -179,7 +200,7 @@ cpu_coredump(p, vp, cred, chdr)
bzero((caddr_t)&md_core.freg, sizeof(md_core.freg));
}
- CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_M68K, CORE_CPU);
+ CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
cseg.c_addr = 0;
cseg.c_size = chdr->c_cpusize;
@@ -209,24 +230,24 @@ pagemove(from, to, size)
caddr_t from, to;
size_t size;
{
- vm_offset_t pa;
+ paddr_t pa;
#ifdef DEBUG
if (size & CLOFSET)
panic("pagemove");
#endif
while (size > 0) {
- pa = pmap_extract(pmap_kernel(), (vm_offset_t)from);
+ pa = pmap_extract(pmap_kernel(), (vaddr_t)from);
#ifdef DEBUG
if (pa == 0)
panic("pagemove 2");
- if (pmap_extract(pmap_kernel(), (vm_offset_t)to) != 0)
+ if (pmap_extract(pmap_kernel(), (vaddr_t)to) != 0)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
- (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
+ (vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
- (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1,
+ (vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1,
VM_PROT_READ|VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
@@ -276,12 +297,12 @@ int
kvtop(addr)
caddr_t addr;
{
- vm_offset_t va;
+ paddr_t pa;
- va = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- if (va == 0)
+ pa = pmap_extract(pmap_kernel(), (vaddr_t)addr);
+ if (pa == 0)
panic("kvtop: zero page frame");
- return((int)va);
+ return((int)pa);
}
extern vm_map_t phys_map;
@@ -294,59 +315,70 @@ extern vm_map_t phys_map;
* is a total crock, the multiple mappings of these physical pages should
* be reflected in the higher-level VM structures to avoid problems.
*/
-/*ARGSUSED*/
void
-vmapbuf(bp, sz)
+vmapbuf(bp, len)
struct buf *bp;
- vm_size_t sz;
+ vsize_t len;
{
- int npf;
- caddr_t addr;
- long flags = bp->b_flags;
- struct proc *p;
- int off;
- vm_offset_t kva;
- vm_offset_t pa;
+ struct pmap *upmap, *kpmap;
+ vaddr_t uva; /* User VA (map from) */
+ vaddr_t kva; /* Kernel VA (new to) */
+ paddr_t pa; /* physical address */
+ vm_size_t off;
- if ((flags & B_PHYS) == 0)
+ if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- addr = bp->b_saveaddr = bp->b_data;
- off = (int)addr & PGOFSET;
- p = bp->b_proc;
- npf = btoc(round_page(bp->b_bcount + off));
- kva = kmem_alloc_wait(phys_map, ctob(npf));
+
+ uva = trunc_page(bp->b_saveaddr = bp->b_data);
+ off = (vaddr_t)bp->b_data - uva;
+ len = round_page(off + len);
+#if defined(UVM)
+ kva = uvm_km_valloc_wait(phys_map, len);
+#else
+ kva = kmem_alloc_wait(phys_map, len);
+#endif
bp->b_data = (caddr_t)(kva + off);
- while (npf--) {
- pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
- (vm_offset_t)addr);
+
+ upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
+ kpmap = vm_map_pmap(phys_map);
+ do {
+ pa = pmap_extract(upmap, uva);
if (pa == 0)
panic("vmapbuf: null page frame");
- pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
- VM_PROT_READ|VM_PROT_WRITE, TRUE, 0);
- addr += PAGE_SIZE;
+ pmap_enter(kpmap, kva, pa, VM_PROT_READ|VM_PROT_WRITE, TRUE, 0);
+ uva += PAGE_SIZE;
kva += PAGE_SIZE;
- }
+ len -= PAGE_SIZE;
+ } while (len);
}
/*
* Free the io map PTEs associated with this IO operation.
*/
-/*ARGSUSED*/
void
-vunmapbuf(bp, sz)
+vunmapbuf(bp, len)
struct buf *bp;
- vm_size_t sz;
+ vsize_t len;
{
- caddr_t addr;
- int npf;
- vm_offset_t kva;
+ vaddr_t kva;
+ vsize_t off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- addr = bp->b_data;
- npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
- kva = (vm_offset_t)((int)addr & ~PGOFSET);
- kmem_free_wakeup(phys_map, kva, ctob(npf));
+
+ kva = trunc_page(bp->b_data);
+ off = (vaddr_t)bp->b_data - kva;
+ len = round_page(off + len);
+
+ /*
+ * pmap_remove() is unnecessary here, as kmem_free_wakeup()
+ * will do it for us.
+ */
+#if defined(UVM)
+ uvm_km_free_wakeup(phys_map, kva, len);
+#else
+ kmem_free_wakeup(phys_map, kva, len);
+#endif
bp->b_data = bp->b_saveaddr;
- bp->b_saveaddr = NULL;
+ bp->b_saveaddr = 0;
}
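
vmapbuf()/vunmapbuf() above switch from the old btoc/ctob page counting to trunc_page/round_page arithmetic on the buffer address and length. A quick standalone check of that arithmetic follows; NBPG and the page macros are local stand-ins assuming 4K pages, not the kernel's machine/param.h definitions.

#include <stdio.h>

#define NBPG		4096UL
#define trunc_page(x)	((unsigned long)(x) & ~(NBPG - 1))
#define round_page(x)	(((unsigned long)(x) + NBPG - 1) & ~(NBPG - 1))

int
main(void)
{
	unsigned long data = 0x12345678;	/* user buffer address */
	unsigned long len = 0x2100;		/* transfer length */
	unsigned long uva, off;

	uva = trunc_page(data);		/* page-aligned start of buffer */
	off = data - uva;		/* offset within the first page */
	len = round_page(off + len);	/* whole pages to map */
	printf("uva=%#lx off=%#lx len=%#lx\n", uva, off, len);
	return (0);
}
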
diff --git a/sys/arch/hp300/include/cpu.h b/sys/arch/hp300/include/cpu.h
index 56740ac457d..192ea7d9145 100644
--- a/sys/arch/hp300/include/cpu.h
+++ b/sys/arch/hp300/include/cpu.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: cpu.h,v 1.9 1998/03/01 00:37:31 niklas Exp $ */
-/* $NetBSD: cpu.h,v 1.25 1997/04/27 20:37:07 thorpej Exp $ */
+/* $OpenBSD: cpu.h,v 1.10 2001/05/04 22:49:00 aaron Exp $ */
+/* $NetBSD: cpu.h,v 1.28 1998/02/13 07:41:51 scottr Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -95,6 +95,7 @@ struct clockframe {
* Preempt the current process if in interrupt from user mode,
* or after the current trap/syscall if in system mode.
*/
+extern int want_resched; /* resched() was called */
#define need_resched() { want_resched++; aston(); }
/*
@@ -110,11 +111,9 @@ struct clockframe {
*/
#define signotify(p) aston()
+extern int astpending; /* need to trap before returning to user mode */
#define aston() (astpending++)
-int astpending; /* need to trap before returning to user mode */
-int want_resched; /* resched() was called */
-
/*
* CTL_MACHDEP definitions.
*/
@@ -174,6 +173,9 @@ void doboot __P((void))
void ecacheon __P((void));
void ecacheoff __P((void));
+/* clock.c functions */
+void hp300_calibrate_delay __P((void));
+
/* machdep.c functions */
int badaddr __P((caddr_t));
int badbaddr __P((caddr_t));
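
The cpu.h change converts astpending and want_resched from definitions in the header into extern declarations, with the single definition moving into trap.c (see the trap.c hunk earlier). That is the usual declare-in-the-header, define-once-in-a-.c-file pattern; the file names in this sketch are illustrative only.

/* foo.h -- may be included by any number of .c files */
extern int astpending;		/* declaration only, no storage allocated */

/* foo.c -- exactly one translation unit */
int astpending;			/* definition, storage lives here */
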
diff --git a/sys/arch/hp300/include/pmap.h b/sys/arch/hp300/include/pmap.h
index 2c71617d631..d3583e6ec37 100644
--- a/sys/arch/hp300/include/pmap.h
+++ b/sys/arch/hp300/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.5 1997/07/06 08:02:13 downsj Exp $ */
+/* $OpenBSD: pmap.h,v 1.6 2001/05/04 22:49:00 aaron Exp $ */
/* $NetBSD: pmap.h,v 1.13 1997/06/10 18:58:19 veego Exp $ */
/*
@@ -93,13 +93,11 @@ typedef struct pmap *pmap_t;
/*
* Macros for speed
*/
-#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
- if ((pmapp)->pm_stchanged) { \
- (pcbp)->pcb_ustp = m68k_btop((vm_offset_t)(pmapp)->pm_stpa); \
- if (iscurproc) \
- loadustp((pcbp)->pcb_ustp); \
- (pmapp)->pm_stchanged = FALSE; \
- }
+#define PMAP_ACTIVATE(pmap, loadhw) \
+{ \
+ if ((loadhw)) \
+ loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa)); \
+}
#define PMAP_DEACTIVATE(pmapp, pcbp)
/*
@@ -148,13 +146,19 @@ extern struct pmap kernel_pmap_store;
extern struct pv_entry *pv_table; /* array of entries, one per page */
-#define pmap_page_index(pa) atop(pa - vm_first_phys)
-#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)])
-
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+struct proc;
+void pmap_activate __P((struct proc *));
+void pmap_deactivate __P((struct proc *));
+
extern pt_entry_t *Sysmap;
extern char *vmmap; /* map for mem, dumps, etc. */
+#ifdef M68K_MMU_HP
+void pmap_prefer __P((vaddr_t, vaddr_t *));
+#define PMAP_PREFER(foff, vap) pmap_prefer((foff), (vap))
+#endif
+
#endif /* !_HP300_PMAP_H_ */
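
The new PMAP_PREFER hook exists to avoid virtual-cache aliasing on HP-MMU machines: the VM system passes a proposed mapping address, and pmap_prefer() nudges it upward until it has the same alignment, modulo the cache alias distance, as the object offset being mapped. A hedged standalone sketch of that idea follows; the 64KB alias distance and the example addresses are assumptions for illustration, not the hp300's actual cache geometry:

/*
 * Sketch of the alias-avoidance idea behind PMAP_PREFER on a virtually
 * indexed cache.  ALIAS_DIST is an assumed value.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIAS_DIST	0x10000UL		/* assumed alias distance */
#define ALIAS_MASK	(ALIAS_DIST - 1)

static void
prefer(uintptr_t foff, uintptr_t *vap)
{
	uintptr_t va = *vap;
	uintptr_t d = (foff - va) & ALIAS_MASK;	/* distance to a matching color */

	*vap = va + d;		/* move up to the next aliasing-safe address */
}

int
main(void)
{
	uintptr_t va = 0x20003000;	/* hypothetical hint from the VM system */

	prefer(0x5000, &va);
	printf("preferred va = %#lx\n", (unsigned long)va);
	return 0;
}

After the call the low bits of the returned address match the low bits of the offset (0x20005000 for offset 0x5000), so the two would index the same cache lines and cannot alias.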
diff --git a/sys/arch/hp300/include/vmparam.h b/sys/arch/hp300/include/vmparam.h
index 4df2b97204e..835b3014db3 100644
--- a/sys/arch/hp300/include/vmparam.h
+++ b/sys/arch/hp300/include/vmparam.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vmparam.h,v 1.3 1997/01/15 02:55:29 downsj Exp $ */
-/* $NetBSD: vmparam.h,v 1.9 1996/10/20 23:23:28 thorpej Exp $ */
+/* $OpenBSD: vmparam.h,v 1.4 2001/05/04 22:49:00 aaron Exp $ */
+/* $NetBSD: vmparam.h,v 1.16 1998/08/20 08:33:48 kleink Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -43,9 +43,13 @@
* @(#)vmparam.h 8.2 (Berkeley) 4/19/94
*/
+#ifndef _HP300_VMPARAM_H_
+#define _HP300_VMPARAM_H_
+
/*
* Machine dependent constants for HP300
*/
+
/*
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. LOWPAGES and HIGHPAGES are
@@ -162,6 +166,10 @@
* so we loan each swapped in process memory worth 100$, or just admit
* that we don't consider it worthwhile and swap it out to disk which costs
* $30/mb or about $0.75.
+ * Update: memory prices have changed recently (9/96). At the current
+ * value of $6 per megabyte, we lend each swapped in process memory worth
+ * $0.15, or just admit that we don't consider it worthwhile and swap it out
+ * to disk which costs $0.20/MB, or just under half a cent.
*/
#define SAFERSS 4 /* nominal ``small'' resident set size
protected against replacement */
@@ -232,11 +240,11 @@
*/
/* user/kernel map constants */
-#define VM_MIN_ADDRESS ((vm_offset_t)0)
-#define VM_MAXUSER_ADDRESS ((vm_offset_t)0xFFF00000)
-#define VM_MAX_ADDRESS ((vm_offset_t)0xFFF00000)
-#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0)
-#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFFFFF000)
+#define VM_MIN_ADDRESS ((vaddr_t)0)
+#define VM_MAXUSER_ADDRESS ((vaddr_t)0xFFF00000)
+#define VM_MAX_ADDRESS ((vaddr_t)0xFFF00000)
+#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0)
+#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xFFFFF000)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
@@ -244,7 +252,31 @@
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
/* # of kernel PT pages (initial only, can grow dynamically) */
-#define VM_KERNEL_PT_PAGES ((vm_size_t)2) /* XXX: SYSPTSIZE */
+#define VM_KERNEL_PT_PAGES ((vsize_t)2) /* XXX: SYSPTSIZE */
/* pcb base */
#define pcbb(p) ((u_int)(p)->p_addr)
+
+/* Use new VM page bootstrap interface. */
+#define MACHINE_NEW_NONCONTIG
+
+/*
+ * Constants which control the way the VM system deals with memory segments.
+ * The hp300 only has one physical memory segment.
+ */
+#define VM_PHYSSEG_MAX 1
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD
+
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+/*
+ * pmap-specific data stored in the vm_physmem[] array.
+ */
+struct pmap_physseg {
+ struct pv_entry *pvent; /* pv table for this seg */
+ char *attrs; /* page attributes for this seg */
+};
+
+#endif /* _HP300_VMPARAM_H_ */
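
The new vmparam.h constants describe the machine's physical memory to the MACHINE_NEW_NONCONTIG page bootstrap code: VM_PHYSSEG_STRAT selects how the per-segment array is searched when a physical address must be turned into its segment, and struct pmap_physseg hangs the pmap's pv table and attribute array off each segment. A standalone sketch of the lookup that VM_PSTRAT_BSEARCH implies; the segment layout and the two-segment example are assumptions for illustration (the hp300 itself declares VM_PHYSSEG_MAX as 1, so its search is trivial):

/*
 * Sketch of a binary search over physical memory segments, keyed by
 * page frame number.  Segment contents here are made up.
 */
#include <stdio.h>

struct physseg {
	unsigned long start;	/* first page frame in segment */
	unsigned long end;	/* first page frame beyond segment */
};

static int
physseg_find(const struct physseg *segs, int nsegs, unsigned long pfn)
{
	int lo = 0, hi = nsegs - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (pfn < segs[mid].start)
			hi = mid - 1;
		else if (pfn >= segs[mid].end)
			lo = mid + 1;
		else
			return mid;	/* pfn falls inside this segment */
	}
	return -1;			/* not managed memory */
}

int
main(void)
{
	struct physseg segs[2] = {
		{ 0x000, 0x400 },	/* hypothetical low segment */
		{ 0x800, 0xc00 },	/* hypothetical high segment */
	};

	printf("pfn 0x900 -> segment %d\n", physseg_find(segs, 2, 0x900));
	printf("pfn 0x500 -> segment %d\n", physseg_find(segs, 2, 0x500));
	return 0;
}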