path: root/sys/arch/mvme68k
author     Miod Vallat <miod@cvs.openbsd.org>   2001-06-26 21:35:44 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>   2001-06-26 21:35:44 +0000
commit     546f71602d1b89872b474138129ca2dc206599de
tree       3c5a5c29512d5dc9a673f45efc33462a91e105d5
parent     7fea0a8c3c3f70d2f713aff3502a60fef9de91ea
A few changes to mvme68k:
+ switch to UVM
+ fix the system trace problem
+ big cleanup of locore.s (macro for BUG calls, use more common m68k code
  whenever possible, and the macros in <m68k/asm.h>)
+ better indentation on some parts (old KNF)
+ call doshutdownhooks() at shutdown
+ use <net/netisr_dispatch.h>
+ upgrade pmap.c to something very close to our current hp300 pmap.c, minus
  support for PMAP_NEW and for HP MMU
+ various tidbits I forget to mention here

work and tests by smurph@ and me.
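As an illustration of the locore.s cleanup listed above, the repeated "trap #15" / ".short id" pairs used to call BUG (the boards' ROM monitor) are collapsed into a single macro. The sketch below only reproduces the macro definition and one call site taken from the locore.s hunks in this patch; surrounding context is omitted.

/*
 * Macro to invoke a BUG routine.
 */
#define BUGCALL(id) \
	trap #15; \
	.short id

	BUGCALL(MVMEPROM_GETBRDID)	| was: trap #15 followed by .short MVMEPROM_GETBRDID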
Diffstat (limited to 'sys/arch/mvme68k')
-rw-r--r--   sys/arch/mvme68k/conf/GENERIC               3
-rw-r--r--   sys/arch/mvme68k/conf/MINIROOT              3
-rw-r--r--   sys/arch/mvme68k/conf/MVME147               4
-rw-r--r--   sys/arch/mvme68k/conf/MVME162               4
-rw-r--r--   sys/arch/mvme68k/conf/MVME167               4
-rw-r--r--   sys/arch/mvme68k/conf/MVME177               3
-rw-r--r--   sys/arch/mvme68k/dev/vs.c                  29
-rw-r--r--   sys/arch/mvme68k/dev/vsdma.c               27
-rw-r--r--   sys/arch/mvme68k/include/vmparam.h          7
-rw-r--r--   sys/arch/mvme68k/mvme68k/genassym.cf       12
-rw-r--r--   sys/arch/mvme68k/mvme68k/hpux_machdep.c    21
-rw-r--r--   sys/arch/mvme68k/mvme68k/locore.s         920
-rw-r--r--   sys/arch/mvme68k/mvme68k/machdep.c        351
-rw-r--r--   sys/arch/mvme68k/mvme68k/mem.c             21
-rw-r--r--   sys/arch/mvme68k/mvme68k/pmap.c           418
-rw-r--r--   sys/arch/mvme68k/mvme68k/trap.c           835
-rw-r--r--   sys/arch/mvme68k/mvme68k/vectors.s        182
-rw-r--r--   sys/arch/mvme68k/mvme68k/vm_machdep.c      40
18 files changed, 1514 insertions, 1370 deletions
diff --git a/sys/arch/mvme68k/conf/GENERIC b/sys/arch/mvme68k/conf/GENERIC
index 8da3850637c..3f19885b71e 100644
--- a/sys/arch/mvme68k/conf/GENERIC
+++ b/sys/arch/mvme68k/conf/GENERIC
@@ -1,8 +1,9 @@
-# $OpenBSD: GENERIC,v 1.17 2001/06/12 22:51:42 miod Exp $
+# $OpenBSD: GENERIC,v 1.18 2001/06/26 21:35:24 miod Exp $
machine mvme68k m68k
include "../../../conf/GENERIC"
+option UVM
option M68030 # support for 030
option M68040 # support for 040
diff --git a/sys/arch/mvme68k/conf/MINIROOT b/sys/arch/mvme68k/conf/MINIROOT
index bfd3d85a84d..30aec99464c 100644
--- a/sys/arch/mvme68k/conf/MINIROOT
+++ b/sys/arch/mvme68k/conf/MINIROOT
@@ -1,8 +1,9 @@
-# $OpenBSD: MINIROOT,v 1.8 2001/06/12 22:51:42 miod Exp $
+# $OpenBSD: MINIROOT,v 1.9 2001/06/26 21:35:24 miod Exp $
machine mvme68k m68k
include "../../../conf/GENERIC"
+option UVM
option M68060 # support for 060
option M68040 # support for 040
diff --git a/sys/arch/mvme68k/conf/MVME147 b/sys/arch/mvme68k/conf/MVME147
index b71a74b0612..91f3f42b2dc 100644
--- a/sys/arch/mvme68k/conf/MVME147
+++ b/sys/arch/mvme68k/conf/MVME147
@@ -1,7 +1,9 @@
-# $OpenBSD: MVME147,v 1.12 2001/05/16 05:07:49 millert Exp $
+# $OpenBSD: MVME147,v 1.13 2001/06/26 21:35:24 miod Exp $
machine mvme68k m68k
+option UVM
+
#option "M68040" # support for 040
#option FPSP # MC68040 floating point support
option "M68030" # support for 030
diff --git a/sys/arch/mvme68k/conf/MVME162 b/sys/arch/mvme68k/conf/MVME162
index a5dd57b48b2..cf3d6e9adcd 100644
--- a/sys/arch/mvme68k/conf/MVME162
+++ b/sys/arch/mvme68k/conf/MVME162
@@ -1,7 +1,9 @@
-# $OpenBSD: MVME162,v 1.15 2001/06/12 22:51:42 miod Exp $
+# $OpenBSD: MVME162,v 1.16 2001/06/26 21:35:24 miod Exp $
machine mvme68k m68k
+option UVM
+
option "M68040" # support for 040
option FPSP # MC68040 floating point support
#option "M68030" # support for 030
diff --git a/sys/arch/mvme68k/conf/MVME167 b/sys/arch/mvme68k/conf/MVME167
index 4b41b0b580d..6065aba4b0b 100644
--- a/sys/arch/mvme68k/conf/MVME167
+++ b/sys/arch/mvme68k/conf/MVME167
@@ -1,7 +1,9 @@
-# $OpenBSD: MVME167,v 1.15 2001/06/12 22:51:43 miod Exp $
+# $OpenBSD: MVME167,v 1.16 2001/06/26 21:35:24 miod Exp $
machine mvme68k m68k
+option UVM
+
option "M68040" # support for 040
option FPSP # MC68040 floating point support
#option "M68030" # support for 030
diff --git a/sys/arch/mvme68k/conf/MVME177 b/sys/arch/mvme68k/conf/MVME177
index 60e5d7f3e77..f57b25b1e96 100644
--- a/sys/arch/mvme68k/conf/MVME177
+++ b/sys/arch/mvme68k/conf/MVME177
@@ -1,8 +1,9 @@
-# $OpenBSD: MVME177,v 1.5 2001/06/12 22:51:43 miod Exp $
+# $OpenBSD: MVME177,v 1.6 2001/06/26 21:35:25 miod Exp $
machine mvme68k m68k
include "../../../conf/GENERIC"
+option UVM
#option M68030 # support for 030
#option M68040 # support for 040
diff --git a/sys/arch/mvme68k/dev/vs.c b/sys/arch/mvme68k/dev/vs.c
index 44681883b8e..0dd744f0727 100644
--- a/sys/arch/mvme68k/dev/vs.c
+++ b/sys/arch/mvme68k/dev/vs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vs.c,v 1.4 2001/06/25 00:43:13 mickey Exp $ */
+/* $OpenBSD: vs.c,v 1.5 2001/06/26 21:35:38 miod Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
@@ -48,23 +48,22 @@
#include <sys/dkstat.h>
#include <sys/buf.h>
#include <sys/malloc.h>
+
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
+
+#include <vm/vm_param.h>
+
#include <machine/autoconf.h>
#include <machine/param.h>
-#define PAGESIZE 4096 /* should get this out of a header? XXX - smurph */
-#ifdef __m88k_
+#ifdef mvme88k
#include <mvme88k/dev/vsreg.h>
#include <mvme88k/dev/vsvar.h>
-#include "machine/mmu.h"
-#define ROUND_PAGE m88k_round_page
-#define TRUNC_PAGE m88k_trunc_page
+#include <machine/mmu.h>
#else
#include <mvme68k/dev/vsreg.h>
#include <mvme68k/dev/vsvar.h>
-#define ROUND_PAGE m68k_round_page
-#define TRUNC_PAGE m68k_trunc_page
#endif
int vs_checkintr __P((struct vs_softc *, struct scsi_xfer *, int *));
@@ -330,7 +329,7 @@ struct scsi_xfer *xs;
* a read, prior to starting the IO.
*/
if (xs->flags & SCSI_DATA_IN) { /* read */
-#if defined(MVME187) || defined(MVME188) || defined(MVME197)
+#if defined(mvme88k)
dma_cachectl((vm_offset_t)xs->data, xs->datalen,
DMA_CACHE_SYNC_INVAL);
#endif
@@ -916,19 +915,19 @@ M328_IOPB *iopb; /* the iopb */
* Check if we need scatter/gather
*/
- if (len > PAGESIZE) {
- for (level = 0, point_virt = ROUND_PAGE(starting_point_virt+1);
+ if (len > PAGE_SIZE) {
+ for (level = 0, point_virt = round_page(starting_point_virt+1);
/* if we do already scatter/gather we have to stay in the loop and jump */
point_virt < virt + (vm_offset_t)len || sg ;
- point_virt += PAGESIZE) { /* out later */
+ point_virt += PAGE_SIZE) { /* out later */
point2_phys = kvtop(point_virt);
- if ((point2_phys - TRUNC_PAGE(point1_phys) - PAGESIZE) || /* physical memory is not contiguous */
+ if ((point2_phys - trunc_page(point1_phys) - PAGE_SIZE) || /* physical memory is not contiguous */
(point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE && sg)) { /* we only can access (1<<16)-1 bytes in scatter/gather_mode */
if (point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE) { /* We were walking too far for one scatter/gather block ... */
- assert( MAX_SG_BLOCK_SIZE > PAGESIZE );
- point_virt = TRUNC_PAGE(starting_point_virt+MAX_SG_BLOCK_SIZE-1); /* So go back to the beginning of the last matching page */
+ assert( MAX_SG_BLOCK_SIZE > PAGE_SIZE );
+ point_virt = trunc_page(starting_point_virt+MAX_SG_BLOCK_SIZE-1); /* So go back to the beginning of the last matching page */
/* and gererate the physadress of this location for the next time. */
point2_phys = kvtop(point_virt);
}
diff --git a/sys/arch/mvme68k/dev/vsdma.c b/sys/arch/mvme68k/dev/vsdma.c
index da8a05fc5f9..5c0979d859d 100644
--- a/sys/arch/mvme68k/dev/vsdma.c
+++ b/sys/arch/mvme68k/dev/vsdma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vsdma.c,v 1.2 2000/06/10 19:53:23 deraadt Exp $ */
+/* $OpenBSD: vsdma.c,v 1.3 2001/06/26 21:35:39 miod Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* All rights reserved.
@@ -46,17 +46,16 @@
#include <scsi/scsiconf.h>
#include <machine/autoconf.h>
-#ifdef __m88k__
-#include <machine/board.h>
+#ifdef mvme88k
#include <mvme88k/dev/vsreg.h>
#include <mvme88k/dev/vsvar.h>
#include <mvme88k/dev/vme.h>
-#include "machine/mmu.h"
+#include <machine/mmu.h>
#else
#include <mvme68k/dev/vsreg.h>
#include <mvme68k/dev/vsvar.h>
#include <mvme68k/dev/vme.h>
-#endif /* defined(MVME187) || defined(MVME188) || defined(MVME197) */
+#endif
int vsmatch __P((struct device *, void *, void *));
void vsattach __P((struct device *, struct device *, void *));
@@ -112,9 +111,9 @@ vsattach(parent, self, auxp)
sc->sc_vsreg = rp = ca->ca_vaddr;
sc->sc_ipl = ca->ca_ipl;
- sc->sc_nvec = ca->ca_vec + 0;
- sc->sc_evec = ca->ca_vec + 1;
- sc->sc_link.adapter_softc = sc;
+ sc->sc_nvec = ca->ca_vec + 0;
+ sc->sc_evec = ca->ca_vec + 1;
+ sc->sc_link.adapter_softc = sc;
sc->sc_link.adapter_target = 7;
sc->sc_link.adapter = &vs_scsiswitch;
sc->sc_link.device = &vs_scsidev;
@@ -124,7 +123,7 @@ vsattach(parent, self, auxp)
sc->sc_ih_n.ih_arg = sc;
sc->sc_ih_n.ih_ipl = ca->ca_ipl;
- sc->sc_ih_e.ih_fn = vs_eintr;
+ sc->sc_ih_e.ih_fn = vs_eintr;
sc->sc_ih_e.ih_arg = sc;
sc->sc_ih_e.ih_ipl = ca->ca_ipl;
@@ -132,8 +131,8 @@ vsattach(parent, self, auxp)
vmeintr_establish(sc->sc_nvec, &sc->sc_ih_n);
vmeintr_establish(sc->sc_evec, &sc->sc_ih_e);
- evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_n);
- evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_e);
+ evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_n);
+ evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_e);
/*
* attach all scsi units on us, watching for boot device
@@ -165,9 +164,9 @@ vs_nintr(sc)
struct vs_softc *sc;
{
#ifdef SDEBUG
- printf("Normal Interrupt!!!\n");
+ printf("Normal Interrupt!!!\n");
#endif
- vs_intr(sc);
+ vs_intr(sc);
sc->sc_intrcnt_n.ev_count++;
return (1);
}
@@ -178,7 +177,7 @@ vs_eintr(sc)
struct vs_softc *sc;
{
#ifdef SDEBUG
- printf("Error Interrupt!!!\n");
+ printf("Error Interrupt!!!\n");
#endif
vs_intr(sc);
sc->sc_intrcnt_e.ev_count++;
diff --git a/sys/arch/mvme68k/include/vmparam.h b/sys/arch/mvme68k/include/vmparam.h
index 27c16cd6b69..23c5e83216e 100644
--- a/sys/arch/mvme68k/include/vmparam.h
+++ b/sys/arch/mvme68k/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.5 2001/05/05 20:56:44 art Exp $ */
+/* $OpenBSD: vmparam.h,v 1.6 2001/06/26 21:35:40 miod Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -173,12 +173,15 @@
#if defined(MACHINE_NEW_NONCONTIG)
/*
* Constants which control the way the VM system deals with memory segments.
- * The hp300 only has one physical memory segment.
+ * The mvme68k only has one physical memory segment.
*/
#define VM_PHYSSEG_MAX 1
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_PHYSSEG_NOADD
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
/*
* pmap-specific data stored in the vm_physmem[] array.
*/
diff --git a/sys/arch/mvme68k/mvme68k/genassym.cf b/sys/arch/mvme68k/mvme68k/genassym.cf
index dfb1eec5957..1264deaae86 100644
--- a/sys/arch/mvme68k/mvme68k/genassym.cf
+++ b/sys/arch/mvme68k/mvme68k/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.6 2001/04/05 20:39:38 deraadt Exp $
+# $OpenBSD: genassym.cf,v 1.7 2001/06/26 21:35:41 miod Exp $
#
# Copyright (c) 1995 Theo de Raadt
@@ -83,6 +83,10 @@ include <machine/prom.h>
include <machine/pte.h>
include <vm/vm.h>
+ifdef UVM
+include <uvm/uvm_extern.h>
+endif
+
define __XXX_BUG_FODDER 0
# CPU options
@@ -127,8 +131,11 @@ define SSLEEP SSLEEP
define SRUN SRUN
# interrupt/fault metering
-define V_SWTCH offsetof(struct vmmeter, v_swtch)
+ifdef UVM
+define UVMEXP_INTRS offsetof(struct uvmexp, intrs)
+else
define V_INTR offsetof(struct vmmeter, v_intr)
+endif
# trap types (should just include trap.h?)
define T_BUSERR T_BUSERR
@@ -202,6 +209,7 @@ define SIZEOF_TRAPFRAME sizeof(struct trapframe)
define FR_SP offsetof(struct frame, f_regs[15])
define FR_HW offsetof(struct frame, f_sr)
define FR_ADJ offsetof(struct frame, f_stackadj)
+define FR_SIZE sizeof(struct trapframe)
# system calls
define SYS_exit SYS_exit
diff --git a/sys/arch/mvme68k/mvme68k/hpux_machdep.c b/sys/arch/mvme68k/mvme68k/hpux_machdep.c
index 0646fe937fc..3fb571d117f 100644
--- a/sys/arch/mvme68k/mvme68k/hpux_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/hpux_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpux_machdep.c,v 1.1 1997/03/31 00:24:11 downsj Exp $ */
+/* $OpenBSD: hpux_machdep.c,v 1.2 2001/06/26 21:35:41 miod Exp $ */
/* $NetBSD: hpux_machdep.c,v 1.9 1997/03/16 10:00:45 thorpej Exp $ */
/*
@@ -76,6 +76,10 @@
#include <vm/vm_param.h>
#include <vm/vm_map.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
#include <machine/cpu.h>
#include <machine/reg.h>
@@ -140,7 +144,7 @@ hpux_cpu_makecmds(p, epp)
struct proc *p;
struct exec_package *epp;
{
- struct hpux_exec *hpux_ep = epp->ep_hdr;
+ /* struct hpux_exec *hpux_ep = epp->ep_hdr; */
/* set up command for exec header */
NEW_VMCMD(&epp->ep_vmcmds, hpux_cpu_vmcmd,
@@ -454,8 +458,13 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
psp->ps_sigstk.ss_flags |= SS_ONSTACK;
} else
fp = (struct hpuxsigframe *)(frame->f_regs[SP] - fsize);
+#if defined(UVM)
+ if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
+ (void)uvm_grow(p, (unsigned)fp);
+#else
if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
(void)grow(p, (unsigned)fp);
+#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
@@ -463,7 +472,11 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
#endif
+#if defined(UVM)
+ if (uvm_useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
+#else
if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
+#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
printf("hpux_sendsig(%d): useracc failed on sig %d\n",
@@ -620,7 +633,11 @@ hpux_sys_sigreturn(p, v, retval)
* Fetch and test the HP-UX context structure.
* We grab it all at once for speed.
*/
+#if defined(UVM)
+ if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
+#else
if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
+#endif
copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
return (EINVAL);
scp = &tsigc;
diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s
index c20395ca18d..b446c88c1f5 100644
--- a/sys/arch/mvme68k/mvme68k/locore.s
+++ b/sys/arch/mvme68k/mvme68k/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.24 2001/04/05 20:39:39 deraadt Exp $ */
+/* $OpenBSD: locore.s,v 1.25 2001/06/26 21:35:41 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -78,6 +78,22 @@
#include <machine/prom.h>
/*
+ * Macro to relocate a symbol, used before MMU is enabled.
+ */
+#define _RELOC(var, ar) \
+ lea var,ar
+
+#define RELOC(var, ar) _RELOC(_C_LABEL(var), ar)
+#define ASRELOC(var, ar) _RELOC(_ASM_LABEL(var), ar)
+
+/*
+ * Macro to invoke a BUG routine.
+ */
+#define BUGCALL(id) \
+ trap #15; \
+ .short id
+
+/*
* Temporary stack for a variety of purposes.
* Try and make this the first thing is the data segment so it
* is page aligned. Note that if we overflow here, we run into
@@ -85,11 +101,7 @@
*/
.data
.space NBPG
- .globl tmpstk
-tmpstk:
-
-#define RELOC(var, ar) \
- lea var,ar
+ASLOCAL(tmpstk)
/*
* Initialization
@@ -98,21 +110,23 @@ tmpstk:
* On entry, args on stack are boot device, boot filename, console unit,
* boot flags (howto), boot device name, filesystem type name.
*/
- .comm _lowram,4
- .comm _esym,4
- .comm _emini,4
- .comm _smini,4
- .comm _needprom,4
- .comm _promvbr,4
- .comm _promcall,4
+BSS(lowram, 4)
+BSS(esym, 4)
+BSS(emini, 4)
+BSS(smini, 4)
+BSS(needprom, 4)
+BSS(promvbr, 4)
+BSS(promcall, 4)
.text
- .globl _edata
- .globl _etext,_end
- .globl start
- .globl _kernel_text
-_kernel_text:
-start:
+/*
+GLOBAL(edata)
+GLOBAL(etext)
+GLOBAL(end)
+*/
+GLOBAL(kernel_text)
+
+ASENTRY_NOPROFILE(start)
movw #PSL_HIGHIPL,sr | no interrupts
movl #0,a5 | RAM starts at 0
movl sp@(4), d7 | get boothowto
@@ -123,41 +137,40 @@ start:
movl sp@(24),d2 | get esyms
/* note: d2-d7 in use */
- RELOC(tmpstk, a0)
+ ASRELOC(tmpstk, a0)
movl a0,sp | give ourselves a temporary stack
- RELOC(_edata, a0) | clear out BSS
- movl #_end-4,d0 | (must be <= 256 kB)
- subl #_edata,d0
+ RELOC(edata, a0) | clear out BSS
+ movl #_C_LABEL(end)-4,d0 | (must be <= 256 kB)
+ subl #_C_LABEL(edata),d0
lsrl #2,d0
1: clrl a0@+
dbra d0,1b
movc vbr,d0 | save prom's trap #15 vector
- RELOC(_promvbr, a0)
+ RELOC(promvbr, a0)
movl d0, a0@
- RELOC(_esym, a0)
+ RELOC(esym, a0)
movl d2,a0@ | store end of symbol table
/* note: d2 now free, d3-d7 still in use */
- RELOC(_lowram, a0)
+ RELOC(lowram, a0)
movl a5,a0@ | store start of physical memory
clrl sp@-
- trap #15
- .short MVMEPROM_GETBRDID
+ BUGCALL(MVMEPROM_GETBRDID)
movl sp@+, a1
movl #SIZEOF_MVMEPROM_BRDID, d0 | copy to local variables
- RELOC(_brdid, a0)
+ RELOC(brdid, a0)
1: movb a1@+, a0@+
subql #1, d0
bne 1b
clrl d0
- RELOC(_brdid, a1)
+ RELOC(brdid, a1)
movw a1@(MVMEPROM_BRDID_MODEL), d0
- RELOC(_cputyp, a0)
+ RELOC(cputyp, a0)
movl d0, a0@ | init _cputyp
#ifdef MVME147
@@ -192,7 +205,7 @@ start:
#endif
.data
-notsup: .ascii "kernel does not support this model."
+notsup: .asciz "kernel does not support this model."
notsupend:
.even
.text
@@ -200,20 +213,18 @@ notsupend:
| first we bitch, then we die.
movl #notsupend, sp@-
movl #notsup, sp@-
- trap #15
- .short MVMEPROM_OUTSTRCRLF
+ BUGCALL(MVMEPROM_OUTSTRCRLF)
addql #8,sp
- trap #15
- .short MVMEPROM_EXIT | return to m68kbug
- /*NOTREACHED */
+ BUGCALL(MVMEPROM_EXIT) | return to m68kbug
+ /*NOTREACHED*/
#ifdef MVME147
is147:
- RELOC(_mmutype, a0) | no, we have 68030
+ RELOC(mmutype, a0) | no, we have 68030
movl #MMU_68030,a0@ | set to reflect 68030 PMMU
- RELOC(_cputype, a0) | no, we have 68030
+ RELOC(cputype, a0) | no, we have 68030
movl #CPU_68030,a0@ | set to reflect 68030 CPU
movl #CACHE_OFF,d0
@@ -225,14 +236,14 @@ is147:
movl #0xfffe0000, a0 | mvme147 nvram base
| move nvram component of etheraddr (only last 3 bytes)
- RELOC(_myea, a1)
+ RELOC(myea, a1)
movw a0@(NVRAM_147_ETHER+0), a1@(3+0)
movb a0@(NVRAM_147_ETHER+2), a1@(3+2)
movl a0@(NVRAM_147_EMEM), d1 | pass memory size
- RELOC(_iiomapsize, a1)
+ RELOC(iiomapsize, a1)
movl #INTIOSIZE_147, a1@
- RELOC(_iiomapbase, a1)
+ RELOC(iiomapbase, a1)
movl #INTIOBASE_147, a1@
bra Lstart1
#endif
@@ -249,17 +260,17 @@ is162:
clrl 0xfff42018 | XXX MCchip timers irq off
clrb 0xfff4201d | XXX MCchip scc irq off
#endif
- RELOC(_memsize162, a1) | how much memory?
+ RELOC(memsize162, a1) | how much memory?
jbsr a1@
movl d0, d2
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
movl #MMU_68040,a0@ | with a 68040 MMU
- RELOC(_cputype, a0) | no, we have 68040
+ RELOC(cputype, a0) | no, we have 68040
movl #CPU_68040,a0@ | set to reflect 68040 CPU
- RELOC(_fputype, a0)
+ RELOC(fputype, a0)
movl #FPU_68040,a0@ | and a 68040 FPU
bra is16x
@@ -267,20 +278,20 @@ is162:
#ifdef MVME167
is167:
-| RELOC(_needprom,a0) | this machine needs the prom mapped!
+| RELOC(needprom,a0) | this machine needs the prom mapped!
| movl #1,a0@
- RELOC(_memsize1x7, a1) | how much memory?
+ RELOC(memsize1x7, a1) | how much memory?
jbsr a1@
movl d0, d2
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
movl #MMU_68040,a0@ | with a 68040 MMU
- RELOC(_cputype, a0) | no, we have 68040
+ RELOC(cputype, a0) | no, we have 68040
movl #CPU_68040,a0@ | set to reflect 68040 CPU
- RELOC(_fputype, a0)
+ RELOC(fputype, a0)
movl #FPU_68040,a0@ | and a 68040 FPU
bra is16x
@@ -289,7 +300,7 @@ is167:
#ifdef MVME172
is172:
- RELOC(_memsize162, a1) | how much memory?
+ RELOC(memsize162, a1) | how much memory?
jbsr a1@
movl d0, d2
@@ -298,13 +309,13 @@ is172:
bset #0,d0 | turn on bit 0.
.word 0x4e7b,0x0808 | movc d0,pcr Bang!
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
movl #MMU_68060,a0@ | with a 68060 MMU
- RELOC(_cputype, a0) | no, we have 68060
+ RELOC(cputype, a0) | no, we have 68060
movl #CPU_68060,a0@ | set to reflect 68060 CPU
- RELOC(_fputype, a0)
+ RELOC(fputype, a0)
movl #FPU_68060,a0@ | and a 68060 FPU
bra is16x
@@ -313,10 +324,10 @@ is172:
#ifdef MVME177
is177:
-| RELOC(_needprom,a0) | this machine needs the prom mapped!
+| RELOC(needprom,a0) | this machine needs the prom mapped!
| movl #1,a0@
- RELOC(_memsize1x7, a1) | how much memory?
+ RELOC(memsize1x7, a1) | how much memory?
jbsr a1@
movl d0, d2
@@ -325,42 +336,40 @@ is177:
bset #0,d0 | turn on bit 0.
.word 0x4e7b,0x0808 | movc d0,pcr Bang! We are smokin' !
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
movl #MMU_68060,a0@ | with a 68060 MMU
- RELOC(_cputype, a0) | no, we have 68060
+ RELOC(cputype, a0) | no, we have 68060
movl #CPU_68060,a0@ | set to reflect 68060 CPU
- RELOC(_fputype, a0)
+ RELOC(fputype, a0)
movl #FPU_68060,a0@ | and a 68060 FPU
bra is16x
#endif
#if defined(MVME162) || defined(MVME167) || defined(MVME177) || defined(MVME172)
- .data
#define ROMPKT_LEN 200
- .comm _rompkt, ROMPKT_LEN
+BSS(rompkt, ROMPKT_LEN)
.even
.text
is16x:
- RELOC(_iiomapsize, a1)
+ RELOC(iiomapsize, a1)
movl #INTIOSIZE_162, a1@
- RELOC(_iiomapbase, a1)
+ RELOC(iiomapbase, a1)
movl #INTIOBASE_162, a1@
/* get ethernet address */
- RELOC(_rompkt, a0) | build a .NETCTRL packet
+ RELOC(rompkt, a0) | build a .NETCTRL packet
movb #0, a0@(NETCTRL_DEV) | onboard ethernet
movb #0, a0@(NETCTRL_CTRL) | onboard ethernet
movl #NETCTRLCMD_GETETHER, a0@(NETCTRL_CMD)
- RELOC(_myea, a1)
+ RELOC(myea, a1)
movl a1, a0@(NETCTRL_ADDR) | where to put it
movl #6, a0@(NETCTRL_LEN) | it is 6 bytes long
movl a0, sp@-
- trap #15
- .short MVMEPROM_NETCTRL | ask the rom
+ BUGCALL(MVMEPROM_NETCTRL) | ask the rom
addl #4, sp
#if 0
@@ -368,12 +377,11 @@ is16x:
* get memory size using ENVIRON. unfortunately i've not managed
* to get this working.
*/
- RELOC(_rompkt, a0)
+ RELOC(rompkt, a0)
movl #ENVIRONCMD_READ, sp@- | request environment information
movl #ROMPKT_LEN, sp@- | max length
movl a0, sp@- | point to info packet
- trap #15
- .short MVMEPROM_ENVIRON | ask the rom
+ BUGCALL(MVMEPROM_ENVIRON) | ask the rom
addl #12, sp
| XXX should check return values
@@ -399,15 +407,14 @@ is16x:
movl #unkmemend, sp@-
movl #unkmem, sp@-
- trap #15
- .short MVMEPROM_OUTSTRCRLF
+ BUGCALL(MVMEPROM_OUTSTRCRLF)
addql #8,sp
movl #4*1024*1024, d1 | XXX assume 4M of ram
bra Lstart1
.data
-unkmem: .ascii "could not figure out how much memory; assuming 4M."
+unkmem: .asciz "could not figure out how much memory; assuming 4M."
unkmemend:
.even
.text
@@ -421,21 +428,21 @@ Lstart1:
movc d0,dfc | and destination of transfers
moveq #PGSHIFT,d2
lsrl d2,d1 | convert to page (click) number
- RELOC(_maxmem, a0)
+ RELOC(maxmem, a0)
movl d1,a0@ | save as maxmem
movl a5,d0 | lowram value from ROM via boot
lsrl d2,d0 | convert to page number
subl d0,d1 | compute amount of RAM present
- RELOC(_physmem, a0)
+ RELOC(physmem, a0)
movl d1,a0@ | and physmem
+
/* configure kernel and proc0 VA space so we can get going */
- .globl _Sysseg, _pmap_bootstrap, _avail_start
#if defined(DDB) || NKSYMS > 0
- RELOC(_esym,a0) | end of static kernel text/data/syms
+ RELOC(esym,a0) | end of static kernel text/data/syms
movl a0@,d2
jne Lstart2
#endif
- movl #_end,d2 | end of static kernel text/data
+ movl #_C_LABEL(end),d2 | end of static kernel text/data
Lstart2:
addl #NBPG-1,d2
andl #PG_FRAME,d2 | round to a page
@@ -456,12 +463,12 @@ Lstart2:
#endif
/* do pmap_bootstrap stuff */
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
cmpl #MMU_68060,a0@ | 68060?
jne Lpmap040 | no, skip
pea a5@ | firstpa
pea a4@ | nextpa
- RELOC(_pmap_bootstrap060,a0)
+ RELOC(pmap_bootstrap060,a0)
jbsr a0@ | pmap_bootstrap(firstpa, nextpa)
addql #8,sp
bra Lmmu_enable
@@ -469,7 +476,7 @@ Lstart2:
Lpmap040:
pea a5@ | firstpa
pea a4@ | nextpa
- RELOC(_pmap_bootstrap,a0)
+ RELOC(pmap_bootstrap,a0)
jbsr a0@ | pmap_bootstrap(firstpa, nextpa)
addql #8,sp
@@ -478,27 +485,27 @@ Lpmap040:
* Since the kernel is mapped logical == physical, we just turn it on.
*/
Lmmu_enable:
- RELOC(_Sysseg, a0) | system segment table addr
+ RELOC(Sysseg, a0) | system segment table addr
movl a0@,d1 | read value (a KVA)
addl a5,d1 | convert to PA
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
cmpl #MMU_68040,a0@ | 68040 or 68060?
jgt Lmotommu1 | no, skip
.long 0x4e7b1807 | movc d1,srp
.long 0x4e7b1806 | movc d1,urp
jra Lstploaddone
Lmotommu1:
- RELOC(_protorp, a0)
+ RELOC(protorp, a0)
movl #0x80000202,a0@ | nolimit + share global + 4 byte PTEs
movl d1,a0@(4) | + segtable address
pmove a0@,srp | load the supervisor root pointer
movl #0x80000002,a0@ | reinit upper half for CRP loads
Lstploaddone:
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
cmpl #MMU_68040,a0@ | 68040 or 68060?
jgt Lmotommu2 | no, skip
- RELOC(_needprom,a0)
+ RELOC(needprom,a0)
cmpl #0,a0@
beq 1f
/*
@@ -527,7 +534,7 @@ Lstploaddone:
movl #0x8000,d0
.long 0x4e7b0003 | movc d0,tc
/* Enable 68060 extensions here */
- RELOC(_mmutype, a0)
+ RELOC(mmutype, a0)
cmpl #MMU_68060,a0@ | 68060?
jne Lchache040
movl #CACHE60_ON,d0 | branch cache, etc...
@@ -546,25 +553,31 @@ Lenab1:
* Should be running mapped from this point on
*/
/* select the software page size now */
- lea tmpstk,sp | temporary stack
- jbsr _vm_set_page_size | select software page size
+ lea _ASM_LABEL(tmpstk),sp | temporary stack
+#if defined(UVM)
+ jbsr _C_LABEL(uvm_setpagesize) | select software page size
+#else
+ jbsr _C_LABEL(vm_set_page_size) | select software page size
+#endif
/* set kernel stack, user SP, and initial pcb */
- movl _proc0paddr,a1 | get proc0 pcb addr
+ movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
lea a1@(USPACE-4),sp | set kernel stack to end of area
+ lea _C_LABEL(proc0), a2 | initialize proc0.p_addr so that
+ movl a1,a2@(P_ADDR) | we don't deref NULL in trap()
movl #USRSTACK-4,a2
movl a2,usp | init user SP
- movl a1,_curpcb | proc0 is running
+ movl a1,_C_LABEL(curpcb) | proc0 is running
- tstl _fputype | Have an FPU?
+ tstl _C_LABEL(fputype) | Have an FPU?
jeq Lenab2 | No, skip.
clrl a1@(PCB_FPCTX) | ensure null FP context
movl a1,sp@-
- jbsr _m68881_restore | restore it (does not kill a1)
+ jbsr _C_LABEL(m68881_restore) | restore it (does not kill a1)
addql #4,sp
Lenab2:
/* flush TLB and turn on caches */
- jbsr _TBIA | invalidate TLB
- cmpl #MMU_68040,_mmutype | 68040?
+ jbsr _C_LABEL(TBIA) | invalidate TLB
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
jeq Lnocache0 | yes, cache already on
cmpl #MMU_68060,_mmutype | 68060?
jeq Lnocache0 | yes, cache already on
@@ -573,15 +586,15 @@ Lenab2:
Lnocache0:
/* final setup for C code */
#if 1
- movl #_vectab,d2 | set VBR
+ movl #_vectab,d2 | set VBR
movc d2,vbr
#endif
- movw #PSL_LOWIPL,sr | lower SPL
- movl d3, _bootpart | save bootpart
- movl d4, _bootdevlun | save bootdevlun
- movl d5, _bootctrllun | save bootctrllun
- movl d6, _bootaddr | save bootaddr
- movl d7, _boothowto | save boothowto
+ movw #PSL_LOWIPL,sr | lower SPL
+ movl d3, _C_LABEL(bootpart) | save bootpart
+ movl d4, _C_LABEL(bootdevlun) | save bootdevlun
+ movl d5, _C_LABEL(bootctrllun) | save bootctrllun
+ movl d6, _C_LABEL(bootaddr) | save bootaddr
+ movl d7, _C_LABEL(boothowto) | save boothowto
/* d3-d7 now free */
/* Final setup for call to main(). */
@@ -597,76 +610,35 @@ Lnocache0:
movw #PSL_USER,sp@- | in user mode
clrl sp@- | stack adjust count and padding
lea sp@(-64),sp | construct space for D0-D7/A0-A7
- lea _proc0,a0 | save pointer to frame
+ lea _C_LABEL(proc0),a0 | save pointer to frame
movl sp,a0@(P_MD_REGS) | in proc0.p_md.md_regs
- jra _main | main()
-
- pea 1f
- jbsr _panic
-1:
- .asciz "main returned"
- .even
+ jra _C_LABEL(main) | main()
+ PANIC("main() returned")
+ /* NOTREACHED */
- .globl _proc_trampoline
-_proc_trampoline:
- movl a3,sp@-
- jbsr a2@
- addql #4,sp
+/*
+ * proc_trampoline: call function in register a2 with a3 as an arg
+ * and then rei.
+ */
+GLOBAL(proc_trampoline)
+ movl a3,sp@- | push function arg
+ jbsr a2@ | call function
+ addql #4,sp | pop arg
movl sp@(FR_SP),a0 | grab and load
movl a0,usp | user SP
moveml sp@+,#0x7FFF | restore most user regs
addql #8,sp | toss SP and stack adjust
- jra rei | and return
+ jra _ASM_LABEL(rei) | and return
-/*
- * Signal "trampoline" code (18 bytes). Invoked from RTE setup by sendsig().
- *
- * Stack looks like:
- *
- * sp+0 -> signal number
- * sp+4 pointer to siginfo (sip)
- * sp+8 pointer to signal context frame (scp)
- * sp+12 address of handler
- * sp+16 saved hardware state
- * .
- * .
- * scp+0-> beginning of signal context frame
- */
- .globl _sigcode, _esigcode, _sigcodetrap
- .data
-_sigcode:
- movl sp@(12),a0 | signal handler addr (4 bytes)
- jsr a0@ | call signal handler (2 bytes)
- addql #4,sp | pop signo (2 bytes)
-_sigcodetrap:
- trap #1 | special syscall entry (2 bytes)
- movl d0,sp@(4) | save errno (4 bytes)
- moveq #1,d0 | syscall == exit (2 bytes)
- trap #0 | exit(errno) (2 bytes)
- .align 2
-_esigcode:
- .text
/*
- * Do a dump.
- * Called by auto-restart.
- */
- .globl _dumpsys
- .globl _doadump
-_doadump:
- jbsr _dumpsys
- jbsr _doboot
- /*NOTREACHED*/
-/*
* Trap/interrupt vector routines - new for 060
*/
#include <m68k/m68k/trap_subr.s>
- .globl _trap, _nofault, _longjmp
#if defined(M68040) || defined(M68060)
- .globl _addrerr4060
-_addrerr4060:
+ENTRY_NOPROFILE(addrerr4060)
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
movl usp,a0 | save the user SP
@@ -678,12 +650,11 @@ _addrerr4060:
#endif
#if defined(M68060)
- .globl _buserr60
-_buserr60:
- tstl _nofault | device probe?
+ENTRY_NOPROFILE(buserr60)
+ tstl _C_LABEL(nofault) | device probe?
jeq Lbuserr60 | no, handle as usual
- movl _nofault,sp@- | yes,
- jbsr _longjmp | longjmp(nofault)
+ movl _C_LABEL(nofault),sp@- | yes,
+ jbsr _C_LABEL(longjmp) | longjmp(nofault)
Lbuserr60:
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
@@ -700,7 +671,7 @@ Lbuserr60:
andl #0x7ffd,d1
jeq _ASM_LABEL(faultstkadjnotrap2)
Lnobpe:
- | we need to adjust for misaligned addresses
+| we need to adjust for misaligned addresses
movl sp@(FR_HW+8),d1 | grab VA
btst #27,d0 | check for mis-aligned access
jeq Lberr3 | no, skip
@@ -716,12 +687,12 @@ Lberr3:
jra Lismerr | no, MMU fault.
#endif
#if defined(M68040)
- .globl _buserr40
-_buserr40:
- tstl _nofault | device probe?
+ENTRY_NOPROFILE(buserr40)
+ tstl _C_LABEL(nofault) | device probe?
jeq Lbuserr40 | no, handle as usual
- movl _nofault,sp@- | yes,
- jbsr _longjmp | longjmp(nofault)
+ movl _C_LABEL(nofault),sp@- | yes,
+ jbsr _C_LABEL(longjmp) | longjmp(nofault)
+ /* NOTREACHED */
Lbuserr40:
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
@@ -743,19 +714,16 @@ Lbe1stpg:
jra _ASM_LABEL(faultstkadj) | and deal with it
#endif
-/*
- * Trap/interrupt vector routines
- */
- .globl _trap, _nofault, _longjmp
-_buserr:
- tstl _nofault | device probe?
+_C_LABEL(buserr):
+ tstl _C_LABEL(nofault) | device probe?
jeq Lberr | no, handle as usual
- movl _nofault,sp@- | yes,
- jbsr _longjmp | longjmp(nofault)
+ movl _C_LABEL(nofault),sp@- | yes,
+ jbsr _C_LABEL(longjmp) | longjmp(nofault)
+ /* NOTREACHED */
Lberr:
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040?
- jgt _addrerr | no, skip
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
+ jgt _C_LABEL(addrerr) | no, skip
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
movl usp,a0 | save the user SP
@@ -782,14 +750,14 @@ Lberr2:
movw d0,sp@ | save (ONLY LOW 16 BITS!)
jra Lismerr
#endif
-_addrerr:
+_C_LABEL(addrerr):
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
movl usp,a0 | save the user SP
movl a0,sp@(FR_SP) | in the savearea
lea sp@(FR_HW),a1 | grab base of HW berr frame
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040?
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
jgt Lbenot040 | no, skip
movl a1@(8),sp@- | yes, push fault address
clrl sp@- | no SSW for address fault
@@ -864,49 +832,28 @@ Lmightnotbemerr:
jeq Lisberr1 | yes, was not WPE, must be bus err
Lismerr:
movl #T_MMUFLT,sp@- | show that we are an MMU fault
- jra Ltrapnstkadj | and deal with it
+ jra _ASM_LABEL(faultstkadj) | and deal with it
Lisaerr:
movl #T_ADDRERR,sp@- | mark address error
- jra Ltrapnstkadj | and deal with it
+ jra _ASM_LABEL(faultstkadj) | and deal with it
Lisberr1:
clrw sp@ | re-clear pad word
Lisberr:
movl #T_BUSERR,sp@- | mark bus error
-Ltrapnstkadj:
- jbsr _trap | handle the error
- lea sp@(12),sp | pop value args
- movl sp@(FR_SP),a0 | restore user SP
- movl a0,usp | from save area
- movw sp@(FR_ADJ),d0 | need to adjust stack?
- jne Lstkadj | yes, go to it
- moveml sp@+,#0x7FFF | no, restore most user regs
- addql #8,sp | toss SSP and stkadj
- jra rei | all done
-Lstkadj:
- lea sp@(FR_HW),a1 | pointer to HW frame
- addql #8,a1 | source pointer
- movl a1,a0 | source
- addw d0,a0 | + hole size = dest pointer
- movl a1@-,a0@- | copy
- movl a1@-,a0@- | 8 bytes
- movl a0,sp@(FR_SP) | new SSP
- moveml sp@+,#0x7FFF | restore user registers
- movl sp@,sp | and our SP
- jra rei | all done
+ jra _ASM_LABEL(faultstkadj) | and deal with it
/*
* FP exceptions.
*/
-_fpfline:
+ENTRY_NOPROFILE(fpfline)
#if defined(M68040) || defined(M68060)
- cmpl #FPU_68040,_fputype | 68040 or 68060 FPU?
+ cmpl #FPU_68040,_C_LABEL(fputype) | 68040 or 68060 FPU?
jlt Lfp_unimp | no, skip FPSP
cmpw #0x202c,sp@(6) | format type 2?
- jne _illinst | no, not an FP emulation
+ jne _C_LABEL(illinst) | no, not an FP emulation
Ldofp_unimp:
#ifdef FPSP
- .globl fpsp_unimp
- jmp fpsp_unimp | yes, go handle it
+ jmp _ASM_LABEL(fpsp_unimp) | yes, go handle it
#endif
Lfp_unimp:
#endif/* M68040 || M68060 */
@@ -914,18 +861,17 @@ Lfp_unimp:
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save registers
moveq #T_FPEMULI,d0 | denote as FP emulation trap
- jra fault | do it
+ jra _ASM_LABEL(fault) | do it
#else
- jra _illinst
+ jra _C_LABEL(illinst)
#endif
-_fpunsupp:
+ENTRY_NOPROFILE(fpunsupp)
#if defined(M68040) || defined(M68060)
- cmpl #FPU_68040,_fputype | 68040 or 68060 FPU?
- jlt _illinst | no, treat as illinst
+ cmpl #FPU_68040,_C_LABEL(fputype) | 68040 or 68060 FPU?
+ jlt _C_LABEL(illinst) | no, treat as illinst
#ifdef FPSP
- .globl fpsp_unsupp
- jmp fpsp_unsupp | yes, go handle it
+ jmp _ASM_LABEL(fpsp_unsupp) | yes, go handle it
#endif
Lfp_unsupp:
#endif /* M68040 */
@@ -933,9 +879,9 @@ Lfp_unsupp:
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save registers
moveq #T_FPEMULD,d0 | denote as FP emulation trap
- jra fault | do it
+ jra _ASM_LABEL(fault) | do it
#else
- jra _illinst
+ jra _C_LABEL(illinst)
#endif
/*
@@ -944,19 +890,18 @@ Lfp_unsupp:
* and may cause signal delivery, we need to test for stack adjustment
* after the trap call.
*/
- .globl _fpfault
-_fpfault:
+ENTRY_NOPROFILE(fpfault)
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
movl usp,a0 | and save
movl a0,sp@(FR_SP) | the user stack pointer
clrl sp@- | no VA arg
- movl _curpcb,a0 | current pcb
+ movl _C_LABEL(curpcb),a0 | current pcb
lea a0@(PCB_FPCTX),a0 | address of FP savearea
fsave a0@ | save state
#if defined(M68040) || defined(M68060)
/* always null state frame on 68040, 68060 */
- cmpl #CPU_68040,_cputype
+ cmpl #CPU_68040,_C_LABEL(cputype)
jge Lfptnull
#endif
tstb a0@ | null state frame?
@@ -968,47 +913,48 @@ Lfptnull:
fmovem fpsr,sp@- | push fpsr as code argument
frestore a0@ | restore state
movl #T_FPERR,sp@- | push type arg
- jra Ltrapnstkadj | call trap and deal with stack cleanup
+ jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
- .globl _hardtrap, _hardintr
-_hardtrap:
+/*
+ * Other exceptions only cause four and six word stack frame and require
+ * no post-trap stack adjustment.
+ */
+ENTRY_NOPROFILE(hardtrap)
moveml #0xC0C0,sp@- | save scratch regs
lea sp@(16),a1 | get pointer to frame
movl a1,sp@-
movw sp@(26),d0
movl d0,sp@- | push exception vector info
movl sp@(26),sp@- | and PC
- jbsr _hardintr | doit
+ jbsr _C_LABEL(hardintr) | doit
lea sp@(12),sp | pop args
moveml sp@+,#0x0303 | restore regs
- jra rei | all done
+ jra _ASM_LABEL(rei) | all done
- .globl _straytrap
-_badtrap:
+ENTRY_NOPROFILE(badtrap)
moveml #0xC0C0,sp@- | save scratch regs
movw sp@(22),sp@- | push exception vector info
clrw sp@-
movl sp@(22),sp@- | and PC
- jbsr _straytrap | report
+ jbsr _C_LABEL(straytrap) | report
addql #8,sp | pop args
moveml sp@+,#0x0303 | restore regs
- jra rei | all done
+ jra _ASM_LABEL(rei) | all done
- .globl _syscall
-_trap0:
+ENTRY_NOPROFILE(trap0)
clrl sp@- | stack adjust count
moveml #0xFFFF,sp@- | save user registers
movl usp,a0 | save the user SP
movl a0,sp@(FR_SP) | in the savearea
movl d0,sp@- | push syscall number
- jbsr _syscall | handle it
+ jbsr _C_LABEL(syscall) | handle it
addql #4,sp | pop syscall arg
- tstl _astpending
+ tstl _C_LABEL(astpending)
jne Lrei2
- tstb _ssir
+ tstb _C_LABEL(ssir)
jeq Ltrap1
movw #SPL1,sr
- tstb _ssir
+ tstb _C_LABEL(ssir)
jne Lsir1
Ltrap1:
movl sp@(FR_SP),a0 | grab and restore
@@ -1020,13 +966,13 @@ Ltrap1:
/*
* Trap 1 - sigreturn
*/
-_trap1:
+ENTRY_NOPROFILE(trap1)
jra _ASM_LABEL(sigreturn)
/*
* Trap 2 - trace trap
*/
-_trap2:
+ENTRY_NOPROFILE(trap2)
jra _C_LABEL(trace)
/*
@@ -1034,26 +980,48 @@ _trap2:
* cachectl(command, addr, length)
* command in d0, addr in a1, length in d1
*/
- .globl _cachectl
-_trap12:
+ENTRY_NOPROFILE(trap12)
movl d1,sp@- | push length
movl a1,sp@- | push addr
movl d0,sp@- | push command
- jbsr _cachectl | do it
+ jbsr _C_LABEL(cachectl) | do it
lea sp@(12),sp | pop args
- jra rei | all done
+ jra _ASM_LABEL(rei) | all done
+
+/*
+ * Trace (single-step) trap (trap 1 or 2) instruction. Kernel-mode is
+ * special. User mode traps are simply passed on to trap().
+ */
+ENTRY_NOPROFILE(trace)
+ clrl sp@-
+ moveml #0xFFFF,sp@-
+ moveq #T_TRACE,d0
+
+ | Check PSW and see what happened.
+ | T=0 S=0 (should not happen)
+ | T=1 S=0 trace trap from user mode
+ | T=0 S=1 trace trap on a trap instruction
+ | T=0 S=0 trace trap from system mode (kernel breakpoint)
+
+ movw sp@(FR_HW),d1 | get SSW
+ notw d1 | XXX no support for T0 on 680[234]0
+ andw #PSL_S,d1 | from system mode (T=1, S=1)?
+ jeq Lkbrkpt | yes, kernel breakpoint
+ jra _ASM_LABEL(fault) | no, user-mode fault
/*
* Trap 15 is used for:
- * - KGDB traps
+ * - GDB breakpoints (in user programs)
+ * - KGDB breakpoints (in the kernel)
* - trace traps for SUN binaries (not fully supported yet)
* - calling the prom, but only from the kernel
* We just pass it on and let trap() sort it all out
*/
-_trap15:
- clrl sp@-
+ENTRY_NOPROFILE(trap15)
+ clrl sp@- | stack adjust count
moveml #0xFFFF,sp@-
- tstl _promcall
+
+ tstl _C_LABEL(promcall)
jeq L_notpromcall
moveml sp@+,#0xFFFF
addql #4, sp
@@ -1063,7 +1031,7 @@ _trap15:
subql #4,sp
link a6,#0
moveml #0xFFFE,sp@-
- movl _promvbr,a0
+ movl _C_LABEL(promvbr),a0
movw a6@(14),d0
andl #0xfff,d0
movl a0@(d0:w),a6@(4)
@@ -1072,38 +1040,79 @@ _trap15:
rts
| really jumps to the bug trap handler
L_notpromcall:
-#ifdef KGDB
moveq #T_TRAP15,d0
movw sp@(FR_HW),d1 | get PSW
- andw #PSL_S,d1 | from user mode?
- jeq fault | yes, just a regular fault
- movl d0,sp@-
- .globl _kgdb_trap_glue
- jbsr _kgdb_trap_glue | returns if no debugger
- addl #4,sp
-#endif
- moveq #T_TRAP15,d0
- jra fault
+ andw #PSL_S,d1 | from system mode?
+ jne Lkbrkpt | yes, kernel breakpoint
+ jra _ASM_LABEL(fault) | no, user-mode fault
-/*
- * Hit a breakpoint (trap 1 or 2) instruction.
- * Push the code and treat as a normal fault.
- */
-_trace:
- clrl sp@-
- moveml #0xFFFF,sp@-
+Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
+ | Save the system sp rather than the user sp.
+ movw #PSL_HIGHIPL,sr | lock out interrupts
+ lea sp@(FR_SIZE),a6 | Save stack pointer
+ movl a6,sp@(FR_SP) | from before trap
+
+ | If we are not on tmpstk switch to it.
+ | (so debugger can change the stack pointer)
+ movl a6,d1
+ cmpl #_ASM_LABEL(tmpstk),d1
+ jls Lbrkpt2 | already on tmpstk
+ | Copy frame to the temporary stack
+ movl sp,a0 | a0=src
+ lea _ASM_LABEL(tmpstk)-96,a1 | a1=dst
+ movl a1,sp | sp=new frame
+ moveq #FR_SIZE,d1
+Lbrkpt1:
+ movl a0@+,a1@+
+ subql #4,d1
+ bgt Lbrkpt1
+
+Lbrkpt2:
+ | Call the trap handler for the kernel debugger.
+ | Do not call trap() to do it, so that we can
+ | set breakpoints in trap() if we want. We know
+ | the trap type is either T_TRACE or T_BREAKPOINT.
+ | If we have both DDB and KGDB, let KGDB see it first,
+ | because KGDB will just return 0 if not connected.
+ | Save args in d2, a2
+ movl d0,d2 | trap type
+ movl sp,a2 | frame ptr
#ifdef KGDB
- moveq #T_TRACE,d0
- movw sp@(FR_HW),d1 | get SSW
- andw #PSL_S,d1 | from user mode?
- jeq fault | no, regular fault
- movl d0,sp@-
- jbsr _kgdb_trap_glue | returns if no debugger
- addl #4,sp
-#endif
- moveq #T_TRACE,d0
- jra fault
-
+ | Let KGDB handle it (if connected)
+ movl a2,sp@- | push frame ptr
+ movl d2,sp@- | push trap type
+ jbsr _C_LABEL(kgdb_trap) | handle the trap
+ addql #8,sp | pop args
+ cmpl #0,d0 | did kgdb handle it?
+ jne Lbrkpt3 | yes, done
+#endif
+#ifdef DDB
+ | Let DDB handle it
+ movl a2,sp@- | push frame ptr
+ movl d2,sp@- | push trap type
+ jbsr _C_LABEL(kdb_trap) | handle the trap
+ addql #8,sp | pop args
+ cmpl #0,d0 | did ddb handle it?
+ jne Lbrkpt3 | yes, done
+#endif
+ | Drop into the prom
+ BUGCALL(MVMEPROM_EXIT)
+Lbrkpt3:
+ | The stack pointer may have been modified, or
+ | data below it modified (by kgdb push call),
+ | so push the hardware frame at the current sp
+ | before restoring registers and returning.
+
+ movl sp@(FR_SP),a0 | modified sp
+ lea sp@(FR_SIZE),a1 | end of our frame
+ movl a1@-,a0@- | copy 2 longs with
+ movl a1@-,a0@- | ... predecrement
+ movl a0,sp@(FR_SP) | sp = h/w frame
+ moveml sp@+,#0x7FFF | restore all but sp
+ movl sp@,sp | ... and sp
+ rte | all done
+
+/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>
/*
@@ -1111,10 +1120,14 @@ _trace:
* No device interrupts are auto-vectored.
*/
-_spurintr:
- addql #1,_intrcnt+0
- addql #1,_cnt+V_INTR
- jra rei
+ENTRY_NOPROFILE(spurintr)
+ addql #1,_C_LABEL(intrcnt)+0
+#if defined(UVM)
+ addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
+#else
+ addql #1,_C_LABEL(cnt)+V_INTR
+#endif
+ jra _ASM_LABEL(rei) | all done
/*
* Emulation of VAX REI instruction.
@@ -1129,11 +1142,11 @@ _spurintr:
* This code is complicated by the fact that sendsig may have been called
* necessitating a stack cleanup.
*/
- .comm _ssir,1
- .globl _astpending
- .globl rei
-rei:
- tstl _astpending | AST pending?
+
+BSS(ssir,1)
+
+ASENTRY_NOPROFILE(rei)
+ tstl _C_LABEL(astpending) | AST pending?
jeq Lchksir | no, go check for SIR
Lrei1:
btst #5,sp@ | yes, are we returning to user mode?
@@ -1147,7 +1160,7 @@ Lrei2:
clrl sp@- | VA == none
clrl sp@- | code == none
movl #T_ASTFLT,sp@- | type == async system trap
- jbsr _trap | go handle it
+ jbsr _C_LABEL(trap) | go handle it
lea sp@(12),sp | pop value args
movl sp@(FR_SP),a0 | restore user SP
movl a0,usp | from save area
@@ -1168,7 +1181,7 @@ Laststkadj:
movl sp@,sp | and our SP
rte | and do real RTE
Lchksir:
- tstb _ssir | SIR pending?
+ tstb _C_LABEL(ssir) | SIR pending?
jeq Ldorte | no, all done
movl d0,sp@- | need a scratch register
movw sp@(4),d0 | get SR
@@ -1177,7 +1190,7 @@ Lchksir:
movl sp@+,d0 | restore scratch register
Lgotsir:
movw #SPL1,sr | prevent others from servicing int
- tstb _ssir | too late?
+ tstb _C_LABEL(ssir) | too late?
jeq Ldorte | yes, oh well...
clrl sp@- | stack adjust
moveml #0xFFFF,sp@- | save all registers
@@ -1187,7 +1200,7 @@ Lsir1:
clrl sp@- | VA == none
clrl sp@- | code == none
movl #T_SSIR,sp@- | type == software interrupt
- jbsr _trap | go handle it
+ jbsr _C_LABEL(trap) | go handle it
lea sp@(12),sp | pop value args
movl sp@(FR_SP),a0 | restore
movl a0,usp | user SP
@@ -1199,113 +1212,31 @@ Lnosir:
Ldorte:
rte | real return
-/* Use standard m68k support. */
-#include <m68k/m68k/support.s>
-
/*
- * The following primitives manipulate the run queues. _whichqs tells which
- * of the 32 queues _qs have processes in them. Setrunqueue puts processes
- * into queues, Remrq removes them from queues. The running process is on
- * no queue, other processes are on a queue related to p->p_priority, divided
- * by 4 actually to shrink the 0-127 range of priorities into the 32 available
- * queues.
+ * Use common m68k signal trampoline.
*/
-
- .globl _whichqs,_qs,_cnt,_panic
- .globl _curproc,_want_resched
+#include <m68k/m68k/sigcode.s>
/*
- * Setrunqueue(p)
- *
- * Call should be made at spl6(), and p->p_stat should be SRUN
+ * Use common m68k support routines.
*/
-ENTRY(setrunqueue)
- movl sp@(4),a0
-#ifdef DIAGNOSTIC
- tstl a0@(P_BACK)
- jne Lset1
- tstl a0@(P_WCHAN)
- jne Lset1
- cmpb #SRUN,a0@(P_STAT)
- jne Lset1
-#endif
- clrl d0
- movb a0@(P_PRIORITY),d0
- lsrb #2,d0
- movl _whichqs,d1
- bset d0,d1
- movl d1,_whichqs
- lslb #3,d0
- addl #_qs,d0
- movl d0,a0@(P_FORW)
- movl d0,a1
- movl a1@(P_BACK),a0@(P_BACK)
- movl a0,a1@(P_BACK)
- movl a0@(P_BACK),a1
- movl a0,a1@(P_FORW)
- rts
-#ifdef DIAGNOSTIC
-Lset1:
- movl #Lset2,sp@-
- jbsr _panic
-Lset2:
- .asciz "setrunqueue"
- .even
-#endif
+#include <m68k/m68k/support.s>
/*
- * Remrq(p)
- *
- * Call should be made at spl6().
+ * Use common m68k process manipulation routines.
*/
-ENTRY(remrunqueue)
- movl sp@(4),a0
- movb a0@(P_PRIORITY),d0
-#ifdef DIAGNOSTIC
- lsrb #2,d0
- movl _whichqs,d1
- btst d0,d1
- jeq Lrem2
-#endif
- movl a0@(P_BACK),a1
- clrl a0@(P_BACK)
- movl a0@(P_FORW),a0
- movl a0,a1@(P_FORW)
- movl a1,a0@(P_BACK)
- cmpal a0,a1
- jne Lrem1
-#ifndef DIAGNOSTIC
- lsrb #2,d0
- movl _whichqs,d1
-#endif
- bclr d0,d1
- movl d1,_whichqs
-Lrem1:
- rts
-#ifdef DIAGNOSTIC
-Lrem2:
- movl #Lrem3,sp@-
- jbsr _panic
-Lrem3:
- .asciz "remrunqueue"
- .even
-#endif
-
-Lsw0:
- .asciz "switch"
- .even
+#include <m68k/m68k/proc_subr.s>
- .globl _curpcb
- .globl _masterpaddr | XXX compatibility (debuggers)
.data
-_masterpaddr: | XXX compatibility (debuggers)
-_curpcb:
+GLOBAL(curpcb)
+GLOBAL(masterpaddr) | XXX compatibility (debuggers)
.long 0
-mdpflag:
+
+ASLOCAL(mdpflag)
.byte 0 | copy of proc md_flags low byte
.align 2
- .comm nullpcb,SIZEOF_PCB
- .text
+
+ASBSS(nullpcb,SIZEOF_PCB)
/*
* At exit of a process, do a switch for the last time.
@@ -1313,31 +1244,30 @@ mdpflag:
*/
ENTRY(switch_exit)
movl sp@(4),a0
- movl #nullpcb,_curpcb | save state into garbage pcb
- lea tmpstk,sp | goto a tmp stack
+ | save state into garbage pcb
+ movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
+ lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
/* Schedule the vmspace and stack to be freed. */
movl a0,sp@- | exit2(p)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
- jra _cpu_switch
+ jra _C_LABEL(cpu_switch)
/*
* When no processes are on the runq, Swtch branches to Idle
* to wait for something to come ready.
*/
- .globl Idle
-Idle:
+ASENTRY_NOPROFILE(Idle)
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,sr
- movl _whichqs,d0
- jeq Idle
+ movl _C_LABEL(whichqs),d0
+ jeq _ASM_LABEL(Idle)
jra Lsw1
Lbadsw:
- movl #Lsw0,sp@-
- jbsr _panic
+ PANIC("switch")
/*NOTREACHED*/
/*
@@ -1352,20 +1282,20 @@ Lbadsw:
* bit). For now, we just always flush the full ATC.
*/
ENTRY(cpu_switch)
- movl _curpcb,a0 | current pcb
+ movl _C_LABEL(curpcb),a0 | current pcb
movw sr,a0@(PCB_PS) | save sr before changing ipl
-
#ifdef notyet
- movl _curproc,sp@- | remember last proc running
+ movl _C_LABEL(curproc),sp@- | remember last proc running
#endif
- clrl _curproc
+ clrl _C_LABEL(curproc)
+
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,sr | lock out interrupts
- movl _whichqs,d0
- jeq Idle
+ movl _C_LABEL(whichqs),d0
+ jeq _ASM_LABEL(Idle)
Lsw1:
movl d0,d1
negl d0
@@ -1375,7 +1305,7 @@ Lsw1:
movl d1,d0
lslb #3,d1 | convert queue number to index
- addl #_qs,d1 | locate queue (q)
+ addl #_C_LABEL(qs),d1 | locate queue (q)
movl d1,a1
movl a1@(P_FORW),a0 | p = q->p_forw
cmpal d1,a0 | anyone on queue?
@@ -1385,12 +1315,12 @@ Lsw1:
movl d1,a1@(P_BACK) | n->p_back = q
cmpal d1,a1 | anyone left on queue?
jne Lsw2 | yes, skip
- movl _whichqs,d1
+ movl _C_LABEL(whichqs),d1
bclr d0,d1 | no, clear bit
- movl d1,_whichqs
+ movl d1,_C_LABEL(whichqs)
Lsw2:
- movl a0,_curproc
- clrl _want_resched
+ movl a0,_C_LABEL(curproc)
+ clrl _C_LABEL(want_resched)
#ifdef notyet
movl sp@+,a1
cmpl a0,a1 | switching to same proc?
@@ -1399,18 +1329,18 @@ Lsw2:
/*
* Save state of previous process in its pcb.
*/
- movl _curpcb,a1
+ movl _C_LABEL(curpcb),a1
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
#ifdef FPU_EMULATE
- tstl _fputype | do we have any FPU?
+ tstl _C_LABEL(fputype) | do we have any FPU?
jeq Lswnofpsave | no, dont save
#endif
lea a1@(PCB_FPCTX),a2 | pointer to FP save area
fsave a2@ | save FP state
#ifdef M68060
- cmpl #MMU_68060,_mmutype | is 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | is 68060?
jeq Lsavfp60 | yes, goto Lsavfp60
#endif /* M68060 */
tstb a2@ | null state frame?
@@ -1435,9 +1365,10 @@ Lswnofpsave:
jne Lbadsw
#endif
clrl a0@(P_BACK) | clear back link
- movb a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
+ | low byte of p_md.md_flags
+ movb a0@(P_MD_FLAGS+3),_ASM_LABEL(mdpflag)
movl a0@(P_ADDR),a1 | get p_addr
- movl a1,_curpcb
+ movl a1,_C_LABEL(curpcb)
/*
* Activate process's address space.
@@ -1445,16 +1376,18 @@ Lswnofpsave:
* XXX only of it has changed.
*/
pea a0@ | push proc
- jbsr _pmap_activate | pmap_activate(p)
+ jbsr _C_LABEL(pmap_activate) | pmap_activate(p)
addql #4,sp
- movl _curpcb,a1 | restore p_addr
+ movl _C_LABEL(curpcb),a1 | restore p_addr
+
+ lea _ASM_LABEL(tmpstk),sp | now goto a tmp stack for NMI
- lea tmpstk,sp | now goto a tmp stack for NMI
moveml a1@(PCB_REGS),#0xFCFC | and registers
movl a1@(PCB_USP),a0
movl a0,usp | and USP
+
#ifdef FPU_EMULATE
- tstl _fputype | do we _have_ any fpu?
+ tstl _C_LABEL(fputype) | do we _have_ any fpu?
jne Lresnonofpatall
movw a1@(PCB_PS),sr | no, restore PS
moveq #1,d0 | return 1 (for alternate returns)
@@ -1463,7 +1396,7 @@ Lresnonofpatall:
#endif
lea a1@(PCB_FPCTX),a0 | pointer to FP save area
#ifdef M68060
- cmpl #MMU_68060,_mmutype | is 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | is 68060?
jeq Lresfp60rest1 | yes, goto Lresfp60rest1
#endif /* M68060 */
tstb a0@ | null state frame?
@@ -1503,7 +1436,7 @@ ENTRY(savectx)
movl a0,a1@(PCB_USP) | and save it
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
#ifdef FPU_EMULATE
- tstl _fputype
+ tstl _C_LABEL(fputype)
jeq Lsavedone
#endif
lea a1@(PCB_FPCTX),a0 | pointer to FP save area
@@ -1534,7 +1467,7 @@ Lsavedone:
#if defined(M68040) || defined(M68060)
ENTRY(suline)
movl sp@(4),a0 | address to write
- movl _curpcb,a1 | current pcb
+ movl _C_LABEL(curpcb),a1 | current pcb
movl #Lslerr,a1@(PCB_ONFAULT) | where to return to on a fault
movl sp@(8),a1 | address of line
movl a1@+,d0 | get lword
@@ -1554,7 +1487,7 @@ ENTRY(suline)
Lslerr:
moveq #-1,d0
Lsldone:
- movl _curpcb,a1 | current pcb
+ movl _C_LABEL(curpcb),a1 | current pcb
clrl a1@(PCB_ONFAULT) | clear fault address
rts
#endif
@@ -1563,11 +1496,11 @@ Lsldone:
* Invalidate entire TLB.
*/
ENTRY(TBIA)
-__TBIA:
- cmpl #MMU_68040,_mmutype | 68040 or 68060?
+_C_LABEL(_TBIA):
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040 or 68060?
jle Ltbia040 | yes, goto Ltbia040
pflusha | flush entire TLB
- tstl _mmutype
+ tstl _C_LABEL(mmutype)
jpl Lmc68851a | 68851 implies no d-cache
movl #DC_CLEAR,d0
movc d0,cacr | invalidate on-chip d-cache
@@ -1576,7 +1509,7 @@ Lmc68851a:
Ltbia040:
.word 0xf518 | pflusha
#ifdef M68060
- cmpl #MMU_68060,_mmutype | is 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | is 68060?
jne Ltbiano60 | no, skip
movc cacr,d0
orl #IC60_CABC,d0 | and clear all branch cache entries
@@ -1591,13 +1524,13 @@ Ltbiano60:
*/
ENTRY(TBIS)
#ifdef DEBUG
- tstl fulltflush | being conservative?
- jne __TBIA | yes, flush entire TLB
+ tstl _ASM_LABEL(fulltflush) | being conservative?
+ jne _C_LABEL(_TBIA) | yes, flush entire TLB
#endif
movl sp@(4),a0 | get addr to flush
- cmpl #MMU_68040,_mmutype | 68040 or 68060 ?
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040 or 68060 ?
jle Ltbis040 | yes, goto Ltbis040
- tstl _mmutype
+ tstl _C_LABEL(mmutype)
jpl Lmc68851b | is 68851?
pflush #0,#0,a0@ | flush address from both sides
movl #DC_CLEAR,d0
@@ -1614,7 +1547,7 @@ Ltbis040:
movc d0,dfc
.word 0xf508 | pflush a0@
#ifdef M68060
- cmpl #MMU_68060,_mmutype | is 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | is 68060?
jne Ltbisno60 | no, skip
movc cacr,d0
orl #IC60_CABC,d0 | and clear all branch cache entries
@@ -1628,12 +1561,12 @@ Ltbisno60:
*/
ENTRY(TBIAS)
#ifdef DEBUG
- tstl fulltflush | being conservative?
- jne __TBIA | yes, flush everything
+ tstl _ASM_LABEL(fulltflush) | being conservative?
+ jne _C_LABEL(_TBIA) | yes, flush everything
#endif
- cmpl #MMU_68040,_mmutype | 68040 or 68060 ?
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040 or 68060 ?
jle Ltbias040 | yes, goto Ltbias040
- tstl _mmutype
+ tstl _C_LABEL(mmutype)
jpl Lmc68851c | 68851?
pflush #4,#4 | flush supervisor TLB entries
movl #DC_CLEAR,d0
@@ -1646,7 +1579,7 @@ Ltbias040:
| 68040 cannot specify supervisor/user on pflusha, so we flush all
.word 0xf518 | pflusha
#ifdef M68060
- cmpl #MMU_68060,_mmutype
+ cmpl #MMU_68060,_C_LABEL(mmutype)
jne Ltbiasno60
movc cacr,d0
orl #IC60_CABC,d0 | and clear all branch cache entries
@@ -1660,12 +1593,12 @@ Ltbiasno60:
*/
ENTRY(TBIAU)
#ifdef DEBUG
- tstl fulltflush | being conservative?
- jne __TBIA | yes, flush everything
+ tstl _ASM_LABEL(fulltflush) | being conservative?
+ jne _C_LABEL(_TBIA) | yes, flush everything
#endif
- cmpl #MMU_68040,_mmutype
+ cmpl #MMU_68040,_C_LABEL(mmutype)
jle Ltbiau040
- tstl _mmutype
+ tstl _C_LABEL(mmutype)
jpl Lmc68851d | 68851?
pflush #0,#4 | flush user TLB entries
movl #DC_CLEAR,d0
@@ -1678,7 +1611,7 @@ Ltbiau040:
| 68040 cannot specify supervisor/user on pflusha, so we flush all
.word 0xf518 | pflusha
#ifdef M68060
- cmpl #MMU_68060,_mmutype
+ cmpl #MMU_68060,_C_LABEL(mmutype)
jne Ltbiauno60
movc cacr,d0
orl #IC60_CUBC,d0 | but only user branch cache entries
@@ -1693,7 +1626,7 @@ Ltbiauno60:
ENTRY(ICIA)
#if defined(M68040) || defined(M68060)
ENTRY(ICPA)
- cmpl #MMU_68040,_mmutype | 68040
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jgt Lmotommu7 | no, skip
.word 0xf498 | cinva ic
rts
@@ -1711,9 +1644,9 @@ Lmotommu7:
* and TBI*.
*/
ENTRY(DCIA)
-__DCIA:
+_C_LABEL(_DCIA):
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jgt Lmotommu8 | no, skip
.word 0xf478 | cpusha dc
rts
@@ -1722,9 +1655,9 @@ Lmotommu8:
rts
ENTRY(DCIS)
-__DCIS:
+_C_LABEL(_DCIS):
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jgt Lmotommu9 | no, skip
.word 0xf478 | cpusha dc
rts
@@ -1734,10 +1667,10 @@ Lmotommu9:
| Invalid single cache line
ENTRY(DCIAS)
-__DCIAS:
- cmpl #MMU_68040,_mmutype | 68040
+_C_LABEL(_DCIAS):
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jeq Ldciasx
- cmpl #MMU_68060,_mmutype | 68060
+ cmpl #MMU_68060,_C_LABEL(mmutype) | 68060
jeq Ldciasx
movl sp@(4),a0
.word 0xf468 | cpushl dc,a0@
@@ -1745,9 +1678,9 @@ Ldciasx:
rts
ENTRY(DCIU)
-__DCIU:
+_C_LABEL(_DCIU):
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jgt LmotommuA | no, skip
.word 0xf478 | cpusha dc
rts
@@ -1788,7 +1721,7 @@ ENTRY(DCFP)
ENTRY(PCIA)
#if defined(M68040) || defined(M68060)
ENTRY(DCFA)
- cmpl #MMU_68040,_mmutype | 68040
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040
jgt LmotommuB | no, skip
.word 0xf478 | cpusha dc
rts
@@ -1804,41 +1737,29 @@ ENTRY(ecacheon)
ENTRY(ecacheoff)
rts
-/*
- * Get callers current SP value.
- * Note that simply taking the address of a local variable in a C function
- * doesn't work because callee saved registers may be outside the stack frame
- * defined by A6 (e.g. GCC generated code).
- */
- .globl _getsp
-_getsp:
- movl sp,d0 | get current SP
- addql #4,d0 | compensate for return address
- rts
-
- .globl _getsfc, _getdfc
-_getsfc:
+ENTRY(getsfc)
movc sfc,d0
rts
-_getdfc:
+
+ENTRY(getdfc)
movc dfc,d0
rts
/*
* Load a new user segment table pointer.
*/
-ENTRY(loadustp) /* XXX - smuprh */
+ENTRY(loadustp) /* XXX - smurph */
movl sp@(4),d0 | new USTP
moveq #PGSHIFT,d1
lsll d1,d0 | convert to addr
#ifdef M68060
- cmpl #MMU_68060,_mmutype | 68040 or 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | 68040 or 68060?
jeq Lldustp060 | yes, goto Lldustp060
#endif
- cmpl #MMU_68040,_mmutype
+ cmpl #MMU_68040,_C_LABEL(mmutype)
jeq Lldustp040
pflusha | flush entire TLB
- lea _protorp,a0 | CRP prototype
+ lea _C_LABEL(protorp),a0 | CRP prototype
movl d0,a0@(4) | stash USTP
pmove a0@,crp | load root pointer
movl #CACHE_CLR,d0
@@ -1871,7 +1792,7 @@ ENTRY(spl0)
moveq #0,d0
movw sr,d0 | get old SR for return
movw #PSL_LOWIPL,sr | restore new SR
- tstb _ssir | software interrupt pending?
+ tstb _C_LABEL(ssir) | software interrupt pending?
jeq Lspldone | no, all done
subql #4,sp | make room for RTE frame
movl sp@(4),sp@(2) | position return address
@@ -1890,7 +1811,7 @@ ENTRY(m68881_save)
movl sp@(4),a0 | save area pointer
fsave a0@ | save state
#ifdef M68060
- cmpl #MMU_68060,_mmutype | 68040 or 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | 68040 or 68060?
jeq Lm68060fpsave | yes, goto Lm68060fpsave
#endif
tstb a0@ | null state frame?
@@ -1915,7 +1836,7 @@ Lm68060sdone:
ENTRY(m68881_restore)
movl sp@(4),a0 | save area pointer
#ifdef M68060
- cmpl #MMU_68060,_mmutype | 68040 or 68060?
+ cmpl #MMU_68060,_C_LABEL(mmutype) | 68040 or 68060?
jeq Lm68060fprestore | yes, goto Lm68060fprestore
#endif
tstb a0@ | null state frame?
@@ -1945,11 +1866,10 @@ Lm68060fprdone:
* XXX add support for rebooting -- that means looking at boothowto and doing
* the right thing
*/
- .globl _doboot
-_doboot:
- lea tmpstk,sp | physical SP in case of NMI
+ENTRY_NOPROFILE(doboot)
+ lea _ASM_LABEL(tmpstk),sp | physical SP in case of NMI
#if defined(M68040) || defined(M68060)
- cmpl #MMU_68040,_mmutype | 68040?
+ cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
jgt Lbootnot040 | no, skip
movl #0,d0
movc d0,cacr | caches off
@@ -1968,7 +1888,7 @@ Lbootnot040:
/*
* We're going down. Make various sick attempts to reset the board.
*/
- RELOC(_cputyp, a0)
+ RELOC(cputyp, a0)
movl a0@,d0
cmpw #CPU_147,d0
bne not147
@@ -2006,62 +1926,55 @@ not147:
jsr a0@
| still alive! just return to the prom..
-3: trap #15
- .short MVMEPROM_EXIT | return to m68kbug
+3: BUGCALL(MVMEPROM_EXIT) | return to m68kbug
/*NOTREACHED*/
#ifdef M68060
- .globl _intemu60, _fpiemu60, _fpdemu60, _fpeaemu60
-_intemu60:
+GLOBAL(intemu60)
addql #1,L60iem
jra _I_CALL_TOP+128+0x00
-_fpiemu60:
+GLOBAL(fpiemu60)
addql #1,L60fpiem
jra _FP_CALL_TOP+128+0x30
-_fpdemu60:
+GLOBAL(fpdemu60)
addql #1,L60fpdem
jra _FP_CALL_TOP+128+0x38
-_fpeaemu60:
+GLOBAL(fpeaemu60)
addql #1,L60fpeaem
jra _FP_CALL_TOP+128+0x40
#endif
.data
- .globl _mmutype,_protorp,_cputype,_fputype
-_mmutype:
+GLOBAL(mmutype)
.long MMU_68030 | default to MMU_68030
-_cputype:
+GLOBAL(cputype)
.long CPU_68030 | default to CPU_68030
-_fputype:
+GLOBAL(fputype)
.long FPU_68881 | default to 68881 FPU
-_protorp:
+GLOBAL(protorp)
.long 0,0 | prototype root pointer
- .globl _cold
-_cold:
+GLOBAL(cold)
.long 1 | cold start flag
- .globl _want_resched
-_want_resched:
+GLOBAL(want_resched)
.long 0
- .globl _intiobase, _intiolimit, _extiobase
- .globl _proc0paddr
-_proc0paddr:
+GLOBAL(proc0paddr)
.long 0 | KVA of proc0 u-area
-_intiobase:
+GLOBAL(intiobase)
.long 0 | KVA of base of internal IO space
-_intiolimit:
+GLOBAL(intiolimit)
.long 0 | KVA of end of internal IO space
-_extiobase:
+GLOBAL(extiobase)
.long 0 | KVA of base of external IO space
+
#ifdef DEBUG
- .globl fulltflush, fullcflush
-fulltflush:
+ASGLOBAL(fulltflush)
.long 0
-fullcflush:
+ASGLOBAL(fullcflush)
.long 0
#endif
+
/* interrupt counters */
- .globl _intrcnt,_eintrcnt,_intrnames,_eintrnames
-_intrnames:
+GLOBAL(intrnames)
.asciz "spur"
.asciz "lev1"
.asciz "lev2"
@@ -2081,9 +1994,10 @@ _intrnames:
#ifdef FPU_EMULATE
.asciz "fpe"
#endif
-_eintrnames:
+GLOBAL(eintrnames)
.even
-_intrcnt:
+
+GLOBAL(intrcnt)
.long 0,0,0,0,0,0,0,0,0,0
#ifdef M68060
L60iem: .long 0
@@ -2095,6 +2009,6 @@ L60bpe: .long 0
#ifdef FPU_EMULATE
Lfpecnt: .long 0
#endif
-_eintrcnt:
+GLOBAL(eintrcnt)
#include <mvme68k/mvme68k/vectors.s>
diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c
index 76d2948219d..11e51304f39 100644
--- a/sys/arch/mvme68k/mvme68k/machdep.c
+++ b/sys/arch/mvme68k/mvme68k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.39 2001/05/17 18:41:49 provos Exp $ */
+/* $OpenBSD: machdep.c,v 1.40 2001/06/26 21:35:41 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -117,16 +117,29 @@
#define MAXMEM 64*1024 /* XXX - from cmap.h */
#include <vm/vm_kern.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
/* the following is used externally (sysctl_hw) */
char machine[] = "mvme68k"; /* cpu "architecture" */
+#if defined(UVM)
+vm_map_t exec_map = NULL;
+vm_map_t mb_map = NULL;
+vm_map_t phys_map = NULL;
+#else
vm_map_t buffer_map;
+#endif
+
extern vm_offset_t avail_end;
/*
* Declare these as initialized data so we can patch them.
*/
+#if !defined(UVM)
int nswbuf = 0;
+#endif
#ifdef NBUF
int nbuf = NBUF;
#else
@@ -187,14 +200,17 @@ mvme68k_init()
/*
* Tell the VM system about available physical memory. The
- * hp300 only has one segment.
+ * mvme68k only has one segment.
*/
+
#if defined(UVM)
+ uvmexp.pagesize = NBPG;
+ uvm_setpagesize();
uvm_page_physload(atop(avail_start), atop(avail_end),
- atop(avail_start), atop(avail_end));
+ atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
#else
vm_page_physload(atop(avail_start), atop(avail_end),
- atop(avail_start), atop(avail_end));
+ atop(avail_start), atop(avail_end));
#endif /* UVM */
#endif /* MACHINE_NEW_NONCONTIG */
@@ -240,7 +256,12 @@ cpu_startup()
register unsigned i;
register caddr_t v, firstaddr;
int base, residual;
+
+#if defined(UVM)
+ vaddr_t minaddr, maxaddr;
+#else
vm_offset_t minaddr, maxaddr;
+#endif
vm_size_t size;
#ifdef BUFFERS_UNMANAGED
vm_offset_t bufmemp;
@@ -287,7 +308,7 @@ cpu_startup()
* addresses to the various data structures.
*/
firstaddr = 0;
- again:
+again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
@@ -326,23 +347,33 @@ cpu_startup()
if (nbuf < 16)
nbuf = 16;
}
+#if !defined(UVM)
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
valloc(swbuf, struct buf, nswbuf);
+#endif
valloc(buf, struct buf, nbuf);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
+#if defined(UVM)
+ firstaddr = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size));
+#else
firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
+#endif
if (firstaddr == 0)
panic("startup: no room for tables");
#ifdef BUFFERS_UNMANAGED
+#if defined(UVM)
+ buffermem = (caddr_t) uvm_km_zalloc(kernel_map, bufpages*PAGE_SIZE);
+#else
buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*PAGE_SIZE);
+#endif
if (buffermem == 0)
panic("startup: no room for buffers");
#endif
@@ -358,15 +389,56 @@ cpu_startup()
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
+
+#if defined(UVM)
+ if (uvm_map(kernel_map, (vaddr_t *) &buffers, m68k_round_page(size),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
+ panic("cpu_startup: cannot allocate VM for buffers");
+ minaddr = (vaddr_t)buffers;
+#else
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
- &maxaddr, size, TRUE);
+ &maxaddr, size, TRUE);
minaddr = (vm_offset_t)buffers;
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
- &minaddr, size, FALSE) != KERN_SUCCESS)
+ (vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
+#endif
+
+ if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
+ /* don't want to alloc more physical mem than needed */
+ bufpages = btoc(MAXBSIZE) * nbuf;
+ }
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
+#if defined(UVM)
+ vsize_t curbufsize;
+ vaddr_t curbuf;
+ struct vm_page *pg;
+
+ /*
+ * Each buffer has MAXBSIZE bytes of VM space allocated. Of
+ * that MAXBSIZE space, we allocate and map (base+1) pages
+ * for the first "residual" buffers, and then we allocate
+ * "base" pages for the rest.
+ */
+ curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
+ curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
+
+ while (curbufsize) {
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (pg == NULL)
+ panic("cpu_startup: not enough memory for "
+ "buffer cache");
+ pmap_enter(kernel_map->pmap, curbuf,
+ VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE,
+ VM_PROT_READ|VM_PROT_WRITE);
+ curbuf += PAGE_SIZE;
+ curbufsize -= PAGE_SIZE;
+ }
+#else
vm_size_t curbufsize;
vm_offset_t curbuf;
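
The (base, residual) split computed above distributes bufpages physical pages over nbuf buffers: the first residual buffers are mapped with base+1 pages, the remainder with base pages, so every page is used while each buffer still owns MAXBSIZE bytes of virtual space. With made-up numbers:

	int bufpages = 200, nbuf = 32;
	int base = bufpages / nbuf;		/* 6 */
	int residual = bufpages % nbuf;		/* 8 */
	/*
	 * 8 buffers map 7 pages and 24 buffers map 6 pages:
	 * 8 * 7 + 24 * 6 == 200, exactly bufpages.
	 */
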
@@ -379,23 +451,40 @@ cpu_startup()
*/
curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
curbufsize = PAGE_SIZE * (i < residual ? base+1 : base);
+ /* this faults in the required physical pages */
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
+#endif
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
+#if defined(UVM)
+ exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
+#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- 16*NCARGS, TRUE);
+ 16*NCARGS, TRUE);
+#endif
/*
* Allocate a submap for physio
*/
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- VM_PHYS_SIZE, TRUE);
+#if defined(UVM)
+ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, 0, FALSE, NULL);
+#else
+ phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, TRUE);
+#endif
+#if defined(UVM)
+ mb_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
+#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
- VM_MBUF_SIZE, FALSE);
+ VM_MBUF_SIZE, FALSE);
+#endif
/*
* Initialize timeouts
*/
@@ -404,7 +493,12 @@ cpu_startup()
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
- printf("avail mem = %d\n", ptoa(cnt.v_free_count));
+#if defined(UVM)
+ printf("avail mem = %ld (%ld pages)\n", ptoa(uvmexp.free), uvmexp.free);
+#else
+ printf("avail mem = %ld (%ld pages)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count)/NBPG);
+#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * PAGE_SIZE);
#ifdef MFS
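
Both branches of the new "avail mem" message print the same two quantities. Assuming the usual ptoa(x) == x << PGSHIFT definition:

	/*
	 * UVM:     bytes = ptoa(uvmexp.free),       pages = uvmexp.free
	 * non-UVM: bytes = ptoa(cnt.v_free_count),  pages = ptoa(cnt.v_free_count) / NBPG
	 *                                                 == cnt.v_free_count
	 */
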
@@ -529,58 +623,58 @@ identifycpu()
}
switch (cputyp) {
#ifdef MVME147
- case CPU_147:
- bcopy(&brdid.suffix, suffix, sizeof brdid.suffix);
- sprintf(suffix, "MVME%x", brdid.model, suffix);
- cpuspeed = pccspeed((struct pccreg *)IIOV(0xfffe1000));
- sprintf(speed, "%02d", cpuspeed);
- break;
+ case CPU_147:
+ bcopy(&brdid.suffix, suffix, sizeof brdid.suffix);
+ sprintf(suffix, "MVME%x", brdid.model, suffix);
+ cpuspeed = pccspeed((struct pccreg *)IIOV(0xfffe1000));
+ sprintf(speed, "%02d", cpuspeed);
+ break;
#endif
#if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
- case CPU_162:
- case CPU_167:
- case CPU_172:
- case CPU_177:
- bzero(speed, sizeof speed);
- speed[0] = brdid.speed[0];
- speed[1] = brdid.speed[1];
- if (brdid.speed[2] != '0' &&
- brdid.speed[3] != '0') {
- speed[2] = '.';
- speed[3] = brdid.speed[2];
- speed[4] = brdid.speed[3];
- }
- cpuspeed = (speed[0] - '0') * 10 + (speed[1] - '0');
- bcopy(brdid.longname, suffix, sizeof(brdid.longname));
- for (len = strlen(suffix)-1; len; len--) {
- if (suffix[len] == ' ')
- suffix[len] = '\0';
- else
- break;
- }
- break;
+ case CPU_162:
+ case CPU_167:
+ case CPU_172:
+ case CPU_177:
+ bzero(speed, sizeof speed);
+ speed[0] = brdid.speed[0];
+ speed[1] = brdid.speed[1];
+ if (brdid.speed[2] != '0' &&
+ brdid.speed[3] != '0') {
+ speed[2] = '.';
+ speed[3] = brdid.speed[2];
+ speed[4] = brdid.speed[3];
+ }
+ cpuspeed = (speed[0] - '0') * 10 + (speed[1] - '0');
+ bcopy(brdid.longname, suffix, sizeof(brdid.longname));
+ for (len = strlen(suffix)-1; len; len--) {
+ if (suffix[len] == ' ')
+ suffix[len] = '\0';
+ else
+ break;
+ }
+ break;
#endif
}
sprintf(cpu_model, "Motorola %s: %sMHz MC680%s CPU",
suffix, speed, mc);
switch (mmutype) {
- case MMU_68060:
- case MMU_68040:
+ case MMU_68060:
+ case MMU_68040:
#ifdef FPSP
- bcopy(&fpsp_tab, &fpvect_tab,
- (&fpvect_end - &fpvect_tab) * sizeof (fpvect_tab));
+ bcopy(&fpsp_tab, &fpvect_tab,
+ (&fpvect_end - &fpvect_tab) * sizeof (fpvect_tab));
#endif
- strcat(cpu_model, "+MMU");
- break;
- case MMU_68030:
- strcat(cpu_model, "+MMU");
- break;
- case MMU_68851:
- strcat(cpu_model, ", MC68851 MMU");
- break;
- default:
- printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
- panic("startup");
+ strcat(cpu_model, "+MMU");
+ break;
+ case MMU_68030:
+ strcat(cpu_model, "+MMU");
+ break;
+ case MMU_68851:
+ strcat(cpu_model, ", MC68851 MMU");
+ break;
+ default:
+ printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
+ panic("startup");
}
len = strlen(cpu_model);
if (mmutype == MMU_68060)
@@ -594,15 +688,15 @@ identifycpu()
int fpu = fpu_gettype();
switch (fpu) {
- case 0:
- break;
- case 1:
- case 2:
- len += sprintf(cpu_model + len, ", MC6888%d FPU", fpu);
- break;
- case 3:
- len += sprintf(cpu_model + len, ", unknown FPU", speed);
- break;
+ case 0:
+ break;
+ case 1:
+ case 2:
+ len += sprintf(cpu_model + len, ", MC6888%d FPU", fpu);
+ break;
+ case 3:
+ len += sprintf(cpu_model + len, ", unknown FPU");
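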
+ break;
}
}
#endif
@@ -613,13 +707,13 @@ identifycpu()
* machine dependent system variables.
*/
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
-int *name;
-u_int namelen;
-void *oldp;
-size_t *oldlenp;
-void *newp;
-size_t newlen;
-struct proc *p;
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
{
dev_t consdev;
@@ -628,15 +722,15 @@ struct proc *p;
return (ENOTDIR); /* overloaded */
switch (name[0]) {
- case CPU_CONSDEV:
- if (cn_tab != NULL)
- consdev = cn_tab->cn_dev;
- else
- consdev = NODEV;
- return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
- sizeof consdev));
- default:
- return (EOPNOTSUPP);
+ case CPU_CONSDEV:
+ if (cn_tab != NULL)
+ consdev = cn_tab->cn_dev;
+ else
+ consdev = NODEV;
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
+ sizeof consdev));
+ default:
+ return (EOPNOTSUPP);
}
/* NOTREACHED */
}
@@ -648,8 +742,8 @@ static struct haltvec *halts;
/* XXX insert by priority */
void
halt_establish(fn, pri)
-void (*fn) __P((void));
-int pri;
+ void (*fn) __P((void));
+ int pri;
{
struct haltvec *hv, *h;
@@ -693,7 +787,7 @@ int pri;
void
boot(howto)
-register int howto;
+ register int howto;
{
/* take a snap shot before clobbering any registers */
@@ -719,20 +813,27 @@ register int howto;
printf("WARNING: not updating battery clock\n");
}
}
- splhigh(); /* extreme priority */
+
+ /* Disable interrupts. */
+ splhigh();
+
+ /* If rebooting and a dump is requested, do it. */
+ if (howto & RB_DUMP)
+ dumpsys();
+
+ /* Run any shutdown hooks. */
+ doshutdownhooks();
+
if (howto&RB_HALT) {
printf("halted\n\n");
} else {
struct haltvec *hv;
- if (howto & RB_DUMP)
- dumpsys();
for (hv = halts; hv; hv = hv->hv_next)
(*hv->hv_fn)();
doboot();
}
- while (1)
- ;
+ for (;;);
/*NOTREACHED*/
}
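
With this reordering, a requested dump and the newly added doshutdownhooks() call run on every path before the halt-or-reboot decision, instead of the dump being taken only on the reboot path. Summarizing the code above for the standard RB_* flag combinations:

	/*
	 * howto			dumpsys	hooks	then...
	 * 0 (autoboot)			no	yes	halt vectors, doboot()
	 * RB_DUMP			yes	yes	halt vectors, doboot()
	 * RB_HALT			no	yes	print "halted", spin in for (;;)
	 * RB_HALT|RB_DUMP		yes	yes	print "halted", spin in for (;;)
	 */
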
@@ -908,8 +1009,8 @@ initvectors()
}
straytrap(pc, evec)
-int pc;
-u_short evec;
+ int pc;
+ u_short evec;
{
printf("unexpected trap (vector %d) from %x\n",
(evec & 0xFFF) >> 2, pc);
@@ -919,8 +1020,8 @@ int *nofault;
int
badpaddr(addr, size)
-register void *addr;
-int size;
+ register void *addr;
+ int size;
{
int off = (int)addr & PGOFSET;
caddr_t v, p = (void *)((int)addr & ~PGOFSET);
@@ -937,8 +1038,8 @@ int size;
int
badvaddr(addr, size)
-register caddr_t addr;
-int size;
+ register caddr_t addr;
+ int size;
{
register int i;
label_t faultbuf;
@@ -966,55 +1067,18 @@ int size;
return (0);
}
+void
netintr()
{
-#ifdef INET
- if (netisr & (1 << NETISR_ARP)) {
- netisr &= ~(1 << NETISR_ARP);
- arpintr();
- }
- if (netisr & (1 << NETISR_IP)) {
- netisr &= ~(1 << NETISR_IP);
- ipintr();
- }
-#endif
-#ifdef INET6
- if (netisr & (1 << NETISR_IPV6)) {
- netisr &= ~(1 << NETISR_IPV6);
- ip6intr();
- }
-#endif
-#ifdef NETATALK
- if (netisr & (1 << NETISR_ATALK)) {
- netisr &= ~(1 << NETISR_ATALK);
- atintr();
- }
-#endif
-#ifdef NS
- if (netisr & (1 << NETISR_NS)) {
- netisr &= ~(1 << NETISR_NS);
- nsintr();
- }
-#endif
-#ifdef ISO
- if (netisr & (1 << NETISR_ISO)) {
- netisr &= ~(1 << NETISR_ISO);
- clnlintr();
- }
-#endif
-#ifdef CCITT
- if (netisr & (1 << NETISR_CCITT)) {
- netisr &= ~(1 << NETISR_CCITT);
- ccittintr();
- }
-#endif
-#include "ppp.h"
-#if NPPP > 0
- if (netisr & (1 << NETISR_PPP)) {
- netisr &= ~(1 << NETISR_PPP);
- pppintr();
- }
-#endif
+#define DONETISR(bit, fn) \
+ do { \
+ if (netisr & (1 << (bit))) { \
+ netisr &= ~(1 << (bit)); \
+ (fn)(); \
+ } \
+ } while (0)
+#include <net/netisr_dispatch.h>
+#undef DONETISR
}
/*
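
netintr() now defines DONETISR(bit, fn) locally and pulls one entry per configured protocol from <net/netisr_dispatch.h>, rather than open-coding every check as the deleted lines did. A single dispatch entry such as DONETISR(NETISR_IP, ipintr) expands, under the macro above, back into exactly the pattern that was removed:

	do {
		if (netisr & (1 << (NETISR_IP))) {
			netisr &= ~(1 << (NETISR_IP));
			(ipintr)();
		}
	} while (0);
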
@@ -1149,9 +1213,10 @@ struct frame fr;
* Determine of the given exec package refers to something which we
* understand and, if so, set up the vmcmds for it.
*/
+int
cpu_exec_aout_makecmds(p, epp)
-struct proc *p;
-struct exec_package *epp;
+ struct proc *p;
+ struct exec_package *epp;
{
int error = ENOEXEC;
struct exec *execp = epp->ep_hdr;
diff --git a/sys/arch/mvme68k/mvme68k/mem.c b/sys/arch/mvme68k/mvme68k/mem.c
index f902cddfe83..9c2453f8c85 100644
--- a/sys/arch/mvme68k/mvme68k/mem.c
+++ b/sys/arch/mvme68k/mvme68k/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.12 2001/05/05 20:56:45 art Exp $ */
+/* $OpenBSD: mem.c,v 1.13 2001/06/26 21:35:42 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -83,6 +83,9 @@
#include <machine/cpu.h>
#include <vm/vm.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
extern u_int lowram;
static caddr_t devzeropage;
@@ -161,11 +164,11 @@ mmrw(dev, uio, flags)
}
#endif
- pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
- trunc_page(v),
- uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
- TRUE,
- uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
+ trunc_page(v),
+ uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
+ TRUE,
+ uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
@@ -178,9 +181,15 @@ mmrw(dev, uio, flags)
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
+#if defined(UVM)
+ if (!uvm_kernacc((caddr_t)v, c,
+ uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
+ return (EFAULT);
+#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
+#endif
if (v < NBPG)
return (EFAULT);
error = uiomove((caddr_t)v, c, uio);
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
index c326bfc03c5..9b804d45326 100644
--- a/sys/arch/mvme68k/mvme68k/pmap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.21 2001/06/10 14:54:45 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.22 2001/06/26 21:35:42 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -75,6 +75,7 @@
* XXX 68020 with 68551 MMU
* 68030 with on-chip MMU
* 68040 with on-chip MMU
+ * 68060 with on-chip MMU
*
* Notes:
* Don't even pay lip service to multiprocessor support.
@@ -134,59 +135,14 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#include <machine/cpu.h>
-
-#ifdef PMAPSTATS
-struct {
- int collectscans;
- int collectpages;
- int kpttotal;
- int kptinuse;
- int kptmaxuse;
-} kpt_stats;
-struct {
- int kernel; /* entering kernel mapping */
- int user; /* entering user mapping */
- int ptpneeded; /* needed to allocate a PT page */
- int nochange; /* no change at all */
- int pwchange; /* no mapping change, just wiring or protection */
- int wchange; /* no mapping change, just wiring */
- int pchange; /* no mapping change, just protection */
- int mchange; /* was mapped but mapping to different page */
- int managed; /* a managed page */
- int firstpv; /* first mapping for this PA */
- int secondpv; /* second mapping for this PA */
- int ci; /* cache inhibited */
- int unmanaged; /* not a managed page */
- int flushes; /* cache flushes */
-} enter_stats;
-struct {
- int calls;
- int removes;
- int pvfirst;
- int pvsearch;
- int ptinvalid;
- int uflushes;
- int sflushes;
-} remove_stats;
-struct {
- int calls;
- int changed;
- int alreadyro;
- int alreadyrw;
-} protect_stats;
-struct chgstats {
- int setcalls;
- int sethits;
- int setmiss;
- int clrcalls;
- int clrhits;
- int clrmiss;
-} changebit_stats[16];
+#if defined(UVM)
+#include <uvm/uvm.h>
+#else
#endif
+#include <machine/cpu.h>
+
#ifdef DEBUG
-int debugmap = 0;
#define PDB_FOLLOW 0x0001
#define PDB_INIT 0x0002
#define PDB_ENTER 0x0004
@@ -203,14 +159,14 @@ int debugmap = 0;
#define PDB_WIRING 0x4000
#define PDB_PVDUMP 0x8000
#define PDB_ALL 0xFFFF
-int pmapdebug = PDB_ALL;
+
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
#if defined(M68040) || defined(M68060)
int dowriteback = 1; /* 68040: enable writeback caching */
int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
#endif
-
-extern vm_offset_t pager_sva, pager_eva;
#endif
/*
@@ -286,6 +242,9 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
struct pmap kernel_pmap_store;
vm_map_t st_map, pt_map;
+#if defined(UVM)
+struct vm_map st_map_store, pt_map_store;
+#endif
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@@ -311,6 +270,8 @@ void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, boolean_t));
void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+void pmap_ptpage_addref __P((vaddr_t));
+int pmap_ptpage_delref __P((vaddr_t));
void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t));
#ifdef DEBUG
@@ -321,6 +282,7 @@ void pmap_check_wiring __P((char *, vm_offset_t));
/* pmap_remove_mapping flags */
#define PRM_TFLUSH 1
#define PRM_CFLUSH 2
+#define PRM_KEEPPTPAGE 4
#if !defined(MACHINE_NEW_NONCONTIG)
vm_offset_t vm_first_phys; /* PA of first managed page */
@@ -444,6 +406,30 @@ pmap_init(phys_start, phys_end)
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in pmap_bootstrap().
*/
+#if defined(UVM)
+ addr = (vaddr_t) intiobase;
+ if (uvm_map(kernel_map, &addr,
+ m68k_ptob(iiomapsize+EIOMAPSIZE),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS)
+ goto bogons;
+ addr = (vaddr_t) Sysmap;
+ if (uvm_map(kernel_map, &addr, M68K_MAX_PTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+ bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+#else
addr = (vm_offset_t) intiobase;
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
&addr, m68k_ptob(iiomapsize+EIOMAPSIZE), FALSE);
@@ -461,6 +447,7 @@ pmap_init(phys_start, phys_end)
if (addr != (vm_offset_t)Sysmap)
bogons:
panic("pmap_init: bogons in the VM system!");
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_INIT) {
@@ -485,7 +472,13 @@ bogons:
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
+#if defined(UVM)
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: can't allocate data structures");
+#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
Segtabzero = (st_entry_t *) addr;
pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
@@ -543,16 +536,33 @@ bogons:
* we already have kernel PT pages.
*/
addr = 0;
+#if defined(UVM)
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv != KERN_SUCCESS || (addr + s) >= (vaddr_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ rv = uvm_unmap(kernel_map, addr, addr + s);
+ if (rv != KERN_SUCCESS)
+ panic("pmap_init: uvm_unmap failed");
+#else
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
panic("pmap_init: kernel PT too small");
vm_map_remove(kernel_map, addr, addr + s);
+#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
+#if defined(UVM)
+ addr = uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -571,9 +581,6 @@ bogons:
}
#endif
} while (addr != addr2);
-#ifdef PMAPSTATS
- kpt_stats.kpttotal = atop(s);
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
printf("pmap_init: KPT: %d pages from %x to %x\n",
@@ -584,7 +591,12 @@ bogons:
* Allocate the segment table map
*/
s = maxproc * M68K_STSIZE;
+#if defined(UVM)
+ st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
+ &st_map_store);
+#else
st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
+#endif
/*
* Slightly modified version of kmem_suballoc() to get page table
@@ -602,6 +614,10 @@ bogons:
maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
} else
s = (maxproc * M68K_MAX_PTSIZE);
+#if defined(UVM)
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ TRUE, &pt_map_store);
+#else
addr2 = addr + s;
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS)
@@ -617,6 +633,7 @@ bogons:
if (pmapdebug & PDB_INIT)
printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
#endif
+#endif
#if defined(M68040) || defined(M68060)
if (mmutype <= MMU_68040) {
@@ -644,9 +661,15 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
+#if defined(UVM)
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
+#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -689,7 +712,11 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
break;
}
}
@@ -747,7 +774,11 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
}
}
@@ -898,11 +929,21 @@ pmap_release(pmap)
#endif
if (pmap->pm_ptab)
+#if defined(UVM)
+ uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
+ M68K_MAX_PTSIZE);
+#else
kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
M68K_MAX_PTSIZE);
+#endif
if (pmap->pm_stab != Segtabzero)
+#if defined(UVM)
+ uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+ M68K_STSIZE);
+#else
kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
M68K_STSIZE);
+#endif
}
/*
@@ -973,9 +1014,6 @@ pmap_remove(pmap, sva, eva)
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- remove_stats.calls++;
-#endif
firstpage = TRUE;
needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
@@ -1094,9 +1132,6 @@ pmap_protect(pmap, sva, eva, prot)
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- protect_stats.calls++;
-#endif
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1141,19 +1176,8 @@ pmap_protect(pmap, sva, eva, prot)
pmap_pte_set_prot(pte, isro);
if (needtflush)
TBIS(sva);
-#ifdef PMAPSTATS
- protect_stats.changed++;
-#endif
firstpage = FALSE;
}
-#ifdef PMAPSTATS
- else if (pmap_pte_v(pte)) {
- if (isro)
- protect_stats.alreadyro++;
- else
- protect_stats.alreadyrw++;
- }
-#endif
pte++;
sva += NBPG;
}
@@ -1196,18 +1220,17 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
if (pmap == NULL)
return;
-#ifdef PMAPSTATS
- if (pmap == pmap_kernel())
- enter_stats.kernel++;
- else
- enter_stats.user++;
-#endif
/*
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
+#ifdef UVM
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, M68K_MAX_PTSIZE);
+#else
pmap->pm_ptab = (pt_entry_t *)
kmem_alloc_wait(pt_map, M68K_MAX_PTSIZE);
+#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1227,9 +1250,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* Mapping has not changed, must be protection or wiring change.
*/
if (opa == pa) {
-#ifdef PMAPSTATS
- enter_stats.pwchange++;
-#endif
/*
* Wiring change, just update stats.
* We don't worry about wiring PT pages as they remain
@@ -1245,17 +1265,7 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
pmap->pm_stats.wired_count++;
else
pmap->pm_stats.wired_count--;
-#ifdef PMAPSTATS
- if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
- enter_stats.wchange++;
-#endif
}
-#ifdef PMAPSTATS
- else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
- enter_stats.pchange++;
- else
- enter_stats.nochange++;
-#endif
/*
* Retain cache inhibition status
*/
@@ -1274,10 +1284,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
if (pmapdebug & PDB_ENTER)
printf("enter: removing old mapping %x\n", va);
#endif
- pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
-#ifdef PMAPSTATS
- enter_stats.mchange++;
-#endif
+ pmap_remove_mapping(pmap, va, pte,
+ PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
}
/*
@@ -1285,9 +1293,14 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* on this PT page. PT pages are wired down as long as there
* is a valid mapping in the page.
*/
- if (pmap != pmap_kernel())
+ if (pmap != pmap_kernel()) {
+#ifdef UVM
+ pmap_ptpage_addref(trunc_page((vaddr_t)pte));
+#else
(void) vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
round_page((vaddr_t)(pte+1)), FALSE);
+#endif
+ }
/*
* Enter on the PV list if part of our managed memory
@@ -1298,9 +1311,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
register struct pv_entry *pv, *npv;
int s;
-#ifdef PMAPSTATS
- enter_stats.managed++;
-#endif
pv = pa_to_pvh(pa);
s = splimp();
#ifdef DEBUG
@@ -1312,9 +1322,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* No entries yet, use header as the first entry
*/
if (pv->pv_pmap == NULL) {
-#ifdef PMAPSTATS
- enter_stats.firstpv++;
-#endif
pv->pv_va = va;
pv->pv_pmap = pmap;
pv->pv_next = NULL;
@@ -1340,10 +1347,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
npv->pv_ptpmap = NULL;
npv->pv_flags = 0;
pv->pv_next = npv;
-#ifdef PMAPSTATS
- if (!npv->pv_next)
- enter_stats.secondpv++;
-#endif
}
splx(s);
}
@@ -1353,9 +1356,6 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
*/
else if (pmap_initialized) {
checkpv = cacheable = FALSE;
-#ifdef PMAPSTATS
- enter_stats.unmanaged++;
-#endif
}
/*
@@ -1408,7 +1408,7 @@ validate:
TBIS(va);
#ifdef DEBUG
if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
- pmap_check_wiring("enter", trunc_page((vaddr_t)pmap_pte(pmap, va)));
+ pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
#endif
}
@@ -1566,9 +1566,6 @@ pmap_collect(pmap)
if (pmapdebug & PDB_FOLLOW)
printf("pmap_collect(%p)\n", pmap);
#endif
-#ifdef PMAPSTATS
- kpt_stats.collectscans++;
-#endif
s = splimp();
#if defined(MACHINE_NEW_NONCONTIG)
for (bank = 0; bank < vm_nphysseg; bank++)
@@ -1676,10 +1673,6 @@ ok:
*pkpt = kpt->kpt_next;
kpt->kpt_next = kpt_free_list;
kpt_free_list = kpt;
-#ifdef PMAPSTATS
- kpt_stats.kptinuse--;
- kpt_stats.collectpages++;
-#endif
#ifdef DEBUG
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
pmapdebug = opmapdebug;
@@ -1882,6 +1875,8 @@ pmap_mapmulti(pmap, va)
* If (pte != NULL), it is the already computed PTE for the page.
* If (flags & PRM_TFLUSH), we must invalidate any TLB information.
* If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
+ * If (flags & PRM_KEEPPTPAGE), we don't free the page table page if the
+ * reference drops to zero.
*/
/* static */
void
@@ -1916,9 +1911,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
#ifdef DEBUG
opte = *pte;
#endif
-#ifdef PMAPSTATS
- remove_stats.removes++;
-#endif
/*
* Update statistics
*/
@@ -1945,11 +1937,55 @@ pmap_remove_mapping(pmap, va, pte, flags)
* PT page.
*/
if (pmap != pmap_kernel()) {
- (void) vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
+ vaddr_t ptpva = trunc_page((vaddr_t)pte);
+#if defined(UVM)
+ int refs = pmap_ptpage_delref(ptpva);
+
+ /*
+ * If reference count drops to 1, and we're not instructed
+ * to keep it around, free the PT page.
+ *
+ * Note: refcnt == 1 comes from the fact that we allocate
+ * the page with uvm_fault_wire(), which initially wires
+ * the page. The first reference we actually add causes
+ * the refcnt to be 2.
+ */
+ if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+ struct pv_entry *pv;
+ paddr_t pa;
+
+ pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
+#ifdef DIAGNOSTIC
+ if (PAGE_IS_MANAGED(pa) == 0)
+ panic("pmap_remove_mapping: unmanaged PT page");
+#endif
+ pv = pa_to_pvh(pa);
+#ifdef DIAGNOSTIC
+ if (pv->pv_ptste == NULL)
+ panic("pmap_remove_mapping: ptste == NULL");
+ if (pv->pv_pmap != pmap_kernel() ||
+ pv->pv_va != ptpva ||
+ pv->pv_next != NULL)
+ panic("pmap_remove_mapping: "
+ "bad PT page pmap %p, va 0x%lx, next %p",
+ pv->pv_pmap, pv->pv_va, pv->pv_next);
+#endif
+ pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ NULL, PRM_TFLUSH|PRM_CFLUSH);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+#ifdef DEBUG
+ if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
+ printf("remove: PT page 0x%lx (0x%lx) freed\n",
+ ptpva, pa);
+#endif
+ }
+#else
+ (void) vm_map_pageable(pt_map, ptpva,
round_page((vaddr_t)(pte+1)), TRUE);
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
- pmap_check_wiring("remove", trunc_page((vaddr_t)pte));
+ pmap_check_wiring("remove", trunc_page(pte));
#endif
}
/*
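
The refs == 1 test relies on the wiring established when the PT page was first faulted in, as the comment explains. An illustrative trace of the wire count over the life of one user PT page:

	/*
	 * uvm_fault_wire() in pmap_enter_ptpage()   wire_count = 1   initial wiring
	 * pmap_ptpage_addref()                      wire_count = 2   first PTE entered
	 * pmap_ptpage_addref()                      wire_count = 3   second PTE entered
	 * pmap_ptpage_delref() returns 2                             one PTE removed, page kept
	 * pmap_ptpage_delref() returns 1                             last PTE gone; freed here
	 *                                                            unless PRM_KEEPPTPAGE is set
	 */
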
@@ -1980,14 +2016,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap_free_pv(npv);
} else
pv->pv_pmap = NULL;
-#ifdef PMAPSTATS
- remove_stats.pvfirst++;
-#endif
} else {
for (npv = pv->pv_next; npv; npv = npv->pv_next) {
-#ifdef PMAPSTATS
- remove_stats.pvsearch++;
-#endif
if (pmap == npv->pv_pmap && va == npv->pv_va)
break;
pv = npv;
@@ -2007,9 +2037,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
* mapping from the associated segment table.
*/
if (ste) {
-#ifdef PMAPSTATS
- remove_stats.ptinvalid++;
-#endif
#ifdef DEBUG
if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
printf("remove: ste was %x@%x pte was %x@%x\n",
@@ -2038,7 +2065,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
printf("remove: stab %x, refcnt %d\n",
ptpmap->pm_stab, ptpmap->pm_sref - 1);
if ((pmapdebug & PDB_PARANOIA) &&
- ptpmap->pm_stab != (st_entry_t *)trunc_page((vaddr_t)ste))
+ ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
panic("remove: bogus ste");
#endif
if (--(ptpmap->pm_sref) == 0) {
@@ -2047,9 +2074,15 @@ pmap_remove_mapping(pmap, va, pte, flags)
printf("remove: free stab %x\n",
ptpmap->pm_stab);
#endif
+#if defined(UVM)
+ uvm_km_free_wakeup(st_map,
+ (vm_offset_t)ptpmap->pm_stab,
+ M68K_STSIZE);
+#else
kmem_free_wakeup(st_map,
(vm_offset_t)ptpmap->pm_stab,
M68K_STSIZE);
+#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
@@ -2140,10 +2173,6 @@ pmap_changebit(pa, bit, setem)
vm_offset_t va;
int s;
boolean_t firstpage = TRUE;
-#ifdef PMAPSTATS
- struct chgstats *chgp;
-#endif
-
#ifdef DEBUG
if (pmapdebug & PDB_BITS)
@@ -2153,13 +2182,6 @@ pmap_changebit(pa, bit, setem)
if (PAGE_IS_MANAGED(pa) == 0)
return;
-#ifdef PMAPSTATS
- chgp = &changebit_stats[(bit>>2)-1];
- if (setem)
- chgp->setcalls++;
- else
- chgp->clrcalls++;
-#endif
pv = pa_to_pvh(pa);
s = splimp();
/*
@@ -2185,10 +2207,15 @@ pmap_changebit(pa, bit, setem)
* XXX don't write protect pager mappings
*/
if (bit == PG_RO) {
+#if defined(UVM)
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+#else
extern vm_offset_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
+#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@@ -2214,21 +2241,7 @@ pmap_changebit(pa, bit, setem)
*pte = npte;
if (active_pmap(pv->pv_pmap))
TBIS(va);
-#ifdef PMAPSTATS
- if (setem)
- chgp->sethits++;
- else
- chgp->clrhits++;
-#endif
}
-#ifdef PMAPSTATS
- else {
- if (setem)
- chgp->setmiss++;
- else
- chgp->clrmiss++;
- }
-#endif
}
}
splx(s);
@@ -2252,9 +2265,6 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
#endif
-#ifdef PMAPSTATS
- enter_stats.ptpneeded++;
-#endif
/*
* Allocate a segment table if necessary. Note that it is allocated
* from a private map and not pt_map. This keeps user page tables
@@ -2263,8 +2273,13 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
- pmap->pm_stab = (st_entry_t *)
+#if defined(UVM)
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(st_map, M68K_STSIZE);
+#else
+ pmap->pm_stab = (st_entry_t *)
kmem_alloc(st_map, M68K_STSIZE);
+#endif
pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040) || defined(M68060)
@@ -2366,10 +2381,6 @@ pmap_enter_ptpage(pmap, va)
if ((kpt = kpt_free_list) == (struct kpt_page *)0)
panic("pmap_enter_ptpage: can't get KPT page");
}
-#ifdef PMAPSTATS
- if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
- kpt_stats.kptmaxuse = kpt_stats.kptinuse;
-#endif
kpt_free_list = kpt->kpt_next;
kpt->kpt_next = kpt_used_list;
kpt_used_list = kpt;
@@ -2407,11 +2418,16 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter: about to fault UPT pg at %x\n", va);
#endif
- s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
- if (s != KERN_SUCCESS) {
- printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
+#if defined(UVM)
+ if (uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
+ VM_PROT_READ|VM_PROT_WRITE)
+ != KERN_SUCCESS)
+ panic("pmap_enter: uvm_fault failed");
+#else
+ if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
+ != KERN_SUCCESS)
panic("pmap_enter: vm_fault failed");
- }
+#endif
pmap_extract(pmap_kernel(), va, &ptpa);
/*
* Mark the page clean now to avoid its pageout (and
@@ -2419,8 +2435,10 @@ pmap_enter_ptpage(pmap, va)
* is wired; i.e. while it is on a paging queue.
*/
PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
+#if !defined(UVM)
#ifdef DEBUG
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
+ PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
+#endif
#endif
}
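
The replacement above is not just a renamed fault call: uvm_fault_wire() both brings the UPT page in and wires it, which establishes the wire_count == 1 baseline that pmap_remove_mapping() tests later. The two calls from this hunk, reduced to their essentials:

	/* Mach VM: fault only; wiring was handled separately via vm_map_pageable(). */
	s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);

	/* UVM: fault and wire the single page covering the PT page. */
	s = uvm_fault_wire(pt_map, va, va + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
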
#if defined(M68040) || defined(M68060)
@@ -2441,10 +2459,10 @@ pmap_enter_ptpage(pmap, va)
#endif
pmap_changebit(ptpa, PG_CCB, 0);
#ifdef M68060
- if (mmutype == MMU_68060) {
- pmap_changebit(ptpa, PG_CI, 1);
- DCIS();
- }
+ if (mmutype == MMU_68060) {
+ pmap_changebit(ptpa, PG_CI, 1);
+ DCIS();
+ }
#endif
}
#endif
@@ -2525,6 +2543,44 @@ pmap_enter_ptpage(pmap, va)
splx(s);
}
+#ifdef UVM
+/*
+ * pmap_ptpage_addref:
+ *
+ * Add a reference to the specified PT page.
+ */
+void
+pmap_ptpage_addref(ptpva)
+ vaddr_t ptpva;
+{
+ vm_page_t m;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ m->wire_count++;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+}
+
+/*
+ * pmap_ptpage_delref:
+ *
+ * Delete a reference to the specified PT page.
+ */
+int
+pmap_ptpage_delref(ptpva)
+ vaddr_t ptpva;
+{
+ vm_page_t m;
+ int rv;
+
+ simple_lock(&uvm.kernel_object->vmobjlock);
+ m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ rv = --m->wire_count;
+ simple_unlock(&uvm.kernel_object->vmobjlock);
+ return (rv);
+}
+#endif
+
#ifdef DEBUG
/* static */
void
@@ -2555,11 +2611,17 @@ pmap_check_wiring(str, va)
if (!pmap_ste_v(pmap_kernel(), va) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;
-
+#if defined(UVM)
+ if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
+ printf("wired_check: entry for %lx not found\n", va);
+ return;
+ }
+#else
if (!vm_map_lookup_entry(pt_map, va, &entry)) {
- printf("wired_check: entry for %x not found\n", va);
+ printf("wired_check: entry for %lx not found\n", va);
return;
}
+#endif
count = 0;
for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
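
Both helpers find the vm_page backing a PT page by its offset inside uvm.kernel_object, which for a kernel virtual address is simply ptpva - vm_map_min(kernel_map), and adjust wire_count under the object's simple lock. A sketch of how callers pair them (ptpage_ref_example is a made-up wrapper, not part of the change):

	void
	ptpage_ref_example(pmap_t pmap, vaddr_t va)
	{
		pt_entry_t *pte = pmap_pte(pmap, va);
		vaddr_t ptpva = trunc_page((vaddr_t)pte);

		pmap_ptpage_addref(ptpva);	/* a mapping at va went live */

		if (pmap_ptpage_delref(ptpva) == 1) {
			/*
			 * Only the initial uvm_fault_wire() wiring remains,
			 * so the PT page itself may now be reclaimed.
			 */
		}
	}
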
diff --git a/sys/arch/mvme68k/mvme68k/trap.c b/sys/arch/mvme68k/mvme68k/trap.c
index a081cf14def..bd7b1d664e7 100644
--- a/sys/arch/mvme68k/mvme68k/trap.c
+++ b/sys/arch/mvme68k/mvme68k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.30 2001/06/08 08:09:09 art Exp $ */
+/* $OpenBSD: trap.c,v 1.31 2001/06/26 21:35:43 miod Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -98,6 +98,9 @@ extern struct emul emul_sunos;
#include <vm/vm.h>
#include <vm/pmap.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
#ifdef COMPAT_HPUX
#include <compat/hpux/hpux.h>
@@ -177,17 +180,17 @@ static inline void userret __P((struct proc *p, struct frame *fp,
*/
static inline void
userret(p, fp, oticks, faultaddr, fromtrap)
-register struct proc *p;
-register struct frame *fp;
-u_quad_t oticks;
-u_int faultaddr;
-int fromtrap;
+ register struct proc *p;
+ register struct frame *fp;
+ u_quad_t oticks;
+ u_int faultaddr;
+ int fromtrap;
{
int sig, s;
#if defined(M68040) || defined(M68060)
int beenhere = 0;
- again:
+again:
#endif
/* take pending signals */
while ((sig = CURSIG(p)) != 0)
@@ -218,7 +221,7 @@ int fromtrap;
extern int psratio;
addupc_task(p, fp->f_pc,
- (int)(p->p_sticks - oticks) * psratio);
+ (int)(p->p_sticks - oticks) * psratio);
}
#if defined(M68040)
/*
@@ -234,9 +237,9 @@ int fromtrap;
#ifdef DEBUG
if (mmudebug & MDB_WBFAILED)
printf(fromtrap ?
- "pid %d(%s): writeback aborted, pc=%x, fa=%x\n" :
- "pid %d(%s): writeback aborted in sigreturn, pc=%x\n",
- p->p_pid, p->p_comm, fp->f_pc, faultaddr);
+ "pid %d(%s): writeback aborted, pc=%x, fa=%x\n" :
+ "pid %d(%s): writeback aborted in sigreturn, pc=%x\n",
+ p->p_pid, p->p_comm, fp->f_pc, faultaddr);
#endif
} else if (sig = writeback(fp, fromtrap)) {
register union sigval sv;
@@ -259,10 +262,10 @@ int fromtrap;
*/
/*ARGSUSED*/
trap(type, code, v, frame)
-int type;
-unsigned code;
-register unsigned v;
-struct frame frame;
+ int type;
+ unsigned code;
+ register unsigned v;
+ struct frame frame;
{
extern char fubail[], subail[];
register struct proc *p;
@@ -278,7 +281,11 @@ struct frame frame;
#endif
register union sigval sv;
+#if defined(UVM)
+ uvmexp.traps++;
+#else
cnt.v_trap++;
+#endif
p = curproc;
ucode = 0;
if (USERMODE(frame.f_sr)) {
@@ -287,375 +294,399 @@ struct frame frame;
p->p_md.md_regs = frame.f_regs;
}
switch (type) {
- default:
- dopanic:
- printf("trap type %d, code = %x, v = %x\n", type, code, v);
+ default:
+dopanic:
+ printf("trap type %d, code = %x, v = %x\n", type, code, v);
#ifdef DDB
- if (kdb_trap(type, &frame))
- return;
+ if (kdb_trap(type, &frame))
+ return;
#endif
- regdump(&frame, 128);
- type &= ~T_USER;
- if ((unsigned)type < trap_types)
- panic(trap_type[type]);
- panic("trap");
-
- case T_BUSERR: /* kernel bus error */
- if (!p || !p->p_addr->u_pcb.pcb_onfault)
- goto dopanic;
- /*
- * If we have arranged to catch this fault in any of the
- * copy to/from user space routines, set PC to return to
- * indicated location and set flag informing buserror code
- * that it may need to clean up stack frame.
- */
+ regdump(&frame, 128);
+ type &= ~T_USER;
+ if ((unsigned)type < trap_types)
+ panic(trap_type[type]);
+ panic("trap");
+
+ case T_BUSERR: /* kernel bus error */
+ if (!p || !p->p_addr->u_pcb.pcb_onfault)
+ goto dopanic;
copyfault:
+ /*
+ * If we have arranged to catch this fault in any of the
+ * copy to/from user space routines, set PC to return to
+ * indicated location and set flag informing buserror code
+ * that it may need to clean up stack frame.
+ */
frame.f_stackadj = exframesize[frame.f_format];
frame.f_format = frame.f_vector = 0;
frame.f_pc = (int) p->p_addr->u_pcb.pcb_onfault;
return;
- case T_BUSERR|T_USER: /* bus error */
- typ = BUS_OBJERR;
- ucode = code & ~T_USER;
- i = SIGBUS;
- break;
- case T_ADDRERR|T_USER: /* address error */
- typ = BUS_ADRALN;
- ucode = code & ~T_USER;
- i = SIGBUS;
- break;
-
- case T_COPERR: /* kernel coprocessor violation */
- case T_FMTERR|T_USER: /* do all RTE errors come in as T_USER? */
- case T_FMTERR: /* ...just in case... */
- /*
- * The user has most likely trashed the RTE or FP state info
- * in the stack frame of a signal handler.
- */
- printf("pid %d: kernel %s exception\n", p->p_pid,
- type==T_COPERR ? "coprocessor" : "format");
- type |= T_USER;
- p->p_sigacts->ps_sigact[SIGILL] = SIG_DFL;
- i = sigmask(SIGILL);
- p->p_sigignore &= ~i;
- p->p_sigcatch &= ~i;
- p->p_sigmask &= ~i;
- i = SIGILL;
- ucode = frame.f_format; /* XXX was ILL_RESAD_FAULT */
- typ = ILL_COPROC;
- v = frame.f_pc;
- break;
-
- case T_COPERR|T_USER: /* user coprocessor violation */
- /* What is a proper response here? */
- typ = FPE_FLTINV;
- ucode = 0;
- i = SIGFPE;
- break;
-
- case T_FPERR|T_USER: /* 68881 exceptions */
- /*
- * We pass along the 68881 status register which locore stashed
- * in code for us. Note that there is a possibility that the
- * bit pattern of this register will conflict with one of the
- * FPE_* codes defined in signal.h. Fortunately for us, the
- * only such codes we use are all in the range 1-7 and the low
- * 3 bits of the status register are defined as 0 so there is
- * no clash.
- */
- typ = FPE_FLTRES;
- ucode = code;
- i = SIGFPE;
- v = frame.f_pc;
- break;
+ case T_BUSERR|T_USER: /* bus error */
+ typ = BUS_OBJERR;
+ ucode = code & ~T_USER;
+ i = SIGBUS;
+ break;
+ case T_ADDRERR|T_USER: /* address error */
+ typ = BUS_ADRALN;
+ ucode = code & ~T_USER;
+ i = SIGBUS;
+ break;
+
+ case T_COPERR: /* kernel coprocessor violation */
+ case T_FMTERR|T_USER: /* do all RTE errors come in as T_USER? */
+ case T_FMTERR: /* ...just in case... */
+ /*
+ * The user has most likely trashed the RTE or FP state info
+ * in the stack frame of a signal handler.
+ */
+ printf("pid %d: kernel %s exception\n", p->p_pid,
+ type==T_COPERR ? "coprocessor" : "format");
+ type |= T_USER;
+ p->p_sigacts->ps_sigact[SIGILL] = SIG_DFL;
+ i = sigmask(SIGILL);
+ p->p_sigignore &= ~i;
+ p->p_sigcatch &= ~i;
+ p->p_sigmask &= ~i;
+ i = SIGILL;
+ ucode = frame.f_format; /* XXX was ILL_RESAD_FAULT */
+ typ = ILL_COPROC;
+ v = frame.f_pc;
+ break;
+
+ case T_COPERR|T_USER: /* user coprocessor violation */
+ /* What is a proper response here? */
+ typ = FPE_FLTINV;
+ ucode = 0;
+ i = SIGFPE;
+ break;
+
+ case T_FPERR|T_USER: /* 68881 exceptions */
+ /*
+ * We pass along the 68881 status register which locore stashed
+ * in code for us. Note that there is a possibility that the
+ * bit pattern of this register will conflict with one of the
+ * FPE_* codes defined in signal.h. Fortunately for us, the
+ * only such codes we use are all in the range 1-7 and the low
+ * 3 bits of the status register are defined as 0 so there is
+ * no clash.
+ */
+ typ = FPE_FLTRES;
+ ucode = code;
+ i = SIGFPE;
+ v = frame.f_pc;
+ break;
#if defined(M68040) || defined(M68060)
- case T_FPEMULI|T_USER: /* unimplemented FP instuction */
- case T_FPEMULD|T_USER: /* unimplemented FP data type */
- /* XXX need to FSAVE */
- printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n",
- p->p_pid, p->p_comm,
- frame.f_format == 2 ? "instruction" : "data type",
- frame.f_pc, frame.f_fmt2.f_iaddr);
- /* XXX need to FRESTORE */
- typ = FPE_FLTINV;
- i = SIGFPE;
- v = frame.f_pc;
- break;
+ case T_FPEMULI|T_USER: /* unimplemented FP instruction */
+ case T_FPEMULD|T_USER: /* unimplemented FP data type */
+ /* XXX need to FSAVE */
+ printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n",
+ p->p_pid, p->p_comm,
+ frame.f_format == 2 ? "instruction" : "data type",
+ frame.f_pc, frame.f_fmt2.f_iaddr);
+ /* XXX need to FRESTORE */
+ typ = FPE_FLTINV;
+ i = SIGFPE;
+ v = frame.f_pc;
+ break;
#endif
- case T_ILLINST|T_USER: /* illegal instruction fault */
+ case T_ILLINST|T_USER: /* illegal instruction fault */
#ifdef COMPAT_HPUX
- if (p->p_emul == &emul_hpux) {
- typ = 0;
- ucode = HPUX_ILL_ILLINST_TRAP;
- i = SIGILL;
- break;
- }
-#endif
- ucode = frame.f_format; /* XXX was ILL_PRIVIN_FAULT */
- typ = ILL_ILLOPC;
+ if (p->p_emul == &emul_hpux) {
+ typ = 0;
+ ucode = HPUX_ILL_ILLINST_TRAP;
i = SIGILL;
- v = frame.f_pc;
break;
+ }
+#endif
+ ucode = frame.f_format; /* XXX was ILL_PRIVIN_FAULT */
+ typ = ILL_ILLOPC;
+ i = SIGILL;
+ v = frame.f_pc;
+ break;
- case T_PRIVINST|T_USER: /* privileged instruction fault */
+ case T_PRIVINST|T_USER: /* privileged instruction fault */
#ifdef COMPAT_HPUX
- if (p->p_emul == &emul_hpux)
- ucode = HPUX_ILL_PRIV_TRAP;
- else
+ if (p->p_emul == &emul_hpux)
+ ucode = HPUX_ILL_PRIV_TRAP;
+ else
#endif
- ucode = frame.f_format; /* XXX was ILL_PRIVIN_FAULT */
- typ = ILL_PRVOPC;
- i = SIGILL;
- v = frame.f_pc;
- break;
+ ucode = frame.f_format; /* XXX was ILL_PRIVIN_FAULT */
+ typ = ILL_PRVOPC;
+ i = SIGILL;
+ v = frame.f_pc;
+ break;
- case T_ZERODIV|T_USER: /* Divide by zero */
+ case T_ZERODIV|T_USER: /* Divide by zero */
#ifdef COMPAT_HPUX
- if (p->p_emul == &emul_hpux)
- ucode = HPUX_FPE_INTDIV_TRAP;
- else
+ if (p->p_emul == &emul_hpux)
+ ucode = HPUX_FPE_INTDIV_TRAP;
+ else
#endif
- ucode = frame.f_format; /* XXX was FPE_INTDIV_TRAP */
- typ = FPE_INTDIV;
- i = SIGFPE;
- v = frame.f_pc;
- break;
+ ucode = frame.f_format; /* XXX was FPE_INTDIV_TRAP */
+ typ = FPE_INTDIV;
+ i = SIGFPE;
+ v = frame.f_pc;
+ break;
- case T_CHKINST|T_USER: /* CHK instruction trap */
+ case T_CHKINST|T_USER: /* CHK instruction trap */
#ifdef COMPAT_HPUX
- if (p->p_emul == &emul_hpux) {
- /* handled differently under hp-ux */
- i = SIGILL;
- ucode = HPUX_ILL_CHK_TRAP;
- break;
- }
-#endif
- ucode = frame.f_format; /* XXX was FPE_SUBRNG_TRAP */
- typ = FPE_FLTSUB;
- i = SIGFPE;
- v = frame.f_pc;
+ if (p->p_emul == &emul_hpux) {
+ /* handled differently under hp-ux */
+ i = SIGILL;
+ ucode = HPUX_ILL_CHK_TRAP;
break;
+ }
+#endif
+ ucode = frame.f_format; /* XXX was FPE_SUBRNG_TRAP */
+ typ = FPE_FLTSUB;
+ i = SIGFPE;
+ v = frame.f_pc;
+ break;
- case T_TRAPVINST|T_USER: /* TRAPV instruction trap */
+ case T_TRAPVINST|T_USER: /* TRAPV instruction trap */
#ifdef COMPAT_HPUX
- if (p->p_emul == &emul_hpux) {
- /* handled differently under hp-ux */
- i = SIGILL;
- ucode = HPUX_ILL_TRAPV_TRAP;
- break;
- }
-#endif
- ucode = frame.f_format; /* XXX was FPE_INTOVF_TRAP */
- typ = ILL_ILLTRP;
+ if (p->p_emul == &emul_hpux) {
+ /* handled differently under hp-ux */
i = SIGILL;
- v = frame.f_pc;
+ ucode = HPUX_ILL_TRAPV_TRAP;
break;
+ }
+#endif
+ ucode = frame.f_format; /* XXX was FPE_INTOVF_TRAP */
+ typ = ILL_ILLTRP;
+ i = SIGILL;
+ v = frame.f_pc;
+ break;
- /*
- * XXX: Trace traps are a nightmare.
- *
- * HP-UX uses trap #1 for breakpoints,
- * HPBSD uses trap #2,
- * SUN 3.x uses trap #15,
- * KGDB uses trap #15 (for kernel breakpoints; handled elsewhere).
- *
- * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
- * SUN 3.x traps get passed through as T_TRAP15 and are not really
- * supported yet.
- */
- case T_TRACE: /* kernel trace trap */
- case T_TRAP15: /* SUN trace trap */
-#ifdef DDB
- if (kdb_trap(type, &frame))
- return;
+ /*
+ * XXX: Trace traps are a nightmare.
+ *
+ * HP-UX uses trap #1 for breakpoints,
+ * OpenBSD/m68k uses trap #2,
+ * SUN 3.x uses trap #15,
+ * KGDB uses trap #15 (for kernel breakpoints; handled elsewhere).
+ *
+ * OpenBSD and HP-UX traps both get mapped by locore.s into
+ * T_TRACE.
+ * SUN 3.x traps get passed through as T_TRAP15 and are not really
+ * supported yet.
+ */
+ case T_TRAP15: /* kernel breakpoint */
+#ifdef DEBUG
+ printf("unexpected kernel trace trap, type = %d\n", type);
+ printf("program counter = 0x%x\n", frame.f_pc);
#endif
- frame.f_sr &= ~PSL_T;
- i = SIGTRAP;
- typ = TRAP_TRACE;
- break;
+ frame.f_sr &= ~PSL_T;
+ return;
- case T_TRACE|T_USER: /* user trace trap */
- case T_TRAP15|T_USER: /* SUN user trace trap */
+ case T_TRACE|T_USER: /* user trace trap */
#ifdef COMPAT_SUNOS
- /*
- * SunOS uses Trap #2 for a "CPU cache flush"
- * Just flush the on-chip caches and return.
- * XXX - Too bad m68k BSD uses trap 2...
- */
- if (p->p_emul == &emul_sunos) {
- ICIA();
- DCIU();
- /* get out fast */
- return;
- }
+ /*
+ * SunOS uses Trap #2 for a "CPU cache flush"
+ * Just flush the on-chip caches and return.
+ */
+ if (p->p_emul == &emul_sunos) {
+ ICIA();
+ DCIU();
+ return;
+ }
#endif
- frame.f_sr &= ~PSL_T;
- i = SIGTRAP;
- typ = TRAP_TRACE;
- break;
-
- case T_ASTFLT: /* system async trap, cannot happen */
- goto dopanic;
+ /* FALLTHROUGH */
- case T_ASTFLT|T_USER: /* user async trap */
- astpending = 0;
- /*
- * We check for software interrupts first. This is because
- * they are at a higher level than ASTs, and on a VAX would
- * interrupt the AST. We assume that if we are processing
- * an AST that we must be at IPL0 so we don't bother to
- * check. Note that we ensure that we are at least at SIR
- * IPL while processing the SIR.
- */
- spl1();
- /* fall into... */
-
- case T_SSIR: /* software interrupt */
- case T_SSIR|T_USER:
- while (bit = ffs(ssir)) {
- --bit;
- ssir &= ~(1 << bit);
- cnt.v_soft++;
- if (sir_routines[bit])
- sir_routines[bit](sir_args[bit]);
- }
- /*
- * If this was not an AST trap, we are all done.
- */
- if (type != (T_ASTFLT|T_USER)) {
- cnt.v_trap--;
- return;
- }
- spl0();
- if (p->p_flag & P_OWEUPC) {
- p->p_flag &= ~P_OWEUPC;
- ADDUPROF(p);
- }
- goto out;
+ case T_TRACE:
+ case T_TRAP15|T_USER: /* SUN user trace trap */
+ frame.f_sr &= ~PSL_T;
+ i = SIGTRAP;
+ typ = TRAP_TRACE;
+ break;
- case T_MMUFLT: /* kernel mode page fault */
- /*
- * If we were doing profiling ticks or other user mode
- * stuff from interrupt code, Just Say No.
- */
- if (p && (p->p_addr->u_pcb.pcb_onfault == fubail ||
- p->p_addr->u_pcb.pcb_onfault == subail)) {
- goto copyfault;
- }
- /* fall into ... */
+ case T_ASTFLT: /* system async trap, cannot happen */
+ goto dopanic;
- case T_MMUFLT|T_USER: /* page fault */
- {
- register vm_offset_t va;
- register struct vmspace *vm = NULL;
- register vm_map_t map;
- int rv;
- vm_prot_t ftype, vftype;
- extern vm_map_t kernel_map;
+ case T_ASTFLT|T_USER: /* user async trap */
+ astpending = 0;
+ /*
+ * We check for software interrupts first. This is because
+ * they are at a higher level than ASTs, and on a VAX would
+ * interrupt the AST. We assume that if we are processing
+ * an AST that we must be at IPL0 so we don't bother to
+ * check. Note that we ensure that we are at least at SIR
+ * IPL while processing the SIR.
+ */
+ spl1();
+ /* FALLTHROUGH */
+
+ case T_SSIR: /* software interrupt */
+ case T_SSIR|T_USER:
+ while (bit = ffs(ssir)) {
+ --bit;
+ ssir &= ~(1 << bit);
+#if defined(UVM)
+ uvmexp.softs++;
+#else
+ cnt.v_soft++;
+#endif
+ if (sir_routines[bit])
+ sir_routines[bit](sir_args[bit]);
+ }
+ /*
+ * If this was not an AST trap, we are all done.
+ */
+ if (type != (T_ASTFLT|T_USER)) {
+#if defined(UVM)
+ uvmexp.traps--;
+#else
+ cnt.v_trap--;
+#endif
+ return;
+ }
+ spl0();
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ goto out;
- /* vmspace only significant if T_USER */
- if (p)
- vm = p->p_vmspace;
+ case T_MMUFLT: /* kernel mode page fault */
+ /*
+ * If we were doing profiling ticks or other user mode
+ * stuff from interrupt code, Just Say No.
+ */
+ if (p && (p->p_addr->u_pcb.pcb_onfault == fubail ||
+ p->p_addr->u_pcb.pcb_onfault == subail))
+ goto copyfault;
+ /* FALLTHROUGH */
+
+ case T_MMUFLT|T_USER: /* page fault */
+ {
+ register vm_offset_t va;
+ register struct vmspace *vm = NULL;
+ register vm_map_t map;
+ int rv;
+ vm_prot_t ftype, vftype;
+ extern vm_map_t kernel_map;
+
+ /* vmspace only significant if T_USER */
+ if (p)
+ vm = p->p_vmspace;
#ifdef DEBUG
- if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
- printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
- p->p_pid, code, v, frame.f_pc, frame.f_sr);
+ if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
+ printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
+ p->p_pid, code, v, frame.f_pc, frame.f_sr);
#endif
- /*
- * It is only a kernel address space fault iff:
- * 1. (type & T_USER) == 0 and
- * 2. pcb_onfault not set or
- * 3. pcb_onfault set but supervisor space data fault
- * The last can occur during an exec() copyin where the
- * argument space is lazy-allocated.
- */
- if (type == T_MMUFLT &&
- ((p && !p->p_addr->u_pcb.pcb_onfault) || KDFAULT(code)))
- map = kernel_map;
- else
- map = &vm->vm_map;
- if (WRFAULT(code)) {
- vftype = VM_PROT_WRITE;
- ftype = VM_PROT_READ | VM_PROT_WRITE;
- } else
- vftype = ftype = VM_PROT_READ;
- va = trunc_page((vm_offset_t)v);
-
- if (map == kernel_map && va == 0) {
- printf("trap: bad kernel access at %x\n", v);
- goto dopanic;
- }
+ /*
+ * It is only a kernel address space fault iff:
+ * 1. (type & T_USER) == 0 and
+ * 2. pcb_onfault not set or
+ * 3. pcb_onfault set but supervisor space data fault
+ * The last can occur during an exec() copyin where the
+ * argument space is lazy-allocated.
+ */
+ if (type == T_MMUFLT &&
+ ((p && !p->p_addr->u_pcb.pcb_onfault) || KDFAULT(code)))
+ map = kernel_map;
+ else
+ map = &vm->vm_map;
+ if (WRFAULT(code)) {
+ vftype = VM_PROT_WRITE;
+ ftype = VM_PROT_READ | VM_PROT_WRITE;
+ } else
+ vftype = ftype = VM_PROT_READ;
+ va = trunc_page((vm_offset_t)v);
+
+ if (map == kernel_map && va == 0) {
+ printf("trap: bad kernel access at %x\n", v);
+ goto dopanic;
+ }
#ifdef COMPAT_HPUX
- if (ISHPMMADDR(va)) {
- vm_offset_t bva;
-
- rv = pmap_mapmulti(map->pmap, va);
- if (rv != KERN_SUCCESS) {
- bva = HPMMBASEADDR(va);
- rv = vm_fault(map, bva, ftype, FALSE);
- if (rv == KERN_SUCCESS)
- (void) pmap_mapmulti(map->pmap, va);
- }
- } else
+ if (ISHPMMADDR(va)) {
+ vm_offset_t bva;
+
+ rv = pmap_mapmulti(map->pmap, va);
+ if (rv != KERN_SUCCESS) {
+ bva = HPMMBASEADDR(va);
+#if defined(UVM)
+ rv = uvm_fault(map, bva, 0, ftype);
+#else
+ rv = vm_fault(map, bva, ftype, FALSE);
+#endif
+ if (rv == KERN_SUCCESS)
+ (void) pmap_mapmulti(map->pmap, va);
+ }
+ } else
#endif
- rv = vm_fault(map, va, ftype, FALSE);
+#if defined(UVM)
+ rv = uvm_fault(map, va, 0, ftype);
+#else
+ rv = vm_fault(map, va, ftype, FALSE);
+#endif
#ifdef DEBUG
- if (rv && MDB_ISPID(p->p_pid))
- printf("vm_fault(%x, %x, %x, 0) -> %x\n",
- map, va, ftype, rv);
+ if (rv && MDB_ISPID(p->p_pid))
+#if defined(UVM)
+ printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
+ map, va, ftype, rv);
+#else
+ printf("vm_fault(%x, %x, %x, 0) -> %x\n",
+ map, va, ftype, rv);
+#endif
#endif
- /*
- * If this was a stack access we keep track of the maximum
- * accessed stack size. Also, if vm_fault gets a protection
- * failure it is due to accessing the stack region outside
- * the current limit and we need to reflect that as an access
- * error.
- */
- if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
- if (rv == KERN_SUCCESS) {
- unsigned nss;
-
- nss = btoc(USRSTACK-(unsigned)va);
- if (nss > vm->vm_ssize)
- vm->vm_ssize = nss;
- } else if (rv == KERN_PROTECTION_FAILURE)
- rv = KERN_INVALID_ADDRESS;
- }
+ /*
+ * If this was a stack access we keep track of the maximum
+ * accessed stack size. Also, if vm_fault gets a protection
+ * failure it is due to accessing the stack region outside
+ * the current limit and we need to reflect that as an access
+ * error.
+ */
+ if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
if (rv == KERN_SUCCESS) {
- if (type == T_MMUFLT) {
+ unsigned nss;
+
+ nss = btoc(USRSTACK-(unsigned)va);
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (rv == KERN_PROTECTION_FAILURE)
+ rv = KERN_INVALID_ADDRESS;
+ }
+ if (rv == KERN_SUCCESS) {
+ if (type == T_MMUFLT) {
#if defined(M68040)
- if (mmutype == MMU_68040)
- (void) writeback(&frame, 1);
+ if (mmutype == MMU_68040)
+ (void) writeback(&frame, 1);
#endif
- return;
- }
- goto out;
- }
- if (type == T_MMUFLT) {
- if (p && p->p_addr->u_pcb.pcb_onfault)
- goto copyfault;
- printf("vm_fault(%x, %x, %x, 0) -> %x\n",
- map, va, ftype, rv);
- printf(" type %x, code [mmu,,ssw]: %x\n",
- type, code);
- goto dopanic;
+ return;
}
- frame.f_pad = code & 0xffff;
- ucode = vftype;
- typ = SEGV_MAPERR;
- i = SIGSEGV;
- break;
+ goto out;
}
+ if (type == T_MMUFLT) {
+ if (p && p->p_addr->u_pcb.pcb_onfault)
+ goto copyfault;
+#if defined(UVM)
+ printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
+ map, va, ftype, rv);
+#else
+ printf("vm_fault(%x, %x, %x, 0) -> %x\n",
+ map, va, ftype, rv);
+#endif
+ printf(" type %x, code [mmu,,ssw]: %x\n",
+ type, code);
+ goto dopanic;
+ }
+ frame.f_pad = code & 0xffff;
+ ucode = vftype;
+ typ = SEGV_MAPERR;
+ i = SIGSEGV;
+ break;
+ }
}
sv.sival_int = v;
trapsignal(p, i, ucode, typ, sv);
if ((type & T_USER) == 0)
return;
- out:
+out:
userret(p, &frame, sticks, v, 1);
}
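
A small userspace sketch of the ffs()-driven soft-interrupt dispatch loop in the
T_SSIR case above may help when reading the reindented hunk.  The names
sir_routines, sir_args and ssir mirror the kernel arrays; the sample handlers
and the posted bits are invented for illustration, and none of this is part of
the patch:

/*
 * Userspace model of the T_SSIR dispatch loop: ffs() returns the 1-based
 * index of the lowest set bit, so each pending soft interrupt bit is
 * cleared and its registered handler is invoked in turn.
 */
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define NSIR	32

static void	(*sir_routines[NSIR])(void *);
static void	*sir_args[NSIR];
static unsigned int ssir;

static void softnet(void *arg)   { printf("softnet(%s)\n", (char *)arg); }
static void softclock(void *arg) { printf("softclock(%s)\n", (char *)arg); }

int
main(void)
{
	int bit;

	/* Pretend two soft interrupts were posted. */
	sir_routines[0] = softnet;   sir_args[0] = "net";
	sir_routines[1] = softclock; sir_args[1] = "clock";
	ssir = (1 << 0) | (1 << 1);

	while ((bit = ffs(ssir)) != 0) {
		--bit;				/* ffs() is 1-based */
		ssir &= ~(1 << bit);
		if (sir_routines[bit])
			sir_routines[bit](sir_args[bit]);
	}
	return 0;
}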
@@ -679,8 +710,8 @@ char wberrstr[] =
int
writeback(fp, docachepush)
-struct frame *fp;
-int docachepush;
+ struct frame *fp;
+ int docachepush;
{
register struct fmt7 *f = &fp->f_fmt7;
register struct proc *p = curproc;
@@ -726,7 +757,7 @@ int docachepush;
paddr_t pa;
pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
- trunc_page((vaddr_t)f->f_fa), VM_PROT_WRITE, TRUE, VM_PROT_WRITE);
+ trunc_page(f->f_fa), VM_PROT_WRITE, TRUE, VM_PROT_WRITE);
fa = (u_int)&vmmap[(f->f_fa & PGOFSET) & ~0xF];
bcopy((caddr_t)&f->f_pd0, (caddr_t)fa, 16);
pmap_extract(pmap_kernel(), (vm_offset_t)fa, &pa);
@@ -915,8 +946,9 @@ int docachepush;
}
#ifdef DEBUG
+void
dumpssw(ssw)
-register u_short ssw;
+ register u_short ssw;
{
printf(" SSW: %x: ", ssw);
if (ssw & SSW4_CP)
@@ -941,10 +973,11 @@ register u_short ssw;
f7tm[ssw & SSW4_TMMASK]);
}
+void
dumpwb(num, s, a, d)
-int num;
-u_short s;
-u_int a, d;
+ int num;
+ u_short s;
+ u_int a, d;
{
register struct proc *p = curproc;
vm_offset_t pa;
@@ -966,8 +999,8 @@ u_int a, d;
* Process a system call.
*/
syscall(code, frame)
-register_t code;
-struct frame frame;
+ register_t code;
+ struct frame frame;
{
register caddr_t params;
register struct sysent *callp;
@@ -979,8 +1012,12 @@ struct frame frame;
#ifdef COMPAT_SUNOS
extern struct emul emul_sunos;
#endif
-
+#if defined(UVM)
+ uvmexp.syscalls++;
+#else
cnt.v_syscall++;
+#endif
+
if (!USERMODE(frame.f_sr))
panic("syscall");
p = curproc;
@@ -1024,32 +1061,32 @@ struct frame frame;
params = (caddr_t)frame.f_regs[SP] + sizeof(int);
switch (code) {
- case SYS_syscall:
- /*
- * Code is first argument, followed by actual args.
- */
- code = fuword(params);
- params += sizeof(int);
- /*
- * XXX sigreturn requires special stack manipulation
- * that is only done if entered via the sigreturn
- * trap. Cannot allow it here so make sure we fail.
- */
- if (code == SYS_sigreturn)
- code = nsys;
- break;
- case SYS___syscall:
- /*
- * Like syscall, but code is a quad, so as to maintain
- * quad alignment for the rest of the arguments.
- */
- if (callp != sysent)
- break;
- code = fuword(params + _QUAD_LOWWORD * sizeof(int));
- params += sizeof(quad_t);
- break;
- default:
+ case SYS_syscall:
+ /*
+ * Code is first argument, followed by actual args.
+ */
+ code = fuword(params);
+ params += sizeof(int);
+ /*
+ * XXX sigreturn requires special stack manipulation
+ * that is only done if entered via the sigreturn
+ * trap. Cannot allow it here so make sure we fail.
+ */
+ if (code == SYS_sigreturn)
+ code = nsys;
+ break;
+ case SYS___syscall:
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ if (callp != sysent)
break;
+ code = fuword(params + _QUAD_LOWWORD * sizeof(int));
+ params += sizeof(quad_t);
+ break;
+ default:
+ break;
}
if (code < 0 || code >= nsys)
callp += p->p_emul->e_nosys; /* illegal */
@@ -1073,28 +1110,28 @@ struct frame frame;
rval[1] = frame.f_regs[D1];
error = (*callp->sy_call)(p, args, rval);
switch (error) {
- case 0:
- frame.f_regs[D0] = rval[0];
- frame.f_regs[D1] = rval[1];
- frame.f_sr &= ~PSL_C; /* carry bit */
- break;
- case ERESTART:
- /*
- * We always enter through a `trap' instruction, which is 2
- * bytes, so adjust the pc by that amount.
- */
- frame.f_pc = opc - 2;
- break;
- case EJUSTRETURN:
- /* nothing to do */
- break;
- default:
- bad:
- if (p->p_emul->e_errno)
- error = p->p_emul->e_errno[error];
- frame.f_regs[D0] = error;
- frame.f_sr |= PSL_C; /* carry bit */
- break;
+ case 0:
+ frame.f_regs[D0] = rval[0];
+ frame.f_regs[D1] = rval[1];
+ frame.f_sr &= ~PSL_C; /* carry bit */
+ break;
+ case ERESTART:
+ /*
+ * We always enter through a `trap' instruction, which is 2
+ * bytes, so adjust the pc by that amount.
+ */
+ frame.f_pc = opc - 2;
+ break;
+ case EJUSTRETURN:
+ /* nothing to do */
+ break;
+ default:
+bad:
+ if (p->p_emul->e_errno)
+ error = p->p_emul->e_errno[error];
+ frame.f_regs[D0] = error;
+ frame.f_sr |= PSL_C; /* carry bit */
+ break;
}
#ifdef SYSCALL_DEBUG
@@ -1114,8 +1151,8 @@ struct frame frame;
void
child_return(p, frame)
-struct proc *p;
-struct frame frame;
+ struct proc *p;
+ struct frame frame;
{
frame.f_regs[D0] = 0;
@@ -1134,8 +1171,8 @@ struct frame frame;
*/
u_long
allocate_sir(proc, arg)
-void (*proc)();
-void *arg;
+ void (*proc)();
+ void *arg;
{
int bit;
@@ -1166,9 +1203,9 @@ struct intrhand *intrs[256];
#ifndef INTR_ASM
int
hardintr(pc, evec, frame)
-int pc;
-int evec;
-void *frame;
+ int pc;
+ int evec;
+ void *frame;
{
int vec = (evec & 0xfff) >> 2; /* XXX should be m68k macro? */
extern u_long intrcnt[]; /* XXX from locore */
@@ -1176,7 +1213,11 @@ void *frame;
int count = 0;
int r;
+#if defined(UVM)
+ uvmexp.intrs++;
+#else
cnt.v_intr++;
+#endif
/* intrcnt[level]++; */
for (ih = intrs[vec]; ih; ih = ih->ih_next) {
#if 0
@@ -1201,7 +1242,7 @@ void *frame;
*/
int
intr_findvec(start, end)
-int start, end;
+ int start, end;
{
extern u_long *vectab[], hardtrap, badtrap;
int vec;
@@ -1222,8 +1263,8 @@ int start, end;
*/
int
intr_establish(vec, ih)
-int vec;
-struct intrhand *ih;
+ int vec;
+ struct intrhand *ih;
{
extern u_long *vectab[], hardtrap, badtrap;
struct intrhand *ihx;
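
The syscall() return path reindented earlier in this file follows the usual
BSD convention: success loads D0/D1 and clears the carry bit, ERESTART rewinds
the pc past the 2-byte trap instruction, EJUSTRETURN leaves the frame alone,
and any other errno lands in D0 with carry set.  A minimal userspace model of
that switch follows; the register names, PSL_C and the ERESTART/EJUSTRETURN
values are stand-ins rather than the kernel definitions:

/*
 * Model of the syscall return-value handling; struct miniframe stands in
 * for the m68k trap frame.
 */
#include <stdio.h>

#define PSL_C		0x0001		/* carry bit */
#define ERESTART	(-1)
#define EJUSTRETURN	(-2)

struct miniframe {
	unsigned int	d0, d1;		/* f_regs[D0] / f_regs[D1] */
	unsigned int	pc;		/* f_pc */
	unsigned short	sr;		/* f_sr */
};

static void
syscall_return(struct miniframe *f, int error, unsigned int opc,
    const unsigned int rval[2])
{
	switch (error) {
	case 0:
		f->d0 = rval[0];
		f->d1 = rval[1];
		f->sr &= ~PSL_C;
		break;
	case ERESTART:
		f->pc = opc - 2;	/* re-execute the trap instruction */
		break;
	case EJUSTRETURN:
		break;			/* frame already set up elsewhere */
	default:
		f->d0 = error;		/* errno back to userland */
		f->sr |= PSL_C;
		break;
	}
}

int
main(void)
{
	struct miniframe f = { 0, 0, 0x1002, PSL_C };
	unsigned int rval[2] = { 42, 0 };

	syscall_return(&f, 0, 0x1002, rval);
	printf("d0=%u sr=%#x\n", f.d0, f.sr);
	syscall_return(&f, ERESTART, 0x1002, rval);
	printf("pc=%#x\n", f.pc);
	return 0;
}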
diff --git a/sys/arch/mvme68k/mvme68k/vectors.s b/sys/arch/mvme68k/mvme68k/vectors.s
index f3c1db3a416..cb10381c9d9 100644
--- a/sys/arch/mvme68k/mvme68k/vectors.s
+++ b/sys/arch/mvme68k/mvme68k/vectors.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: vectors.s,v 1.4 1996/05/06 21:55:32 deraadt Exp $ */
+/* $OpenBSD: vectors.s,v 1.5 2001/06/26 21:35:43 miod Exp $ */
| Copyright (c) 1995 Theo de Raadt
|
@@ -64,95 +64,89 @@
| @(#)vectors.s 8.2 (Berkeley) 1/21/94
|
- .data
- .globl _buserr,_addrerr
- .globl _illinst,_zerodiv,_chkinst,_trapvinst,_privinst,_trace
- .globl _badtrap
- .globl _spurintr
- .globl _trap0,_trap1,_trap2,_trap15
- .globl _fpfline, _fpunsupp
- .globl _trap12
+#define BADTRAP16 \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap) ; \
+ VECTOR(badtrap) ; VECTOR(badtrap)
- .globl _vectab
-_vectab:
+ .text
+GLOBAL(vectab)
.long 0x12345678 /* 0: jmp 0x7400:w (unused reset SSP) */
- .long 0 /* 1: NOT USED (reset PC) */
- .long _buserr /* 2: bus error */
- .long _addrerr /* 3: address error */
- .long _illinst /* 4: illegal instruction */
- .long _zerodiv /* 5: zero divide */
- .long _chkinst /* 6: CHK instruction */
- .long _trapvinst /* 7: TRAPV instruction */
- .long _privinst /* 8: privilege violation */
- .long _trace /* 9: trace */
- .long _illinst /* 10: line 1010 emulator */
- .long _fpfline /* 11: line 1111 emulator */
- .long _badtrap /* 12: unassigned, reserved */
- .long _coperr /* 13: coprocessor protocol violation */
- .long _fmterr /* 14: format error */
- .long _badtrap /* 15: uninitialized interrupt vector */
- .long _badtrap /* 16: unassigned, reserved */
- .long _badtrap /* 17: unassigned, reserved */
- .long _badtrap /* 18: unassigned, reserved */
- .long _badtrap /* 19: unassigned, reserved */
- .long _badtrap /* 20: unassigned, reserved */
- .long _badtrap /* 21: unassigned, reserved */
- .long _badtrap /* 22: unassigned, reserved */
- .long _badtrap /* 23: unassigned, reserved */
- .long _spurintr /* 24: spurious interrupt */
- .long _badtrap /* 25: level 1 interrupt autovector */
- .long _badtrap /* 26: level 2 interrupt autovector */
- .long _badtrap /* 27: level 3 interrupt autovector */
- .long _badtrap /* 28: level 4 interrupt autovector */
- .long _badtrap /* 29: level 5 interrupt autovector */
- .long _badtrap /* 30: level 6 interrupt autovector */
- .long _badtrap /* 31: level 7 interrupt autovector */
- .long _trap0 /* 32: syscalls */
- .long _trap1 /* 33: sigreturn syscall or breakpoint */
- .long _trap2 /* 34: breakpoint or sigreturn syscall */
- .long _illinst /* 35: TRAP instruction vector */
- .long _illinst /* 36: TRAP instruction vector */
- .long _illinst /* 37: TRAP instruction vector */
- .long _illinst /* 38: TRAP instruction vector */
- .long _illinst /* 39: TRAP instruction vector */
- .long _illinst /* 40: TRAP instruction vector */
- .long _illinst /* 41: TRAP instruction vector */
- .long _illinst /* 42: TRAP instruction vector */
- .long _illinst /* 43: TRAP instruction vector */
- .long _trap12 /* 44: TRAP instruction vector */
- .long _illinst /* 45: TRAP instruction vector */
- .long _illinst /* 46: TRAP instruction vector */
- .long _trap15 /* 47: TRAP instruction vector */
+ VECTOR_UNUSED /* 1: NOT USED (reset PC) */
+ VECTOR(buserr) /* 2: bus error */
+ VECTOR(addrerr) /* 3: address error */
+ VECTOR(illinst) /* 4: illegal instruction */
+ VECTOR(zerodiv) /* 5: zero divide */
+ VECTOR(chkinst) /* 6: CHK instruction */
+ VECTOR(trapvinst) /* 7: TRAPV instruction */
+ VECTOR(privinst) /* 8: privilege violation */
+ VECTOR(trace) /* 9: trace */
+ VECTOR(illinst) /* 10: line 1010 emulator */
+ VECTOR(fpfline) /* 11: line 1111 emulator */
+ VECTOR(badtrap) /* 12: unassigned, reserved */
+ VECTOR(coperr) /* 13: coprocessor protocol violation */
+ VECTOR(fmterr) /* 14: format error */
+ VECTOR(badtrap) /* 15: uninitialized interrupt vector */
+ VECTOR(badtrap) /* 16: unassigned, reserved */
+ VECTOR(badtrap) /* 17: unassigned, reserved */
+ VECTOR(badtrap) /* 18: unassigned, reserved */
+ VECTOR(badtrap) /* 19: unassigned, reserved */
+ VECTOR(badtrap) /* 20: unassigned, reserved */
+ VECTOR(badtrap) /* 21: unassigned, reserved */
+ VECTOR(badtrap) /* 22: unassigned, reserved */
+ VECTOR(badtrap) /* 23: unassigned, reserved */
+ VECTOR(spurintr) /* 24: spurious interrupt */
+ VECTOR(badtrap) /* 25: level 1 interrupt autovector */
+ VECTOR(badtrap) /* 26: level 2 interrupt autovector */
+ VECTOR(badtrap) /* 27: level 3 interrupt autovector */
+ VECTOR(badtrap) /* 28: level 4 interrupt autovector */
+ VECTOR(badtrap) /* 29: level 5 interrupt autovector */
+ VECTOR(badtrap) /* 30: level 6 interrupt autovector */
+ VECTOR(badtrap) /* 31: level 7 interrupt autovector */
+ VECTOR(trap0) /* 32: syscalls */
+ VECTOR(trap1) /* 33: sigreturn syscall or breakpoint */
+ VECTOR(trap2) /* 34: breakpoint or sigreturn syscall */
+ VECTOR(illinst) /* 35: TRAP instruction vector */
+ VECTOR(illinst) /* 36: TRAP instruction vector */
+ VECTOR(illinst) /* 37: TRAP instruction vector */
+ VECTOR(illinst) /* 38: TRAP instruction vector */
+ VECTOR(illinst) /* 39: TRAP instruction vector */
+ VECTOR(illinst) /* 40: TRAP instruction vector */
+ VECTOR(illinst) /* 41: TRAP instruction vector */
+ VECTOR(illinst) /* 42: TRAP instruction vector */
+ VECTOR(illinst) /* 43: TRAP instruction vector */
+ VECTOR(trap12) /* 44: TRAP instruction vector */
+ VECTOR(illinst) /* 45: TRAP instruction vector */
+ VECTOR(illinst) /* 46: TRAP instruction vector */
+ VECTOR(trap15) /* 47: TRAP instruction vector */
/*
- * 68881/68882: _fpfault zone
+ * 68881/68882: fpfault zone
*/
- .globl _fpvect_tab, _fpvect_end
-_fpvect_tab:
- .globl _fpfault
- .long _fpfault /* 48: FPCP branch/set on unordered cond */
- .long _fpfault /* 49: FPCP inexact result */
- .long _fpfault /* 50: FPCP divide by zero */
- .long _fpfault /* 51: FPCP underflow */
- .long _fpfault /* 52: FPCP operand error */
- .long _fpfault /* 53: FPCP overflow */
- .long _fpfault /* 54: FPCP signalling NAN */
-_fpvect_end:
+GLOBAL(fpvect_tab)
+ VECTOR(fpfault) /* 48: FPCP branch/set on unordered cond */
+ VECTOR(fpfault) /* 49: FPCP inexact result */
+ VECTOR(fpfault) /* 50: FPCP divide by zero */
+ VECTOR(fpfault) /* 51: FPCP underflow */
+ VECTOR(fpfault) /* 52: FPCP operand error */
+ VECTOR(fpfault) /* 53: FPCP overflow */
+ VECTOR(fpfault) /* 54: FPCP signalling NAN */
+GLOBAL(fpvect_end)
- .long _fpunsupp /* 55: FPCP unimplemented data type */
- .long _badtrap /* 56: unassigned, reserved */
- .long _badtrap /* 57: unassigned, reserved */
- .long _badtrap /* 58: unassigned, reserved */
- .long _badtrap /* 59: unassigned, reserved */
- .long _badtrap /* 60: unassigned, reserved */
- .long _badtrap /* 61: unassigned, reserved */
- .long _badtrap /* 62: unassigned, reserved */
- .long _badtrap /* 63: unassigned, reserved */
-
-#define BADTRAP16 .long _badtrap,_badtrap,_badtrap,_badtrap,\
- _badtrap,_badtrap,_badtrap,_badtrap,\
- _badtrap,_badtrap,_badtrap,_badtrap,\
- _badtrap,_badtrap,_badtrap,_badtrap
+ VECTOR(fpunsupp) /* 55: FPCP unimplemented data type */
+ VECTOR(badtrap) /* 56: unassigned, reserved */
+ VECTOR(badtrap) /* 57: unassigned, reserved */
+ VECTOR(badtrap) /* 58: unassigned, reserved */
+ VECTOR(badtrap) /* 59: unassigned, reserved */
+ VECTOR(badtrap) /* 60: unassigned, reserved */
+ VECTOR(badtrap) /* 61: unassigned, reserved */
+ VECTOR(badtrap) /* 62: unassigned, reserved */
+ VECTOR(badtrap) /* 63: unassigned, reserved */
BADTRAP16 /* 64-79: user interrupt vectors */
BADTRAP16 /* 80-95: user interrupt vectors */
@@ -170,16 +164,14 @@ _fpvect_end:
#ifdef FPSP
/*
- * 68040: this chunk of vectors is copied into the _fpfault zone
+ * 68040: this chunk of vectors is copied into the fpfault zone
*/
- .globl _fpsp_tab
-_fpsp_tab:
- .globl bsun, inex, dz, unfl, operr, ovfl, snan
- .long bsun /* 48: FPCP branch/set on unordered cond */
- .long inex /* 49: FPCP inexact result */
- .long dz /* 50: FPCP divide by zero */
- .long unfl /* 51: FPCP underflow */
- .long operr /* 52: FPCP operand error */
- .long ovfl /* 53: FPCP overflow */
- .long snan /* 54: FPCP signalling NAN */
+GLOBAL(fpsp_tab)
+ ASVECTOR(bsun) /* 48: FPCP branch/set on unordered cond */
+ ASVECTOR(inex) /* 49: FPCP inexact result */
+ ASVECTOR(dz) /* 50: FPCP divide by zero */
+ ASVECTOR(unfl) /* 51: FPCP underflow */
+ ASVECTOR(operr) /* 52: FPCP operand error */
+ ASVECTOR(ovfl) /* 53: FPCP overflow */
+ ASVECTOR(snan) /* 54: FPCP signalling NAN */
#endif FPSP
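
The VECTOR()/GLOBAL()/ASVECTOR() macros used above come from <m68k/asm.h> and
expand to assembler directives.  As a rough conceptual picture only, the table
they build is an array of handler addresses, something like the C sketch below;
the macro definitions here are invented for illustration and do not match the
real ones:

/* Conceptual C equivalent of the exception-vector table. */
#include <stdio.h>

typedef void (*vec_t)(void);

static void badtrap(void) { }
static void buserr(void)  { }
static void addrerr(void) { }

#define VECTOR(h)	h
#define BADTRAP16 \
	VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), \
	VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), \
	VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), \
	VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap), VECTOR(badtrap)

static const vec_t vectab[] = {
	(vec_t)0x12345678,	/* 0: unused reset SSP */
	0,			/* 1: unused reset PC */
	VECTOR(buserr),		/* 2: bus error */
	VECTOR(addrerr),	/* 3: address error */
	BADTRAP16,		/* one macro stamps out 16 badtrap slots */
};

int
main(void)
{
	printf("%zu vectors\n", sizeof(vectab) / sizeof(vectab[0]));
	return 0;
}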
diff --git a/sys/arch/mvme68k/mvme68k/vm_machdep.c b/sys/arch/mvme68k/mvme68k/vm_machdep.c
index 24c2e00e968..41bd463898e 100644
--- a/sys/arch/mvme68k/mvme68k/vm_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.24 2001/06/10 14:54:46 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.25 2001/06/26 21:35:43 miod Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -57,6 +57,9 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
/*
* Finish a fork operation, with process p2 nearly set up.
@@ -80,8 +83,17 @@ cpu_fork(p1, p2, stack, stacksize)
extern struct pcb *curpcb;
extern void proc_trampoline(), child_return();
- /* Sync curpcb (which is presumably p1's PCB) and copy it to p2. */
- savectx(curpcb);
+ p2->p_md.md_flags = p1->p_md.md_flags;
+
+ /* Copy pcb from proc p1 to p2. */
+ if (p1 == curproc) {
+ /* Sync the PCB before we copy it. */
+ savectx(curpcb);
+ }
+#ifdef DIAGNOSTIC
+ else if (p1 != &proc0)
+ panic("cpu_fork: curproc");
+#endif
*pcb = p1->p_addr->u_pcb;
/*
@@ -92,15 +104,14 @@ cpu_fork(p1, p2, stack, stacksize)
p2->p_md.md_regs = (int *)tf;
*tf = *(struct trapframe *)p1->p_md.md_regs;
- /*
+ /*
* If specified, give the child a different stack.
*/
if (stack != NULL)
tf->tf_regs[15] = (u_int)stack + stacksize;
- sf = (struct switchframe *)tf - 1;
+ sf = (struct switchframe *)tf - 1;
sf->sf_pc = (u_int)proc_trampoline;
-
pcb->pcb_regs[6] = (int)child_return; /* A2 */
pcb->pcb_regs[7] = (int)p2; /* A3 */
pcb->pcb_regs[11] = (int)sf; /* SSP */
@@ -130,7 +141,11 @@ cpu_exit(p)
{
(void) splimp();
+#if defined(UVM)
+ uvmexp.swtch++;
+#else
cnt.v_swtch++;
+#endif
switch_exit(p);
/* NOTREACHED */
}
@@ -198,8 +213,9 @@ pagemove(from, to, size)
* kernel VA space at `vaddr'. Read/write and cache-inhibit status
* are specified by `prot'.
*/
+void
physaccess(vaddr, paddr, size, prot)
- void *vaddr, *paddr;
+ caddr_t vaddr, paddr;
register int size, prot;
{
register pt_entry_t *pte;
@@ -214,6 +230,7 @@ physaccess(vaddr, paddr, size, prot)
TBIAS();
}
+void
physunaccess(vaddr, size)
caddr_t vaddr;
register int size;
@@ -247,6 +264,7 @@ setredzone(pte, vaddr)
/*
* Convert kernel VA to physical address
*/
+int
kvtop(addr)
caddr_t addr;
{
@@ -286,7 +304,11 @@ vmapbuf(bp, siz)
off = (int)addr & PGOFSET;
p = bp->b_proc;
npf = btoc(round_page(bp->b_bcount + off));
+#if defined(UVM)
+ kva = uvm_km_valloc_wait(phys_map, ctob(npf));
+#else
kva = kmem_alloc_wait(phys_map, ctob(npf));
+#endif
bp->b_data = (caddr_t)(kva + off);
while (npf--) {
if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
@@ -316,7 +338,11 @@ vunmapbuf(bp, siz)
addr = bp->b_data;
npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
kva = (vm_offset_t)((int)addr & ~PGOFSET);
+#if defined(UVM)
+ uvm_km_free_wakeup(phys_map, kva, ctob(npf));
+#else
kmem_free_wakeup(phys_map, kva, ctob(npf));
+#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}
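
The vmapbuf()/vunmapbuf() hunks above only swap the allocator
(uvm_km_valloc_wait vs. kmem_alloc_wait); the surrounding page arithmetic is
unchanged.  Below is a userspace model of that arithmetic, with PAGE_SIZE,
btoc() and ctob() as stand-ins for the kernel macros and no actual mapping
performed:

/*
 * The buffer may start at any offset within a page, so the mapping covers
 * round_page(bcount + offset) bytes and user pointers are rebased onto
 * kva + offset.
 */
#include <stdio.h>

#define PAGE_SIZE	4096
#define PGOFSET		(PAGE_SIZE - 1)
#define round_page(x)	(((x) + PGOFSET) & ~(unsigned long)PGOFSET)
#define btoc(x)		((unsigned long)(x) >> 12)	/* bytes to pages */
#define ctob(x)		((unsigned long)(x) << 12)	/* pages to bytes */

int
main(void)
{
	unsigned long addr = 0x20001234;	/* user buffer address */
	unsigned long bcount = 10000;		/* transfer length */
	unsigned long kva = 0xf0100000;		/* pretend phys_map allocation */

	unsigned long off = addr & PGOFSET;
	unsigned long npf = btoc(round_page(bcount + off));

	printf("map %lu pages (%lu bytes), buffer at kva+%#lx = %#lx\n",
	    npf, ctob(npf), off, kva + off);
	return 0;
}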