path: root/sys/arch
author    Steve Murphree <smurph@cvs.openbsd.org>    2001-02-01 03:38:24 +0000
committer Steve Murphree <smurph@cvs.openbsd.org>    2001-02-01 03:38:24 +0000
commit    8e8d908c6c5754fd0205eb089e6c5822f92d4473 (patch)
tree      29b74cfdddc396e8a0985b07dd1affefefd062bf /sys/arch
parent    be6f95e73cf569c2b374d15432a35d294774c196 (diff)
Major changes to get MVME188 working. More header and code cleanups. The
kernel is tested on MVME188A/2P256 and MVME188A/1P64.
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/mvme88k/conf/GENERIC | 3
-rw-r--r--  sys/arch/mvme88k/ddb/db_interface.c | 23
-rw-r--r--  sys/arch/mvme88k/dev/bugio.c | 65
-rw-r--r--  sys/arch/mvme88k/dev/busswitch.c | 4
-rw-r--r--  sys/arch/mvme88k/dev/cl.c | 34
-rw-r--r--  sys/arch/mvme88k/dev/clock.c | 251
-rw-r--r--  sys/arch/mvme88k/dev/dart.c | 1684
-rw-r--r--  sys/arch/mvme88k/dev/if_ve.c | 49
-rw-r--r--  sys/arch/mvme88k/dev/sclock.c | 182
-rw-r--r--  sys/arch/mvme88k/dev/siop.c | 6
-rw-r--r--  sys/arch/mvme88k/dev/syscon.c | 114
-rw-r--r--  sys/arch/mvme88k/dev/sysconreg.h | 72
-rw-r--r--  sys/arch/mvme88k/dev/vme.c | 143
-rw-r--r--  sys/arch/mvme88k/dev/vme.h | 63
-rw-r--r--  sys/arch/mvme88k/dev/vs.c | 1674
-rw-r--r--  sys/arch/mvme88k/dev/vsdma.c | 102
-rw-r--r--  sys/arch/mvme88k/dev/vsreg.h | 80
-rw-r--r--  sys/arch/mvme88k/dev/vsvar.h | 104
-rw-r--r--  sys/arch/mvme88k/include/asm.h | 15
-rw-r--r--  sys/arch/mvme88k/include/asm_macro.h | 39
-rw-r--r--  sys/arch/mvme88k/include/board.h | 64
-rw-r--r--  sys/arch/mvme88k/include/m88100.h | 28
-rw-r--r--  sys/arch/mvme88k/include/mvme188.h | 437
-rw-r--r--  sys/arch/mvme88k/include/mvme1x7.h | 54
-rw-r--r--  sys/arch/mvme88k/include/param.h | 111
-rw-r--r--  sys/arch/mvme88k/include/reg.h | 4
-rw-r--r--  sys/arch/mvme88k/include/trap.h | 62
-rw-r--r--  sys/arch/mvme88k/mvme88k/cmmu.c | 90
-rw-r--r--  sys/arch/mvme88k/mvme88k/eh.S | 157
-rw-r--r--  sys/arch/mvme88k/mvme88k/genassym.c | 33
-rw-r--r--  sys/arch/mvme88k/mvme88k/locore.S | 8
-rw-r--r--  sys/arch/mvme88k/mvme88k/locore_asm_routines.S | 39
-rw-r--r--  sys/arch/mvme88k/mvme88k/locore_c_routines.c | 556
-rw-r--r--  sys/arch/mvme88k/mvme88k/m18x_cmmu.c | 67
-rw-r--r--  sys/arch/mvme88k/mvme88k/machdep.c | 237
-rw-r--r--  sys/arch/mvme88k/mvme88k/pmap.c | 5981
-rw-r--r--  sys/arch/mvme88k/mvme88k/pmap_table.c | 85
-rw-r--r--  sys/arch/mvme88k/mvme88k/process_machdep.c | 50
-rw-r--r--  sys/arch/mvme88k/mvme88k/trap.c | 3081
-rw-r--r--  sys/arch/mvme88k/stand/bugcrt/Makefile | 4
-rw-r--r--  sys/arch/mvme88k/stand/bugcrt/crt.c | 6
41 files changed, 8032 insertions, 7829 deletions
diff --git a/sys/arch/mvme88k/conf/GENERIC b/sys/arch/mvme88k/conf/GENERIC
index 9bf6bb19b25..72d2d7a9541 100644
--- a/sys/arch/mvme88k/conf/GENERIC
+++ b/sys/arch/mvme88k/conf/GENERIC
@@ -1,4 +1,4 @@
-# $OpenBSD: GENERIC,v 1.12 2001/01/14 20:25:22 smurph Exp $
+# $OpenBSD: GENERIC,v 1.13 2001/02/01 03:38:11 smurph Exp $
machine mvme88k
@@ -12,6 +12,7 @@ option UVM # use the UVM virtual memory system
option FFS_SOFTUPDATES # Soft Updates
option "NCPUS=1" # number of CPUs supported (max 4)
option BUGMAP # use the Bug ROM VME mappings
+#option DEBUG # print debugging statements
maxusers 64
diff --git a/sys/arch/mvme88k/ddb/db_interface.c b/sys/arch/mvme88k/ddb/db_interface.c
index 99483202d57..ff92ac28f2f 100644
--- a/sys/arch/mvme88k/ddb/db_interface.c
+++ b/sys/arch/mvme88k/ddb/db_interface.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_interface.c,v 1.4 1999/02/09 06:36:24 smurph Exp $ */
+/* $OpenBSD: db_interface.c,v 1.5 2001/02/01 03:38:12 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -42,6 +42,7 @@
#include <machine/db_machdep.h> /* local ddb stuff */
#include <machine/bug.h> /* bug routines */
#include <machine/mmu.h>
+#include <machine/cpu_number.h>
#include <ddb/db_command.h>
#include <ddb/db_sym.h>
@@ -705,12 +706,11 @@ m88k_db_noise(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
static void
m88k_db_translate(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
{
-#if 0
+#if 1
char c;
int verbose_flag = 0;
int supervisor_flag = 1;
int wanthelp = 0;
-
if (!have_addr)
wanthelp = 1;
else {
@@ -742,9 +742,21 @@ m88k_db_translate(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
db_printf(" u - use cmmu's user area pointer\n");
return;
}
+ cmmu_show_translation(addr, supervisor_flag, verbose_flag, -1);
+#endif
+ return;
+}
- cmmu_show_translation(addr, supervisor_flag, verbose_flag);
-#endif /* 0 */
+static void
+m88k_db_cmmucfg(db_expr_t addr, int have_addr, int count, char *modif)
+{
+ if (modif && *modif) {
+ db_printf("usage: mach cmmucfg\n");
+ return;
+ }
+
+ cmmu_dump_config();
+ return;
}
void cpu_interrupt_to_db(int cpu_no)
@@ -771,6 +783,7 @@ struct db_command db_machine_cmds[] =
{"regs", m88k_db_registers, 0, 0},
{"searchframe", m88k_db_frame_search, 0, 0},
{"translate", m88k_db_translate, 0, 0},
+ {"cmmucfg", m88k_db_cmmucfg, 0, 0},
{"where", m88k_db_where, 0, 0},
{(char *) 0,}
};
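(For reference: the entry added above is reached through ddb's machine-dependent "mach" command prefix, exactly as the usage string in the new handler spells out; the output produced by cmmu_dump_config() is not part of this diff, so only the invocation is shown.)

	ddb> mach cmmucfg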
diff --git a/sys/arch/mvme88k/dev/bugio.c b/sys/arch/mvme88k/dev/bugio.c
index aee175f7206..24346d014e0 100644
--- a/sys/arch/mvme88k/dev/bugio.c
+++ b/sys/arch/mvme88k/dev/bugio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bugio.c,v 1.4 1999/09/27 18:43:21 smurph Exp $ */
+/* $OpenBSD: bugio.c,v 1.5 2001/02/01 03:38:13 smurph Exp $ */
/* Copyright (c) 1998 Steve Murphree, Jr. */
#include <machine/bugio.h>
@@ -21,6 +21,7 @@
#define RTC_RD "0x0053"
#define RETURN "0x0063"
#define BRD_ID "0x0070"
+#define FORKMPU "0x0100"
#define BUGTRAP "0x01F0"
int ossr0, ossr1, ossr2, ossr3;
@@ -65,13 +66,13 @@ char
buginchr(void)
{
register int cc;
- int ret;
+ int ret;
BUGCTXT();
asm volatile ("or r9,r0," INCHR);
asm volatile ("tb0 0,r0,0x1F0");
asm volatile ("or %0,r0,r2" : "=r" (cc) : );
- ret = cc;
- OSCTXT();
+ ret = cc;
+ OSCTXT();
return ((char)ret & 0xFF);
}
@@ -83,7 +84,7 @@ bugoutchr(unsigned char c)
bugpcrlf();
return;
}
-
+
BUGCTXT();
asm("or r2,r0,%0" : : "r" (cc));
@@ -97,7 +98,7 @@ bugoutchr(unsigned char c)
buginstat(void)
{
- int ret;
+ register int ret;
BUGCTXT();
asm volatile ("or r9,r0," INSTAT);
@@ -131,7 +132,7 @@ bugdskrd(struct bugdisk_io *arg)
BUGCTXT();
asm("or r9,r0, " DSKRD);
- asm("tb0 0,r0,0x1F0");
+ asm("tb0 0,r0,0x1F0");
asm("or %0,r0,r2" : "=r" (ret) : );
OSCTXT();
@@ -145,7 +146,7 @@ bugdskwr(struct bugdisk_io *arg)
int ret;
BUGCTXT();
asm("or r9,r0, " DSKWR);
- asm("tb0 0,r0,0x1F0");
+ asm("tb0 0,r0,0x1F0");
asm("or %0,r0,r2" : "=r" (ret) : );
OSCTXT();
return ((ret&0x4) == 0x4 ? 1 : 0);
@@ -168,6 +169,14 @@ bugdelay(int delay)
OSCTXT();
}
+bugfork(int cpu, unsigned address)
+{
+ BUGCTXT();
+ asm("or r9,r0, " FORKMPU);
+ asm("tb0 0,r0,0x1F0");
+ OSCTXT();
+}
+
bugreturn(void)
{
BUGCTXT();
@@ -196,26 +205,26 @@ bugnetctrl(struct bugniocall *niocall)
/* OSCTXT();*/
}
-typedef struct netcnfgp {
- unsigned int magic;
- unsigned int nodemem;
- unsigned int bfla;
- unsigned int bfea;
- unsigned int bfed;
- unsigned int bfl;
- unsigned int bfbo;
- unsigned int tbuffer;
- unsigned char cipa[4];
- unsigned char sipa[4];
- unsigned char netmask[4];
- unsigned char broadcast[4];
- unsigned char gipa[4];
- unsigned char bootp_retry;
- unsigned char tftp_retry;
- unsigned char bootp_ctl;
- unsigned char cnfgp_ctl;
- unsigned char filename[64];
- unsigned char argfname[64];
+typedef struct netcnfgp {
+ unsigned int magic;
+ unsigned int nodemem;
+ unsigned int bfla;
+ unsigned int bfea;
+ unsigned int bfed;
+ unsigned int bfl;
+ unsigned int bfbo;
+ unsigned int tbuffer;
+ unsigned char cipa[4];
+ unsigned char sipa[4];
+ unsigned char netmask[4];
+ unsigned char broadcast[4];
+ unsigned char gipa[4];
+ unsigned char bootp_retry;
+ unsigned char tftp_retry;
+ unsigned char bootp_ctl;
+ unsigned char cnfgp_ctl;
+ unsigned char filename[64];
+ unsigned char argfname[64];
} NETCNFGP;
struct bugniotcall {
diff --git a/sys/arch/mvme88k/dev/busswitch.c b/sys/arch/mvme88k/dev/busswitch.c
index 71eb44d9937..e6637439613 100644
--- a/sys/arch/mvme88k/dev/busswitch.c
+++ b/sys/arch/mvme88k/dev/busswitch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: busswitch.c,v 1.1 1999/09/27 18:43:22 smurph Exp $ */
+/* $OpenBSD: busswitch.c,v 1.2 2001/02/01 03:38:13 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
@@ -134,7 +134,7 @@ busswitch_scan(parent, child, args)
bzero(&oca, sizeof oca);
oca.ca_offset = cf->cf_loc[0];
oca.ca_ipl = cf->cf_loc[1];
- if ((oca.ca_offset != (void*)-1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) {
+ if (((int)oca.ca_offset != -1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) {
oca.ca_vaddr = sc->sc_vaddr + oca.ca_offset;
oca.ca_paddr = sc->sc_paddr + oca.ca_offset;
} else {
diff --git a/sys/arch/mvme88k/dev/cl.c b/sys/arch/mvme88k/dev/cl.c
index be0684214b6..06aec3f0659 100644
--- a/sys/arch/mvme88k/dev/cl.c
+++ b/sys/arch/mvme88k/dev/cl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cl.c,v 1.6 2000/03/26 23:32:00 deraadt Exp $ */
+/* $OpenBSD: cl.c,v 1.7 2001/02/01 03:38:14 smurph Exp $ */
/*
* Copyright (c) 1995 Dale Rahn. All rights reserved.
@@ -895,25 +895,25 @@ clstop(tp, flag)
int
clcnprobe(cp)
- struct consdev *cp;
+struct consdev *cp;
{
/* always there ? */
/* serial major */
- int maj;
-
- /* bomb if it'a a MVME188 */
- if (cputyp == CPU_188){
- cp->cn_pri = CN_DEAD;
- return 0;
- }
- /* locate the major number */
- for (maj = 0; maj < nchrdev; maj++)
- if (cdevsw[maj].d_open == clopen)
- break;
- cp->cn_dev = makedev (maj, 0);
- cp->cn_pri = CN_NORMAL;
-
- return 1;
+ int maj;
+
+ /* bomb if it'a a MVME188 */
+ if (cputyp == CPU_188) {
+ cp->cn_pri = CN_DEAD;
+ return 0;
+ }
+ /* locate the major number */
+ for (maj = 0; maj < nchrdev; maj++)
+ if (cdevsw[maj].d_open == clopen)
+ break;
+ cp->cn_dev = makedev (maj, 0);
+ cp->cn_pri = CN_NORMAL;
+
+ return 1;
}
int
diff --git a/sys/arch/mvme88k/dev/clock.c b/sys/arch/mvme88k/dev/clock.c
index 2609694ac5d..06295289264 100644
--- a/sys/arch/mvme88k/dev/clock.c
+++ b/sys/arch/mvme88k/dev/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.6 1999/09/27 18:43:23 smurph Exp $ */
+/* $OpenBSD: clock.c,v 1.7 2001/02/01 03:38:14 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1995 Theo de Raadt
@@ -88,10 +88,13 @@
#include <sys/gmon.h>
#endif
+#include <machine/asm_macro.h> /* for stack_pointer() */
+#include <machine/board.h> /* for register defines */
#include <machine/psl.h>
#include <machine/autoconf.h>
#include <machine/bugio.h>
#include <machine/cpu.h>
+#include <machine/mmu.h> /* DMA_CACHE_SYNC, etc... */
#include "pcctwo.h"
#if NPCCTWO > 0
#include <mvme88k/dev/pcctworeg.h>
@@ -107,17 +110,17 @@ int timerok = 0;
u_long delay_factor = 1;
-static int clockmatch __P((struct device *, void *, void *));
-static void clockattach __P((struct device *, struct device *, void *));
+static int clockmatch __P((struct device *, void *, void *));
+static void clockattach __P((struct device *, struct device *, void *));
-void sbc_initclock(void);
-void m188_initclock(void);
-void m188_timer_init __P((unsigned));
+void sbc_initclock(void);
+void m188_initclock(void);
+void m188_timer_init __P((unsigned));
struct clocksoftc {
- struct device sc_dev;
- struct intrhand sc_profih;
- struct intrhand sc_statih;
+ struct device sc_dev;
+ struct intrhand sc_profih;
+ struct intrhand sc_statih;
};
struct cfattach clock_ca = {
@@ -128,10 +131,10 @@ struct cfdriver clock_cd = {
NULL, "clock", DV_DULL, 0
};
-int sbc_clockintr __P((void *));
-int sbc_statintr __P((void *));
-int m188_clockintr __P((void *));
-int m188_statintr __P((void *));
+int sbc_clockintr __P((void *));
+int sbc_statintr __P((void *));
+int m188_clockintr __P((void *));
+int m188_statintr __P((void *));
int clockbus;
u_char prof_reset;
@@ -142,8 +145,8 @@ u_char prof_reset;
*/
int
clockmatch(parent, vcf, args)
- struct device *parent;
- void *vcf, *args;
+struct device *parent;
+void *vcf, *args;
{
register struct confargs *ca = args;
register struct cfdata *cf = vcf;
@@ -160,57 +163,55 @@ clockmatch(parent, vcf, args)
ca->ca_ipl = IPL_CLOCK;
/* set size to 0 - see pcctwo.c:match for details */
ca->ca_len = 0;
-
-
return (1);
}
void
clockattach(parent, self, args)
- struct device *parent, *self;
- void *args;
+struct device *parent, *self;
+void *args;
{
struct confargs *ca = args;
struct clocksoftc *sc = (struct clocksoftc *)self;
clockbus = ca->ca_bustype;
-
- switch (clockbus) {
-#if NPCCTWO > 0
- case BUS_PCCTWO:
- sc->sc_profih.ih_fn = sbc_clockintr;
- sc->sc_profih.ih_arg = 0;
- sc->sc_profih.ih_wantframe = 1;
- sc->sc_profih.ih_ipl = ca->ca_ipl;
- prof_reset = ca->ca_ipl | PCC2_IRQ_IEN | PCC2_IRQ_ICLR;
- pcctwointr_establish(PCC2V_TIMER1, &sc->sc_profih);
- mdfp.clock_init_func = &sbc_initclock;
- printf(": VME1x7");
- break;
+
+ switch (clockbus) {
+#if NPCCTWO > 0
+ case BUS_PCCTWO:
+ sc->sc_profih.ih_fn = sbc_clockintr;
+ sc->sc_profih.ih_arg = 0;
+ sc->sc_profih.ih_wantframe = 1;
+ sc->sc_profih.ih_ipl = ca->ca_ipl;
+ prof_reset = ca->ca_ipl | PCC2_IRQ_IEN | PCC2_IRQ_ICLR;
+ pcctwointr_establish(PCC2V_TIMER1, &sc->sc_profih);
+ mdfp.clock_init_func = &sbc_initclock;
+ printf(": VME1x7");
+ break;
#endif /* NPCCTWO */
-#if NSYSCON > 0
- case BUS_SYSCON:
- sc->sc_profih.ih_fn = m188_clockintr;
- sc->sc_profih.ih_arg = 0;
- sc->sc_profih.ih_wantframe = 1;
- sc->sc_profih.ih_ipl = ca->ca_ipl;
- sysconintr_establish(SYSCV_TIMER1, &sc->sc_profih);
- mdfp.clock_init_func = &m188_initclock;
- printf(": VME188");
- break;
+#if NSYSCON > 0 && defined(MVME188)
+ case BUS_SYSCON:
+ sc->sc_profih.ih_fn = m188_clockintr;
+ sc->sc_profih.ih_arg = 0;
+ sc->sc_profih.ih_wantframe = 1;
+ sc->sc_profih.ih_ipl = ca->ca_ipl;
+ sysconintr_establish(SYSCV_TIMER1, &sc->sc_profih);
+ mdfp.clock_init_func = &m188_initclock;
+ printf(": VME188");
+ break;
#endif /* NSYSCON */
- }
+ }
printf("\n");
}
-#if NPCCTWO > 0
+#if NPCCTWO > 0
void
sbc_initclock(void)
{
register int statint, minint;
-#ifdef DEBUG
- printf("SBC clock init\n");
+#ifdef CLOCK_DEBUG
+ printf("SBC clock init\n");
#endif
if (1000000 % hz) {
printf("cannot get %d Hz clock; using 100 Hz\n", hz);
@@ -223,7 +224,7 @@ sbc_initclock(void)
sys_pcc2->pcc2_t1cmp = pcc2_timer_us2lim(tick);
sys_pcc2->pcc2_t1count = 0;
sys_pcc2->pcc2_t1ctl = PCC2_TCTL_CEN | PCC2_TCTL_COC |
- PCC2_TCTL_COVF;
+ PCC2_TCTL_COVF;
sys_pcc2->pcc2_t1irq = prof_reset;
}
@@ -233,74 +234,99 @@ sbc_initclock(void)
*/
int
sbc_clockintr(arg)
- void *arg;
+void *arg;
{
sys_pcc2->pcc2_t1irq = prof_reset;
hardclock(arg);
#include "bugtty.h"
#if NBUGTTY > 0
- bugtty_chkinput();
+/* bugtty_chkinput();*/
#endif /* NBUGTTY */
timerok = 1;
- return (1);
+ return (1);
}
#endif /* NPCCTWO */
-
+int
delay(us)
- register int us;
+register int us;
{
volatile register int c;
unsigned long st;
/*
* We use the vme system controller for the delay clock.
* Do not go to the real timer until vme device is present.
- * Or, in the case of MVME188, not at all.
+ * Or, in the case of MVME188, not at all.
*/
if (sys_vme2 == NULL || cputyp == CPU_188) {
- c = 3 * us;
- while (--c > 0);
- return(0);
+ c = 3 * us;
+ while (--c > 0);
+ return (0);
}
- sys_vme2->vme2_irql1 |= (0 << VME2_IRQL1_TIC1SHIFT);
- sys_vme2->vme2_t1count = 0;
- sys_vme2->vme2_tctl |= (VME2_TCTL1_CEN | VME2_TCTL1_COVF);
+ sys_vme2->vme2_irql1 |= (0 << VME2_IRQL1_TIC1SHIFT);
+ sys_vme2->vme2_t1count = 0;
+ sys_vme2->vme2_tctl |= (VME2_TCTL1_CEN | VME2_TCTL1_COVF);
- while (sys_vme2->vme2_t1count < us)
+ while (sys_vme2->vme2_t1count < us)
;
- sys_vme2->vme2_tctl &= ~(VME2_TCTL1_CEN | VME2_TCTL1_COVF);
+ sys_vme2->vme2_tctl &= ~(VME2_TCTL1_CEN | VME2_TCTL1_COVF);
return (0);
}
#if NSYSCON > 0
int counter = 0;
-
+#define IST
int
m188_clockintr(arg)
- void *arg;
+void *arg;
{
- volatile int tmp;
- /* acknowledge the timer interrupt */
- /* clear the counter/timer output OP3 while we program the DART */
- *((volatile int *) DART_OPCR) = 0x00;
+ volatile int tmp;
+ volatile int *dti_stop = (volatile int *)DART_STOPC;
+ volatile int *dti_start = (volatile int *)DART_STARTC;
+ volatile int *ist = (volatile int *)MVME188_IST;
+ register unsigned long sp;
+
+ /* acknowledge the timer interrupt */
+ dma_cachectl(0xFFF82000, 0x1000, DMA_CACHE_SYNC_INVAL);
+ tmp = *dti_stop;
+
+
+ /* check kernel stack for overflow */
+ sp = stack_pointer();
+ if (sp < UADDR + NBPG && sp > UADDR) {
+ if (*ist & DTI_BIT) {
+ printf("DTI not clearing!\n");
+ }
+ printf("kernel stack @ 0x%8x\n", sp);
+ panic("stack overflow eminant!");
+ }
+
+#if 0
+ /* clear the counter/timer output OP3 while we program the DART */
+ *((volatile int *) DART_OPCR) = 0x00;
- /* do the stop counter/timer command */
- tmp = *((volatile int *) DART_STOPC);
+ /* do the stop counter/timer command */
+ tmp = *((volatile int *) DART_STOPC);
- /* set counter/timer to counter mode, clock/16 */
- *((volatile int *) DART_ACR) = 0x30;
-
- *((volatile int *) DART_CTUR) = counter / 256; /* set counter MSB */
- *((volatile int *) DART_CTLR) = counter % 256; /* set counter LSB */
- *((volatile int *) DART_IVR) = SYSCV_TIMER1; /* set interrupt vec */
+ /* set counter/timer to counter mode, clock/16 */
+ *((volatile int *) DART_ACR) = 0x30;
+ *((volatile int *) DART_CTUR) = counter / 256; /* set counter MSB */
+ *((volatile int *) DART_CTLR) = counter % 256; /* set counter LSB */
+ *((volatile int *) DART_IVR) = SYSCV_TIMER1; /* set interrupt vec */
+#endif
hardclock(arg);
#include "bugtty.h"
#if NBUGTTY > 0
- bugtty_chkinput();
+/* bugtty_chkinput(); */
#endif /* NBUGTTY */
- /* give the start counter/timer command */
- tmp = *((volatile int *) DART_STARTC);
- *((volatile int *) DART_OPCR) = 0x04;
+ /* give the start counter/timer command */
+ tmp = *dti_start;
+#if 0
+ *((volatile int *) DART_OPCR) = 0x04;
+#endif
+ if (*ist & DTI_BIT) {
+ printf("DTI not clearing!\n");
+ }
return (1);
}
@@ -309,8 +335,8 @@ m188_initclock(void)
{
register int statint, minint;
-#ifdef DEBUG
- printf("VME188 clock init\n");
+#ifdef CLOCK_DEBUG
+ printf("VME188 clock init\n");
#endif
if (1000000 % hz) {
printf("cannot get %d Hz clock; using 100 Hz\n", hz);
@@ -323,36 +349,39 @@ m188_initclock(void)
void
m188_timer_init(unsigned period)
{
- int imr;
-
- /* make sure the counter range is proper. */
- if ( period < 9 )
- counter = 2;
- else if ( period > 284421 )
- counter = 65535;
- else
- counter = period / 4.34;
-#ifdef DEBUG
- printf("tick == %d, period == %d\n", tick, period);
- printf("timer will interrupt every %d usec\n", (int) (counter * 4.34));
+ int imr;
+ dma_cachectl(0xFFF82000, 0x1000, DMA_CACHE_SYNC_INVAL);
+
+ /* make sure the counter range is proper. */
+ if ( period < 9 )
+ counter = 2;
+ else if ( period > 284421 )
+ counter = 65535;
+ else
+ counter = period / 4.34;
+#ifdef CLOCK_DEBUG
+ printf("tick == %d, period == %d\n", tick, period);
+ printf("timer will interrupt every %d usec\n", (int) (counter * 4.34));
#endif
- /* clear the counter/timer output OP3 while we program the DART */
- *((volatile int *) DART_OPCR) = 0x00;
-
- /* do the stop counter/timer command */
- imr = *((volatile int *) DART_STOPC);
-
- /* set counter/timer to counter mode, clock/16 */
- *((volatile int *) DART_ACR) = 0x30;
-
- *((volatile int *) DART_CTUR) = counter / 256; /* set counter MSB */
- *((volatile int *) DART_CTLR) = counter % 256; /* set counter LSB */
- *((volatile int *) DART_IVR) = SYSCV_TIMER1; /* set interrupt vec */
- /* give the start counter/timer command */
- /* (yes, this is supposed to be a read) */
- imr = *((volatile int *) DART_STARTC);
-
- /* set the counter/timer output OP3 */
- *((volatile int *) DART_OPCR) = 0x04;
+ /* clear the counter/timer output OP3 while we program the DART */
+ *((volatile int *) DART_OPCR) = 0x00;
+
+ /* do the stop counter/timer command */
+ imr = *((volatile int *) DART_STOPC);
+
+ /* set counter/timer to counter mode, clock/16 */
+ *((volatile int *) DART_ACR) = 0x30;
+
+ *((volatile int *) DART_CTUR) = counter / 256; /* set counter MSB */
+ *((volatile int *) DART_CTLR) = counter % 256; /* set counter LSB */
+ *((volatile int *) DART_IVR) = SYSCV_TIMER1; /* set interrupt vec */
+
+ /* give the start counter/timer command */
+ /* (yes, this is supposed to be a read) */
+ imr = *((volatile int *) DART_STARTC);
+
+ /* set the counter/timer output OP3 */
+ *((volatile int *) DART_OPCR) = 0x04;
}
#endif /* NSYSCON */
+
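A quick note on the arithmetic in m188_timer_init() above: the counter/timer runs in clock/16 mode (ACR = 0x30), and with the customary 3.6864 MHz DUART clock (the clock frequency is an assumption; it is not stated in this diff) one count lasts 16 / 3686400 Hz, roughly 4.34 usec, which is where the "counter = period / 4.34" conversion and the 2..65535 clamp come from (65535 counts is about 284421 usec). A minimal sketch of the same conversion, using a hypothetical helper name:

	/*
	 * Sketch only: mirrors the period -> counter conversion done in
	 * m188_timer_init(), assuming a 3.6864 MHz DUART clock divided
	 * by 16, i.e. roughly 4.34 usec per count of the 16-bit counter.
	 */
	unsigned int
	dti_period_to_counter(unsigned int period_us)	/* hypothetical name */
	{
		if (period_us < 9)
			return (2);		/* shortest safe count */
		if (period_us > 284421)
			return (65535);		/* 16-bit counter maximum */
		return (period_us / 4.34);	/* ~4.34 usec per count */
	}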
diff --git a/sys/arch/mvme88k/dev/dart.c b/sys/arch/mvme88k/dev/dart.c
index 3592c80186a..c46ae78a9c6 100644
--- a/sys/arch/mvme88k/dev/dart.c
+++ b/sys/arch/mvme88k/dev/dart.c
@@ -46,32 +46,36 @@
#include <machine/psl.h>
#define spldart() splx(IPL_TTY)
-#if DDB
- #include <machine/db_machdep.h> /* for details on entering kdb */
-extern unsigned char ddb_break_mode, ddb_break_char;
+#if defined(DDB)
+#include <machine/db_machdep.h> /* for details on entering kdb */
+#define DDB_ENTER_BREAK 0x1
+#define DDB_ENTER_CHAR 0x2
+unsigned char ddb_break_mode = DDB_ENTER_BREAK | DDB_ENTER_CHAR;
+unsigned char ddb_break_char = '!';
#endif
#if DEBUG
- #define dprintf(stuff) /*printf stuff*/
+ int dart_debug = 0;
+ #define dprintf(stuff) if (dart_debug) printf stuff
#else
#define dprintf(stuff)
#endif
struct dart_info {
- struct tty *tty;
- u_char dart_swflags;
- struct simplelock t_lock;
+ struct tty *tty;
+ u_char dart_swflags;
+ struct simplelock t_lock;
};
struct dartsoftc {
- struct device sc_dev;
- struct evcnt sc_intrcnt;
- union dartreg *dart_reg;
- struct dart_info sc_dart[2];
- struct intrhand sc_ih;
- int sc_flags;
- int sc_ipl;
- int sc_vec;
+ struct device sc_dev;
+ struct evcnt sc_intrcnt;
+ union dartreg *dart_reg;
+ struct dart_info sc_dart[2];
+ struct intrhand sc_ih;
+ int sc_flags;
+ int sc_ipl;
+ int sc_vec;
};
int dartmatch __P((struct device *parent, void *self, void *aux));
@@ -116,32 +120,33 @@ struct dart_sv_reg dart_sv_reg;
/* speed tables */
int dart_speeds[] =
{
- 0, /* 0 baud, special HUP condition */
- NOBAUD, /* 50 baud, not implemented */
- BD75, /* 75 baud */
- BD110, /* 110 baud */
- BD134, /* 134.5 baud */
- BD150, /* 150 baud */
- NOBAUD, /* 200 baud, not implemented */
- BD300, /* 300 baud */
- BD600, /* 600 baud */
- BD1200, /* 1200 baud */
- BD1800, /* 1800 baud */
- BD2400, /* 2400 baud */
- BD4800, /* 4800 baud */
- BD9600, /* 9600 baud */
- BD19200, /* 19200 baud */
- NOBAUD /* 38400 baud, not implemented */
+ 0, /* 0 baud, special HUP condition */
+ NOBAUD, /* 50 baud, not implemented */
+ BD75, /* 75 baud */
+ BD110, /* 110 baud */
+ BD134, /* 134.5 baud */
+ BD150, /* 150 baud */
+ NOBAUD, /* 200 baud, not implemented */
+ BD300, /* 300 baud */
+ BD600, /* 600 baud */
+ BD1200, /* 1200 baud */
+ BD1800, /* 1800 baud */
+ BD2400, /* 2400 baud */
+ BD4800, /* 4800 baud */
+ BD9600, /* 9600 baud */
+ BD19200, /* 19200 baud */
+ NOBAUD /* 38400 baud, not implemented */
};
-struct tty * darttty(dev)
+struct tty*
+darttty(dev)
dev_t dev;
{
- int port;
- struct dartsoftc *sc;
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- port = DART_PORT(dev);
- return sc->sc_dart[port].tty;
+ int port;
+ struct dartsoftc *sc;
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ port = DART_PORT(dev);
+ return sc->sc_dart[port].tty;
}
int
@@ -149,19 +154,19 @@ dartmatch(parent, vcf, args)
struct device *parent;
void *vcf, *args;
{
- struct cfdata *cf = vcf;
- struct confargs *ca = args;
- union dartreg *addr;
-
- /* Don't match if wrong cpu */
- if (cputyp != CPU_188) return (0);
- ca->ca_vaddr = ca->ca_paddr; /* 1:1 */
- addr = (union dartreg *)ca->ca_vaddr;
- if (badvaddr(addr, 2) <= 0) {
- printf("==> syscon: failed address check.\n");
- return (0);
- }
- return (1);
+ struct cfdata *cf = vcf;
+ struct confargs *ca = args;
+ union dartreg *addr;
+
+ /* Don't match if wrong cpu */
+ if (cputyp != CPU_188) return (0);
+ ca->ca_vaddr = ca->ca_paddr; /* 1:1 */
+ addr = (union dartreg *)ca->ca_vaddr;
+ if (badvaddr(addr, 2) <= 0) {
+ printf("==> dart: failed address check.\n");
+ return (0);
+ }
+ return (1);
}
void
@@ -170,96 +175,96 @@ struct device *parent;
struct device *self;
void *aux;
{
- struct dartsoftc *sc = (struct dartsoftc *)self;
- struct confargs *ca = aux;
- int i;
- union dartreg *addr; /* pointer to DUART regs */
- union dart_pt_io *ptaddr; /* pointer to port regs */
- int port; /* port index */
-
- /* set up dual port memory and registers and init*/
- sc->dart_reg = (union dartreg *)ca->ca_vaddr;
- sc->sc_ipl = ca->ca_ipl;
- ca->ca_vec = SYSCV_SCC; /* hard coded vector */
- sc->sc_vec = ca->ca_vec;
-
- addr = sc->dart_reg;
-
- /* save standard initialization */
- dart_sv_reg.sv_mr1[A_PORT] = PARDIS | RXRTS | CL8;
- dart_sv_reg.sv_mr2[A_PORT] = /* TXCTS | */ SB1;
- dart_sv_reg.sv_csr[A_PORT] = BD9600;
- dart_sv_reg.sv_cr[A_PORT] = TXEN | RXEN;
-
- dart_sv_reg.sv_mr1[B_PORT] = PARDIS | RXRTS | CL8;
- dart_sv_reg.sv_mr2[B_PORT] = /* TXCTS | */ SB1;
- dart_sv_reg.sv_csr[B_PORT] = BD9600;
- dart_sv_reg.sv_cr[B_PORT] = TXEN | RXEN;
-
- dart_sv_reg.sv_acr = BDSET2 | CCLK16 | IPDCDIB | IPDCDIA;
-
- /* Start out with Tx and RX interrupts disabled */
- /* Enable input port change interrupt */
- dart_sv_reg.sv_imr = IIPCHG;
-
- dprintf(("dartattach: resetting port A\n"));
-
- /* reset port a */
- addr->write.wr_cra = RXRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_cra = TXRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_cra = ERRRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_cra = BRKINTRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_cra = MRRESET | TXDIS | RXDIS;
-
- dprintf(("dartattach: resetting port B\n"));
-
- /* reset port b */
- addr->write.wr_crb = RXRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_crb = TXRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_crb = ERRRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_crb = BRKINTRESET | TXDIS | RXDIS;
- DELAY_CR;
- addr->write.wr_crb = MRRESET | TXDIS | RXDIS;
- DELAY_CR;
-
- /* initialize ports */
- for (port = 0, ptaddr = (union dart_pt_io *)addr;
- port < MAXPORTS;
- port++, ptaddr++) {
- dprintf(("dartattach: init port %c\n", 'A' + port));
- ptaddr->write.wr_mr = dart_sv_reg.sv_mr1[port];
- ptaddr->write.wr_mr = dart_sv_reg.sv_mr2[port];
- ptaddr->write.wr_csr = dart_sv_reg.sv_csr[port];
- ptaddr->write.wr_cr = dart_sv_reg.sv_cr [port];
- }
-
- dprintf(("dartattach: init common regs\n"));
-
- /* initialize common register of a DUART */
- addr->write.wr_oprset = OPDTRA | OPRTSA | OPDTRB | OPRTSB;
-
- addr->write.wr_ctur = SLCTIM>>8;
- addr->write.wr_ctlr = SLCTIM & 0xFF;
- addr->write.wr_acr = dart_sv_reg.sv_acr;
- addr->write.wr_imr = dart_sv_reg.sv_imr;
- addr->write.wr_opcr = OPSET;
- addr->write.wr_ivr = sc->sc_vec;
-
- /* enable interrupts */
- sc->sc_ih.ih_fn = dartintr;
- sc->sc_ih.ih_arg = sc;
- sc->sc_ih.ih_ipl = ca->ca_ipl;
- sc->sc_ih.ih_wantframe = 0;
-
- intr_establish(ca->ca_vec, &sc->sc_ih);
- evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt);
+ struct dartsoftc *sc = (struct dartsoftc *)self;
+ struct confargs *ca = aux;
+ int i;
+ union dartreg *addr; /* pointer to DUART regs */
+ union dart_pt_io *ptaddr; /* pointer to port regs */
+ int port; /* port index */
+
+ /* set up dual port memory and registers and init*/
+ sc->dart_reg = (union dartreg *)ca->ca_vaddr;
+ sc->sc_ipl = ca->ca_ipl = IPL_TTY; /* always... hard coded ipl */
+ ca->ca_vec = SYSCV_SCC; /* hard coded vector */
+ sc->sc_vec = ca->ca_vec;
+
+ addr = sc->dart_reg;
+
+ /* save standard initialization */
+ dart_sv_reg.sv_mr1[A_PORT] = PARDIS | RXRTS | CL8;
+ dart_sv_reg.sv_mr2[A_PORT] = /* TXCTS | */ SB1;
+ dart_sv_reg.sv_csr[A_PORT] = BD9600;
+ dart_sv_reg.sv_cr[A_PORT] = TXEN | RXEN;
+
+ dart_sv_reg.sv_mr1[B_PORT] = PARDIS | RXRTS | CL8;
+ dart_sv_reg.sv_mr2[B_PORT] = /* TXCTS | */ SB1;
+ dart_sv_reg.sv_csr[B_PORT] = BD9600;
+ dart_sv_reg.sv_cr[B_PORT] = TXEN | RXEN;
+
+ dart_sv_reg.sv_acr = BDSET2 | CCLK16 | IPDCDIB | IPDCDIA;
+
+ /* Start out with Tx and RX interrupts disabled */
+ /* Enable input port change interrupt */
+ dart_sv_reg.sv_imr = IIPCHG;
+
+ dprintf(("dartattach: resetting port A\n"));
+
+ /* reset port a */
+ addr->write.wr_cra = RXRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_cra = TXRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_cra = ERRRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_cra = BRKINTRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_cra = MRRESET | TXDIS | RXDIS;
+
+ dprintf(("dartattach: resetting port B\n"));
+
+ /* reset port b */
+ addr->write.wr_crb = RXRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_crb = TXRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_crb = ERRRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_crb = BRKINTRESET | TXDIS | RXDIS;
+ DELAY_CR;
+ addr->write.wr_crb = MRRESET | TXDIS | RXDIS;
+ DELAY_CR;
+
+ /* initialize ports */
+ for (port = 0, ptaddr = (union dart_pt_io *)addr;
+ port < MAXPORTS;
+ port++, ptaddr++) {
+ dprintf(("dartattach: init port %c\n", 'A' + port));
+ ptaddr->write.wr_mr = dart_sv_reg.sv_mr1[port];
+ ptaddr->write.wr_mr = dart_sv_reg.sv_mr2[port];
+ ptaddr->write.wr_csr = dart_sv_reg.sv_csr[port];
+ ptaddr->write.wr_cr = dart_sv_reg.sv_cr [port];
+ }
+
+ dprintf(("dartattach: init common regs\n"));
+
+ /* initialize common register of a DUART */
+ addr->write.wr_oprset = OPDTRA | OPRTSA | OPDTRB | OPRTSB;
+
+ addr->write.wr_ctur = SLCTIM>>8;
+ addr->write.wr_ctlr = SLCTIM & 0xFF;
+ addr->write.wr_acr = dart_sv_reg.sv_acr;
+ addr->write.wr_imr = dart_sv_reg.sv_imr;
+ addr->write.wr_opcr = OPSET;
+ addr->write.wr_ivr = sc->sc_vec;
+
+ /* enable interrupts */
+ sc->sc_ih.ih_fn = dartintr;
+ sc->sc_ih.ih_arg = sc;
+ sc->sc_ih.ih_ipl = ca->ca_ipl;
+ sc->sc_ih.ih_wantframe = 0;
+
+ intr_establish(ca->ca_vec, &sc->sc_ih);
+ evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt);
printf("\n");
}
@@ -270,65 +275,68 @@ void
dartstart(tp)
struct tty *tp;
{
- dev_t dev;
- struct dartsoftc *sc;
- int s, cnt;
- union dart_pt_io *ptaddr;
- union dartreg *addr;
- int port;
- int c;
-
- dev = tp->t_dev;
- port = DART_PORT(dev);
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
-
- if ((tp->t_state & TS_ISOPEN) == 0)
- return;
-
- addr = sc->dart_reg;
- ptaddr = (union dart_pt_io *)addr + port;
-
- if (tp->t_state & (TS_TIMEOUT|TS_BUSY|TS_TTSTOP))
- goto out;
-
- /*
- if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
- ttwakeup(tp);
- }
- */
-
- if (tp->t_outq.c_cc != 0) {
-
- tp->t_state |= TS_BUSY;
-
- /* load transmitter until it is full */
- while (ptaddr->read.rd_sr & TXRDY) {
- c = getc(&tp->t_outq);
-
- if (tp->t_flags & CS8 || c <= 0177) {
-
- dprintf(("dartstart: writing char \"%c\" (0x%02x)\n",
- c & 0xff, c % 0xff));
- ptaddr->write.wr_tb = c & 0xff;
-
- dprintf(("dartstart: enabling Tx int\n"));
- if (port == A_PORT)
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYA;
- else
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYB;
- addr -> write.wr_imr = dart_sv_reg.sv_imr;
- } else {
- tp->t_state &= ~TS_BUSY;
- dprintf(("dartxint: timing out char \"%c\" (0x%02x)\n",
- c & 0xff, c % 0xff));
- ttrstrt(tp);
- tp->t_state |= TS_TIMEOUT;
- }
- }
- }
-
- out:
- return;
+ dev_t dev;
+ struct dartsoftc *sc;
+ int s, cnt;
+ union dart_pt_io *ptaddr;
+ union dartreg *addr;
+ int port;
+ int c;
+
+ dev = tp->t_dev;
+ port = DART_PORT(dev);
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ return;
+
+ addr = sc->dart_reg;
+ ptaddr = (union dart_pt_io *)addr + port;
+
+ if (tp->t_state & (TS_TIMEOUT|TS_BUSY|TS_TTSTOP))
+ goto out;
+
+ /*
+ if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
+ ttwakeup(tp);
+ }
+ */
+
+ if (tp->t_outq.c_cc != 0) {
+
+ tp->t_state |= TS_BUSY;
+
+ /* load transmitter until it is full */
+ while (ptaddr->read.rd_sr & TXRDY) {
+ c = getc(&tp->t_outq);
+
+ if (tp->t_flags & CS8 || c <= 0177) {
+
+ dprintf(("dartstart: writing char \"%c\" (0x%02x)\n",
+ c & 0xff, c % 0xff));
+ ptaddr->write.wr_tb = c & 0xff;
+
+ dprintf(("dartstart: enabling Tx int\n"));
+ if (port == A_PORT)
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYA;
+ else
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYB;
+ addr -> write.wr_imr = dart_sv_reg.sv_imr;
+ } else {
+ tp->t_state &= ~TS_BUSY;
+ dprintf(("dartxint: timing out char \"%c\" (0x%02x)\n",
+ c & 0xff, c % 0xff));
+#if 1
+ timeout_add(&tp->t_rstrt_to, 1);
+#else
+ ttrstrt(tp);
+#endif
+ tp->t_state |= TS_TIMEOUT;
+ }
+ }
+ }
+out:
+ return;
}
/*
@@ -339,13 +347,13 @@ dartstop(tp, flag)
struct tty *tp;
int flag;
{
- int s;
+ int s;
- if (tp->t_state & TS_BUSY) {
- if ((tp->t_state & TS_TTSTOP) == 0)
- tp->t_state |= TS_FLUSH;
- }
- return 0;
+ if (tp->t_state & TS_BUSY) {
+ if ((tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+ }
+ return 0;
}
#define HANDLE_FLAG(_FLAG_, _PORT_, _AFLAG_, _BFLAG_) \
@@ -373,108 +381,111 @@ dev_t dev;
int flags;
int how;
{
- union dartreg *addr;
- int port;
- unsigned int dcdstate;
- int newflags = 0;
- struct dart_info *dart;
- struct dartsoftc *sc;
-
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- addr = sc->dart_reg;
-
- /* special case: set or clear break */
- if (flags & TIOCSBRK) {
- dartbreak(port, 1);
- flags &= ~TIOCSBRK;
- }
- if (flags & TIOCCBRK) {
- dartbreak(port, 0);
- flags &= ~TIOCCBRK;
- }
-
- HANDLE_FLAG(TIOCM_DTR, port, OPDTRA, OPDTRB);
- HANDLE_FLAG(TIOCM_RTS, port, OPRTSA, OPRTSB);
+ union dartreg *addr;
+ int port;
+ unsigned int dcdstate;
+ int newflags = 0;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ addr = sc->dart_reg;
+
+ /* special case: set or clear break */
+ if (flags & TIOCSBRK) {
+ dartbreak(port, 1);
+ flags &= ~TIOCSBRK;
+ }
+ if (flags & TIOCCBRK) {
+ dartbreak(port, 0);
+ flags &= ~TIOCCBRK;
+ }
+
+ HANDLE_FLAG(TIOCM_DTR, port, OPDTRA, OPDTRB);
+ HANDLE_FLAG(TIOCM_RTS, port, OPRTSA, OPRTSB);
#if 0
- if (flags) {
- printf("dartmctl: currently only BRK, DTR and RTS supported\n");
- printf("dartmctl: op=%s flags left = 0x%b\n",
- HOW2STR(how), flags, FLAGSTRING);
- panic("dartmctl");
- }
+ if (flags) {
+ printf("dartmctl: currently only BRK, DTR and RTS supported\n");
+ printf("dartmctl: op=%s flags left = 0x%b\n",
+ HOW2STR(how), flags, FLAGSTRING);
+ panic("dartmctl");
+ }
#endif
- dprintf(("dartmctl: action=%s flags=0x%x\n",
- HOW2STR(how), newflags));
-
- switch (how) {
- case DMSET:
- addr->write.wr_oprset = newflags;
- addr->write.wr_oprreset = ~newflags;
- break;
- case DMBIS:
- addr->write.wr_oprset = newflags;
- break;
- case DMBIC:
- addr->write.wr_oprreset = newflags;
- break;
- case DMGET:
- panic("dartmctl: DMGET not supported (yet)\n");
- break;
- }
-
- /* read DCD input */
- /* input is inverted at port */
- dcdstate = !(addr->read.rd_ip & ((port == A_PORT) ? IPDCDA : IPDCDB));
-
- dprintf(("dartmctl: DCD is %s\n", dcdstate ? "up" : "down"));
-
- return dcdstate;
+ dprintf(("dartmctl: action=%s flags=0x%x\n",
+ HOW2STR(how), newflags));
+
+ switch (how) {
+ case DMSET:
+ addr->write.wr_oprset = newflags;
+ addr->write.wr_oprreset = ~newflags;
+ break;
+ case DMBIS:
+ addr->write.wr_oprset = newflags;
+ break;
+ case DMBIC:
+ addr->write.wr_oprreset = newflags;
+ break;
+ case DMGET:
+ panic("dartmctl: DMGET not supported (yet)\n");
+ break;
+ }
+
+ /* read DCD input */
+ /* input is inverted at port */
+ dcdstate = !(addr->read.rd_ip & ((port == A_PORT) ? IPDCDA : IPDCDB));
+
+ dprintf(("dartmctl: DCD is %s\n", dcdstate ? "up" : "down"));
+
+ return dcdstate;
}
/*
* To be called at spltty - tty already locked.
*/
void
-dartbreak(dev_t dev, int state)
+dartbreak(dev, state)
+dev_t dev;
+int state;
{
- union dartreg *addr;
- union dart_pt_io *ptaddr;
- int port;
- struct dart_info *dart;
- struct dartsoftc *sc;
-
- dprintf(("dartbreak: break %s\n", (state == 1) ? "on" : "off"));
-
- port = DART_PORT(dev);
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- addr = sc->dart_reg;
-
- ptaddr = (union dart_pt_io *) addr + port;
-
- if (state == 1) {
- /* the duart must be enabled with a dummy byte,
- to prevent the transmitter empty interrupt */
- ptaddr->write.wr_cr = BRKSTART|TXEN;
- ptaddr->write.wr_tb = 0;
- } else {
- ptaddr->write.wr_cr = BRKSTOP; /* stop a break*/
- }
-
- return;
+ union dartreg *addr;
+ union dart_pt_io *ptaddr;
+ int port;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+
+ dprintf(("dartbreak: break %s\n", (state == 1) ? "on" : "off"));
+
+ port = DART_PORT(dev);
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ addr = sc->dart_reg;
+
+ ptaddr = (union dart_pt_io *) addr + port;
+
+ if (state == 1) {
+ /* the duart must be enabled with a dummy byte,
+ to prevent the transmitter empty interrupt */
+ ptaddr->write.wr_cr = BRKSTART|TXEN;
+ ptaddr->write.wr_tb = 0;
+ } else {
+ ptaddr->write.wr_cr = BRKSTOP; /* stop a break*/
+ }
+
+ return;
}
-int dartioctl (dev, cmd, data, flag, p)
- dev_t dev;
- int cmd;
- caddr_t data;
- int flag;
- struct proc *p;
+int
+dartioctl (dev, cmd, data, flag, p)
+dev_t dev;
+int cmd;
+caddr_t data;
+int flag;
+struct proc *p;
{
int error;
int port;
@@ -555,304 +566,308 @@ dartparam(tp, t)
struct tty *tp;
struct termios *t;
{
- union dartreg *addr;
- union dart_pt_io *ptaddr;
- int flags;
- int port;
- int speeds;
- unsigned char mr1, mr2;
- struct dart_info *dart;
- struct dartsoftc *sc;
- dev_t dev;
-
- dprintf(("dartparam: setting param for dev %d\n", dev));
-
- dev = tp->t_dev;
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
-
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- addr = sc->dart_reg;
- ptaddr = (union dart_pt_io *) addr + port;
- tp->t_ispeed = t->c_ispeed;
- tp->t_ospeed = t->c_ospeed;
- tp->t_cflag = t->c_cflag;
-
- flags = tp->t_flags;
-
- /* Reset to make global changes*/
- /* disable Tx and Rx */
- dprintf(("dartparam: disabling Tx and Rx int\n"));
-
- if (port == A_PORT)
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~(ITXRDYA | IRXRDYA);
- else
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~(ITXRDYB | IRXRDYB);
- addr -> write.wr_imr = dart_sv_reg.sv_imr;
-
- /* hang up on zero baud rate */
- if (tp->t_ispeed == 0) {
- dprintf(("dartparam: ispeed == 0 -> HUP\n"));
- dartmctl(tp, HUPCL, DMSET);
- return;
- } else {
- /* set baudrate */
- speeds = dart_speeds[(unsigned char)tp->t_ispeed];
- if (speeds == NOBAUD)
- speeds = dart_sv_reg.sv_csr[port];
- ptaddr->write.wr_csr = speeds;
- dart_sv_reg.sv_csr[port] = speeds;
- dprintf(("dartparam: baudrate set param = %d\n", speeds));
- }
-
- /* get saved mode registers and clear set up parameters */
- mr1 = dart_sv_reg.sv_mr1[port];
- mr1 &= ~(CLMASK | PARTYPEMASK | PARMODEMASK);
-
- mr2 = dart_sv_reg.sv_mr2[port];
- mr2 &= ~SBMASK;
-
- /* set up character size */
- if (flags & CS8) {
- mr1 |= CL8;
- dprintf(("dartparam: PASS8\n"));
- } else if (tp->t_ispeed == B134) {
- mr1 |= CL6;
- dprintf(("dartparam: CS6\n"));
- } else {
- mr1 |= CL7;
- dprintf(("dartparam: CS7\n"));
- }
-
- /* set up stop bits */
- if (tp->t_ospeed == B110) {
- mr2 |= SB2;
- dprintf(("dartparam: two stop bits\n"));
- } else {
- mr2 |= SB1;
- dprintf(("dartparam: one stop bit\n"));
- }
-
- /* set up parity */
- if (((flags & PARENB) != PARENB) &&
- (flags & PARENB)) {
- mr1 |= PAREN;
- if (flags & PARODD) {
- mr1 |= ODDPAR;
- dprintf(("dartparam: odd parity\n"));
- } else {
- mr1 |= EVENPAR;
- dprintf(("dartparam: even parity\n"));
- }
- } else {
- mr1 |= PARDIS;
- dprintf(("dartparam: no parity\n"));
- }
-
- if ((dart_sv_reg.sv_mr1[port] != mr1)
- || (dart_sv_reg.sv_mr2[port] != mr2)) {
- /* write mode registers to duart */
- ptaddr->write.wr_cr = MRRESET;
- ptaddr->write.wr_mr = mr1;
- ptaddr->write.wr_mr = mr2;
-
- /* save changed mode registers */
- dart_sv_reg.sv_mr1[port] = mr1;
- dart_sv_reg.sv_mr2[port] = mr2;
- }
-
- /* enable transmitter? */
- if (tp->t_state & TS_BUSY) {
- dprintf(("dartparam: reenabling Tx int\n"));
-
- if (port == A_PORT)
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYA;
- else
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYB;
- addr -> write.wr_imr = dart_sv_reg.sv_imr;
- } else {
- dprintf(("dartparam: not enabling Tx\n"));
- }
-
- /* re-enable the receiver */
- dprintf(("dartparam: reenabling Rx int\n"));
-
- DELAY_CR;
- if (port == A_PORT)
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | IRXRDYA;
- else
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | IRXRDYB;
- addr -> write.wr_imr = dart_sv_reg.sv_imr;
-
- return 0;
+ union dartreg *addr;
+ union dart_pt_io *ptaddr;
+ int flags;
+ int port;
+ int speeds;
+ unsigned char mr1, mr2;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+ dev_t dev;
+
+ dprintf(("dartparam: setting param for dev %d\n", dev));
+
+ dev = tp->t_dev;
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ addr = sc->dart_reg;
+ ptaddr = (union dart_pt_io *) addr + port;
+ tp->t_ispeed = t->c_ispeed;
+ tp->t_ospeed = t->c_ospeed;
+ tp->t_cflag = t->c_cflag;
+
+ flags = tp->t_flags;
+
+ /* Reset to make global changes*/
+ /* disable Tx and Rx */
+ dprintf(("dartparam: disabling Tx and Rx int\n"));
+
+ if (port == A_PORT)
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~(ITXRDYA | IRXRDYA);
+ else
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~(ITXRDYB | IRXRDYB);
+ addr -> write.wr_imr = dart_sv_reg.sv_imr;
+
+ /* hang up on zero baud rate */
+ if (tp->t_ispeed == 0) {
+ dprintf(("dartparam: ispeed == 0 -> HUP\n"));
+ dartmctl(tp, HUPCL, DMSET);
+ return;
+ } else {
+ /* set baudrate */
+ speeds = dart_speeds[(unsigned char)tp->t_ispeed];
+ if (speeds == NOBAUD)
+ speeds = dart_sv_reg.sv_csr[port];
+ ptaddr->write.wr_csr = speeds;
+ dart_sv_reg.sv_csr[port] = speeds;
+ dprintf(("dartparam: baudrate set param = %d\n", speeds));
+ }
+
+ /* get saved mode registers and clear set up parameters */
+ mr1 = dart_sv_reg.sv_mr1[port];
+ mr1 &= ~(CLMASK | PARTYPEMASK | PARMODEMASK);
+
+ mr2 = dart_sv_reg.sv_mr2[port];
+ mr2 &= ~SBMASK;
+
+ /* set up character size */
+ if (flags & CS8) {
+ mr1 |= CL8;
+ dprintf(("dartparam: PASS8\n"));
+ } else if (tp->t_ispeed == B134) {
+ mr1 |= CL6;
+ dprintf(("dartparam: CS6\n"));
+ } else {
+ mr1 |= CL7;
+ dprintf(("dartparam: CS7\n"));
+ }
+
+ /* set up stop bits */
+ if (tp->t_ospeed == B110) {
+ mr2 |= SB2;
+ dprintf(("dartparam: two stop bits\n"));
+ } else {
+ mr2 |= SB1;
+ dprintf(("dartparam: one stop bit\n"));
+ }
+
+ /* set up parity */
+ if (((flags & PARENB) != PARENB) &&
+ (flags & PARENB)) {
+ mr1 |= PAREN;
+ if (flags & PARODD) {
+ mr1 |= ODDPAR;
+ dprintf(("dartparam: odd parity\n"));
+ } else {
+ mr1 |= EVENPAR;
+ dprintf(("dartparam: even parity\n"));
+ }
+ } else {
+ mr1 |= PARDIS;
+ dprintf(("dartparam: no parity\n"));
+ }
+
+ if ((dart_sv_reg.sv_mr1[port] != mr1)
+ || (dart_sv_reg.sv_mr2[port] != mr2)) {
+ /* write mode registers to duart */
+ ptaddr->write.wr_cr = MRRESET;
+ ptaddr->write.wr_mr = mr1;
+ ptaddr->write.wr_mr = mr2;
+
+ /* save changed mode registers */
+ dart_sv_reg.sv_mr1[port] = mr1;
+ dart_sv_reg.sv_mr2[port] = mr2;
+ }
+
+ /* enable transmitter? */
+ if (tp->t_state & TS_BUSY) {
+ dprintf(("dartparam: reenabling Tx int\n"));
+
+ if (port == A_PORT)
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYA;
+ else
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | ITXRDYB;
+ addr -> write.wr_imr = dart_sv_reg.sv_imr;
+ } else {
+ dprintf(("dartparam: not enabling Tx\n"));
+ }
+
+ /* re-enable the receiver */
+ dprintf(("dartparam: reenabling Rx int\n"));
+
+ DELAY_CR;
+ if (port == A_PORT)
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | IRXRDYA;
+ else
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr | IRXRDYB;
+ addr -> write.wr_imr = dart_sv_reg.sv_imr;
+
+ return 0;
}
void
dartmodemtrans(sc, ip, ipcr)
- struct dartsoftc *sc;
- unsigned int ip;
- unsigned int ipcr;
+struct dartsoftc *sc;
+unsigned int ip;
+unsigned int ipcr;
{
- unsigned int dcdstate;
- struct tty *tp;
- int port;
- struct dart_info *dart;
-
- dprintf(("dartmodemtrans: ip=0x%x ipcr=0x%x\n",
- ip, ipcr));
-
- /* input is inverted at port!!! */
- if (ipcr & IPCRDCDA) {
- port = A_PORT;
- dcdstate = !(ip & IPDCDA);
- } else if (ipcr & IPCRDCDB) {
- port = B_PORT;
- dcdstate = !(ip & IPDCDB);
- } else {
- printf("dartmodemtrans: unknown transition:\n");
- printf("dartmodemtrans: ip=0x%x ipcr=0x%x\n",
- ip, ipcr);
- panic("dartmodemtrans");
- }
- dart = &sc->sc_dart[port];
- tp = dart->tty;
-
- dprintf(("dartmodemtrans: tp=0x%x new DCD state: %s\n",
- tp, dcdstate ? "UP" : "DOWN"));
- (void) ttymodem(tp, dcdstate);
+ unsigned int dcdstate;
+ struct tty *tp;
+ int port;
+ struct dart_info *dart;
+
+ dprintf(("dartmodemtrans: ip=0x%x ipcr=0x%x\n",
+ ip, ipcr));
+
+ /* input is inverted at port!!! */
+ if (ipcr & IPCRDCDA) {
+ port = A_PORT;
+ dcdstate = !(ip & IPDCDA);
+ } else if (ipcr & IPCRDCDB) {
+ port = B_PORT;
+ dcdstate = !(ip & IPDCDB);
+ } else {
+ printf("dartmodemtrans: unknown transition:\n");
+ printf("dartmodemtrans: ip=0x%x ipcr=0x%x\n",
+ ip, ipcr);
+ panic("dartmodemtrans");
+ }
+ dart = &sc->sc_dart[port];
+ tp = dart->tty;
+
+ dprintf(("dartmodemtrans: tp=0x%x new DCD state: %s\n",
+ tp, dcdstate ? "UP" : "DOWN"));
+ (void) ttymodem(tp, dcdstate);
}
-int dartopen (dev, flag, mode, p)
+int
+dartopen (dev, flag, mode, p)
dev_t dev;
int flag;
int mode;
struct proc *p;
{
- int s, port;
- struct dart_info *dart;
- struct dartsoftc *sc;
- struct tty *tp;
-
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- s = spldart();
-
- if (dart->tty) {
- tp = dart->tty;
- } else {
- tp = dart->tty = ttymalloc();
- simple_lock_init(&dart->t_lock);
- }
-
- simple_lock(&dart->t_lock);
- tp->t_oproc = dartstart;
- tp->t_param = dartparam;
- tp->t_dev = dev;
-
- if ((tp->t_state & TS_ISOPEN) == 0) {
- ttychars(tp);
- tp->t_iflag = TTYDEF_IFLAG;
- tp->t_oflag = TTYDEF_OFLAG;
- tp->t_lflag = TTYDEF_LFLAG;
- tp->t_ispeed = tp->t_ospeed = B9600;
- dartparam(tp, &tp->t_termios);
- if (port == 0) {
- /* console is 8N1 */
- tp->t_cflag = (CREAD | CS8 | HUPCL);
- } else {
- tp->t_cflag = TTYDEF_CFLAG;
- }
- ttsetwater(tp);
- (void)dartmctl(dev, TIOCM_DTR | TIOCM_RTS, DMSET);
- tp->t_state |= TS_CARR_ON;
- } else if (tp->t_state & TS_XCLUDE && p->p_ucred->cr_uid != 0) {
- splx(s);
- simple_unlock(&dart->t_lock);
- return (EBUSY);
- }
- /*
- * Reset the tty pointer, as there could have been a dialout
- * use of the tty with a dialin open waiting.
- */
- tp->t_dev = dev;
- simple_unlock(&dart->t_lock);
- splx(s);
- return ((*linesw[tp->t_line].l_open)(dev, tp));
+ int s, port;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+ struct tty *tp;
+
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ s = spldart();
+
+ if (dart->tty) {
+ tp = dart->tty;
+ } else {
+ tp = dart->tty = ttymalloc();
+ simple_lock_init(&dart->t_lock);
+ }
+
+ simple_lock(&dart->t_lock);
+ tp->t_oproc = dartstart;
+ tp->t_param = dartparam;
+ tp->t_dev = dev;
+
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ ttychars(tp);
+ tp->t_iflag = TTYDEF_IFLAG;
+ tp->t_oflag = TTYDEF_OFLAG;
+ tp->t_lflag = TTYDEF_LFLAG;
+ tp->t_ispeed = tp->t_ospeed = B9600;
+ dartparam(tp, &tp->t_termios);
+ if (port == 0) {
+ /* console is 8N1 */
+ tp->t_cflag = (CREAD | CS8 | HUPCL);
+ } else {
+ tp->t_cflag = TTYDEF_CFLAG;
+ }
+ ttsetwater(tp);
+ (void)dartmctl(dev, TIOCM_DTR | TIOCM_RTS, DMSET);
+ tp->t_state |= TS_CARR_ON;
+ } else if (tp->t_state & TS_XCLUDE && p->p_ucred->cr_uid != 0) {
+ splx(s);
+ simple_unlock(&dart->t_lock);
+ return (EBUSY);
+ }
+ /*
+ * Reset the tty pointer, as there could have been a dialout
+ * use of the tty with a dialin open waiting.
+ */
+ tp->t_dev = dev;
+ simple_unlock(&dart->t_lock);
+ splx(s);
+ return ((*linesw[tp->t_line].l_open)(dev, tp));
}
-int dartclose (dev, flag, mode, p)
+int
+dartclose (dev, flag, mode, p)
dev_t dev;
int flag;
int mode;
struct proc *p;
{
- int unit, channel;
- struct tty *tp;
- struct dart_info *dart;
- struct dartsoftc *sc;
- int s, port;
-
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- tp = dart->tty;
- (*linesw[tp->t_line].l_close)(tp, flag);
-
- s = spldart();
- ttyclose(tp);
- splx(s);
- return 0;
+ int unit, channel;
+ struct tty *tp;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+ int s, port;
+
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ tp = dart->tty;
+ (*linesw[tp->t_line].l_close)(tp, flag);
+
+ s = spldart();
+ ttyclose(tp);
+ splx(s);
+ return 0;
}
-int dartread (dev, uio, flag)
+int
+dartread (dev, uio, flag)
dev_t dev;
struct uio *uio;
int flag;
{
- int unit, port;
- struct tty *tp;
- struct dart_info *dart;
- struct dartsoftc *sc;
-
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
- sc = (struct dartsoftc *) dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
- tp = dart->tty;
-
- if (!tp)
- return ENXIO;
- return ((*linesw[tp->t_line].l_read)(tp, uio, flag));
+ int unit, port;
+ struct tty *tp;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+ sc = (struct dartsoftc *) dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+ tp = dart->tty;
+
+ if (!tp)
+ return ENXIO;
+ return ((*linesw[tp->t_line].l_read)(tp, uio, flag));
}
-int dartwrite (dev, uio, flag)
+int
+dartwrite(dev, uio, flag)
dev_t dev;
struct uio *uio;
int flag;
{
- int port;
- struct tty *tp;
- struct dart_info *dart;
- struct dartsoftc *sc;
-
- if (port = DART_PORT(dev) > 1) {
- return (ENODEV);
- }
- sc = (struct dartsoftc *)dart_cd.cd_devs[0];
- dart = &sc->sc_dart[port];
-
- tp = dart->tty;
- if (!tp)
- return ENXIO;
- return ((*linesw[tp->t_line].l_write)(tp, uio, flag));
+ int port;
+ struct tty *tp;
+ struct dart_info *dart;
+ struct dartsoftc *sc;
+
+ if (port = DART_PORT(dev) > 1) {
+ return (ENODEV);
+ }
+ sc = (struct dartsoftc *)dart_cd.cd_devs[0];
+ dart = &sc->sc_dart[port];
+
+ tp = dart->tty;
+ if (!tp)
+ return ENXIO;
+ return ((*linesw[tp->t_line].l_write)(tp, uio, flag));
}
void
@@ -860,88 +875,88 @@ dartrint(sc, port)
struct dartsoftc *sc;
int port;
{
- union dartreg *addr;
- union dart_pt_io *ptaddr;
- struct tty *tp;
- unsigned char data, sr;
- struct dart_info *dart;
-
- dart = &sc->sc_dart[port];
- addr = sc->dart_reg;
-
- /* read status reg */
- ptaddr = (union dart_pt_io *) addr + port;
-
- dprintf(("dartrint: Rx int dev %d\n", dev));
-
- tp = dart->tty;
-
- dprintf(("dartrint: ptaddr = 0x%08x from uart at 0x%08x\n",
- ptaddr, addr));
-
- while ((sr = ptaddr->read.rd_sr) & RXRDY) {
- dprintf(("dartrint: sr = 0x%08x\n", sr));
+ union dartreg *addr;
+ union dart_pt_io *ptaddr;
+ struct tty *tp;
+ unsigned char data, sr;
+ struct dart_info *dart;
- data = ptaddr->read.rd_rb; /* read data and reset receiver */
+ dart = &sc->sc_dart[port];
+ addr = sc->dart_reg;
- dprintf(("dartrint: read char \"%c\" (0x%02x) tp = 0x%x\n",
- data, data, tp));
+ /* read status reg */
+ ptaddr = (union dart_pt_io *) addr + port;
- if ((tp->t_state & (TS_ISOPEN|TS_WOPEN)) == 0){
- return;
- }
+ dprintf(("dartrint: Rx int port %d\n", port));
- if (sr & RBRK) {
- dprintf(("dartrint: BREAK detected\n"));
- /*
- data = tp->t_breakc;
- ttyinput(data, tp);
- */
- /* clear break state */
- ptaddr->write.wr_cr = BRKINTRESET;
- DELAY_CR;
- ptaddr->write.wr_cr = ERRRESET;
+ tp = dart->tty;
-#if DDB_XXX
- if (ddb_break_mode & DDB_ENTER_BREAK) {
- dprintf(("dartrint: break detected - entering debugger\n"));
- gimmeabreak();
- }
+ dprintf(("dartrint: ptaddr = 0x%08x from uart at 0x%08x\n",
+ ptaddr, addr));
+
+ while ((sr = ptaddr->read.rd_sr) & RXRDY) {
+ dprintf(("dartrint: sr = 0x%08x\n", sr));
+
+ data = ptaddr->read.rd_rb; /* read data and reset receiver */
+
+ dprintf(("dartrint: read char \"%c\" (0x%02x) tp = 0x%x\n",
+ data, data, tp));
+
+ if ((tp->t_state & (TS_ISOPEN|TS_WOPEN)) == 0) {
+ return;
+ }
+
+ if (sr & RBRK) {
+ dprintf(("dartrint: BREAK detected\n"));
+ /*
+ data = tp->t_breakc;
+ ttyinput(data, tp);
+ */
+ /* clear break state */
+ ptaddr->write.wr_cr = BRKINTRESET;
+ DELAY_CR;
+ ptaddr->write.wr_cr = ERRRESET;
+
+#if defined(DDB)
+ if (ddb_break_mode & DDB_ENTER_BREAK) {
+ dprintf(("dartrint: break detected - entering debugger\n"));
+ gimmeabreak();
+ }
#endif
- } else {
- if (sr & (FRERR|PERR|ROVRN)) { /* errors */
- if (sr & ROVRN)
- printf("dart0: receiver overrun port %c\n", 'A' + port);
- if (sr & FRERR)
- printf("dart0: framing error port %c\n", 'A' + port);
- if (sr & PERR)
- printf("dart0: parity error port %c\n", 'A' + port);
- dprintf(("dartrint: error received\n"));
- /* clear error state */
- ptaddr->write.wr_cr = ERRRESET;
- } else {
- /* no errors */
-#if DDB_XXX
- if ((ddb_break_mode & DDB_ENTER_CHAR) && (ddb_break_char == data)) {
- dprintf(("dartrint: ddb_break_char detected - entering debugger\n"));
- gimmeabreak();
- } else
+ } else {
+ if (sr & (FRERR|PERR|ROVRN)) { /* errors */
+ if (sr & ROVRN)
+ printf("dart0: receiver overrun port %c\n", 'A' + port);
+ if (sr & FRERR)
+ printf("dart0: framing error port %c\n", 'A' + port);
+ if (sr & PERR)
+ printf("dart0: parity error port %c\n", 'A' + port);
+ dprintf(("dartrint: error received\n"));
+ /* clear error state */
+ ptaddr->write.wr_cr = ERRRESET;
+ } else {
+ /* no errors */
+#if defined(DDB)
+ if ((ddb_break_mode & DDB_ENTER_CHAR) && (ddb_break_char == data)) {
+ dprintf(("dartrint: ddb_break_char detected - entering debugger\n"));
+ gimmeabreak();
+ } else
#endif
- {
- if (tp->t_ispeed == B134) /* CS6 */
- data &= 077;
+ {
+ if (tp->t_ispeed == B134) /* CS6 */
+ data &= 077;
#if 0 /* XXX ??? */
- else if (tp->t_flags & (RAW|LITOUT|PASS8)) /*CS8*/
- ;
+ else if (tp->t_flags & (RAW|LITOUT|PASS8)) /*CS8*/
+ ;
#endif
- else
- data &= 0177; /* CS7 */
- ttyinput(data, tp);
- }
- }
- }
- }
- dprintf(("dartrint: ready\n"));
+ else
+ data &= 0177; /* CS7 */
+ ttyinput(data, tp);
+ }
+ }
+ }
+ }
+ dprintf(("dartrint: ready\n"));
}
void
@@ -949,106 +964,106 @@ dartxint(sc, port)
struct dartsoftc *sc;
int port;
{
- struct tty *tp;
- struct dart_info *dart;
- union dartreg *addr;
- dev_t dev;
+ struct tty *tp;
+ struct dart_info *dart;
+ union dartreg *addr;
+ dev_t dev;
- dart = &sc->sc_dart[port];
- addr = sc->dart_reg;
+ dart = &sc->sc_dart[port];
+ addr = sc->dart_reg;
- tp = dart->tty;
+ tp = dart->tty;
- simple_lock(&dart->t_lock);
+ simple_lock(&dart->t_lock);
- if ((tp->t_state & (TS_ISOPEN|TS_WOPEN))==0)
- goto out;
+ if ((tp->t_state & (TS_ISOPEN|TS_WOPEN))==0)
+ goto out;
- if (tp->t_state & TS_FLUSH)
- tp->t_state &= ~TS_FLUSH;
+ if (tp->t_state & TS_FLUSH)
+ tp->t_state &= ~TS_FLUSH;
- if (tp->t_state & TS_BUSY) {
- tp->t_state &= ~TS_BUSY;
- dprintf(("dartxint: starting output\n"));
- dartstart(tp);
- if (tp->t_state & TS_BUSY) {
- dprintf(("dartxint: ready - Tx left enabled\n"));
- simple_unlock(&dart->t_lock);
- return;
- }
- }
- out:
+ if (tp->t_state & TS_BUSY) {
+ tp->t_state &= ~TS_BUSY;
+ dprintf(("dartxint: starting output\n"));
+ dartstart(tp);
+ if (tp->t_state & TS_BUSY) {
+ dprintf(("dartxint: ready - Tx left enabled\n"));
+ simple_unlock(&dart->t_lock);
+ return;
+ }
+ }
+ out:
- /* disable transmitter */
- if (port == 0)
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~ITXRDYA;
- else
- dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~ITXRDYB;
+ /* disable transmitter */
+ if (port == 0)
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~ITXRDYA;
+ else
+ dart_sv_reg.sv_imr = dart_sv_reg.sv_imr & ~ITXRDYB;
- addr->write.wr_imr = dart_sv_reg.sv_imr;
+ addr->write.wr_imr = dart_sv_reg.sv_imr;
- simple_unlock(&dart->t_lock);
+ simple_unlock(&dart->t_lock);
- dprintf(("dartxint: ready - Tx disabled\n"));
+ dprintf(("dartxint: ready - Tx disabled\n"));
- return;
+ return;
}
int
dartintr(sc)
struct dartsoftc *sc;
{
- unsigned char isr;
- unsigned char sr;
- int port;
- union dartreg *addr;
-
- /* read interrupt status register and mask with imr */
- addr = sc->dart_reg;
-
- isr = addr->read.rd_isr;
- isr &= dart_sv_reg.sv_imr;
-
- if (isr) { /* interrupt from this duart */
- if (isr & IIPCHG) {
- unsigned int ip = addr->read.rd_ip;
- unsigned int ipcr = addr->read.rd_ipcr;
- dartmodemtrans(sc, ip, ipcr);
- return 0;
- }
-
- if (isr & (IRXRDYA | ITXRDYA))
- port = 0;
- else
- if (isr & (IRXRDYB | ITXRDYB))
- port = 1;
- else {
- printf("dartintr: spurious interrupt, isr 0x%08x\n", isr);
- panic("dartintr");
- }
-
- dprintf(("dartintr: interrupt from port %d, isr 0x%08x\n",
- port, isr));
-
- if (isr & (IRXRDYA | IRXRDYB)) {
- dprintf(("dartintr: Rx interrupt\n"));
- dartrint(sc, port);
- }
- if (isr & (ITXRDYA | ITXRDYB)) {
- dprintf(("dartintr: Tx interrupt\n"));
- dartxint(sc, port);
- }
- if (((port == A_PORT) && (isr & IBRKA))
- || ((port == B_PORT) && (isr & IBRKB))) {
- union dart_pt_io *ptaddr =
- (union dart_pt_io *)addr + port;
-
- dprintf(("dartintr: clearing end of BREAK state\n"));
- ptaddr->write.wr_cr = BRKINTRESET;
- }
- }
- dprintf(("dartintr: ready\n"));
- return 1;
+ unsigned char isr;
+ unsigned char sr;
+ int port;
+ union dartreg *addr;
+
+ /* read interrupt status register and mask with imr */
+ addr = sc->dart_reg;
+
+ isr = addr->read.rd_isr;
+ isr &= dart_sv_reg.sv_imr;
+
+ if (isr) { /* interrupt from this duart */
+ if (isr & IIPCHG) {
+ unsigned int ip = addr->read.rd_ip;
+ unsigned int ipcr = addr->read.rd_ipcr;
+ dartmodemtrans(sc, ip, ipcr);
+ return 0;
+ }
+
+ if (isr & (IRXRDYA | ITXRDYA))
+ port = 0;
+ else
+ if (isr & (IRXRDYB | ITXRDYB))
+ port = 1;
+ else {
+ printf("dartintr: spurious interrupt, isr 0x%08x\n", isr);
+ panic("dartintr");
+ }
+
+ dprintf(("dartintr: interrupt from port %d, isr 0x%08x\n",
+ port, isr));
+
+ if (isr & (IRXRDYA | IRXRDYB)) {
+ dprintf(("dartintr: Rx interrupt\n"));
+ dartrint(sc, port);
+ }
+ if (isr & (ITXRDYA | ITXRDYB)) {
+ dprintf(("dartintr: Tx interrupt\n"));
+ dartxint(sc, port);
+ }
+ if (((port == A_PORT) && (isr & IBRKA))
+ || ((port == B_PORT) && (isr & IBRKB))) {
+ union dart_pt_io *ptaddr =
+ (union dart_pt_io *)addr + port;
+
+ dprintf(("dartintr: clearing end of BREAK state\n"));
+ ptaddr->write.wr_cr = BRKINTRESET;
+ }
+ }
+ dprintf(("dartintr: ready\n"));
+ return 1;
}
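
The dartintr() rework above keeps the original dispatch logic: the DUART's interrupt status register is masked with the software copy of the interrupt mask, the interrupting port is chosen from the A/B ready bits, and the receive and transmit service routines are run for that port. A minimal sketch of that order follows; the bit values and helper names are placeholders, not the driver's dartreg.h definitions.

	/* Sketch of the dispatch order in dartintr() above: mask the ISR with
	 * the saved IMR, pick the interrupting port, then service Rx/Tx.
	 * The bit values below are stand-ins for the dartreg.h definitions. */
	#define ITXRDYA	0x01
	#define IRXRDYA	0x02
	#define ITXRDYB	0x10
	#define IRXRDYB	0x20

	static int
	duart_dispatch(unsigned char isr, unsigned char imr,
	    void (*rxint)(int port), void (*txint)(int port))
	{
		int port;

		isr &= imr;			/* only sources we enabled */
		if (isr == 0)
			return (0);		/* not our interrupt */
		if (isr & (IRXRDYA | ITXRDYA))
			port = 0;
		else if (isr & (IRXRDYB | ITXRDYB))
			port = 1;
		else
			return (0);		/* spurious; the driver panics here */
		if (isr & (IRXRDYA | IRXRDYB))
			rxint(port);
		if (isr & (ITXRDYA | ITXRDYB))
			txint(port);
		return (1);
	}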
/*
@@ -1060,27 +1075,27 @@ int
dartcnprobe(cp)
struct consdev *cp;
{
- int maj;
-
- if (cputyp != CPU_188){
- cp->cn_pri = CN_DEAD;
- return 0;
- }
- /* locate the major number */
- for (maj = 0; maj < nchrdev; maj++)
- if (cdevsw[maj].d_open == dartopen)
- break;
-
- cp->cn_dev = makedev(maj, 0);
- cp->cn_pri = CN_NORMAL;
- return (1);
+ int maj;
+
+ if (cputyp != CPU_188) {
+ cp->cn_pri = CN_DEAD;
+ return 0;
+ }
+ /* locate the major number */
+ for (maj = 0; maj < nchrdev; maj++)
+ if (cdevsw[maj].d_open == dartopen)
+ break;
+
+ cp->cn_dev = makedev(maj, 0);
+ cp->cn_pri = CN_NORMAL;
+ return (1);
}
int
dartcninit(cp)
struct consdev *cp;
{
- /* Nothing to do */
+ /* Nothing to do */
}
int
@@ -1088,103 +1103,136 @@ dartcnputc(dev, c)
dev_t dev;
char c;
{
- union dartreg *addr;
- union dart_pt_io *ptaddr;
- m88k_psr_type psr;
-
- addr = (union dartreg *) MVME188_DUART;
-
- ptaddr = (union dart_pt_io *) addr + ((dev & 1) ? 1 : 0);
+ union dartreg *addr;
+ union dart_pt_io *ptaddr;
+ m88k_psr_type psr;
+ int s;
+ int port;
- psr = disable_interrupts_return_psr();
+ port = DART_PORT(dev);
- /* Assume first port initialized if we get here. */
- /* Assume the bug initializes the port */
+ addr = (union dartreg *) MVME188_DUART;
- /* inhibit interrupts on the chip */
- addr->write.wr_imr = dart_sv_reg.sv_imr & ~ITXRDYA;
- /* make sure transmitter is enabled */
- DELAY_CR;
- ptaddr->write.wr_cr = TXEN;
+#if 1
+ ptaddr = (union dart_pt_io *) addr + port;
+#else
+ ptaddr = (union dart_pt_io *) addr + ((dev & 1) ? 1 : 0);
+#endif
- /* If the character is a line feed(\n) */
- /* then follow it with carriage return (\r) */
- for (;;) {
- while (!(ptaddr->read.rd_sr & TXRDY))
- ;
- ptaddr->write.wr_tb = c;
- if (c != '\n')
- break;
- c = '\r';
- }
+#if 1
+ s = spldart();
+#else
+ psr = disable_interrupts_return_psr();
+#endif
- /* wait for transmitter to empty */
- while (!(ptaddr->read.rd_sr & TXEMT))
- ;
+ /* Assume first port initialized if we get here. */
+ /* Assume the bug initializes the port */
+
+ /* inhibit interrupts on the chip */
+ addr->write.wr_imr = dart_sv_reg.sv_imr & ~ITXRDYA;
+ /* make sure transmitter is enabled */
+ DELAY_CR;
+ ptaddr->write.wr_cr = TXEN;
+
+ /* If the character is a line feed(\n) */
+ /* then follow it with carriage return (\r) */
+ for (;;) {
+ while (!(ptaddr->read.rd_sr & TXRDY))
+ ;
+ ptaddr->write.wr_tb = c;
+ if (c != '\n')
+ break;
+ c = '\r';
+ }
- /* restore the previous state */
- addr->write.wr_imr = dart_sv_reg.sv_imr;
- DELAY_CR;
- ptaddr->write.wr_cr = dart_sv_reg.sv_cr[0];
+ /* wait for transmitter to empty */
+ while (!(ptaddr->read.rd_sr & TXEMT))
+ ;
- set_psr(psr);
+ /* restore the previous state */
+ addr->write.wr_imr = dart_sv_reg.sv_imr;
+ DELAY_CR;
+ ptaddr->write.wr_cr = dart_sv_reg.sv_cr[0];
- return;
+#if 1
+ splx(s);
+#else
+ set_psr(psr);
+#endif
+ return;
}
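
dartcnputc() above is a polled path: it waits for TXRDY, writes the byte, and follows a line feed with a carriage return before letting the transmitter drain. Distilled to its core as a sketch; tx_ready() and tx_byte() stand in for the rd_sr TXRDY test and the wr_tb write and are not part of the driver.

	/* Sketch of the polled transmit loop used by dartcnputc() above. */
	static void
	polled_putc(int c, int (*tx_ready)(void), void (*tx_byte)(int))
	{
		for (;;) {
			while (!tx_ready())
				;		/* spin until the holding register is free */
			tx_byte(c);
			if (c != '\n')
				break;
			c = '\r';		/* CR after LF, as the console expects */
		}
	}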
int
dartcngetc(dev)
dev_t dev;
{
- union dartreg *addr; /* pointer to DUART regs */
- union dart_pt_io *ptaddr; /* pointer to port regs */
- unsigned char sr; /* status reg of port a/b */
- int c; /* received character */
- m88k_psr_type psr;
- char buf[] = "char x";
-
- psr = disable_interrupts_return_psr();
-
- addr = (union dartreg *) DART_BASE;
- ptaddr = (union dart_pt_io *) addr + ((dev & 1) ? 1 : 0);
-
- /* enable receiver */
- ptaddr->write.wr_cr = RXEN;
-
- do {
- /* read status reg */
- sr = ptaddr->read.rd_sr;
-
- /* receiver interrupt handler*/
- if (sr & RXRDY) {
- /* read character from port */
- c = ptaddr->read.rd_rb;
-
- /* check break condition */
- if (sr & RBRK) {
- /* clear break state */
- ptaddr->write.wr_cr = BRKINTRESET;
- DELAY_CR;
- ptaddr->write.wr_cr = ERRRESET;
- set_psr(psr);
- return c;
- }
-
- if (sr & (FRERR|PERR|ROVRN)) {
- /* clear error state */
- ptaddr->write.wr_cr = ERRRESET;
- DELAY_CR;
- ptaddr->write.wr_cr = BRKINTRESET;
- } else {
- buf[5] = (char) c;
-
- set_psr(psr);
- return (c & 0x7f);
- }
- }
- } while (-1);
-
- set_psr(psr);
-
- return -1;
+ union dartreg *addr; /* pointer to DUART regs */
+ union dart_pt_io *ptaddr; /* pointer to port regs */
+ unsigned char sr; /* status reg of port a/b */
+ int c; /* received character */
+ int s;
+ int port;
+ m88k_psr_type psr;
+ char buf[] = "char x";
+
+ port = DART_PORT(dev);
+#if 1
+ s = spldart();
+#else
+ psr = disable_interrupts_return_psr();
+#endif
+ addr = (union dartreg *) DART_BASE;
+#if 1
+ ptaddr = (union dart_pt_io *) addr + port;
+#else
+ ptaddr = (union dart_pt_io *) addr + ((dev & 1) ? 1 : 0);
+#endif
+ /* enable receiver */
+ ptaddr->write.wr_cr = RXEN;
+
+ do {
+ /* read status reg */
+ sr = ptaddr->read.rd_sr;
+
+ /* receiver interrupt handler*/
+ if (sr & RXRDY) {
+ /* read character from port */
+ c = ptaddr->read.rd_rb;
+
+ /* check break condition */
+ if (sr & RBRK) {
+ /* clear break state */
+ ptaddr->write.wr_cr = BRKINTRESET;
+ DELAY_CR;
+ ptaddr->write.wr_cr = ERRRESET;
+#if 1
+ splx(s);
+#else
+ set_psr(psr);
+#endif
+ return c;
+ }
+
+ if (sr & (FRERR|PERR|ROVRN)) {
+ /* clear error state */
+ ptaddr->write.wr_cr = ERRRESET;
+ DELAY_CR;
+ ptaddr->write.wr_cr = BRKINTRESET;
+ } else {
+ buf[5] = (char) c;
+#if 1
+ splx(s);
+#else
+ set_psr(psr);
+#endif
+ return (c & 0x7f);
+ }
+ }
+ } while (-1);
+#if 1
+ splx(s);
+#else
+ set_psr(psr);
+#endif
+ return -1;
}
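
dartcngetc() follows the complementary polled pattern: loop on the status register, and when RXRDY is set either clear break/error conditions or return the received byte masked to seven bits. A simplified sketch, assuming generic status-bit names rather than the driver's (errors and breaks are simply cleared and the loop retried here):

	/* Sketch of dartcngetc()'s receive loop.  The masks are placeholders;
	 * the real RXRDY/RBRK/error bits come from the DUART status register
	 * definitions used by the driver. */
	#define SR_RXRDY	0x01
	#define SR_ERROR	0xf0		/* break, framing, parity, overrun */

	static int
	polled_getc(unsigned char (*status)(void), int (*rx_byte)(void),
	    void (*reset_errors)(void))
	{
		unsigned char sr;

		for (;;) {
			sr = status();
			if ((sr & SR_RXRDY) == 0)
				continue;		/* nothing received yet */
			if (sr & SR_ERROR) {
				(void)rx_byte();	/* discard the bad byte */
				reset_errors();		/* ERRRESET / BRKINTRESET */
				continue;
			}
			return (rx_byte() & 0x7f);	/* strip the parity bit */
		}
	}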
diff --git a/sys/arch/mvme88k/dev/if_ve.c b/sys/arch/mvme88k/dev/if_ve.c
index 502be8e6143..ec724fdb277 100644
--- a/sys/arch/mvme88k/dev/if_ve.c
+++ b/sys/arch/mvme88k/dev/if_ve.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ve.c,v 1.1 1999/05/29 04:41:43 smurph Exp $ */
+/* $OpenBSD: if_ve.c,v 1.2 2001/02/01 03:38:14 smurph Exp $ */
/*-
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1982, 1992, 1993
@@ -73,6 +73,7 @@
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/bugio.h>
+#include <machine/mmu.h> /* DMA_CACHE_SYNC, etc... */
#include <mvme88k/dev/if_vereg.h>
#include <mvme88k/dev/if_vevar.h>
@@ -109,17 +110,17 @@ hide void ve_shutdown __P((void *));
* This structure contains the output queue for the interface, its address, ...
*/
struct ve_softc {
- struct vam7990_softc sc_am7990; /* glue to MI code */
- struct intrhand sc_ih; /* interrupt vectoring */
- struct vereg1 *sc_r1; /* LANCE registers */
- u_short csr; /* Control/Status reg image */
- u_long board_addr;
- struct evcnt sc_intrcnt;
- struct evcnt sc_errcnt;
- struct vme2reg *sc_vme2;
- u_char sc_ipl;
- u_char sc_vec;
- int sc_flags;
+ struct vam7990_softc sc_am7990; /* glue to MI code */
+ struct intrhand sc_ih; /* interrupt vectoring */
+ struct vereg1 *sc_r1; /* LANCE registers */
+ u_short csr; /* Control/Status reg image */
+ u_long board_addr;
+ struct evcnt sc_intrcnt;
+ struct evcnt sc_errcnt;
+ struct vme2reg *sc_vme2;
+ u_char sc_ipl;
+ u_char sc_vec;
+ int sc_flags;
};
struct cfdriver ve_cd = {
@@ -232,7 +233,7 @@ ve_ackint(sc)
struct vam7990_softc *sc;
{
register struct vereg1 *reg1 = ((struct ve_softc *)sc)->sc_r1;
- ENABLE_INTR;
+ ENABLE_INTR;
}
int
@@ -326,7 +327,7 @@ veattach(parent, self, aux)
sc->sc_wrcsr = vewrcsr;
sc->sc_hwreset = vereset;
sc->sc_hwinit = NULL;
- vereset(sc);
+ vereset(sc);
ve_config(sc);
@@ -384,7 +385,7 @@ ve_config(sc)
sc->sc_memsize = 262144;
switch (sc->sc_memsize) {
- case 8192:
+ case 8192:
sc->sc_nrbuf = 4;
sc->sc_ntbuf = 1;
break;
@@ -896,9 +897,9 @@ ve_intr(arg)
return (0);
/* clear the interrupting condition */
- (*sc->sc_wrcsr)(sc, LE_CSR0,
- isr & (LE_C0_INEA | LE_C0_BABL | LE_C0_MISS | LE_C0_MERR |
- LE_C0_RINT | LE_C0_TINT | LE_C0_IDON));
+ (*sc->sc_wrcsr)(sc, LE_CSR0,
+ isr & (LE_C0_INEA | LE_C0_BABL | LE_C0_MISS |
+ LE_C0_MERR | LE_C0_RINT | LE_C0_TINT | LE_C0_IDON));
if (isr & LE_C0_ERR) {
if (isr & LE_C0_BABL) {
#ifdef LEDEBUG
@@ -942,7 +943,7 @@ ve_intr(arg)
ve_rint(sc);
if (isr & LE_C0_TINT)
ve_tint(sc);
- ve_ackint(sc);
+ ve_ackint(sc);
return (1);
}
@@ -1308,6 +1309,9 @@ ve_copytobuf_contig(sc, from, boff, len)
int boff, len;
{
volatile caddr_t buf = sc->sc_mem;
+ volatile caddr_t phys = (caddr_t)sc->sc_addr;
+ dma_cachectl((vm_offset_t)phys + boff, len, DMA_CACHE_SYNC);
+ dma_cachectl((vm_offset_t)buf + boff, len, DMA_CACHE_SYNC);
/*
* Just call bcopy() to do the work.
@@ -1322,7 +1326,9 @@ ve_copyfrombuf_contig(sc, to, boff, len)
int boff, len;
{
volatile caddr_t buf = sc->sc_mem;
-
+ volatile caddr_t phys = (caddr_t)sc->sc_addr;
+ dma_cachectl((vm_offset_t)phys + boff, len, DMA_CACHE_SYNC_INVAL);
+ dma_cachectl((vm_offset_t)buf + boff, len, DMA_CACHE_SYNC_INVAL);
/*
* Just call bcopy() to do the work.
*/
@@ -1335,6 +1341,9 @@ ve_zerobuf_contig(sc, boff, len)
int boff, len;
{
volatile caddr_t buf = sc->sc_mem;
+ volatile caddr_t phys = (caddr_t)sc->sc_addr;
+ dma_cachectl((vm_offset_t)phys + boff, len, DMA_CACHE_SYNC);
+ dma_cachectl((vm_offset_t)buf + boff, len, DMA_CACHE_SYNC);
/*
* Just let bzero() do the work
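
The three hunks above add explicit cache maintenance around the LANCE buffer copies, since these 88k boards do not snoop DMA traffic: dirty lines are flushed with DMA_CACHE_SYNC before the device reads a buffer, and flushed plus invalidated with DMA_CACHE_SYNC_INVAL when the CPU is about to read what the device wrote. A hedged sketch of that convention; dma_cachectl() and the flags are the ones used in the diff, the wrapper itself is only illustrative.

	/* Sketch: cache maintenance bracketing a DMA buffer on a non-snooping
	 * CPU, following the dma_cachectl() usage added above. */
	void
	dma_prepare(vm_offset_t va, int len, int writing_to_device)
	{
		if (writing_to_device)
			dma_cachectl(va, len, DMA_CACHE_SYNC);		/* flush dirty lines */
		else
			dma_cachectl(va, len, DMA_CACHE_SYNC_INVAL);	/* flush and invalidate */
	}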
diff --git a/sys/arch/mvme88k/dev/sclock.c b/sys/arch/mvme88k/dev/sclock.c
index fcc0ea7a398..f00541d0136 100644
--- a/sys/arch/mvme88k/dev/sclock.c
+++ b/sys/arch/mvme88k/dev/sclock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sclock.c,v 1.1 1999/09/27 18:43:25 smurph Exp $ */
+/* $OpenBSD: sclock.c,v 1.2 2001/02/01 03:38:14 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
*
@@ -86,6 +86,7 @@
#include <sys/gmon.h>
#endif
+#include <machine/board.h>
#include <machine/psl.h>
#include <machine/autoconf.h>
#include <machine/bugio.h>
@@ -113,13 +114,13 @@ struct simplelock cio_lock;
int statvar = 8192;
int statmin; /* statclock interval - 1/2*variance */
-static int sclockmatch __P((struct device *, void *, void *));
-static void sclockattach __P((struct device *, struct device *, void *));
+static int sclockmatch __P((struct device *, void *, void *));
+static void sclockattach __P((struct device *, struct device *, void *));
-void sbc_initstatclock __P((void));
-void m188_initstatclock __P((void));
-void m188_cio_init __P((unsigned));
-void write_cio __P((unsigned, unsigned));
+void sbc_initstatclock __P((void));
+void m188_initstatclock __P((void));
+void m188_cio_init __P((unsigned));
+void write_cio __P((unsigned, unsigned));
struct sclocksoftc {
struct device sc_dev;
@@ -134,8 +135,8 @@ struct cfdriver sclock_cd = {
NULL, "sclock", DV_DULL, 0
};
-int sbc_statintr __P((void *));
-int m188_statintr __P((void *));
+int sbc_statintr __P((void *));
+int m188_statintr __P((void *));
int sclockbus;
u_char stat_reset;
@@ -146,8 +147,8 @@ u_char stat_reset;
*/
int
sclockmatch(parent, vcf, args)
- struct device *parent;
- void *vcf, *args;
+struct device *parent;
+void *vcf, *args;
{
register struct confargs *ca = args;
register struct cfdata *cf = vcf;
@@ -169,40 +170,40 @@ sclockmatch(parent, vcf, args)
void
sclockattach(parent, self, args)
- struct device *parent, *self;
- void *args;
+struct device *parent, *self;
+void *args;
{
struct confargs *ca = args;
struct sclocksoftc *sc = (struct sclocksoftc *)self;
sclockbus = ca->ca_bustype;
-
- switch (sclockbus) {
+
+ switch (sclockbus) {
#if NPCCTWO > 0
- case BUS_PCCTWO:
- sc->sc_statih.ih_fn = sbc_statintr;
- sc->sc_statih.ih_arg = 0;
- sc->sc_statih.ih_wantframe = 1;
- sc->sc_statih.ih_ipl = ca->ca_ipl;
- stat_reset = ca->ca_ipl | PCC2_IRQ_IEN | PCC2_IRQ_ICLR;
- pcctwointr_establish(PCC2V_TIMER2, &sc->sc_statih);
- mdfp.statclock_init_func = &sbc_initstatclock;
- printf(": VME1x7");
- break;
+ case BUS_PCCTWO:
+ sc->sc_statih.ih_fn = sbc_statintr;
+ sc->sc_statih.ih_arg = 0;
+ sc->sc_statih.ih_wantframe = 1;
+ sc->sc_statih.ih_ipl = ca->ca_ipl;
+ stat_reset = ca->ca_ipl | PCC2_IRQ_IEN | PCC2_IRQ_ICLR;
+ pcctwointr_establish(PCC2V_TIMER2, &sc->sc_statih);
+ mdfp.statclock_init_func = &sbc_initstatclock;
+ printf(": VME1x7");
+ break;
#endif /* NPCCTWO */
#if NSYSCON > 0
- case BUS_SYSCON:
- sc->sc_statih.ih_fn = m188_statintr;
- sc->sc_statih.ih_arg = 0;
- sc->sc_statih.ih_wantframe = 1;
- sc->sc_statih.ih_ipl = ca->ca_ipl;
- sysconintr_establish(SYSCV_TIMER2, &sc->sc_statih);
- mdfp.statclock_init_func = &m188_initstatclock;
- printf(": VME188");
- break;
+ case BUS_SYSCON:
+ sc->sc_statih.ih_fn = m188_statintr;
+ sc->sc_statih.ih_arg = 0;
+ sc->sc_statih.ih_wantframe = 1;
+ sc->sc_statih.ih_ipl = ca->ca_ipl;
+ sysconintr_establish(SYSCV_TIMER2, &sc->sc_statih);
+ mdfp.statclock_init_func = &m188_initstatclock;
+ printf(": VME188");
+ break;
#endif /* NSYSCON */
- }
- printf("\n");
+ }
+ printf("\n");
}
#if NPCCTWO > 0
@@ -211,8 +212,8 @@ sbc_initstatclock(void)
{
register int statint, minint;
-#ifdef DEBUG
- printf("SBC statclock init\n");
+#ifdef CLOCK_DEBUG
+ printf("SBC statclock init\n");
#endif
if (stathz == 0)
stathz = hz;
@@ -232,7 +233,7 @@ sbc_initstatclock(void)
sys_pcc2->pcc2_t2cmp = pcc2_timer_us2lim(statint);
sys_pcc2->pcc2_t2count = 0;
sys_pcc2->pcc2_t2ctl = PCC2_TCTL_CEN | PCC2_TCTL_COC |
- PCC2_TCTL_COVF;
+ PCC2_TCTL_COVF;
sys_pcc2->pcc2_t2irq = stat_reset;
statmin = statint - (statvar >> 1);
@@ -240,7 +241,7 @@ sbc_initstatclock(void)
int
sbc_statintr(cap)
- void *cap;
+void *cap;
{
register u_long newint, r, var;
@@ -274,13 +275,14 @@ sbc_statintr(cap)
int
m188_statintr(cap)
- void *cap;
+void *cap;
{
register u_long newint, r, var;
+ volatile int *ist = (volatile int *)MVME188_IST;
- CIO_LOCK;
+ CIO_LOCK;
statclock((struct clockframe *)cap);
- write_cio(CIO_CSR1, CIO_GCB|CIO_CIP); /* Ack the interrupt */
+ write_cio(CIO_CSR1, CIO_GCB|CIO_CIP); /* Ack the interrupt */
/*
* Compute new randomized interval. The intervals are uniformly
@@ -292,12 +294,17 @@ m188_statintr(cap)
r = random() & (var - 1);
} while (r == 0);
newint = statmin + r;
-/* printf("newint = %d, 0x%x\n", newint, newint);*/
+ /*
+ printf("newint = %d, 0x%x\n", newint, newint);
+ */
write_cio(CIO_CT1MSB, (newint & 0xFF00) >> 8); /* Load time constant CTC #1 */
write_cio(CIO_CT1LSB, newint & 0xFF);
- /* force a trigger event */
- write_cio(CIO_CSR1, CIO_GCB|CIO_TCB|CIO_IE); /* Start CTC #1 running */
- CIO_UNLOCK;
+ /* force a trigger event */
+ write_cio(CIO_CSR1, CIO_GCB|CIO_TCB|CIO_IE); /* Start CTC #1 running */
+ if (*ist & DTI_BIT) {
+ printf("CIOI not clearing!\n");
+ }
+ CIO_UNLOCK;
return (1);
}
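
m188_statintr() reprograms the CTC with a randomized interval on every tick so that statistics sampling does not beat against periodic kernel activity: the next interval is statmin plus a uniform value below statvar (kept a power of two by the init code), and the 16-bit count is split across the counter's MSB/LSB registers. A stand-alone sketch of that computation; random() here is any uniform PRNG and the msb/lsb outputs stand for the CIO_CT1MSB/CIO_CT1LSB writes.

	/* Sketch of the randomized statclock interval computed above:
	 * interval = statmin + (uniform value in [1, statvar-1]). */
	unsigned int
	next_stat_interval(unsigned int statmin, unsigned int statvar,
	    unsigned long (*random)(void), unsigned char *msb, unsigned char *lsb)
	{
		unsigned long r, newint;

		do {
			r = random() & (statvar - 1);	/* statvar is a power of two */
		} while (r == 0);			/* never load a zero count */
		newint = statmin + r;
		*msb = (newint & 0xff00) >> 8;		/* CIO_CT1MSB */
		*lsb = newint & 0xff;			/* CIO_CT1LSB */
		return (newint);
	}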
@@ -306,8 +313,8 @@ m188_initstatclock(void)
{
register int statint, minint;
-#ifdef DEBUG
- printf("VME188 clock init\n");
+#ifdef CLOCK_DEBUG
+ printf("VME188 clock init\n");
#endif
simple_lock_init(&cio_lock);
if (stathz == 0)
@@ -322,8 +329,8 @@ m188_initstatclock(void)
minint = statint / 2 + 100;
while (statvar > minint)
statvar >>= 1;
- m188_cio_init(statint);
- statmin = statint - (statvar >> 1);
+ m188_cio_init(statint);
+ statmin = statint - (statvar >> 1);
}
#define CIO_CNTRL 0xFFF8300C
@@ -334,11 +341,19 @@ write_cio(reg, val)
unsigned reg,val;
{
int s, i;
+ volatile int *cio_ctrl = (volatile int *)CIO_CNTRL;
+
s = splclock();
- /* Select register */
- *((volatile int *) CIO_CTRL) = (reg & 0xFF);
- /* Write the value */
- *((volatile int *) CIO_CTRL) = (val & 0xFF);
+ CIO_LOCK;
+
+ i = *cio_ctrl; /* goto state 1 */
+ *cio_ctrl = 0; /* take CIO out of RESET */
+ i = *cio_ctrl; /* reset CIO state machine */
+
+ *cio_ctrl = (reg & 0xFF); /* Select register */
+ *cio_ctrl = (val & 0xFF); /* Write the value */
+
+ CIO_UNLOCK;
splx(s);
}
@@ -348,18 +363,21 @@ read_cio(reg)
unsigned reg;
{
int c;
- int s, i;
- volatile int *port = (volatile int *)CIO_CNTRL;
-
+ int s, i;
+ volatile int *cio_ctrl = (volatile int *)CIO_CNTRL;
+
s = splclock();
+ CIO_LOCK;
+
/* Select register */
- *port = (char)(reg&0xFF);
+ *cio_ctrl = (char)(reg&0xFF);
/* Delay for a short time to allow 8536 to settle */
- for(i=0;i<100;i++);
+ for (i=0;i<100;i++);
/* read the value */
- c = *port;
+ c = *cio_ctrl;
+ CIO_UNLOCK;
splx(s);
- return((u_char)c&0xFF);
+ return ((u_char)c&0xFF);
}
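
write_cio() and read_cio() above follow the Z8536's control-port protocol: a dummy read puts the port into the register-select state, the register number is written, and the next access transfers the data; the reworked write path also writes 0 first to make sure the chip is out of reset. A sketch of that sequence against a memory-mapped control register, generic apart from the handshake itself.

	/* Sketch of the Z8536 CIO control-port handshake used by write_cio()
	 * above.  'ctrl' is the memory-mapped control register. */
	static void
	cio_write_reg(volatile int *ctrl, unsigned reg, unsigned val)
	{
		int dummy;

		dummy = *ctrl;		/* force the port into state 1 */
		*ctrl = 0;		/* take the CIO out of reset */
		dummy = *ctrl;		/* resynchronize the state machine */
		*ctrl = reg & 0xff;	/* first access selects the register */
		*ctrl = val & 0xff;	/* second access writes the value */
		(void)dummy;
	}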
/*
@@ -372,41 +390,41 @@ void
m188_cio_init(unsigned p)
{
long i;
- short period;
+ short period;
- CIO_LOCK;
+ CIO_LOCK;
- period = p & 0xFFFF;
-
- /* Initialize 8536 CTC */
+ period = p & 0xFFFF;
+
+ /* Initialize 8536 CTC */
/* Start by forcing chip into known state */
(void) read_cio(CIO_MICR);
write_cio(CIO_MICR, CIO_MICR_RESET); /* Reset the CTC */
- for(i=0;i < 1000L; i++) /* Loop to delay */
+ for (i=0;i < 1000L; i++) /* Loop to delay */
;
/* Clear reset and start init seq. */
- write_cio(CIO_MICR, 0x00);
+ write_cio(CIO_MICR, 0x00);
/* Wait for chip to come ready */
- while((read_cio(CIO_MICR)) != (char) CIO_MICR_RJA)
+ while ((read_cio(CIO_MICR)) != (char) CIO_MICR_RJA)
;
- /* init Z8036 */
+ /* init Z8036 */
write_cio(CIO_MICR, CIO_MICR_MIE | CIO_MICR_NV | CIO_MICR_RJA | CIO_MICR_DLC);
write_cio(CIO_CTMS1, CIO_CTMS_CSC); /* Continuous count */
- write_cio(CIO_PDCB, 0xFF); /* set port B to input */
-
- write_cio(CIO_CT1MSB, (period & 0xFF00) >> 8); /* Load time constant CTC #1 */
+ write_cio(CIO_PDCB, 0xFF); /* set port B to input */
+
+ /* Load time constant CTC #1 */
+ write_cio(CIO_CT1MSB, (period & 0xFF00) >> 8);
write_cio(CIO_CT1LSB, period & 0xFF);
-
- /* enable counter 1 */
- write_cio(CIO_MCCR, CIO_MCCR_CT1E | CIO_MCCR_PBE);
- /* enable interrupts and start */
+ /* enable counter 1 */
+ write_cio(CIO_MCCR, CIO_MCCR_CT1E | CIO_MCCR_PBE);
+
+ /* enable interrupts and start */
/*write_cio(CIO_IVR, SYSCV_TIMER2);*/
- write_cio(CIO_CSR1, CIO_GCB|CIO_TCB|CIO_IE); /* Start CTC #1 running */
-
- CIO_UNLOCK;
-}
+ write_cio(CIO_CSR1, CIO_GCB|CIO_TCB|CIO_IE); /* Start CTC #1 running */
+ CIO_UNLOCK;
+}
#endif /* NSYSCON */
diff --git a/sys/arch/mvme88k/dev/siop.c b/sys/arch/mvme88k/dev/siop.c
index fe0c95e5a43..7b3c06174c1 100644
--- a/sys/arch/mvme88k/dev/siop.c
+++ b/sys/arch/mvme88k/dev/siop.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: siop.c,v 1.4 2001/01/13 05:18:58 smurph Exp $ */
+/* $OpenBSD: siop.c,v 1.5 2001/02/01 03:38:15 smurph Exp $ */
/*
* Copyright (c) 1994 Michael L. Hitch
@@ -229,9 +229,9 @@ siop_scsicmd(xs)
slp = xs->sc_link;
sc = slp->adapter_softc;
flags = xs->flags;
- xs->error = XS_NOERROR;
+ xs->error = XS_NOERROR;
- /* XXXX ?? */
+ /* XXXX ?? */
if (flags & SCSI_DATA_UIO)
panic("siop: scsi data uio requested");
diff --git a/sys/arch/mvme88k/dev/syscon.c b/sys/arch/mvme88k/dev/syscon.c
index 9b052d66ad1..8056a228db3 100644
--- a/sys/arch/mvme88k/dev/syscon.c
+++ b/sys/arch/mvme88k/dev/syscon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: syscon.c,v 1.2 2000/03/26 23:32:00 deraadt Exp $ */
+/* $OpenBSD: syscon.c,v 1.3 2001/02/01 03:38:15 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* All rights reserved.
@@ -79,9 +79,10 @@ struct sysconsoftc {
void *sc_vaddr; /* Utility I/O space */
void *sc_paddr;
struct sysconreg *sc_syscon; /* the actual registers */
- struct intrhand sc_abih; /* `abort' switch */
- struct intrhand sc_acih; /* `ac fial' */
- struct intrhand sc_sfih; /* `sys fial' */
+ struct intrhand sc_abih; /* `abort' switch */
+	struct intrhand sc_acih;	/* `ac fail' */
+	struct intrhand sc_sfih;	/* `sys fail' */
+ struct intrhand sc_m188ih; /* `m188 interrupt' */
};
void sysconattach __P((struct device *, struct device *, void *));
@@ -90,6 +91,7 @@ void setupiackvectors __P((void));
int sysconabort __P((struct frame *frame));
int sysconacfail __P((struct frame *frame));
int sysconsysfail __P((struct frame *frame));
+int sysconm188 __P((struct frame *frame));
struct cfattach syscon_ca = {
sizeof(struct sysconsoftc), sysconmatch, sysconattach
@@ -103,8 +105,8 @@ struct sysconreg *sys_syscon = NULL;
int
sysconmatch(parent, vcf, args)
- struct device *parent;
- void *vcf, *args;
+struct device *parent;
+void *vcf, *args;
{
struct cfdata *cf = vcf;
struct confargs *ca = args;
@@ -112,16 +114,16 @@ sysconmatch(parent, vcf, args)
/* Don't match if wrong cpu */
if (cputyp != CPU_188) return (0);
- /* Uh, MVME188 better have on of these, so always match if it
- * is a MVME188... */
- syscon = (struct sysconreg *)(IIOV(ca->ca_paddr));
+	/* Uh, MVME188 better have one of these, so always match if it
+ * is a MVME188... */
+ syscon = (struct sysconreg *)(IIOV(ca->ca_paddr));
return (1);
}
int
syscon_print(args, bus)
- void *args;
- const char *bus;
+void *args;
+const char *bus;
{
struct confargs *ca = args;
@@ -134,8 +136,8 @@ syscon_print(args, bus)
int
syscon_scan(parent, child, args)
- struct device *parent;
- void *child, *args;
+struct device *parent;
+void *child, *args;
{
struct cfdata *cf = child;
struct sysconsoftc *sc = (struct sysconsoftc *)parent;
@@ -143,14 +145,14 @@ syscon_scan(parent, child, args)
struct confargs oca;
if (parent->dv_cfdata->cf_driver->cd_indirect) {
- printf(" indirect devices not supported\n");
- return 0;
- }
+ printf(" indirect devices not supported\n");
+ return 0;
+ }
bzero(&oca, sizeof oca);
oca.ca_offset = cf->cf_loc[0];
oca.ca_ipl = cf->cf_loc[1];
- if ((oca.ca_offset != (void*)-1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) {
+ if (((int)oca.ca_offset != -1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) {
oca.ca_vaddr = sc->sc_vaddr + oca.ca_offset;
oca.ca_paddr = sc->sc_paddr + oca.ca_offset;
} else {
@@ -167,9 +169,10 @@ syscon_scan(parent, child, args)
}
void
+
sysconattach(parent, self, args)
- struct device *parent, *self;
- void *args;
+struct device *parent, *self;
+void *args;
{
struct confargs *ca = args;
struct sysconsoftc *sc = (struct sysconsoftc *)self;
@@ -189,61 +192,74 @@ sysconattach(parent, self, args)
printf(": rev %d\n", 1);
- /*
- * pseudo driver, abort interrupt handler
- */
- sc->sc_abih.ih_fn = sysconabort;
- sc->sc_abih.ih_arg = 0;
- sc->sc_abih.ih_ipl = IPL_ABORT;
- sc->sc_abih.ih_wantframe = 1;
- sc->sc_acih.ih_fn = sysconacfail;
- sc->sc_acih.ih_arg = 0;
- sc->sc_acih.ih_ipl = IPL_ABORT;
- sc->sc_acih.ih_wantframe = 1;
- sc->sc_sfih.ih_fn = sysconsysfail;
- sc->sc_sfih.ih_arg = 0;
- sc->sc_sfih.ih_ipl = IPL_ABORT;
- sc->sc_sfih.ih_wantframe = 1;
-
- intr_establish(SYSCV_ABRT, &sc->sc_abih);
- intr_establish(SYSCV_ACF, &sc->sc_acih);
- intr_establish(SYSCV_SYSF, &sc->sc_sfih);
+ /*
+ * pseudo driver, abort interrupt handler
+ */
+ sc->sc_abih.ih_fn = sysconabort;
+ sc->sc_abih.ih_arg = 0;
+ sc->sc_abih.ih_ipl = IPL_ABORT;
+ sc->sc_abih.ih_wantframe = 1;
+ sc->sc_acih.ih_fn = sysconacfail;
+ sc->sc_acih.ih_arg = 0;
+ sc->sc_acih.ih_ipl = IPL_ABORT;
+ sc->sc_acih.ih_wantframe = 1;
+ sc->sc_sfih.ih_fn = sysconsysfail;
+ sc->sc_sfih.ih_arg = 0;
+ sc->sc_sfih.ih_ipl = IPL_ABORT;
+ sc->sc_sfih.ih_wantframe = 1;
+ sc->sc_m188ih.ih_fn = sysconm188;
+ sc->sc_m188ih.ih_arg = 0;
+ sc->sc_m188ih.ih_ipl = IPL_ABORT;
+ sc->sc_m188ih.ih_wantframe = 1;
+
+ intr_establish(SYSCV_ABRT, &sc->sc_abih);
+ intr_establish(SYSCV_ACF, &sc->sc_acih);
+ intr_establish(SYSCV_SYSF, &sc->sc_sfih);
+ intr_establish(M188_IVEC, &sc->sc_m188ih);
config_search(syscon_scan, self, args);
}
int
sysconintr_establish(vec, ih)
- int vec;
- struct intrhand *ih;
+int vec;
+struct intrhand *ih;
{
return (intr_establish(vec, ih));
}
int
sysconabort(frame)
- struct frame *frame;
+struct frame *frame;
{
- ISR_RESET_NMI;
- nmihand(frame);
+ ISR_RESET_NMI;
+ nmihand(frame);
return (1);
}
int
sysconsysfail(frame)
- struct frame *frame;
+struct frame *frame;
{
- ISR_RESET_SYSFAIL;
- nmihand(frame);
+ ISR_RESET_SYSFAIL;
+ nmihand(frame);
return (1);
}
int
sysconacfail(frame)
- struct frame *frame;
+struct frame *frame;
+{
+ ISR_RESET_ACFAIL;
+ nmihand(frame);
+ return (1);
+}
+
+int
+sysconm188(frame)
+struct frame *frame;
{
- ISR_RESET_ACFAIL;
- nmihand(frame);
+ printf("MVME188 interrupting?\n");
return (1);
}
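
The new sysconm188 handler is wired up the same way as the existing abort, AC-fail and sys-fail handlers: fill in a struct intrhand with the function, argument, IPL and wantframe flag, then pass it to intr_establish() with the vector. Sketched as a pattern below; the vector and IPL names are the ones used in the attach code above, the handler body is a placeholder.

	/* Sketch of the interrupt-handler registration pattern used in
	 * sysconattach() above. */
	static int
	example_handler(struct frame *frame)
	{
		/* acknowledge the source, then report that it was handled */
		return (1);
	}

	static void
	example_establish(struct intrhand *ih, int vec, int ipl)
	{
		ih->ih_fn = example_handler;
		ih->ih_arg = 0;
		ih->ih_ipl = ipl;		/* e.g. IPL_ABORT above */
		ih->ih_wantframe = 1;		/* handler wants the trap frame */
		intr_establish(vec, ih);	/* e.g. SYSCV_ABRT, M188_IVEC */
	}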
diff --git a/sys/arch/mvme88k/dev/sysconreg.h b/sys/arch/mvme88k/dev/sysconreg.h
index bcf4739371f..568ab09a2bc 100644
--- a/sys/arch/mvme88k/dev/sysconreg.h
+++ b/sys/arch/mvme88k/dev/sysconreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: sysconreg.h,v 1.1 1999/09/27 18:43:25 smurph Exp $ */
+/* $OpenBSD: sysconreg.h,v 1.2 2001/02/01 03:38:15 smurph Exp $ */
/*
* Memory map for SYSCON found in mvme188 board set.
@@ -11,37 +11,37 @@
#include <machine/board.h>
struct sysconreg {
- volatile unsigned int *ien0;
- volatile unsigned int *ien1;
- volatile unsigned int *ien2;
- volatile unsigned int *ien3;
- volatile unsigned int *ienall;
- volatile unsigned int *ist;
- volatile unsigned int *setswi;
- volatile unsigned int *clrswi;
- volatile unsigned int *istate;
- volatile unsigned int *clrint;
- volatile unsigned char *global0;
- volatile unsigned char *global1;
- volatile unsigned char *global2;
- volatile unsigned char *global3;
- volatile unsigned int *ucsr;
- volatile unsigned int *glbres;
- volatile unsigned int *ccsr;
- volatile unsigned int *error;
- volatile unsigned int *pcnfa;
- volatile unsigned int *pcnfb;
- volatile unsigned int *extad;
- volatile unsigned int *extam;
- volatile unsigned int *whoami;
- volatile unsigned int *wmad;
- volatile unsigned int *rmad;
- volatile unsigned int *wvad;
- volatile unsigned int *rvad;
- volatile unsigned int *cio_portc;
- volatile unsigned int *cio_portb;
- volatile unsigned int *cio_porta;
- volatile unsigned int *cio_ctrl;
+ volatile unsigned int *ien0;
+ volatile unsigned int *ien1;
+ volatile unsigned int *ien2;
+ volatile unsigned int *ien3;
+ volatile unsigned int *ienall;
+ volatile unsigned int *ist;
+ volatile unsigned int *setswi;
+ volatile unsigned int *clrswi;
+ volatile unsigned int *istate;
+ volatile unsigned int *clrint;
+ volatile unsigned char *global0;
+ volatile unsigned char *global1;
+ volatile unsigned char *global2;
+ volatile unsigned char *global3;
+ volatile unsigned int *ucsr;
+ volatile unsigned int *glbres;
+ volatile unsigned int *ccsr;
+ volatile unsigned int *error;
+ volatile unsigned int *pcnfa;
+ volatile unsigned int *pcnfb;
+ volatile unsigned int *extad;
+ volatile unsigned int *extam;
+ volatile unsigned int *whoami;
+ volatile unsigned int *wmad;
+ volatile unsigned int *rmad;
+ volatile unsigned int *wvad;
+ volatile unsigned int *rvad;
+ volatile unsigned int *cio_portc;
+ volatile unsigned int *cio_portb;
+ volatile unsigned int *cio_porta;
+ volatile unsigned int *cio_ctrl;
};
extern struct sysconreg *sys_syscon;
@@ -49,10 +49,10 @@ extern struct sysconreg *sys_syscon;
/*
* Vectors we use
*/
-#define SYSCV_ABRT 0x110
-#define SYSCV_SYSF 0x111
-#define SYSCV_ACF 0x112
-#define SYSCV_SCC 0x55
+#define SYSCV_ABRT 0x52
+#define SYSCV_SYSF 0x53
+#define SYSCV_ACF 0x54
+#define SYSCV_SCC 0x55
#define SYSCV_TIMER4 0x56
#define SYSCV_TIMER3 0x57
#define SYSCV_TIMER2 0x58
diff --git a/sys/arch/mvme88k/dev/vme.c b/sys/arch/mvme88k/dev/vme.c
index cb5c4726620..b3498243929 100644
--- a/sys/arch/mvme88k/dev/vme.c
+++ b/sys/arch/mvme88k/dev/vme.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vme.c,v 1.6 2001/01/14 20:25:22 smurph Exp $ */
+/* $OpenBSD: vme.c,v 1.7 2001/02/01 03:38:15 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1995 Theo de Raadt
@@ -65,8 +65,11 @@ int vme2chip_init __P((struct vmesoftc *sc));
u_long vme2chip_map __P((u_long base, int len, int dwidth));
int vme2abort __P((struct frame *frame));
int sysconabort __P((struct frame *frame));
+int intr_findvec __P((int start, int end));
static int vmebustype;
+static int vmevecbase;
+
struct vme2reg *sys_vme2 = NULL;
struct cfattach vme_ca = {
@@ -259,7 +262,7 @@ vmescan(parent, child, args, bustype)
oca.ca_vec = cf->cf_loc[2];
oca.ca_ipl = cf->cf_loc[3];
if (oca.ca_ipl > 0 && oca.ca_vec == -1)
- oca.ca_vec = intr_findvec(255, 0);
+ oca.ca_vec = vme_findvec();
if (oca.ca_len == -1)
oca.ca_len = 4096;
@@ -284,15 +287,15 @@ vmescan(parent, child, args, bustype)
void
vmeattach(parent, self, args)
- struct device *parent, *self;
- void *args;
+struct device *parent, *self;
+void *args;
{
struct vmesoftc *sc = (struct vmesoftc *)self;
struct confargs *ca = args;
struct vme1reg *vme1;
struct vme2reg *vme2;
int scon;
- char sconc;
+ char sconc;
/* XXX any initialization to do? */
@@ -303,27 +306,38 @@ vmeattach(parent, self, args)
#if NPCCTWO > 0
case BUS_PCCTWO:
vme2 = (struct vme2reg *)sc->sc_vaddr;
+ /* Sanity check that the Bug is set up right */
+ if (VME2_GET_VBR1(vme2) >= 0xF0) {
+ panic("Correct the VME Vector Base Registers in the Bug ROM.\nSuggested values are 0x60 for VME Vec0 and 0x70 for VME Vec1.");
+ }
+ vmevecbase = VME2_GET_VBR1(vme2) + 0x10;
scon = (vme2->vme2_tctl & VME2_TCTL_SCON);
- printf(": %ssystem controller\n", scon ? "" : "not ");
- if (scon) sys_vme2 = vme2;
+ printf(": vector base 0x%x, %ssystem controller\n", vmevecbase, scon ? "" : "not ");
+ if (scon) sys_vme2 = vme2;
vme2chip_init(sc);
break;
#endif
#if NSYSCON > 0
case BUS_SYSCON:
- vme2 = (struct vme2reg *)sc->sc_vaddr;
- sconc = *(char *)GLOBAL1;
- sconc &= M188_SYSCON;
+ vmevecbase = 0x80; /* Hard coded for MVME188 */
+ sconc = *(char *)GLOBAL1;
+ sconc &= M188_SYSCON;
printf(": %ssystem controller\n", scon ? "" : "not ");
vmesyscon_init(sc);
break;
#endif
}
-
while (config_found(self, NULL, NULL))
;
}
+/* find a VME vector based on what is in NVRAM settings. */
+int
+vme_findvec(void)
+{
+ return(intr_findvec(vmevecbase, 0xFF));
+}
+
/*
* On the VMEbus, only one cpu may be configured to respond to any
* particular vme ipl. Therefore, it wouldn't make sense to globally
@@ -336,6 +350,17 @@ vmeattach(parent, self, args)
* Obviously no check is made to see if another cpu is using that
* interrupt. If you share you will lose.
*/
+
+/*
+ * All VME bus devices will use a vector starting with VBR1 + 0x10
+ * and determined by intr_findvec(). (in machdep.c) vmeintr_establish()
+ * should be called with the 'vec' argument = 0 to 'auto vector' a
+ * VME device.
+ *
+ * The 8 SW interrupters will start with VBR1. The rest will start
+ * with (VBR0 << 4) & 0xFF.
+ */
+
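+
+ * As a hedged illustration of the arithmetic described above (not driver
+ * code): VBR1 is a 4-bit field in the VMEchip2 vector base register, the
+ * usable base is that nibble shifted left by four, and device vectors are
+ * handed out starting 0x10 above it, leaving VBR1's own block to the eight
+ * software interrupters.  VME2_GET_VBR1() in vme.h does the extraction.
+ *
+ *	unsigned int
+ *	vme_vector_base(unsigned long vbr_reg)
+ *	{
+ *		unsigned int vbr1 = (vbr_reg >> 24) & 0xf;  /* 4-bit VBR1 field */
+ *		unsigned int base = vbr1 << 4;              /* e.g. 0x7 -> 0x70 */
+ *
+ *		return (base + 0x10);   /* device vectors start one block above */
+ *	}
+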
int
vmeintr_establish(vec, ih)
int vec;
@@ -350,8 +375,6 @@ vmeintr_establish(vec, ih)
#endif
int x;
- x = (intr_establish(vec, ih));
-
switch (vmebustype) {
#if NPCCTWO > 0
case BUS_PCCTWO:
@@ -363,13 +386,10 @@ vmeintr_establish(vec, ih)
#if NSYSCON > 0
case BUS_SYSCON:
syscon = (struct sysconreg *)sc->sc_vaddr;
- /*
- syscon->vme2_irqen = vme2->vme2_irqen |
- VMES_IRQ_VME(ih->ih_ipl);
- */
break;
#endif
}
+ x = (intr_establish(vec, ih));
return (x);
}
@@ -388,28 +408,28 @@ vme2chip_init(sc)
printf("%s: using BUG parameters\n", sc->sc_dev.dv_xname);
/* setup a A32D16 space */
printf("%s: 1phys 0x%08x-0x%08x to VME 0x%08x-0x%08x\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000,
- vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000,
+ vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000);
/* setup a A32D32 space */
printf("%s: 2phys 0x%08x-0x%08x to VME 0x%08x-0x%08x\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000,
- vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000,
+ vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000);
/* setup a A24D16 space */
printf("%s: 3phys 0x%08x-0x%08x to VME 0x%08x-0x%08x\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master3 << 16, vme2->vme2_master3 & 0xffff0000,
- vme2->vme2_master3 << 16, vme2->vme2_master3 & 0xffff0000);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master3 << 16, vme2->vme2_master3 & 0xffff0000,
+ vme2->vme2_master3 << 16, vme2->vme2_master3 & 0xffff0000);
/* setup a XXXXXX space */
printf("%s: 4phys 0x%08x-0x%08x to VME 0x%08x-0x%08x\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master4 << 16, vme2->vme2_master4 & 0xffff0000,
- vme2->vme2_master4 << 16 + vme2->vme2_master4mod << 16,
- vme2->vme2_master4 & 0xffff0000 + vme2->vme2_master4 & 0xffff0000);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master4 << 16, vme2->vme2_master4 & 0xffff0000,
+ vme2->vme2_master4 << 16 + vme2->vme2_master4mod << 16,
+ vme2->vme2_master4 & 0xffff0000 + vme2->vme2_master4 & 0xffff0000);
/*
* Map the VME irq levels to the cpu levels 1:1.
* This is rather inflexible, but much easier.
@@ -423,7 +443,20 @@ vme2chip_init(sc)
printf("%s: vme2_irql4 = 0x%08x\n", sc->sc_dev.dv_xname,
vme2->vme2_irql4);
*/
- if (vmebustype == BUS_PCCTWO){
+
+ /* Enable the reset switch */
+ vme2->vme2_tctl |= VME2_TCTL_RSWE;
+ /* Set Watchdog timeout to about 1 minute */
+ vme2->vme2_tcr |= VME2_TCR_64S;
+ /* Enable VMEChip2 Interrupts */
+ vme2->vme2_vbr |= VME2_IOCTL1_MIEN;
+ /*
+ * Map the Software VME irq levels to the cpu level 7.
+ */
+ vme2->vme2_irql3 = (7 << VME2_IRQL3_SW7SHIFT) | (7 << VME2_IRQL3_SW6SHIFT) |
+ (7 << VME2_IRQL3_SW5SHIFT) | (7 << VME2_IRQL3_SW4SHIFT) |
+ (7 << VME2_IRQL3_SW3SHIFT) | (7 << VME2_IRQL3_SW2SHIFT) |
+ (7 << VME2_IRQL3_SW1SHIFT) | (7 << VME2_IRQL3_SW0SHIFT);
/*
* pseudo driver, abort interrupt handler
*/
@@ -433,15 +466,14 @@ vme2chip_init(sc)
sc->sc_abih.ih_wantframe = 1;
intr_establish(110, &sc->sc_abih);
vme2->vme2_irqen |= VME2_IRQ_AB;
- }
- vme2->vme2_irqen = vme2->vme2_irqen | VME2_IRQ_ACF;
+ vme2->vme2_irqen |= VME2_IRQ_ACF;
}
#endif /* NPCCTWO */
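
vme2chip_init() now also enables the front-panel reset switch, arms the watchdog at 64 seconds, sets the chip's master interrupt enable, and routes all eight software interrupters to CPU level 7 by packing one level per interrupter into vme2_irql3 with the per-source shift constants. A sketch of that packing, assuming the SWn shift constants step by 4 bits with SW0 in the low nibble, as the shift names in vme.h suggest:

	/* Sketch: packing one interrupt level per software interrupter into
	 * the VMEchip2 IRQL3 register, as done in vme2chip_init() above.
	 * Assumes consecutive 4-bit fields, SW0 in the low nibble. */
	unsigned long
	vme2_pack_sw_levels(unsigned int level)
	{
		unsigned long val = 0;
		int i;

		for (i = 0; i < 8; i++)
			val |= (unsigned long)(level & 0x7) << (i * 4);
		return (val);	/* value written to vme2->vme2_irql3 */
	}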
#if NSYSCON > 0
int
vmesyscon_init(sc)
- struct vmesoftc *sc;
+struct vmesoftc *sc;
{
struct sysconreg *syscon = (struct sysconreg *)sc->sc_vaddr;
u_long ctl, addr, vasize;
@@ -453,22 +485,22 @@ vmesyscon_init(sc)
ctl = vme2->vme2_masterctl;
printf("%s: using BUG parameters\n", sc->sc_dev.dv_xname);
printf("%s: 1phys 0x%08x-0x%08x to VME 0x%08x-0x%08x master\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000,
- vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000,
+ vme2->vme2_master1 << 16, vme2->vme2_master1 & 0xffff0000);
printf("%s: 2phys 0x%08x-0x%08x to VME 0x%08x-0x%08x slave\n",
- sc->sc_dev.dv_xname,
- vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000,
- vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000);
-
- /*
- * pseudo driver, abort interrupt handler
- */
- sc->sc_abih.ih_fn = sysconabort;
- sc->sc_abih.ih_arg = 0;
- sc->sc_abih.ih_ipl = IPL_NMI;
- sc->sc_abih.ih_wantframe = 1;
- intr_establish(110, &sc->sc_abih);
+ sc->sc_dev.dv_xname,
+ vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000,
+ vme2->vme2_master2 << 16, vme2->vme2_master2 & 0xffff0000);
+
+ /*
+ * pseudo driver, abort interrupt handler
+ */
+ sc->sc_abih.ih_fn = sysconabort;
+ sc->sc_abih.ih_arg = 0;
+ sc->sc_abih.ih_ipl = IPL_NMI;
+ sc->sc_abih.ih_wantframe = 1;
+ intr_establish(110, &sc->sc_abih);
#endif /* TODO */
}
#endif /* NSYSCON */
@@ -505,11 +537,24 @@ vme2abort(frame)
{
struct vmesoftc *sc = (struct vmesoftc *) vme_cd.cd_devs[0];
struct vme2reg *vme2 = (struct vme2reg *)sc->sc_vaddr;
+ int rc = 0;
+ if (vme2->vme2_irqstat & VME2_IRQ_AB) {
+ vme2->vme2_irqclr = VME2_IRQ_AB;
+ nmihand(frame);
+ rc = 1;
+ }
+ if (vme2->vme2_irqstat & VME2_IRQ_AB) {
+ vme2->vme2_irqclr = VME2_IRQ_AB;
+ nmihand(frame);
+ rc = 1;
+ }
+#if 0
if (vme2->vme2_irqstat & VME2_IRQ_AB == 0) {
printf("%s: abort irq not set\n", sc->sc_dev.dv_xname);
return (0);
}
+#endif
vme2->vme2_irqclr = VME2_IRQ_AB;
nmihand(frame);
return (1);
diff --git a/sys/arch/mvme88k/dev/vme.h b/sys/arch/mvme88k/dev/vme.h
index d143d53f791..98c64ddeaa6 100644
--- a/sys/arch/mvme88k/dev/vme.h
+++ b/sys/arch/mvme88k/dev/vme.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vme.h,v 1.3 1999/05/29 04:41:44 smurph Exp $ */
+/* $OpenBSD: vme.h,v 1.4 2001/02/01 03:38:15 smurph Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -30,9 +30,9 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#if 0
-#include <machine/cpu.h>
-#endif
+#ifndef __MVEME88K_DEV_VME_H__
+#define __MVEME88K_DEV_VME_H__
+
struct vmesoftc {
struct device sc_dev;
void * sc_vaddr;
@@ -222,7 +222,22 @@ struct vme2reg {
/*40*/ volatile u_long vme2_dmacount;
/*44*/ volatile u_long vme2_dmatable;
/*48*/ volatile u_long vme2_dmastat;
-/*4c*/ volatile u_long vme2_vmejunk;
+/*4c*/ volatile u_long vme2_tcr;
+#define VME2_TCR_1MS (1 << 8) /* Watchdog 1 ms */
+#define VME2_TCR_2MS (2 << 8) /* Watchdog 2 ms */
+#define VME2_TCR_4MS (3 << 8) /* Watchdog 4 ms */
+#define VME2_TCR_8MS (4 << 8) /* Watchdog 8 ms */
+#define VME2_TCR_16MS (5 << 8) /* Watchdog 16 ms */
+#define VME2_TCR_32MS (6 << 8) /* Watchdog 32 ms */
+#define VME2_TCR_64MS (7 << 8) /* Watchdog 64 ms */
+#define VME2_TCR_128MS (8 << 8) /* Watchdog 128 ms */
+#define VME2_TCR_256MS (9 << 8) /* Watchdog 256 ms */
+#define VME2_TCR_512MS (10 << 8) /* Watchdog 512 ms */
+#define VME2_TCR_1S (11 << 8) /* Watchdog 1 s */
+#define VME2_TCR_4S (12 << 8) /* Watchdog 4 s */
+#define VME2_TCR_16S (13 << 8) /* Watchdog 16 s */
+#define VME2_TCR_32S (14 << 8) /* Watchdog 32 s */
+#define VME2_TCR_64S (15 << 8) /* Watchdog 64 s */
/*50*/ volatile u_long vme2_t1cmp;
/*54*/ volatile u_long vme2_t1count;
/*58*/ volatile u_long vme2_t2cmp;
@@ -232,9 +247,25 @@ struct vme2reg {
#define VME2_TCTL1_COC 0x02
#define VME2_TCTL1_COVF 0x04
#define VME2_TCTL1_OVF 0xf0
-#define VME2_TCTL_SCON 0x40000000 /* we are SCON */
-#define VME2_TCTL_SYSFAIL 0x20000000 /* light SYSFAIL led */
+#define VME2_TCTL2_CEN (0x01 << 8)
+#define VME2_TCTL2_COC (0x02 << 8)
+#define VME2_TCTL2_COVF (0x04 << 8)
+#define VME2_TCTL2_OVF (0xf0 << 8)
+#define VME2_TCTL_WDEN 0x00010000 /* Watchdog Enable */
+#define VME2_TCTL_WDRSE 0x00020000 /* Watchdog Reset Enable */
+#define VME2_TCTL_WDSL 0x00040000 /* local or system reset */
+#define VME2_TCTL_WDBFE 0x00080000 /* Watchdog Board Fail Enable */
+#define VME2_TCTL_WDTO 0x00100000 /* Watchdog Timeout Status */
+#define VME2_TCTL_WDCC 0x00200000 /* Watchdog Clear Counter */
+#define VME2_TCTL_WDCS 0x00400000 /* Watchdog Clear Timeout */
#define VME2_TCTL_SRST 0x00800000 /* system reset */
+#define VME2_TCTL_RSWE 0x01000000 /* Reset Switch Enable */
+#define VME2_TCTL_BDFLO 0x02000000 /* Assert Board Fail */
+#define VME2_TCTL_CPURS 0x04000000 /* Clear Power-up Reset bit */
+#define VME2_TCTL_PURS 0x08000000 /* Power-up Reset bit */
+#define VME2_TCTL_BDFLI 0x10000000 /* Board Fail Status*/
+#define VME2_TCTL_SYSFAIL 0x20000000 /* light SYSFAIL led */
+#define VME2_TCTL_SCON 0x40000000 /* we are SCON */
/*64*/ volatile u_long vme2_prescale;
/*68*/ volatile u_long vme2_irqstat;
/*6c*/ volatile u_long vme2_irqen;
@@ -264,6 +295,7 @@ struct vme2reg {
#define VME2_IRQ_SW2 0x00000400
#define VME2_IRQ_SW1 0x00000200
#define VME2_IRQ_SW0 0x00000100
+#define VME2_IRQ_SW(x)	((1 << (x)) << 8)
#define VME2_IRQ_SPARE 0x00000080
#define VME2_IRQ_VME7 0x00000040
#define VME2_IRQ_VME6 0x00000020
@@ -311,8 +343,13 @@ struct vme2reg {
#define VME2_IRQL4_VME1SHIFT 0
/*88*/ volatile u_long vme2_vbr;
#define VME2_SYSFAIL (1 << 22)
+#define VME2_IOCTL1_MIEN (1 << 23)
#define VME2_VBR_0SHIFT 28
#define VME2_VBR_1SHIFT 24
+#define VME2_SET_VBR0(x) ((x) << VME2_VBR_0SHIFT)
+#define VME2_SET_VBR1(x) ((x) << VME2_VBR_1SHIFT)
+#define VME2_GET_VBR0(x) ((((x)->vme2_vbr >> 28) & 0xf) << 4)
+#define VME2_GET_VBR1(x) ((((x)->vme2_vbr >> 24) & 0xf) << 4)
#define VME2_VBR_GPOXXXX 0x00ffffff
/*8c*/ volatile u_long vme2_misc;
#define VME2_MISC_MPIRQEN 0x00000080 /* do not set */
@@ -336,8 +373,10 @@ struct vme2reg {
#define VME2_A16BASE 0xffff0000UL
#define VME2_A24BASE 0xff000000UL
-void * vmepmap __P((struct vmesoftc *sc, void * vmeaddr, int len,
- int bustype));
-void * vmemap __P((struct vmesoftc *sc, void * vmeaddr, int len,
- int bustype));
-int vmerw __P((struct vmesoftc *sc, struct uio *uio, int flags, int bus));
+void * vmepmap __P((struct vmesoftc *sc, void * vmeaddr, int len, int bustype));
+void * vmemap __P((struct vmesoftc *sc, void * vmeaddr, int len, int bustype));
+int vmerw __P((struct vmesoftc *sc, struct uio *uio, int flags, int bus));
+int vmeintr_establish __P((int vec, struct intrhand *ih));
+int vme_findvec __P((void));
+
+#endif /* __MVEME88K_DEV_VME_H__ */
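
With the parenthesization fixed, VME2_IRQ_SW(x) selects the enable/status/clear bit for software interrupter x and agrees with the explicit VME2_IRQ_SWn defines above (SW0 is 0x100). A quick check, offered only as an illustration:

	/* Illustration: the software-interrupter bit macro matches the
	 * explicit VME2_IRQ_SWn defines (SW0 = 0x100 ... SW7 = 0x8000). */
	#define VME2_IRQ_SW(x)	((1 << (x)) << 8)

	int
	vme2_sw_bits_ok(void)
	{
		return (VME2_IRQ_SW(0) == 0x00000100 &&
		        VME2_IRQ_SW(7) == 0x00008000);
	}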
diff --git a/sys/arch/mvme88k/dev/vs.c b/sys/arch/mvme88k/dev/vs.c
index 5c5e44b991d..d63ae1ba75e 100644
--- a/sys/arch/mvme88k/dev/vs.c
+++ b/sys/arch/mvme88k/dev/vs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vs.c,v 1.2 1999/09/27 18:43:25 smurph Exp $ */
+/* $OpenBSD: vs.c,v 1.3 2001/02/01 03:38:16 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
@@ -57,12 +57,14 @@
#define PAGESIZE 4096
#include <mvme88k/dev/vsreg.h>
#include <mvme88k/dev/vsvar.h>
- #include "machine/mmu.h"
+ #include <mvme88k/dev/vme.h> /* vme_findvec() */
+ #include <machine/mmu.h> /* DMA_CACHE_SYNC, etc... */
#define ROUND_PAGE m88k_round_page
#define TRUNC_PAGE m88k_trunc_page
#else
#include <mvme68k/dev/vsreg.h>
#include <mvme68k/dev/vsvar.h>
+ #include <mvme68k/dev/vme.h> /* vme_findvec() */
#define ROUND_PAGE m68k_round_page
#define TRUNC_PAGE m68k_trunc_page
#endif /* MVME187 */
@@ -71,15 +73,19 @@ int vs_checkintr __P((struct vs_softc *, struct scsi_xfer *, int *));
int vs_chksense __P((struct scsi_xfer *));
void vs_reset __P((struct vs_softc *));
void vs_resync __P((struct vs_softc *));
-void vs_initialize __P((struct vs_softc *));
-int vs_intr __P((struct vs_softc *));
+int vs_initialize __P((struct vs_softc *));
+int vs_nintr __P((struct vs_softc *));
+int vs_eintr __P((struct vs_softc *));
int vs_poll __P((struct vs_softc *, struct scsi_xfer *));
-void vs_scsidone __P((struct scsi_xfer *, int));
+void vs_scsidone __P((struct vs_softc *, struct scsi_xfer *, int));
M328_CQE * vs_getcqe __P((struct vs_softc *));
M328_IOPB * vs_getiopb __P((struct vs_softc *));
+static __inline__ void vs_clear_return_info __P((struct vs_softc *));
+
extern int cold;
extern u_int kvtop();
+
/*
* 16 bit 's' memory functions. MVME328 is a D16 board.
* We must program with that in mind or else...
@@ -94,39 +100,40 @@ extern u_int kvtop();
*/
void
-scopy(void *src, void *dst, unsigned short cnt)
+vs_copy(src, dst, cnt)
+void *src;
+void *dst;
+unsigned short cnt;
{
- register unsigned short volatile *x, *y, z;
+ register unsigned short volatile *x, *y, z;
- z = cnt >> 1;
- x = (unsigned short *) src;
- y = (unsigned short *) dst;
+ z = cnt >> 1;
+ x = (unsigned short *) src;
+ y = (unsigned short *) dst;
- while (z--) {
- *y++ = *x++;
- }
+ while (z--) {
+ *y++ = *x++;
+ }
}
void
-szero(void *src, u_long cnt)
+vs_zero(src, cnt)
+void *src;
+u_long cnt;
{
- register unsigned short *source;
- register unsigned short zero = 0;
- register unsigned short z;
+ register unsigned short *source;
+ register unsigned short zero = 0;
+ register unsigned short z;
- source = (unsigned short *) src;
- z = cnt >> 1;
+ source = (unsigned short *) src;
+ z = cnt >> 1;
- while (z--) {
- *source++ = zero;
- }
- return;
+ while (z--) {
+ *source++ = zero;
+ }
+ return;
}
-
-
-
-
/*
* default minphys routine for MVME328 based controllers
*/
@@ -134,38 +141,39 @@ void
vs_minphys(bp)
struct buf *bp;
{
- /*
- * No max transfer at this level.
- */
- minphys(bp);
+ /*
+ * No max transfer at this level.
+ */
+ minphys(bp);
}
-int do_vspoll(sc, to)
+int
+do_vspoll(sc, to)
struct vs_softc *sc;
int to;
{
- int i;
- if (to <= 0 ) to = 50000;
- /* use cmd_wait values? */
- i = 50000;
- /*spl0();*/
- while (!(CRSW & (M_CRSW_CRBV | M_CRSW_CC))) {
- if (--i <= 0) {
-#ifdef DEBUG
- printf ("waiting: timeout %d crsw 0x%x\n", to, CRSW);
+ int i;
+ if (to <= 0 ) to = 50000;
+ /* use cmd_wait values? */
+ i = 10000;
+ /*spl0();*/
+ while (!(CRSW & (M_CRSW_CRBV | M_CRSW_CC))) {
+ if (--i <= 0) {
+#ifdef SDEBUG2
+ printf ("waiting: timeout %d crsw 0x%x\n", to, CRSW);
#endif
- i = 50000;
- --to;
- if (to <= 0) {
- /*splx(s);*/
- vs_reset(sc);
- vs_resync(sc);
- printf ("timed out: timeout %d crsw 0x%x\n", to, CRSW);
- return 1;
- }
- }
- }
- return 0;
+ i = 50000;
+ --to;
+ if (to <= 0) {
+ /*splx(s);*/
+ vs_reset(sc);
+ vs_resync(sc);
+ printf ("timed out: timeout %d crsw 0x%x\n", to, CRSW);
+ return 1;
+ }
+ }
+ }
+ return 0;
}
int
@@ -173,520 +181,525 @@ vs_poll(sc, xs)
struct vs_softc *sc;
struct scsi_xfer *xs;
{
- M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_CQE *cqep;
- M328_IOPB *iopb;
- int i;
- int status;
- int s;
- int to;
-
- /*s = splbio();*/
- to = xs->timeout / 1000;
- for (;;) {
- if (do_vspoll(sc, to)) break;
- if (vs_checkintr(sc, xs, &status)) {
- vs_scsidone(xs, status);
- }
- if (CRSW & M_CRSW_ER)
- CRB_CLR_ER(CRSW);
- CRB_CLR_DONE(CRSW);
- if (xs->flags & ITSDONE) break;
- }
- return (COMPLETE);
+ M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_CQE *cqep;
+ M328_IOPB *iopb;
+ int i;
+ int status;
+ int s;
+ int to;
+
+ /*s = splbio();*/
+ to = xs->timeout / 1000;
+ for (;;) {
+ if (do_vspoll(sc, to)) {
+ xs->error = XS_SELTIMEOUT;
+ xs->status = -1;
+ xs->flags |= ITSDONE;
+ /* clear the return information */
+ vs_clear_return_info(sc);
+ if (xs->flags & SCSI_POLL)
+ return (COMPLETE);
+ break;
+ }
+ if (vs_checkintr(sc, xs, &status)) {
+ vs_scsidone(sc, xs, status);
+ }
+ if (CRSW & M_CRSW_ER)
+ CRB_CLR_ER(CRSW);
+ CRB_CLR_DONE(CRSW);
+ if (xs->flags & ITSDONE) break;
+ }
+ /* clear the return information */
+ vs_clear_return_info(sc);
+ return (COMPLETE);
}
-void thaw_queue(sc, target)
+void
+thaw_queue(sc, target)
struct vs_softc *sc;
u_int8_t target;
{
- u_short t;
- t = target << 8;
- t |= 0x0001;
- THAW_REG = t;
- /* loop until thawed */
- while (THAW_REG & 0x01);
+ u_short t;
+ t = target << 8;
+ t |= 0x0001;
+ THAW_REG = t;
+ /* loop until thawed */
+ while (THAW_REG & 0x01);
}
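
thaw_queue() above restarts a frozen MVME328 work queue by writing the queue number into the high byte of the thaw register with bit 0 set, then spinning until the board clears that bit. The same handshake, sketched against a generic 16-bit register; THAW_REG in the driver is represented here by a plain pointer argument.

	/* Sketch of the work-queue thaw handshake used by thaw_queue() above. */
	static void
	thaw_work_queue(volatile unsigned short *thaw_reg, unsigned char queue)
	{
		*thaw_reg = (queue << 8) | 0x0001;	/* queue number + thaw bit */
		while (*thaw_reg & 0x0001)
			;				/* wait for the board to ack */
	}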
void
-vs_scsidone (xs, stat)
+vs_scsidone (sc, xs, stat)
+struct vs_softc *sc;
struct scsi_xfer *xs;
int stat;
{
- struct scsi_link *slp = xs->sc_link;
- struct vs_softc *sc = slp->adapter_softc;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- xs->status = stat;
- while (xs->status == SCSI_CHECK) {
- vs_chksense(xs);
- thaw_queue(sc, slp->target + 1);
- }
- xs->flags |= ITSDONE;
- /*sc->sc_tinfo[slp->target].cmds++;*/
- if (CRSW & M_CRSW_ER)
- CRB_CLR_ER(CRSW);
- CRB_CLR_DONE(CRSW);
- thaw_queue(sc, slp->target + 1);
- szero(riopb, sizeof(M328_IOPB));
- scsi_done(xs);
+ int tgt;
+ struct scsi_link *slp = xs->sc_link;
+ xs->status = stat;
+
+ while (xs->status == SCSI_CHECK) {
+ vs_chksense(xs);
+ tgt = xs->sc_link->target + 1;
+ thaw_queue(sc, tgt);
+ }
+
+ tgt = xs->sc_link->target + 1;
+ xs->flags |= ITSDONE;
+ /*sc->sc_tinfo[slp->target].cmds++;*/
+
+ /* thaw all work queues */
+ thaw_queue(sc, tgt);
+ scsi_done(xs);
}
int
vs_scsicmd(xs)
struct scsi_xfer *xs;
{
- struct scsi_link *slp = xs->sc_link;
- struct vs_softc *sc = slp->adapter_softc;
- int flags, s, i;
- unsigned long buf, len;
- u_short iopb_len;
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_CQE *cqep;
- M328_IOPB *iopb;
- M328_CMD *m328_cmd;
-
- /* If the target doesn't exist, abort */
- if (!sc->sc_tinfo[slp->target].avail) {
- xs->error = XS_SELTIMEOUT;
- xs->status = -1;
- xs->flags |= ITSDONE;
- scsi_done(xs);
- }
-
- slp->quirks |= SDEV_NOLUNS;
- flags = xs->flags;
+ struct scsi_link *slp = xs->sc_link;
+ struct vs_softc *sc = slp->adapter_softc;
+ int flags, s, i;
+ unsigned long buf, len;
+ u_short iopb_len;
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_CQE *cqep;
+ M328_IOPB *iopb;
+ M328_CMD *m328_cmd;
+
+ slp->quirks |= SDEV_NOLUNS;
+ flags = xs->flags;
+
#ifdef SDEBUG
- printf("scsi_cmd() ");
- if (xs->cmd->opcode == 0) {
- printf("TEST_UNIT_READY ");
- } else if (xs->cmd->opcode == REQUEST_SENSE) {
- printf("REQUEST_SENSE ");
- } else if (xs->cmd->opcode == INQUIRY) {
- printf("INQUIRY ");
- } else if (xs->cmd->opcode == MODE_SELECT) {
- printf("MODE_SELECT ");
- } else if (xs->cmd->opcode == MODE_SENSE) {
- printf("MODE_SENSE ");
- } else if (xs->cmd->opcode == START_STOP) {
- printf("START_STOP ");
- } else if (xs->cmd->opcode == RESERVE) {
- printf("RESERVE ");
- } else if (xs->cmd->opcode == RELEASE) {
- printf("RELEASE ");
- } else if (xs->cmd->opcode == PREVENT_ALLOW) {
- printf("PREVENT_ALLOW ");
- } else if (xs->cmd->opcode == POSITION_TO_ELEMENT) {
- printf("POSITION_TO_EL ");
- } else if (xs->cmd->opcode == CHANGE_DEFINITION) {
- printf("CHANGE_DEF ");
- } else if (xs->cmd->opcode == MODE_SENSE_BIG) {
- printf("MODE_SENSE_BIG ");
- } else if (xs->cmd->opcode == MODE_SELECT_BIG) {
- printf("MODE_SELECT_BIG ");
- } else if (xs->cmd->opcode == 0x25) {
- printf("READ_CAPACITY ");
- } else if (xs->cmd->opcode == 0x08) {
- printf("READ_COMMAND ");
- }
+ printf("scsi_cmd() ");
+ if (xs->cmd->opcode == 0) {
+ printf("TEST_UNIT_READY ");
+ } else if (xs->cmd->opcode == REQUEST_SENSE) {
+ printf("REQUEST_SENSE ");
+ } else if (xs->cmd->opcode == INQUIRY) {
+ printf("INQUIRY ");
+ } else if (xs->cmd->opcode == MODE_SELECT) {
+ printf("MODE_SELECT ");
+ } else if (xs->cmd->opcode == MODE_SENSE) {
+ printf("MODE_SENSE ");
+ } else if (xs->cmd->opcode == START_STOP) {
+ printf("START_STOP ");
+ } else if (xs->cmd->opcode == RESERVE) {
+ printf("RESERVE ");
+ } else if (xs->cmd->opcode == RELEASE) {
+ printf("RELEASE ");
+ } else if (xs->cmd->opcode == PREVENT_ALLOW) {
+ printf("PREVENT_ALLOW ");
+ } else if (xs->cmd->opcode == POSITION_TO_ELEMENT) {
+ printf("POSITION_TO_EL ");
+ } else if (xs->cmd->opcode == CHANGE_DEFINITION) {
+ printf("CHANGE_DEF ");
+ } else if (xs->cmd->opcode == MODE_SENSE_BIG) {
+ printf("MODE_SENSE_BIG ");
+ } else if (xs->cmd->opcode == MODE_SELECT_BIG) {
+ printf("MODE_SELECT_BIG ");
+ } else if (xs->cmd->opcode == 0x25) {
+ printf("READ_CAPACITY ");
+ } else if (xs->cmd->opcode == 0x08) {
+ printf("READ_COMMAND ");
+ }
#endif
- if (flags & SCSI_POLL) {
- cqep = mc;
- iopb = miopb;
- } else {
- cqep = vs_getcqe(sc);
- iopb = vs_getiopb(sc);
- }
- if (cqep == NULL) {
- xs->error = XS_DRIVER_STUFFUP;
- return (TRY_AGAIN_LATER);
- }
-
-/* s = splbio();*/
- iopb_len = sizeof(M328_short_IOPB) + xs->cmdlen;
- szero(iopb, sizeof(M328_IOPB));
-
- scopy(xs->cmd, &iopb->iopb_SCSI[0], xs->cmdlen);
- iopb->iopb_CMD = IOPB_SCSI;
-#if 0
- LV(iopb->iopb_BUFF, kvtop(xs->data));
- LV(iopb->iopb_LENGTH, xs->datalen);
-#endif
- iopb->iopb_UNIT = slp->lun << 3;
- iopb->iopb_UNIT |= slp->target;
- iopb->iopb_NVCT = (u_char)sc->sc_nvec;
- iopb->iopb_EVCT = (u_char)sc->sc_evec;
-
- /*
- * Since the 187 doesn't support cache snooping, we have
- * to flush the cache for a write and flush with inval for
- * a read, prior to starting the IO.
- */
- if (xs->flags & SCSI_DATA_IN) { /* read */
-#if defined(MVME187)
- dma_cachectl((vm_offset_t)xs->data, xs->datalen,
- DMA_CACHE_SYNC_INVAL);
+ if (flags & SCSI_POLL) {
+ cqep = mc;
+ iopb = miopb;
+ } else {
+ cqep = vs_getcqe(sc);
+ iopb = vs_getiopb(sc);
+ }
+ if (cqep == NULL) {
+ xs->error = XS_DRIVER_STUFFUP;
+ return (TRY_AGAIN_LATER);
+ }
+
+ iopb_len = sizeof(M328_short_IOPB) + xs->cmdlen;
+ vs_zero(iopb, sizeof(M328_IOPB));
+
+ vs_copy(xs->cmd, &iopb->iopb_SCSI[0], xs->cmdlen);
+ iopb->iopb_CMD = IOPB_SCSI;
+ iopb->iopb_UNIT = slp->lun << 3;
+ iopb->iopb_UNIT |= slp->target;
+ iopb->iopb_NVCT = (u_char)sc->sc_nvec;
+ iopb->iopb_EVCT = (u_char)sc->sc_evec;
+
+ /*
+ * Since the 187 doesn't support cache snooping, we have
+ * to flush the cache for a write and flush with inval for
+ * a read, prior to starting the IO.
+ */
+ if (xs->flags & SCSI_DATA_IN) { /* read */
+#if defined(MVME187) || defined(MVME188) || defined(MVME197)
+ dma_cachectl((vm_offset_t)xs->data, xs->datalen,
+ DMA_CACHE_SYNC_INVAL);
#endif
- iopb->iopb_OPTION |= OPT_READ;
- } else { /* write */
-#if defined(MVME187)
- dma_cachectl((vm_offset_t)xs->data, xs->datalen,
- DMA_CACHE_SYNC);
+ iopb->iopb_OPTION |= OPT_READ;
+ } else { /* write */
+#if defined(MVME187) || defined(MVME188) || defined(MVME197)
+ dma_cachectl((vm_offset_t)xs->data, xs->datalen,
+ DMA_CACHE_SYNC);
#endif
- iopb->iopb_OPTION |= OPT_WRITE;
- }
-
- if (flags & SCSI_POLL) {
- iopb->iopb_OPTION |= OPT_INTDIS;
- iopb->iopb_LEVEL = 0;
- } else {
- iopb->iopb_OPTION |= OPT_INTEN;
- iopb->iopb_LEVEL = sc->sc_ipl;
- }
- iopb->iopb_ADDR = ADDR_MOD;
-
- /*
- * Wait until we can use the command queue entry.
- * Should only have to wait if the master command
- * queue entry is busy.
- */
- while (cqep->cqe_QECR & M_QECR_GO);
-
- cqep->cqe_IOPB_ADDR = OFF(iopb);
- cqep->cqe_IOPB_LENGTH = iopb_len;
- if (flags & SCSI_POLL) {
- cqep->cqe_WORK_QUEUE = slp->target + 1;
- } else {
- cqep->cqe_WORK_QUEUE = slp->target + 1;
- }
-
- MALLOC(m328_cmd, M328_CMD*, sizeof(M328_CMD), M_DEVBUF, M_WAITOK);
-
- m328_cmd->xs = xs;
- if (xs->datalen) {
- m328_cmd->top_sg_list = vs_build_memory_structure(xs, iopb);
- } else {
- m328_cmd->top_sg_list = (M328_SG)0;
- }
-
- LV(cqep->cqe_CTAG, m328_cmd);
-
- if (crb->crb_CRSW & M_CRSW_AQ) {
- cqep->cqe_QECR = M_QECR_AA;
- }
- VL(buf, iopb->iopb_BUFF);
- VL(len, iopb->iopb_LENGTH);
+ iopb->iopb_OPTION |= OPT_WRITE;
+ }
+
+ if (flags & SCSI_POLL) {
+ iopb->iopb_OPTION |= OPT_INTDIS;
+ iopb->iopb_LEVEL = 0;
+ } else {
+ iopb->iopb_OPTION |= OPT_INTEN;
+ iopb->iopb_LEVEL = sc->sc_ipl;
+ }
+ iopb->iopb_ADDR = ADDR_MOD;
+
+ /*
+ * Wait until we can use the command queue entry.
+ * Should only have to wait if the master command
+ * queue entry is busy and we are polling.
+ */
+ while (cqep->cqe_QECR & M_QECR_GO);
+
+ cqep->cqe_IOPB_ADDR = OFF(iopb);
+ cqep->cqe_IOPB_LENGTH = iopb_len;
+ if (flags & SCSI_POLL) {
+ cqep->cqe_WORK_QUEUE = 0;
+ } else {
+ cqep->cqe_WORK_QUEUE = slp->target + 1;
+ }
+
+ MALLOC(m328_cmd, M328_CMD*, sizeof(M328_CMD), M_DEVBUF, M_WAITOK);
+
+ m328_cmd->xs = xs;
+ if (xs->datalen) {
+ m328_cmd->top_sg_list = vs_build_memory_structure(xs, iopb);
+ } else {
+ m328_cmd->top_sg_list = (M328_SG)0;
+ }
+
+ LV(cqep->cqe_CTAG, m328_cmd);
+
+ if (crb->crb_CRSW & M_CRSW_AQ) {
+ cqep->cqe_QECR = M_QECR_AA;
+ }
+ VL(buf, iopb->iopb_BUFF);
+ VL(len, iopb->iopb_LENGTH);
#ifdef SDEBUG
- printf("tgt %d lun %d buf %x len %d wqn %d ipl %d\n", slp->target,
- slp->lun, buf, len, cqep->cqe_WORK_QUEUE, iopb->iopb_LEVEL);
+ printf("tgt %d lun %d buf %x len %d wqn %d ipl %d crsw 0x%x\n",
+ slp->target, slp->lun, buf, len, cqep->cqe_WORK_QUEUE,
+ iopb->iopb_LEVEL, crb->crb_CRSW);
#endif
- cqep->cqe_QECR |= M_QECR_GO;
-
- if (flags & SCSI_POLL) {
- /* poll for the command to complete */
-/* splx(s);*/
- vs_poll(sc, xs);
- return (COMPLETE);
- }
-/* splx(s);*/
- return (SUCCESSFULLY_QUEUED);
+ cqep->cqe_QECR |= M_QECR_GO;
+
+ if (flags & SCSI_POLL) {
+ /* poll for the command to complete */
+ vs_poll(sc, xs);
+ return (COMPLETE);
+ }
+ return (SUCCESSFULLY_QUEUED);
}
int
vs_chksense(xs)
struct scsi_xfer *xs;
{
- int flags, s, i;
- struct scsi_link *slp = xs->sc_link;
- struct vs_softc *sc = slp->adapter_softc;
- struct scsi_sense *ss;
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
-
- /* ack and clear the error */
- CRB_CLR_DONE(CRSW);
- CRB_CLR_ER(CRSW);
- xs->status = 0;
-
- szero(miopb, sizeof(M328_IOPB));
- /* This is a command, so point to it */
- ss = (void *)&miopb->iopb_SCSI[0];
- szero(ss, sizeof(*ss));
- ss->opcode = REQUEST_SENSE;
- ss->byte2 = slp->lun << 5;
- ss->length = sizeof(struct scsi_sense_data);
-
- miopb->iopb_CMD = IOPB_SCSI;
- miopb->iopb_OPTION = OPT_READ;
- miopb->iopb_NVCT = (u_char)sc->sc_nvec;
- miopb->iopb_EVCT = (u_char)sc->sc_evec;
- miopb->iopb_LEVEL = 0; /*sc->sc_ipl;*/
- miopb->iopb_ADDR = ADDR_MOD;
- LV(miopb->iopb_BUFF, kvtop(&xs->sense));
- LV(miopb->iopb_LENGTH, sizeof(struct scsi_sense_data));
-
- szero(mc, sizeof(M328_CQE));
- mc->cqe_IOPB_ADDR = OFF(miopb);
- mc->cqe_IOPB_LENGTH = sizeof(M328_short_IOPB) + sizeof(struct scsi_sense);
- mc->cqe_WORK_QUEUE = 0;
- mc->cqe_QECR = M_QECR_GO;
- /* poll for the command to complete */
- s = splbio();
- do_vspoll(sc, 0);
- /*
- if (xs->cmd->opcode != PREVENT_ALLOW) {
- xs->error = XS_SENSE;
- }
- */
- xs->status = riopb->iopb_STATUS >> 8;
+ int flags, s, i;
+ struct scsi_link *slp = xs->sc_link;
+ struct vs_softc *sc = slp->adapter_softc;
+ struct scsi_sense *ss;
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_IOPB *miopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
+
+ /* ack and clear the error */
+ CRB_CLR_ER(CRSW);
+ CRB_CLR_DONE(CRSW);
+ xs->status = 0;
+
+ vs_zero(miopb, sizeof(M328_IOPB));
+ /* This is a command, so point to it */
+ ss = (void *)&miopb->iopb_SCSI[0];
+ vs_zero(ss, sizeof(*ss));
+ ss->opcode = REQUEST_SENSE;
+ ss->byte2 = slp->lun << 5;
+ ss->length = sizeof(struct scsi_sense_data);
+
+ miopb->iopb_CMD = IOPB_SCSI;
+ miopb->iopb_OPTION = OPT_READ;
+ miopb->iopb_NVCT = (u_char)sc->sc_nvec;
+ miopb->iopb_EVCT = (u_char)sc->sc_evec;
+ miopb->iopb_LEVEL = 0; /*sc->sc_ipl;*/
+ miopb->iopb_ADDR = ADDR_MOD;
+ LV(miopb->iopb_BUFF, kvtop(&xs->sense));
+ LV(miopb->iopb_LENGTH, sizeof(struct scsi_sense_data));
+
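+	/*
+	 * Hand the REQUEST SENSE IOPB to the master command entry (work
+	 * queue 0) and poll for completion below; the board writes the
+	 * sense bytes straight into xs->sense.
+	 */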
+ vs_zero(mc, sizeof(M328_CQE));
+ mc->cqe_IOPB_ADDR = OFF(miopb);
+ mc->cqe_IOPB_LENGTH = sizeof(M328_short_IOPB) +
+ sizeof(struct scsi_sense);
+ mc->cqe_WORK_QUEUE = 0;
+ mc->cqe_QECR = M_QECR_GO;
+ /* poll for the command to complete */
+ s = splbio();
+ do_vspoll(sc, 0);
+ /*
+ if (xs->cmd->opcode != PREVENT_ALLOW) {
+ xs->error = XS_SENSE;
+ }
+ */
+ xs->status = riopb->iopb_STATUS >> 8;
#ifdef SDEBUG
- scsi_print_sense(xs, 2);
+ scsi_print_sense(xs, 2);
#endif
- splx(s);
+ splx(s);
}
M328_CQE *
vs_getcqe(sc)
struct vs_softc *sc;
{
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_CQE *cqep;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_CQE *cqep;
- cqep = (M328_CQE *)&sc->sc_vsreg->sh_CQE[mcsb->mcsb_QHDP];
+ cqep = (M328_CQE *)&sc->sc_vsreg->sh_CQE[mcsb->mcsb_QHDP];
- if (cqep->cqe_QECR & M_QECR_GO)
- return NULL; /* Hopefully, this will never happen */
- mcsb->mcsb_QHDP++;
- if (mcsb->mcsb_QHDP == NUM_CQE) mcsb->mcsb_QHDP = 0;
- szero(cqep, sizeof(M328_CQE));
- return cqep;
+ if (cqep->cqe_QECR & M_QECR_GO)
+ return NULL; /* Hopefully, this will never happen */
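+	/* advance the queue head pointer, wrapping so the CQE array is a ring */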
+ mcsb->mcsb_QHDP++;
+ if (mcsb->mcsb_QHDP == NUM_CQE) mcsb->mcsb_QHDP = 0;
+ vs_zero(cqep, sizeof(M328_CQE));
+ return cqep;
}
M328_IOPB *
vs_getiopb(sc)
struct vs_softc *sc;
{
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_IOPB *iopb;
- int slot;
-
- if (mcsb->mcsb_QHDP == 0) {
- slot = NUM_CQE;
- } else {
- slot = mcsb->mcsb_QHDP - 1;
- }
- iopb = (M328_IOPB *)&sc->sc_vsreg->sh_IOPB[slot];
- szero(iopb, sizeof(M328_IOPB));
- return iopb;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_IOPB *iopb;
+ int slot;
+
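+	/*
+	 * vs_getcqe() has already advanced mcsb_QHDP, so step back one
+	 * slot (wrapping) to pick the IOPB that pairs with the CQE just
+	 * handed out.
+	 */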
+ if (mcsb->mcsb_QHDP == 0) {
+ slot = NUM_CQE;
+ } else {
+ slot = mcsb->mcsb_QHDP - 1;
+ }
+ iopb = (M328_IOPB *)&sc->sc_vsreg->sh_IOPB[slot];
+ vs_zero(iopb, sizeof(M328_IOPB));
+ return iopb;
}
-void
+int
vs_initialize(sc)
struct vs_softc *sc;
{
- M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_IOPB *iopb;
- M328_WQCF *wiopb = (M328_WQCF *)&sc->sc_vsreg->sh_MCE_IOPB;
- u_short i, crsw;
- int failed = 0;
-
- CRB_CLR_DONE(CRSW);
- szero(cib, sizeof(M328_CIB));
- mcsb->mcsb_QHDP = 0;
- sc->sc_qhp = 0;
- cib->cib_NCQE = 10;
- cib->cib_BURST = 0;
- cib->cib_NVECT = sc->sc_ipl << 8;
- cib->cib_NVECT |= sc->sc_nvec;
- cib->cib_EVECT = sc->sc_ipl << 8;
- cib->cib_EVECT |= sc->sc_evec;
- cib->cib_PID = 0x07;
- cib->cib_SID = 0x00;
- cib->cib_CRBO = OFF(crb);
- cib->cib_SELECT_msw = HI(SELECTION_TIMEOUT);
- cib->cib_SELECT_lsw = LO(SELECTION_TIMEOUT);
- cib->cib_WQ0TIMO_msw = HI(4);
- cib->cib_WQ0TIMO_lsw = LO(4);
- cib->cib_VMETIMO_msw = 0; /*HI(VME_BUS_TIMEOUT);*/
- cib->cib_VMETIMO_lsw = 0; /*LO(VME_BUS_TIMEOUT);*/
- cib->cib_SBRIV = sc->sc_ipl << 8;
- cib->cib_SBRIV |= sc->sc_evec;
- cib->cib_SOF0 = 0x15;
- cib->cib_SRATE0 = 100/4;
- cib->cib_SOF1 = 0x0;
- cib->cib_SRATE1 = 0x0;
-
- iopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
- szero(iopb, sizeof(M328_IOPB));
- iopb->iopb_CMD = CNTR_INIT;
- iopb->iopb_OPTION = 0;
- iopb->iopb_NVCT = (u_char)sc->sc_nvec;
- iopb->iopb_EVCT = (u_char)sc->sc_evec;
- iopb->iopb_LEVEL = 0; /*sc->sc_ipl;*/
- iopb->iopb_ADDR = SHIO_MOD;
- LV(iopb->iopb_BUFF, OFF(cib));
- LV(iopb->iopb_LENGTH, sizeof(M328_CIB));
-
- szero(mc, sizeof(M328_CQE));
- mc->cqe_IOPB_ADDR = OFF(iopb);
- mc->cqe_IOPB_LENGTH = sizeof(M328_IOPB);
- mc->cqe_WORK_QUEUE = 0;
- mc->cqe_QECR = M_QECR_GO;
- /* poll for the command to complete */
- do_vspoll(sc, 0);
- CRB_CLR_DONE(CRSW);
-
- /* initialize work queues */
- for (i=1; i<8; i++) {
- szero(wiopb, sizeof(M328_IOPB));
- wiopb->wqcf_CMD = CNTR_INIT_WORKQ;
- wiopb->wqcf_OPTION = 0;
- wiopb->wqcf_NVCT = (u_char)sc->sc_nvec;
- wiopb->wqcf_EVCT = (u_char)sc->sc_evec;
- wiopb->wqcf_ILVL = 0; /*sc->sc_ipl;*/
- wiopb->wqcf_WORKQ = i;
- wiopb->wqcf_WOPT = (WQO_FOE | WQO_INIT);
- wiopb->wqcf_SLOTS = JAGUAR_MAX_Q_SIZ;
- LV(wiopb->wqcf_CMDTO, 2);
-
- szero(mc, sizeof(M328_CQE));
- mc->cqe_IOPB_ADDR = OFF(wiopb);
- mc->cqe_IOPB_LENGTH = sizeof(M328_IOPB);
- mc->cqe_WORK_QUEUE = 0;
- mc->cqe_QECR = M_QECR_GO;
- /* poll for the command to complete */
- do_vspoll(sc, 0);
- if (CRSW & M_CRSW_ER) {
- /*printf("\nerror: queue %d status = 0x%x\n", i, riopb->iopb_STATUS);*/
- /*failed = 1;*/
- CRB_CLR_ER(CRSW);
- }
- CRB_CLR_DONE(CRSW);
- delay(500);
- }
- /* start queue mode */
- CRSW = 0;
- mcsb->mcsb_MCR |= M_MCR_SQM;
- crsw = CRSW;
- do_vspoll(sc, 0);
- if (CRSW & M_CRSW_ER) {
- printf("error: status = 0x%x\n", riopb->iopb_STATUS);
- CRB_CLR_ER(CRSW);
- }
- CRB_CLR_DONE(CRSW);
-
- if (failed) {
- printf(": failed!\n");
- return;
- }
- /* reset SCSI bus */
- vs_reset(sc);
- /* sync all devices */
- vs_resync(sc);
- printf(": target %d\n", sc->sc_link.adapter_target);
+ M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_IOPB *iopb;
+ M328_WQCF *wiopb = (M328_WQCF *)&sc->sc_vsreg->sh_MCE_IOPB;
+ u_short i, crsw;
+ int failed = 0;
+
+ CRB_CLR_DONE(CRSW);
+ vs_zero(cib, sizeof(M328_CIB));
+ mcsb->mcsb_QHDP = 0;
+ sc->sc_qhp = 0;
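+	/*
+	 * Fill in the Controller Initialization Block: queue depth, normal
+	 * and error interrupt vectors, CRB offset, selection/VMEbus
+	 * timeouts and the synchronous transfer parameters, then pass it
+	 * to the board with a CNTR_INIT IOPB below.
+	 */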
+ cib->cib_NCQE = 10;
+ cib->cib_BURST = 0;
+ cib->cib_NVECT = sc->sc_ipl << 8;
+ cib->cib_NVECT |= sc->sc_nvec;
+ cib->cib_EVECT = sc->sc_ipl << 8;
+ cib->cib_EVECT |= sc->sc_evec;
+ cib->cib_PID = 0x07;
+ cib->cib_SID = 0x00;
+ cib->cib_CRBO = OFF(crb);
+ cib->cib_SELECT_msw = HI(SELECTION_TIMEOUT);
+ cib->cib_SELECT_lsw = LO(SELECTION_TIMEOUT);
+ cib->cib_WQ0TIMO_msw = HI(4);
+ cib->cib_WQ0TIMO_lsw = LO(4);
+ cib->cib_VMETIMO_msw = 0; /*HI(VME_BUS_TIMEOUT);*/
+ cib->cib_VMETIMO_lsw = 0; /*LO(VME_BUS_TIMEOUT);*/
+ cib->cib_ERR_FLGS = M_ERRFLGS_RIN | M_ERRFLGS_RSE;
+ cib->cib_SBRIV = sc->sc_ipl << 8;
+ cib->cib_SBRIV |= sc->sc_evec;
+ cib->cib_SOF0 = 0x15;
+ cib->cib_SRATE0 = 100/4;
+ cib->cib_SOF1 = 0x0;
+ cib->cib_SRATE1 = 0x0;
+
+ iopb = (M328_IOPB *)&sc->sc_vsreg->sh_MCE_IOPB;
+ vs_zero(iopb, sizeof(M328_IOPB));
+ iopb->iopb_CMD = CNTR_INIT;
+ iopb->iopb_OPTION = 0;
+ iopb->iopb_NVCT = (u_char)sc->sc_nvec;
+ iopb->iopb_EVCT = (u_char)sc->sc_evec;
+ iopb->iopb_LEVEL = 0; /*sc->sc_ipl;*/
+ iopb->iopb_ADDR = SHIO_MOD;
+ LV(iopb->iopb_BUFF, OFF(cib));
+ LV(iopb->iopb_LENGTH, sizeof(M328_CIB));
+
+ vs_zero(mc, sizeof(M328_CQE));
+ mc->cqe_IOPB_ADDR = OFF(iopb);
+ mc->cqe_IOPB_LENGTH = sizeof(M328_IOPB);
+ mc->cqe_WORK_QUEUE = 0;
+ mc->cqe_QECR = M_QECR_GO;
+ /* poll for the command to complete */
+ do_vspoll(sc, 0);
+ CRB_CLR_DONE(CRSW);
+
+ /* initialize work queues */
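+	/* one work queue per target (1-7); queue 0 is reserved for controller commands */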
+ for (i=1; i<8; i++) {
+ vs_zero(wiopb, sizeof(M328_IOPB));
+ wiopb->wqcf_CMD = CNTR_INIT_WORKQ;
+ wiopb->wqcf_OPTION = 0;
+ wiopb->wqcf_NVCT = (u_char)sc->sc_nvec;
+ wiopb->wqcf_EVCT = (u_char)sc->sc_evec;
+ wiopb->wqcf_ILVL = 0; /*sc->sc_ipl;*/
+ wiopb->wqcf_WORKQ = i;
+ wiopb->wqcf_WOPT = (WQO_FOE | WQO_INIT);
+ wiopb->wqcf_SLOTS = JAGUAR_MAX_Q_SIZ;
+ LV(wiopb->wqcf_CMDTO, 4); /* 1 second */
+
+ vs_zero(mc, sizeof(M328_CQE));
+ mc->cqe_IOPB_ADDR = OFF(wiopb);
+ mc->cqe_IOPB_LENGTH = sizeof(M328_IOPB);
+ mc->cqe_WORK_QUEUE = 0;
+ mc->cqe_QECR = M_QECR_GO;
+ /* poll for the command to complete */
+ do_vspoll(sc, 0);
+ if (CRSW & M_CRSW_ER) {
+ /*printf("\nerror: queue %d status = 0x%x\n", i, riopb->iopb_STATUS);*/
+ /*failed = 1;*/
+ CRB_CLR_ER(CRSW);
+ }
+ CRB_CLR_DONE(CRSW);
+ delay(500);
+ }
+ /* start queue mode */
+ CRSW = 0;
+ mcsb->mcsb_MCR |= M_MCR_SQM;
+ crsw = CRSW;
+ do_vspoll(sc, 0);
+ if (CRSW & M_CRSW_ER) {
+ printf("error: status = 0x%x\n", riopb->iopb_STATUS);
+ CRB_CLR_ER(CRSW);
+ }
+ CRB_CLR_DONE(CRSW);
+
+ if (failed) {
+ printf(": failed!\n");
+ return (1);
+ }
+ /* reset SCSI bus */
+ vs_reset(sc);
+ /* sync all devices */
+ vs_resync(sc);
+ printf(": target %d\n", sc->sc_link.adapter_target);
+ return (0); /* success */
}
void
vs_resync(sc)
struct vs_softc *sc;
{
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_DRCF *devreset = (M328_DRCF *)&sc->sc_vsreg->sh_MCE_IOPB;
- u_short i;
-
- for (i=0; i<7; i++) {
- szero(devreset, sizeof(M328_DRCF));
- devreset->drcf_CMD = CNTR_DEV_REINIT;
- devreset->drcf_OPTION = 0x00; /* no interrupts yet... */
- devreset->drcf_NVCT = sc->sc_nvec;
- devreset->drcf_EVCT = sc->sc_evec;
- devreset->drcf_ILVL = 0;
- devreset->drcf_UNIT = i;
-
- szero(mc, sizeof(M328_CQE));
- mc->cqe_IOPB_ADDR = OFF(devreset);
- mc->cqe_IOPB_LENGTH = sizeof(M328_DRCF);
- mc->cqe_WORK_QUEUE = 0;
- mc->cqe_QECR = M_QECR_GO;
- /* poll for the command to complete */
- do_vspoll(sc, 0);
- if (riopb->iopb_STATUS) {
- sc->sc_tinfo[i].avail = 0;
- } else {
- sc->sc_tinfo[i].avail = 1;
- }
- if (CRSW & M_CRSW_ER) {
- CRB_CLR_ER(CRSW);
- }
- CRB_CLR_DONE(CRSW);
- }
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_DRCF *devreset = (M328_DRCF *)&sc->sc_vsreg->sh_MCE_IOPB;
+ u_short i;
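+	/*
+	 * Send a device reinitialize command to each possible target (0-6)
+	 * and mark the targets that do not answer as unavailable.
+	 */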
+ for (i=0; i<7; i++) {
+ vs_zero(devreset, sizeof(M328_DRCF));
+ devreset->drcf_CMD = CNTR_DEV_REINIT;
+ devreset->drcf_OPTION = 0x00; /* no interrupts yet... */
+ devreset->drcf_NVCT = sc->sc_nvec;
+ devreset->drcf_EVCT = sc->sc_evec;
+ devreset->drcf_ILVL = 0;
+ devreset->drcf_UNIT = i;
+
+ vs_zero(mc, sizeof(M328_CQE));
+ mc->cqe_IOPB_ADDR = OFF(devreset);
+ mc->cqe_IOPB_LENGTH = sizeof(M328_DRCF);
+ mc->cqe_WORK_QUEUE = 0;
+ mc->cqe_QECR = M_QECR_GO;
+ /* poll for the command to complete */
+ do_vspoll(sc, 0);
+ if (riopb->iopb_STATUS) {
+#ifdef SDEBUG
+ printf("status: %x\n", riopb->iopb_STATUS);
+#endif
+ sc->sc_tinfo[i].avail = 0;
+ } else {
+ sc->sc_tinfo[i].avail = 1;
+ }
+ if (CRSW & M_CRSW_ER) {
+ CRB_CLR_ER(CRSW);
+ }
+ CRB_CLR_DONE(CRSW);
+ }
}
void
vs_reset(sc)
struct vs_softc *sc;
{
- struct vsreg * rp;
- u_int s;
- u_char i;
- struct iopb_reset* iopr;
- struct cqe *cqep;
- struct iopb_scsi *iopbs;
- struct scsi_sense *ss;
- M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
- M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
- M328_SRCF *reset = (M328_SRCF *)&sc->sc_vsreg->sh_MCE_IOPB;
- M328_IOPB *iopb;
-
- szero(reset, sizeof(M328_SRCF));
- reset->srcf_CMD = IOPB_RESET;
- reset->srcf_OPTION = 0x00; /* no interrupts yet... */
- reset->srcf_NVCT = sc->sc_nvec;
- reset->srcf_EVCT = sc->sc_evec;
- reset->srcf_ILVL = 0;
- reset->srcf_BUSID = 0;
- s = splbio();
-
- szero(mc, sizeof(M328_CQE));
- mc->cqe_IOPB_ADDR = OFF(reset);
- mc->cqe_IOPB_LENGTH = sizeof(M328_SRCF);
- mc->cqe_WORK_QUEUE = 0;
- mc->cqe_QECR = M_QECR_GO;
- /* poll for the command to complete */
- while (1) {
- do_vspoll(sc, 0);
- /* ack & clear scsi error condition cause by reset */
- if (CRSW & M_CRSW_ER) {
- CRB_CLR_ER(CRSW);
- CRB_CLR_DONE(CRSW);
- riopb->iopb_STATUS = 0;
- break;
- }
- CRB_CLR_DONE(CRSW);
- }
- /* thaw all work queues */
- thaw_queue(sc, 0xFF);
- splx (s);
+ struct vsreg * rp;
+ u_int s;
+ u_char i;
+ struct iopb_reset* iopr;
+ struct cqe *cqep;
+ struct iopb_scsi *iopbs;
+ struct scsi_sense *ss;
+ M328_CIB *cib = (M328_CIB *)&sc->sc_vsreg->sh_CIB;
+ M328_CQE *mc = (M328_CQE*)&sc->sc_vsreg->sh_MCE;
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_MCSB *mcsb = (M328_MCSB *)&sc->sc_vsreg->sh_MCSB;
+ M328_SRCF *reset = (M328_SRCF *)&sc->sc_vsreg->sh_MCE_IOPB;
+ M328_IOPB *iopb;
+
+ vs_zero(reset, sizeof(M328_SRCF));
+ reset->srcf_CMD = IOPB_RESET;
+ reset->srcf_OPTION = 0x00; /* no interrupts yet... */
+ reset->srcf_NVCT = sc->sc_nvec;
+ reset->srcf_EVCT = sc->sc_evec;
+ reset->srcf_ILVL = 0;
+ reset->srcf_BUSID = 0;
+ s = splbio();
+
+ vs_zero(mc, sizeof(M328_CQE));
+ mc->cqe_IOPB_ADDR = OFF(reset);
+ mc->cqe_IOPB_LENGTH = sizeof(M328_SRCF);
+ mc->cqe_WORK_QUEUE = 0;
+ mc->cqe_QECR = M_QECR_GO;
+ /* poll for the command to complete */
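+	/*
+	 * The bus reset completes with the error bit set in the CRSW, so
+	 * spin until it shows up, acknowledge it and clear the stale
+	 * return status before continuing.
+	 */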
+ while (1) {
+ do_vspoll(sc, 0);
+		/* ack & clear scsi error condition caused by reset */
+ if (CRSW & M_CRSW_ER) {
+ CRB_CLR_ER(CRSW);
+ CRB_CLR_DONE(CRSW);
+ riopb->iopb_STATUS = 0;
+ break;
+ }
+ CRB_CLR_DONE(CRSW);
+ }
+ /* thaw all work queues */
+ thaw_queue(sc, 0xFF);
+ splx (s);
}
-
/*
* Process an interrupt from the MVME328
* We'll generally update: xs->{flags,resid,error,sense,status} and
@@ -699,134 +712,233 @@ struct vs_softc *sc;
struct scsi_xfer *xs;
int *status;
{
- struct vsreg * rp = sc->sc_vsreg;
- int target = -1;
- int lun = -1;
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
- struct scsi_generic *cmd;
- u_long buf;
- u_long len;
- u_char error;
-
- target = xs->sc_link->target;
- lun = xs->sc_link->lun;
- cmd = (struct scsi_generic *)&riopb->iopb_SCSI[0];
-
- VL(buf, riopb->iopb_BUFF);
- VL(len, riopb->iopb_LENGTH);
- *status = riopb->iopb_STATUS >> 8;
- error = riopb->iopb_STATUS & 0xFF;
+ struct vsreg * rp = sc->sc_vsreg;
+ int target = -1;
+ int lun = -1;
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ struct scsi_generic *cmd;
+ u_long buf;
+ u_long len;
+ u_char error;
+
+ target = xs->sc_link->target;
+ lun = xs->sc_link->lun;
+ cmd = (struct scsi_generic *)&riopb->iopb_SCSI[0];
+
+ VL(buf, riopb->iopb_BUFF);
+ VL(len, riopb->iopb_LENGTH);
+ *status = riopb->iopb_STATUS >> 8;
+ error = riopb->iopb_STATUS & 0xFF;
#ifdef SDEBUG
- printf("scsi_chk() ");
-
- if (xs->cmd->opcode == 0) {
- printf("TEST_UNIT_READY ");
- } else if (xs->cmd->opcode == REQUEST_SENSE) {
- printf("REQUEST_SENSE ");
- } else if (xs->cmd->opcode == INQUIRY) {
- printf("INQUIRY ");
- } else if (xs->cmd->opcode == MODE_SELECT) {
- printf("MODE_SELECT ");
- } else if (xs->cmd->opcode == MODE_SENSE) {
- printf("MODE_SENSE ");
- } else if (xs->cmd->opcode == START_STOP) {
- printf("START_STOP ");
- } else if (xs->cmd->opcode == RESERVE) {
- printf("RESERVE ");
- } else if (xs->cmd->opcode == RELEASE) {
- printf("RELEASE ");
- } else if (xs->cmd->opcode == PREVENT_ALLOW) {
- printf("PREVENT_ALLOW ");
- } else if (xs->cmd->opcode == POSITION_TO_ELEMENT) {
- printf("POSITION_TO_EL ");
- } else if (xs->cmd->opcode == CHANGE_DEFINITION) {
- printf("CHANGE_DEF ");
- } else if (xs->cmd->opcode == MODE_SENSE_BIG) {
- printf("MODE_SENSE_BIG ");
- } else if (xs->cmd->opcode == MODE_SELECT_BIG) {
- printf("MODE_SELECT_BIG ");
- } else if (xs->cmd->opcode == 0x25) {
- printf("READ_CAPACITY ");
- } else if (xs->cmd->opcode == 0x08) {
- printf("READ_COMMAND ");
- }
-
- printf("tgt %d lun %d buf %x len %d status %x ", target, lun, buf, len, riopb->iopb_STATUS);
-
- if (CRSW & M_CRSW_EX) {
- printf("[ex]");
- }
- if (CRSW & M_CRSW_QMS) {
- printf("[qms]");
- }
- if (CRSW & M_CRSW_SC) {
- printf("[sc]");
- }
- if (CRSW & M_CRSW_SE) {
- printf("[se]");
- }
- if (CRSW & M_CRSW_AQ) {
- printf("[aq]");
- }
- if (CRSW & M_CRSW_ER) {
- printf("[er]");
- }
- printf("\n");
+ printf("scsi_chk() ");
+
+ if (xs->cmd->opcode == 0) {
+ printf("TEST_UNIT_READY ");
+ } else if (xs->cmd->opcode == REQUEST_SENSE) {
+ printf("REQUEST_SENSE ");
+ } else if (xs->cmd->opcode == INQUIRY) {
+ printf("INQUIRY ");
+ } else if (xs->cmd->opcode == MODE_SELECT) {
+ printf("MODE_SELECT ");
+ } else if (xs->cmd->opcode == MODE_SENSE) {
+ printf("MODE_SENSE ");
+ } else if (xs->cmd->opcode == START_STOP) {
+ printf("START_STOP ");
+ } else if (xs->cmd->opcode == RESERVE) {
+ printf("RESERVE ");
+ } else if (xs->cmd->opcode == RELEASE) {
+ printf("RELEASE ");
+ } else if (xs->cmd->opcode == PREVENT_ALLOW) {
+ printf("PREVENT_ALLOW ");
+ } else if (xs->cmd->opcode == POSITION_TO_ELEMENT) {
+ printf("POSITION_TO_EL ");
+ } else if (xs->cmd->opcode == CHANGE_DEFINITION) {
+ printf("CHANGE_DEF ");
+ } else if (xs->cmd->opcode == MODE_SENSE_BIG) {
+ printf("MODE_SENSE_BIG ");
+ } else if (xs->cmd->opcode == MODE_SELECT_BIG) {
+ printf("MODE_SELECT_BIG ");
+ } else if (xs->cmd->opcode == 0x25) {
+ printf("READ_CAPACITY ");
+ } else if (xs->cmd->opcode == 0x08) {
+ printf("READ_COMMAND ");
+ }
+
+ printf("tgt %d lun %d buf %x len %d status %x ", target, lun, buf, len, riopb->iopb_STATUS);
+
+ if (CRSW & M_CRSW_EX) {
+ printf("[ex]");
+ }
+ if (CRSW & M_CRSW_QMS) {
+ printf("[qms]");
+ }
+ if (CRSW & M_CRSW_SC) {
+ printf("[sc]");
+ }
+ if (CRSW & M_CRSW_SE) {
+ printf("[se]");
+ }
+ if (CRSW & M_CRSW_AQ) {
+ printf("[aq]");
+ }
+ if (CRSW & M_CRSW_ER) {
+ printf("[er]");
+ }
+ printf("\n");
#endif
- if (len != xs->datalen) {
- xs->resid = xs->datalen - len;
- } else {
- xs->resid = 0;
- }
-
- if (error == SCSI_SELECTION_TO) {
- xs->error = XS_SELTIMEOUT;
- xs->status = -1;
- *status = -1;
- }
- return 1;
+ if (len != xs->datalen) {
+ xs->resid = xs->datalen - len;
+ } else {
+ xs->resid = 0;
+ }
+
+ if (error == SCSI_SELECTION_TO) {
+ xs->error = XS_SELTIMEOUT;
+ xs->status = -1;
+ *status = -1;
+ }
+ return 1;
}
+/* normal interrupt routine */
int
-vs_intr (sc)
+vs_nintr(sc)
register struct vs_softc *sc;
{
- M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
- struct scsi_xfer *xs;
- M328_CMD *m328_cmd;
- unsigned long loc;
- int status;
- int s;
- s = splbio();
- /* Got a valid interrupt on this device */
-
- VL(loc, crb->crb_CTAG);
+ M328_CRB *crb = (M328_CRB *)&sc->sc_vsreg->sh_CRB;
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_CMD *m328_cmd;
+ struct scsi_xfer *xs;
+ int status;
+ int s;
+
+ if ((CRSW & CONTROLLER_ERROR) == CONTROLLER_ERROR)
+ return(vs_eintr(sc));
+
+ s = splbio();
+ /* Got a valid interrupt on this device */
+ sc->sc_intrcnt_n.ev_count++;
+
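+	/*
+	 * The command tag stashed in the CQE at submit time is the kernel
+	 * virtual address of the M328_CMD, so recover it directly from
+	 * the CRB.
+	 */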
+ VL((unsigned long)m328_cmd, crb->crb_CTAG);
#ifdef SDEBUG
- printf("Interrupt!!! ");
- printf("loc == 0x%x\n", loc);
+ printf("Interrupt!!! ");
+ printf("m328_cmd == 0x%x\n", m328_cmd);
#endif
- /*
- * If this is a controller error, there won't be a m328_cmd
- * pointer in the CTAG feild. Bad things happen if you try
- * to point to address 0. Controller error should be handeled
- * in vsdma.c I'll change this soon - steve.
- */
- if (loc) {
- m328_cmd = (M328_CMD *)loc;
- xs = m328_cmd->xs;
- if (m328_cmd->top_sg_list) {
- vs_dealloc_scatter_gather(m328_cmd->top_sg_list);
- m328_cmd->top_sg_list = (M328_SG)0;
- }
-
- FREE(m328_cmd, M_DEVBUF); /* free the command tag */
- if (vs_checkintr (sc, xs, &status)) {
- vs_scsidone(xs, status);
- }
- }
- splx(s);
+ /*
+	 * If this is a controller error, there won't be an m328_cmd
+	 * pointer in the CTAG field. Bad things happen if you try
+	 * to point to address 0. Controller errors should be handled
+	 * in vsdma.c. I'll change this soon - steve.
+ */
+ if (m328_cmd) {
+ xs = m328_cmd->xs;
+ if (m328_cmd->top_sg_list) {
+ vs_dealloc_scatter_gather(m328_cmd->top_sg_list);
+ m328_cmd->top_sg_list = (M328_SG)0;
+ }
+ FREE(m328_cmd, M_DEVBUF); /* free the command tag */
+ if (vs_checkintr(sc, xs, &status)) {
+ vs_scsidone(sc, xs, status);
+ }
+ }
+ /* ack the interrupt */
+ if (CRSW & M_CRSW_ER)
+ CRB_CLR_ER(CRSW);
+ CRB_CLR_DONE(CRSW);
+ /* clear the return information */
+ vs_clear_return_info(sc);
+ splx(s);
+ return (1);
+}
+
+int
+vs_eintr(sc)
+register struct vs_softc *sc;
+{
+ M328_CEVSB *crb = (M328_CEVSB *)&sc->sc_vsreg->sh_CRB;
+ M328_CMD *m328_cmd;
+ struct scsi_xfer *xs;
+ int crsw = crb->cevsb_CRSW;
+ int type = crb->cevsb_TYPE;
+ int length = crb->cevsb_IOPB_LENGTH;
+ int wq = crb->cevsb_WORK_QUEUE;
+ int ecode = crb->cevsb_ERROR;
+ int status, s;
+
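+	/*
+	 * On an error interrupt the CRB area is interpreted as a CEVSB,
+	 * which carries the IOPB type, work queue number and error code
+	 * along with the usual command tag.
+	 */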
+ s = splbio();
+
+ /* Got a valid interrupt on this device */
+ sc->sc_intrcnt_e.ev_count++;
+
+ VL((unsigned long)m328_cmd, crb->cevsb_CTAG);
+#ifdef SDEBUG
+ printf("Error Interrupt!!! ");
+ printf("m328_cmd == 0x%x\n", m328_cmd);
+#endif
+ xs = m328_cmd->xs;
+
+ if (crsw & M_CRSW_RST) {
+ printf("%s: SCSI Bus Reset!\n", vs_name(sc));
+ /* clear the return information */
+ vs_clear_return_info(sc);
+ splx(s);
+ return(1);
+ }
+ switch (ecode) {
+ case CEVSB_ERR_TYPE:
+ printf("%s: IOPB Type error!\n", vs_name(sc));
+ break;
+ case CEVSB_ERR_TO:
+ printf("%s: Timeout!\n", vs_name(sc));
+ xs->error = XS_SELTIMEOUT;
+ xs->status = -1;
+ xs->flags |= ITSDONE;
+ status = -1;
+ scsi_done(xs);
+ break;
+ case CEVSB_ERR_TR: /* Target Reconnect, no IOPB */
+ printf("%s: Target Reconnect error!\n", vs_name(sc));
+ break;
+ case CEVSB_ERR_OF: /* Overflow */
+ printf("%s: Overflow error!\n", vs_name(sc));
+ break;
+ case CEVSB_ERR_BD: /* Bad direction */
+ printf("%s: Bad Direction!\n", vs_name(sc));
+ break;
+	   case CEVSB_ERR_NR: /* Non-Recoverable Error */
+ printf("%s: Non-Recoverable error!\n", vs_name(sc));
+ break;
+	   case CESVB_ERR_PANIC: /* Board Panic!!! */
+	      printf("%s: Board Panic!!!\n", vs_name(sc));
+ break;
+ default:
+ printf("%s: Uh oh!... Error 0x%x\n", vs_name(sc), ecode);
+ Debugger();
+ }
+#ifdef SDEBUG
+ printf("%s: crsw = 0x%x iopb_type = %d iopb_len = %d wq = %d error = 0x%x\n",
+ vs_name(sc), crsw, type, length, wq, ecode);
+#endif
+ if (CRSW & M_CRSW_ER)
+ CRB_CLR_ER(CRSW);
+ CRB_CLR_DONE(CRSW);
+ thaw_queue(sc, 0xFF);
+ /* clear the return information */
+ vs_clear_return_info(sc);
+ splx(s);
+ return(1);
+}
+
+static __inline__ void
+vs_clear_return_info(sc)
+register struct vs_softc *sc;
+{
+ M328_IOPB *riopb = (M328_IOPB *)&sc->sc_vsreg->sh_RET_IOPB;
+ M328_CEVSB *crb = (M328_CEVSB *)&sc->sc_vsreg->sh_CRB;
+ vs_zero(riopb, sizeof(M328_IOPB));
+ vs_zero(crb, sizeof(M328_CEVSB));
}
/*
@@ -836,190 +948,190 @@ register struct vs_softc *sc;
M328_SG
vs_alloc_scatter_gather(void)
{
- M328_SG sg;
+ M328_SG sg;
- MALLOC(sg, M328_SG, sizeof(struct m328_sg), M_DEVBUF, M_WAITOK);
- assert ( sg );
- if ( !sg ) {
- panic ("Memory for scatter_gather_list not available");
- }
- bzero(sg, sizeof(struct m328_sg));
+ MALLOC(sg, M328_SG, sizeof(struct m328_sg), M_DEVBUF, M_WAITOK);
+ assert ( sg );
+ if ( !sg ) {
+ panic ("Memory for scatter_gather_list not available");
+ }
+ bzero(sg, sizeof(struct m328_sg));
- return (sg);
+ return (sg);
}
void
-vs_dealloc_scatter_gather(M328_SG sg)
+vs_dealloc_scatter_gather(sg)
+M328_SG sg;
{
- register int i;
-
- if (sg->level > 0) {
- for (i=0; sg->down[i] && i<MAX_SG_ELEMENTS; i++) {
- vs_dealloc_scatter_gather(sg->down[i]);
- }
- }
- FREE(sg, M_DEVBUF);
+ register int i;
+
+ if (sg->level > 0) {
+ for (i=0; sg->down[i] && i<MAX_SG_ELEMENTS; i++) {
+ vs_dealloc_scatter_gather(sg->down[i]);
+ }
+ }
+ FREE(sg, M_DEVBUF);
}
void
-vs_link_sg_element(sg_list_element_t * element,
- register vm_offset_t phys_add,
- register int len)
+vs_link_sg_element(element, phys_add, len)
+sg_list_element_t *element;
+register vm_offset_t phys_add;
+register int len;
{
- element->count.bytes = len;
- LV(element->address, phys_add);
- element->link = 0; /* FALSE */
- element->transfer_type = NORMAL_TYPE;
- element->memory_type = LONG_TRANSFER;
- element->address_modifier = 0xD;
+ element->count.bytes = len;
+ LV(element->address, phys_add);
+ element->link = 0; /* FALSE */
+ element->transfer_type = NORMAL_TYPE;
+ element->memory_type = LONG_TRANSFER;
+ element->address_modifier = 0xD;
}
void
-vs_link_sg_list(sg_list_element_t * list,
- register vm_offset_t phys_add,
- register int elements)
+vs_link_sg_list(list, phys_add, elements)
+sg_list_element_t *list;
+register vm_offset_t phys_add;
+register int elements;
{
- list->count.scatter.gather = elements;
- LV(list->address, phys_add);
- list->link = 1; /* TRUE */
- list->transfer_type = NORMAL_TYPE;
- list->memory_type = LONG_TRANSFER;
- list->address_modifier = 0xD;
+ list->count.scatter.gather = elements;
+ LV(list->address, phys_add);
+ list->link = 1; /* TRUE */
+ list->transfer_type = NORMAL_TYPE;
+ list->memory_type = LONG_TRANSFER;
+ list->address_modifier = 0xD;
}
-
M328_SG
vs_build_memory_structure(xs, iopb)
struct scsi_xfer *xs;
-M328_IOPB *iopb; /* the iopb */
+M328_IOPB *iopb; /* the iopb */
{
- M328_SG sg;
- vm_offset_t starting_point_virt, starting_point_phys, point_virt,
- point1_phys, point2_phys, virt;
- unsigned len;
- int level;
-
- sg = (M328_SG)0; /* Hopefully we need no scatter/gather list */
-
- /*
- * We have the following things:
- * virt the virtuell address of the contiguous virtual memory block
- * len the lenght of the contiguous virtual memory block
- * starting_point_virt the virtual address of the contiguous *physical* memory block
- * starting_point_phys the *physical* address of the contiguous *physical* memory block
- * point_virt the pointer to the virtual memory we are checking at the moment
- * point1_phys the pointer to the *physical* memory we are checking at the moment
- * point2_phys the pointer to the *physical* memory we are checking at the moment
- */
-
- level = 0;
- virt = starting_point_virt = (vm_offset_t)xs->data;
- point1_phys = starting_point_phys = kvtop(xs->data);
- len = xs->datalen;
- /*
- * Check if we need scatter/gather
- */
-
- if (len > PAGESIZE) {
- for (level = 0, point_virt = ROUND_PAGE(starting_point_virt+1);
- /* if we do already scatter/gather we have to stay in the loop and jump */
- point_virt < virt + (vm_offset_t)len || sg ;
- point_virt += PAGESIZE) { /* out later */
-
- point2_phys = kvtop(point_virt);
-
- if ((point2_phys - TRUNC_PAGE(point1_phys) - PAGESIZE) || /* physical memory is not contiguous */
- (point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE && sg)) { /* we only can access (1<<16)-1 bytes in scatter/gather_mode */
- if (point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE) { /* We were walking too far for one scatter/gather block ... */
- assert( MAX_SG_BLOCK_SIZE > PAGESIZE );
- point_virt = TRUNC_PAGE(starting_point_virt+MAX_SG_BLOCK_SIZE-1); /* So go back to the beginning of the last matching page */
- /* and gererate the physadress of this location for the next time. */
- point2_phys = kvtop(point_virt);
- }
-
- if (!sg) {
- /* We allocate our fist scatter/gather list */
- sg = vs_alloc_scatter_gather();
- }
+ M328_SG sg;
+ vm_offset_t starting_point_virt, starting_point_phys, point_virt,
+ point1_phys, point2_phys, virt;
+ unsigned len;
+ int level;
+
+ sg = (M328_SG)0; /* Hopefully we need no scatter/gather list */
+
+ /*
+ * We have the following things:
+	 * virt                 the virtual address of the contiguous virtual memory block
+	 * len                  the length of the contiguous virtual memory block
+ * starting_point_virt the virtual address of the contiguous *physical* memory block
+ * starting_point_phys the *physical* address of the contiguous *physical* memory block
+ * point_virt the pointer to the virtual memory we are checking at the moment
+ * point1_phys the pointer to the *physical* memory we are checking at the moment
+ * point2_phys the pointer to the *physical* memory we are checking at the moment
+ */
+
+ level = 0;
+ virt = starting_point_virt = (vm_offset_t)xs->data;
+ point1_phys = starting_point_phys = kvtop(xs->data);
+ len = xs->datalen;
+ /*
+ * Check if we need scatter/gather
+ */
+
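+	/*
+	 * Walk the buffer a page at a time, comparing successive kvtop()
+	 * translations.  A new scatter/gather element is emitted whenever
+	 * physical contiguity breaks or an element would exceed
+	 * MAX_SG_BLOCK_SIZE.
+	 */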
+ if (len > PAGESIZE) {
+ for (level = 0, point_virt = ROUND_PAGE(starting_point_virt+1);
+		     /* if we are already doing scatter/gather we have to stay in the loop and jump */
+ point_virt < virt + (vm_offset_t)len || sg ;
+ point_virt += PAGESIZE) { /* out later */
+
+ point2_phys = kvtop(point_virt);
+
+ if ((point2_phys - TRUNC_PAGE(point1_phys) - PAGESIZE) || /* physical memory is not contiguous */
+			    (point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE && sg)) { /* we can only access (1<<16)-1 bytes in scatter/gather mode */
+ if (point_virt - starting_point_virt >= MAX_SG_BLOCK_SIZE) { /* We were walking too far for one scatter/gather block ... */
+ assert( MAX_SG_BLOCK_SIZE > PAGESIZE );
+ point_virt = TRUNC_PAGE(starting_point_virt+MAX_SG_BLOCK_SIZE-1); /* So go back to the beginning of the last matching page */
+				/* and generate the physical address of this location for the next time. */
+ point2_phys = kvtop(point_virt);
+ }
+
+ if (!sg) {
+ /* We allocate our fist scatter/gather list */
+				/* We allocate our first scatter/gather list */
+ }
#if 1 /* broken firmware */
- if (sg->elements >= MAX_SG_ELEMENTS) {
- vs_dealloc_scatter_gather(sg);
- return (NULL);
- }
+ if (sg->elements >= MAX_SG_ELEMENTS) {
+ vs_dealloc_scatter_gather(sg);
+ return (NULL);
+ }
#else /* if the firmware will ever get fixed */
- while (sg->elements >= MAX_SG_ELEMENTS) {
- if (!sg->up) { /* If the list full in this layer ? */
- sg->up = vs_alloc_scatter_gather();
- sg->up->level = sg->level+1;
- sg->up->down[0] = sg;
- sg->up->elements = 1;
- }
- /* link this full list also in physical memory */
- vs_link_sg_list(&(sg->up->list[sg->up->elements-1]),
- kvtop((vm_offset_t)sg->list),
- sg->elements);
- sg = sg->up; /* Climb up */
- }
- while (sg->level) { /* As long as we are not a the base level */
- register int i;
-
- i = sg->elements;
- /* We need a new element */
- sg->down[i] = vs_alloc_scatter_gather();
- sg->down[i]->level = sg->level - 1;
- sg->down[i]->up = sg;
- sg->elements++;
- sg = sg->down[i]; /* Climb down */
- }
+ while (sg->elements >= MAX_SG_ELEMENTS) {
+				if (!sg->up) { /* Is the list full in this layer? */
+ sg->up = vs_alloc_scatter_gather();
+ sg->up->level = sg->level+1;
+ sg->up->down[0] = sg;
+ sg->up->elements = 1;
+ }
+ /* link this full list also in physical memory */
+ vs_link_sg_list(&(sg->up->list[sg->up->elements-1]),
+ kvtop((vm_offset_t)sg->list),
+ sg->elements);
+ sg = sg->up; /* Climb up */
+ }
+			while (sg->level) { /* As long as we are not at the base level */
+ register int i;
+
+ i = sg->elements;
+ /* We need a new element */
+ sg->down[i] = vs_alloc_scatter_gather();
+ sg->down[i]->level = sg->level - 1;
+ sg->down[i]->up = sg;
+ sg->elements++;
+ sg = sg->down[i]; /* Climb down */
+ }
#endif /* 1 */
-
- if (point_virt < virt+(vm_offset_t)len) {
- /* linking element */
- vs_link_sg_element(&(sg->list[sg->elements]),
- starting_point_phys,
- point_virt-starting_point_virt);
- sg->elements++;
- } else {
- /* linking last element */
- vs_link_sg_element(&(sg->list[sg->elements]),
- starting_point_phys,
- (vm_offset_t)(virt+len)-starting_point_virt);
- sg->elements++;
- break; /* We have now collected all blocks */
- }
- starting_point_virt = point_virt;
- starting_point_phys = point2_phys;
- }
- point1_phys = point2_phys;
- }
- }
-
- /*
- * Climb up along the right side of the tree until we reach the top.
- */
-
- if (sg) {
- while (sg->up) {
- /* link this list also in physical memory */
- vs_link_sg_list(&(sg->up->list[sg->up->elements-1]),
- kvtop((vm_offset_t)sg->list),
- sg->elements);
- sg = sg->up; /* Climb up */
- }
-
- iopb->iopb_OPTION |= M_OPT_SG;
- iopb->iopb_ADDR |= M_ADR_SG_LINK;
- LV(iopb->iopb_BUFF, kvtop((vm_offset_t)sg->list));
- LV(iopb->iopb_LENGTH, sg->elements);
- LV(iopb->iopb_SGTTL, len);
- } else {
- /* no scatter/gather neccessary */
- LV(iopb->iopb_BUFF, starting_point_phys);
- LV(iopb->iopb_LENGTH, len);
- }
- return (sg);
+ if (point_virt < virt+(vm_offset_t)len) {
+ /* linking element */
+ vs_link_sg_element(&(sg->list[sg->elements]),
+ starting_point_phys,
+ point_virt-starting_point_virt);
+ sg->elements++;
+ } else {
+ /* linking last element */
+ vs_link_sg_element(&(sg->list[sg->elements]),
+ starting_point_phys,
+ (vm_offset_t)(virt+len)-starting_point_virt);
+ sg->elements++;
+ break; /* We have now collected all blocks */
+ }
+ starting_point_virt = point_virt;
+ starting_point_phys = point2_phys;
+ }
+ point1_phys = point2_phys;
+ }
+ }
+
+ /*
+ * Climb up along the right side of the tree until we reach the top.
+ */
+
+ if (sg) {
+ while (sg->up) {
+ /* link this list also in physical memory */
+ vs_link_sg_list(&(sg->up->list[sg->up->elements-1]),
+ kvtop((vm_offset_t)sg->list),
+ sg->elements);
+ sg = sg->up; /* Climb up */
+ }
+
+ iopb->iopb_OPTION |= M_OPT_SG;
+ iopb->iopb_ADDR |= M_ADR_SG_LINK;
+ LV(iopb->iopb_BUFF, kvtop((vm_offset_t)sg->list));
+ LV(iopb->iopb_LENGTH, sg->elements);
+ LV(iopb->iopb_SGTTL, len);
+ } else {
+		/* no scatter/gather necessary */
+ LV(iopb->iopb_BUFF, starting_point_phys);
+ LV(iopb->iopb_LENGTH, len);
+ }
+ return (sg);
}
-
diff --git a/sys/arch/mvme88k/dev/vsdma.c b/sys/arch/mvme88k/dev/vsdma.c
index ce15c791b55..1b7819f780a 100644
--- a/sys/arch/mvme88k/dev/vsdma.c
+++ b/sys/arch/mvme88k/dev/vsdma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vsdma.c,v 1.2 1999/09/27 18:43:26 smurph Exp $ */
+/* $OpenBSD: vsdma.c,v 1.3 2001/02/01 03:38:16 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* All rights reserved.
@@ -51,20 +51,19 @@
#include <mvme88k/dev/vsreg.h>
#include <mvme88k/dev/vsvar.h>
#include <mvme88k/dev/vme.h>
-#include "machine/mmu.h"
#else
#include <mvme68k/dev/vsreg.h>
#include <mvme68k/dev/vsvar.h>
#include <mvme68k/dev/vme.h>
#endif /* defined(MVME187) */
-int vsmatch __P((struct device *, void *, void *));
-void vsattach __P((struct device *, struct device *, void *));
-int vsprint __P((void *auxp, char *));
-void vs_initialize __P((struct vs_softc *));
-int vs_intr __P((struct vs_softc *));
-int vs_nintr __P((struct vs_softc *));
-int vs_eintr __P((struct vs_softc *));
+int vsmatch __P((struct device *, void *, void *));
+void vsattach __P((struct device *, struct device *, void *));
+int vsprint __P((void *auxp, char *));
+int vs_initialize __P((struct vs_softc *));
+int vs_intr __P((struct vs_softc *));
+int vs_nintr __P((struct vs_softc *));
+int vs_eintr __P((struct vs_softc *));
struct scsi_adapter vs_scsiswitch = {
vs_scsicmd,
@@ -81,37 +80,31 @@ struct scsi_device vs_scsidev = {
};
struct cfattach vs_ca = {
- sizeof(struct vs_softc), vsmatch, vsattach,
+ sizeof(struct vs_softc), vsmatch, vsattach,
};
-
+
struct cfdriver vs_cd = {
- NULL, "vs", DV_DULL, 0
+ NULL, "vs", DV_DULL, 0
};
int
vsmatch(pdp, vcf, args)
- struct device *pdp;
- void *vcf, *args;
+struct device *pdp;
+void *vcf, *args;
{
struct cfdata *cf = vcf;
struct confargs *ca = args;
if (!badvaddr(ca->ca_vaddr, 1)) {
- /*
- if (ca->ca_vec & 0x03) {
- printf("vs: bad vector 0x%x\n", ca->ca_vec);
- return (0);
- }
- */
- return(1);
- } else {
+ return (1);
+ } else {
return (0);
- }
+ }
}
void
vsattach(parent, self, auxp)
- struct device *parent, *self;
- void *auxp;
+struct device *parent, *self;
+void *auxp;
{
struct vs_softc *sc = (struct vs_softc *)self;
struct confargs *ca = auxp;
@@ -122,9 +115,10 @@ vsattach(parent, self, auxp)
sc->sc_vsreg = rp = ca->ca_vaddr;
sc->sc_ipl = ca->ca_ipl;
- sc->sc_nvec = ca->ca_vec + 0;
- sc->sc_evec = ca->ca_vec + 1;
- sc->sc_link.adapter_softc = sc;
+ sc->sc_nvec = ca->ca_vec;
+ /* get the next available vector for the error interrupt func. */
+ sc->sc_evec = vme_findvec();
+ sc->sc_link.adapter_softc = sc;
sc->sc_link.adapter_target = 7;
sc->sc_link.adapter = &vs_scsiswitch;
sc->sc_link.device = &vs_scsidev;
@@ -133,28 +127,28 @@ vsattach(parent, self, auxp)
sc->sc_ih_n.ih_fn = vs_nintr;
sc->sc_ih_n.ih_arg = sc;
sc->sc_ih_n.ih_ipl = ca->ca_ipl;
-
-
- sc->sc_ih_e.ih_fn = vs_eintr;
+
+ sc->sc_ih_e.ih_fn = vs_eintr;
sc->sc_ih_e.ih_arg = sc;
sc->sc_ih_e.ih_ipl = ca->ca_ipl;
-
- vs_initialize(sc);
+
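+	/* vs_initialize() returns non-zero if the board failed to come up */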
+ if (vs_initialize(sc))
+ return;
vmeintr_establish(sc->sc_nvec, &sc->sc_ih_n);
vmeintr_establish(sc->sc_evec, &sc->sc_ih_e);
- evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_n);
- evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_e);
+ evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_n);
+ evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt_e);
/*
* attach all scsi units on us, watching for boot device
* (see dk_establish).
*/
tmp = bootpart;
- if (ca->ca_paddr != bootaddr)
- bootpart = -1; /* invalid flag to dk_establish */
+ if (ca->ca_paddr != bootaddr)
+ bootpart = -1; /* invalid flag to dk_establish */
config_found(self, &sc->sc_link, scsiprint);
- bootpart = tmp; /* restore old value */
+ bootpart = tmp; /* restore old value */
}
/*
@@ -162,38 +156,10 @@ vsattach(parent, self, auxp)
*/
int
vsprint(auxp, pnp)
- void *auxp;
- char *pnp;
+void *auxp;
+char *pnp;
{
if (pnp == NULL)
return (UNCONF);
return (QUIET);
}
-
-/* normal interrupt function */
-int
-vs_nintr(sc)
- struct vs_softc *sc;
-{
-#ifdef SDEBUG
- printf("Normal Interrupt!!!\n");
-#endif
- vs_intr(sc);
- sc->sc_intrcnt_n.ev_count++;
- return (1);
-}
-
-/* error interrupt function */
-int
-vs_eintr(sc)
- struct vs_softc *sc;
-{
-#ifdef SDEBUG
- printf("Error Interrupt!!!\n");
-#endif
- vs_intr(sc);
- sc->sc_intrcnt_e.ev_count++;
- return (1);
-}
-
-
diff --git a/sys/arch/mvme88k/dev/vsreg.h b/sys/arch/mvme88k/dev/vsreg.h
index 8adac5f60fe..386ef0e15eb 100644
--- a/sys/arch/mvme88k/dev/vsreg.h
+++ b/sys/arch/mvme88k/dev/vsreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vsreg.h,v 1.2 1999/09/27 18:43:26 smurph Exp $ */
+/* $OpenBSD: vsreg.h,v 1.3 2001/02/01 03:38:16 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1990 The Regents of the University of California.
@@ -162,53 +162,6 @@ typedef struct mcsb
/**************** END Master Control Status Block (MCSB) *******************/
-/**************** Scater/Gather Stuff *******************/
-
-typedef struct {
- union {
- unsigned short bytes :16;
- #define MAX_SG_BLOCK_SIZE (1<<16) /* the size *has* to be always *smaller* */
- struct {
- unsigned short :8;
- unsigned short gather :8;
- } scatter;
- } count;
- LONGV address;
- unsigned short link :1;
- unsigned short :3;
- unsigned short transfer_type :2;
- /* 0x0 is reserved */
- #define SHORT_TREANSFER 0x1
- #define LONG_TRANSFER 0x2
- #define SCATTER_GATTER_LIST_IN_SHORT_IO 0x3
- unsigned short memory_type :2;
- #define NORMAL_TYPE 0x0
- #define BLOCK_MODE 0x1
- /* 0x2 is reserved */
- /* 0x3 is reserved */
- unsigned short address_modifier :8;
-}sg_list_element_t;
-
-typedef sg_list_element_t * scatter_gather_list_t;
-
-#define MAX_SG_ELEMENTS 64
-
-struct m328_sg {
- struct m328_sg *up;
- int elements;
- int level;
- struct m328_sg *down[MAX_SG_ELEMENTS];
- sg_list_element_t list[MAX_SG_ELEMENTS];
-};
-
-typedef struct m328_sg *M328_SG;
-
-typedef struct {
- struct scsi_xfer *xs;
- M328_SG top_sg_list;
-} M328_CMD;
-/**************** END Scater/Gather Stuff *******************/
-
/**************** Host Semaphore Block (HSB) *******************/
typedef struct hsb
@@ -344,6 +297,10 @@ typedef struct cqe
#define CRB_CLR_DONE(crsw) ((crsw) = 0)
#define CRB_CLR_ER(crsw) ((crsw) &= ~M_CRSW_ER)
+#define CRB_CLR_SC(crsw) ((crsw) &= ~M_CRSW_SC)
+#define CRB_CLR_SE(crsw) ((crsw) &= ~M_CRSW_SE)
+#define CRB_CLR_RST(crsw) ((crsw) &= ~M_CRSW_RST)
+#define CRB_CLR(crsw) ((crsw) &= ~(x))
typedef struct crb
{ /* Command Response Block */
@@ -358,9 +315,9 @@ typedef struct crb
/**************** END Command Response Block (CRB) *******************/
/*********** Controller Error Vector Status Block (CEVSB) **************/
-
-typedef struct cevsb
-{ /* Command Response Block */
+#define CONTROLLER_ERROR 0x0085
+#define NR_SCSI_ERROR 0x0885
+typedef struct cevsb { /* Command Response Block */
volatile u_short cevsb_CRSW; /* Command Response Status Word */
volatile u_char cevsb_TYPE; /* IOPB type */
volatile u_char cevsb_RES0; /* Reserved byte */
@@ -370,6 +327,13 @@ typedef struct cevsb
volatile u_short cevsb_RES1; /* Reserved word */
volatile u_char cevsb_RES2; /* Reserved byte */
volatile u_char cevsb_ERROR; /* error code */
+#define CEVSB_ERR_TYPE 0xC0 /* IOPB type error */
+#define CEVSB_ERR_TO 0xC1 /* IOPB timeout error */
+#define CEVSB_ERR_TR 0x82 /* Target Reconnect, no IOPB */
+#define CEVSB_ERR_OF 0x83 /* Overflow */
+#define CEVSB_ERR_BD 0x84 /* Bad direction */
+#define CEVSB_ERR_NR		0x86	/* Non-Recoverable Error */
+#define CESVB_ERR_PANIC		0xFF	/* Board Panic!!! */
volatile u_short cevsb_AUXERR; /* COUGAR error code */
} M328_CEVSB;
@@ -739,18 +703,4 @@ typedef struct ipsg
#define D64_MOD ( (TT_D64 << 10) | (MEMTYPE << 8) | ADRM_EXT_S_D64 )
#define SHIO_MOD ( (TT_NORMAL << 10) | (MEMT_SHIO << 8) | ADRM_SHT_N_IO)
-/*
- * Scatter/gather functions
- */
-
-M328_SG vs_alloc_scatter_gather __P((void));
-void vs_dealloc_scatter_gather __P((M328_SG sg));
-void vs_link_scatter_gather_element __P((sg_list_element_t *element,
- register vm_offset_t phys_add,
- register int len));
-void vs_link_scatter_gather_list __P((sg_list_element_t *list,
- register vm_offset_t phys_add,
- register int elements));
-M328_SG vs_build_memory_structure __P((struct scsi_xfer *xs, M328_IOPB *iopb));
-
#endif /* _M328REG_H_ */
diff --git a/sys/arch/mvme88k/dev/vsvar.h b/sys/arch/mvme88k/dev/vsvar.h
index e645971095c..56440eb28c3 100644
--- a/sys/arch/mvme88k/dev/vsvar.h
+++ b/sys/arch/mvme88k/dev/vsvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vsvar.h,v 1.1 1999/05/29 04:41:44 smurph Exp $ */
+/* $OpenBSD: vsvar.h,v 1.2 2001/02/01 03:38:16 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1990 The Regents of the University of California.
@@ -48,6 +48,55 @@
#define LO(x) (u_short)((unsigned long)x & 0x0000FFFF)
#define HI(x) (u_short)((unsigned long)x >> 16)
#define OFF(x) (u_short)((long)kvtop(x) - (long)kvtop(sc->sc_vsreg))
+#define vs_name(sc) (sc)->sc_dev.dv_xname
+
+/**************** Scatter/Gather Stuff *******************/
+
+typedef struct {
+ union {
+ unsigned short bytes :16;
+	#define MAX_SG_BLOCK_SIZE	(1<<16)	/* the size always *has* to be *smaller* than this */
+ struct {
+ unsigned short :8;
+ unsigned short gather :8;
+ } scatter;
+ } count;
+ LONGV address;
+ unsigned short link :1;
+ unsigned short :3;
+ unsigned short transfer_type :2;
+ /* 0x0 is reserved */
+ #define SHORT_TREANSFER 0x1
+ #define LONG_TRANSFER 0x2
+ #define SCATTER_GATTER_LIST_IN_SHORT_IO 0x3
+ unsigned short memory_type :2;
+ #define NORMAL_TYPE 0x0
+ #define BLOCK_MODE 0x1
+ /* 0x2 is reserved */
+ /* 0x3 is reserved */
+ unsigned short address_modifier :8;
+} sg_list_element_t;
+
+typedef sg_list_element_t * scatter_gather_list_t;
+
+#define MAX_SG_ELEMENTS 64
+
+struct m328_sg {
+ struct m328_sg *up;
+ int elements;
+ int level;
+ struct m328_sg *down[MAX_SG_ELEMENTS];
+ sg_list_element_t list[MAX_SG_ELEMENTS];
+};
+
+typedef struct m328_sg *M328_SG;
+
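+/*
+ * One of these is allocated per outstanding command; its address is used
+ * as the command tag in the CQE/CRB so the interrupt handlers can find
+ * the scsi_xfer and release the scatter/gather tree.
+ */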
+typedef struct {
+ struct scsi_xfer *xs;
+ M328_SG top_sg_list;
+} M328_CMD;
+
+/**************** END Scatter/Gather Stuff *******************/
struct vs_tinfo {
int cmds; /* #commands processed */
@@ -56,32 +105,32 @@ struct vs_tinfo {
int perrs; /* #parity errors */
int senses; /* #request sense commands sent */
ushort lubusy; /* What local units/subr. are busy? */
- u_char flags;
- u_char period; /* Period suggestion */
- u_char offset; /* Offset suggestion */
- int avail; /* Is there a device there */
+ u_char flags;
+ u_char period; /* Period suggestion */
+ u_char offset; /* Offset suggestion */
+ int avail; /* Is there a device there */
} tinfo_t;
-struct vs_softc {
- struct device sc_dev;
- struct intrhand sc_ih_e;
- struct intrhand sc_ih_n;
- struct evcnt sc_intrcnt_e;
- struct evcnt sc_intrcnt_n;
+struct vs_softc {
+ struct device sc_dev;
+ struct intrhand sc_ih_e;
+ struct intrhand sc_ih_n;
+ struct evcnt sc_intrcnt_e;
+ struct evcnt sc_intrcnt_n;
u_short sc_ipl;
- u_short sc_evec;
- u_short sc_nvec;
- struct scsi_link sc_link; /* proto for sub devices */
- u_long sc_chnl; /* channel 0 or 1 for dual bus cards */
- u_long sc_qhp; /* Command queue head pointer */
- struct vsreg *sc_vsreg;
+ u_short sc_evec;
+ u_short sc_nvec;
+ struct scsi_link sc_link; /* proto for sub devices */
+ u_long sc_chnl; /* channel 0 or 1 for dual bus cards */
+ u_long sc_qhp; /* Command queue head pointer */
+ struct vsreg *sc_vsreg;
#define SIOP_NACB 8
struct vs_tinfo sc_tinfo[8];
- u_char sc_flags;
- u_char sc_sien;
- u_char sc_dien;
+ u_char sc_flags;
+ u_char sc_sien;
+ u_char sc_dien;
u_char sc_minsync;
- struct map *hus_map;
+ struct map *hus_map;
/* one for each target */
struct syncpar {
u_char state;
@@ -128,5 +177,18 @@ struct vs_softc {
void vs_minphys __P((struct buf *bp));
int vs_scsicmd __P((struct scsi_xfer *));
+/*
+ * Scatter/gather functions
+ */
+
+M328_SG vs_alloc_scatter_gather __P((void));
+void vs_dealloc_scatter_gather __P((M328_SG sg));
+void vs_link_scatter_gather_element __P((sg_list_element_t *element,
+ register vm_offset_t phys_add,
+ register int len));
+void vs_link_scatter_gather_list __P((sg_list_element_t *list,
+ register vm_offset_t phys_add,
+ register int elements));
+M328_SG vs_build_memory_structure __P((struct scsi_xfer *xs, M328_IOPB *iopb));
#endif /* _M328VAR_H */
diff --git a/sys/arch/mvme88k/include/asm.h b/sys/arch/mvme88k/include/asm.h
index a72fcdaf82f..4540ecdc0b9 100644
--- a/sys/arch/mvme88k/include/asm.h
+++ b/sys/arch/mvme88k/include/asm.h
@@ -200,20 +200,19 @@
* in a rather precarious state and so special cautions must
* be taken.
*/
-#define FLAG_CPU_FIELD_WIDTH 4 /* must be <= 12 */
+#define FLAG_CPU_FIELD_WIDTH 2 /* must be <= 12 */
#define FLAG_IGNORE_DATA_EXCEPTION 5 /* bit number 5 */
-#define FLAG_INTERRUPT_EXCEPTION 6 /* bit number 6 */
-#define FLAG_ENABLING_FPU 7 /* bit number 7 */
-#define FLAG_FROM_KERNEL 8 /* bit number 8 */
-#define FLAG_187 8 /* bit number 9 */
-#define FLAG_188 9 /* bit number 10 */
-#define FLAG_197 10 /* bit number 11 */
+#define FLAG_INTERRUPT_EXCEPTION 6 /* bit number 6 */
+#define FLAG_ENABLING_FPU 7 /* bit number 7 */
+#define FLAG_FROM_KERNEL 8 /* bit number 8 */
+#define FLAG_187 9 /* bit number 9 */
+#define FLAG_188 10 /* bit number 10 */
+#define FLAG_197 11 /* bit number 11 */
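+/* FLAG_187 now has its own bit; it previously shared bit 8 with FLAG_FROM_KERNEL */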
/* REGister OFFset into the E.F. (exception frame) */
#define REG_OFF(reg_num) ((reg_num) * 4) /* (num * sizeof(register int)) */
#define GENREG_OFF(num) (REG_OFF(EF_R0 + (num))) /* GENeral REGister OFFset */
-
#define GENERAL_BREATHING_ROOM /* arbitrarily */ 200
#define KERNEL_STACK_BREATHING_ROOM \
(GENERAL_BREATHING_ROOM + SIZEOF_STRUCT_PCB + SIZEOF_STRUCT_UTHREAD)
diff --git a/sys/arch/mvme88k/include/asm_macro.h b/sys/arch/mvme88k/include/asm_macro.h
index c437fb7a0cf..1ece66a5ab7 100644
--- a/sys/arch/mvme88k/include/asm_macro.h
+++ b/sys/arch/mvme88k/include/asm_macro.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: asm_macro.h,v 1.8 2001/01/15 19:50:38 deraadt Exp $ */
+/* $OpenBSD: asm_macro.h,v 1.9 2001/02/01 03:38:17 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -59,12 +59,12 @@ typedef unsigned long m88k_psr_type;
*/
static __inline__ m88k_psr_type disable_interrupts_return_psr(void)
{
- m88k_psr_type temp, oldpsr;
- __asm__ __volatile__ ("ldcr %0, cr1" : "=r" (oldpsr));
- __asm__ __volatile__ ("set %1, %0, 1<1>" : "=r" (oldpsr), "=r" (temp));
- __asm__ __volatile__ ("stcr %0, cr1" : "=r" (temp));
- __asm__ __volatile__ ("tcnd ne0, r0, 0");
- return oldpsr;
+ m88k_psr_type temp, oldpsr;
+ __asm__ __volatile__ ("ldcr %0, cr1" : "=r" (oldpsr));
+ __asm__ __volatile__ ("set %1, %0, 1<1>" : "=r" (oldpsr), "=r" (temp));
+ __asm__ __volatile__ ("stcr %0, cr1" : "=r" (temp));
+ __asm__ __volatile__ ("tcnd ne0, r0, 0");
+ return oldpsr;
}
#define disable_interrupt() (void)disable_interrupts_return_psr()
@@ -73,7 +73,7 @@ static __inline__ m88k_psr_type disable_interrupts_return_psr(void)
*/
static __inline__ void set_psr(m88k_psr_type psr)
{
- __asm__ __volatile__ ("stcr %0, cr1" :: "r" (psr));
+ __asm__ __volatile__ ("stcr %0, cr1" :: "r" (psr));
}
/*
@@ -81,11 +81,11 @@ static __inline__ void set_psr(m88k_psr_type psr)
*/
static __inline__ m88k_psr_type enable_interrupts_return_psr(void)
{
- m88k_psr_type temp, oldpsr; /* need a temporary register */
- __asm__ __volatile__ ("ldcr %0, cr1" : "=r" (oldpsr));
- __asm__ __volatile__ ("clr %1, %0, 1<1>" : "=r" (oldpsr), "=r" (temp));
- __asm__ __volatile__ ("stcr %0, cr1" : "=r" (temp));
- return oldpsr;
+ m88k_psr_type temp, oldpsr; /* need a temporary register */
+ __asm__ __volatile__ ("ldcr %0, cr1" : "=r" (oldpsr));
+ __asm__ __volatile__ ("clr %1, %0, 1<1>" : "=r" (oldpsr), "=r" (temp));
+ __asm__ __volatile__ ("stcr %0, cr1" : "=r" (temp));
+ return oldpsr;
}
#define enable_interrupt() (void)enable_interrupts_return_psr()
@@ -97,8 +97,19 @@ static __inline__ m88k_psr_type enable_interrupts_return_psr(void)
*/
static __inline__ void flush_pipeline()
{
- __asm__ __volatile__ ("tcnd ne0, r0, 0");
+ __asm__ __volatile__ ("tcnd ne0, r0, 0");
}
#define db_flush_pipeline flush_pipeline
+/*
+ * gets the current stack pointer.
+ */
+static inline unsigned long stack_pointer()
+{
+ register unsigned long sp;
+ __asm__ __volatile__ ("or %0,r0,r31" : "=r" (sp));
+ return(sp);
+}
+
+
#endif __MACHINE_M88K_ASM_MACRO_H__
diff --git a/sys/arch/mvme88k/include/board.h b/sys/arch/mvme88k/include/board.h
index 7890c04b7f5..b69e13778ca 100644
--- a/sys/arch/mvme88k/include/board.h
+++ b/sys/arch/mvme88k/include/board.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: board.h,v 1.7 2001/01/14 20:25:23 smurph Exp $ */
+/* $OpenBSD: board.h,v 1.8 2001/02/01 03:38:17 smurph Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
@@ -63,52 +63,52 @@
#endif
#define UDEFINED
-#define MAX_CPUS 4 /* no. of CPUs */
-#define MAX_CMMUS 8 /* 2 CMMUs per CPU - 1 data and 1 code */
+#define MAX_CPUS 4 /* no. of CPUs */
+#define MAX_CMMUS 8 /* 2 CMMUs per CPU - 1 data and 1 code */
-#define SYSV_BASE U(0x00000000) /* system virtual base */
+#define SYSV_BASE U(0x00000000) /* system virtual base */
-#define MAXU_ADDR U(0x40000000) /* size of user virtual space */
-#define MAXPHYSMEM U(0x10000000) /* max physical memory */
+#define MAXU_ADDR U(0x40000000) /* size of user virtual space */
+#define MAXPHYSMEM U(0x10000000) /* max physical memory */
-#define VMEA16 U(0xFFFF0000) /* VMEbus A16 */
-#define VMEA16_SIZE U(0x0000EFFF) /* VMEbus A16 size */
-#define VMEA32D16 U(0xFF000000) /* VMEbus A32/D16 */
+#define VMEA16 U(0xFFFF0000) /* VMEbus A16 */
+#define VMEA16_SIZE U(0x0000EFFF) /* VMEbus A16 size */
+#define VMEA32D16 U(0xFF000000) /* VMEbus A32/D16 */
#define VMEA32D16_SIZE U(0x007FFFFF) /* VMEbus A32/D16 size */
/* These need to be here because of the way m18x_cmmu.c
handles the CMMU's. */
-#define CMMU_SIZE 0x1000
+#define CMMU_SIZE 0x1000
#ifndef CMMU_DEFS
#define CMMU_DEFS
-#define SBC_CMMU_I U(0xFFF77000) /* Single Board Computer code CMMU */
-#define SBC_CMMU_D U(0xFFF7F000) /* Single Board Computer data CMMU */
+#define SBC_CMMU_I U(0xFFF77000) /* Single Board Computer code CMMU */
+#define SBC_CMMU_D U(0xFFF7F000) /* Single Board Computer data CMMU */
-#define VME_CMMU_I0 U(0xFFF7E000) /* MVME188 code CMMU 0 */
-#define VME_CMMU_I1 U(0xFFF7D000) /* MVME188 code CMMU 1 */
-#define VME_CMMU_I2 U(0xFFF7B000) /* MVME188 code CMMU 2 */
-#define VME_CMMU_I3 U(0xFFF77000) /* MVME188 code CMMU 3 */
-#define VME_CMMU_D0 U(0xFFF6F000) /* MVME188 data CMMU 0 */
-#define VME_CMMU_D1 U(0xFFF5F000) /* MVME188 data CMMU 1 */
-#define VME_CMMU_D2 U(0xFFF3F000) /* MVME188 data CMMU 2 */
-#define VME_CMMU_D3 U(0xFFF7F000) /* MVME188 data CMMU 3 */
+#define VME_CMMU_I0 U(0xFFF7E000) /* MVME188 code CMMU 0 */
+#define VME_CMMU_I1 U(0xFFF7D000) /* MVME188 code CMMU 1 */
+#define VME_CMMU_I2 U(0xFFF7B000) /* MVME188 code CMMU 2 */
+#define VME_CMMU_I3 U(0xFFF77000) /* MVME188 code CMMU 3 */
+#define VME_CMMU_D0 U(0xFFF6F000) /* MVME188 data CMMU 0 */
+#define VME_CMMU_D1 U(0xFFF5F000) /* MVME188 data CMMU 1 */
+#define VME_CMMU_D2 U(0xFFF3F000) /* MVME188 data CMMU 2 */
+#define VME_CMMU_D3 U(0xFFF7F000) /* MVME188 data CMMU 3 */
#endif /* CMMU_DEFS */
/* These are the hardware exceptions. */
-#define INT_BIT 0x1 /* interrupt exception */
-#define IACC_BIT 0x2 /* instruction access exception */
-#define DACC_BIT 0x4 /* data access exception */
-#define MACC_BIT 0x8 /* misaligned access exception */
-#define UOPC_BIT 0x10 /* unimplemented opcode exception*/
-#define PRIV_BIT 0x20 /* priviledge violation exception*/
-#define BND_BIT 0x40 /* bounds check violation */
-#define IDE_BIT 0x80 /* illegal integer divide */
-#define IOV_BIT 0x100 /* integer overflow exception */
-#define ERR_BIT 0x200 /* error exception */
-#define FPUP_BIT 0x400 /* FPU precise exception */
-#define FPUI_BIT 0x800 /* FPU imprecise exception */
+#define INT_BIT 0x1 /* interrupt exception */
+#define IACC_BIT 0x2 /* instruction access exception */
+#define DACC_BIT 0x4 /* data access exception */
+#define MACC_BIT 0x8 /* misaligned access exception */
+#define UOPC_BIT 	0x10 	/* unimplemented opcode exception */
+#define PRIV_BIT 	0x20 	/* privilege violation exception */
+#define BND_BIT 0x40 /* bounds check violation */
+#define IDE_BIT 0x80 /* illegal integer divide */
+#define IOV_BIT 0x100 /* integer overflow exception */
+#define ERR_BIT 0x200 /* error exception */
+#define FPUP_BIT 0x400 /* FPU precise exception */
+#define FPUI_BIT 0x800 /* FPU imprecise exception */
#if defined(MVME187) || defined(MVME197)
#include <machine/mvme1x7.h>
diff --git a/sys/arch/mvme88k/include/m88100.h b/sys/arch/mvme88k/include/m88100.h
index 88739cbc908..57b8111f5fd 100644
--- a/sys/arch/mvme88k/include/m88100.h
+++ b/sys/arch/mvme88k/include/m88100.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: m88100.h,v 1.5 2001/01/14 20:25:24 smurph Exp $ */
+/* $OpenBSD: m88100.h,v 1.6 2001/02/01 03:38:17 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1992 Carnegie Mellon University
@@ -40,6 +40,7 @@
*/
/* DMT0, DMT1, DMT2 */
+#define DMT_SKIP 0x00010000 /* skip this dmt in data_access_emulation */
#define DMT_BO 0x00008000 /* Byte-Ordering */
#define DMT_DAS 0x00004000 /* Data Access Space */
#define DMT_DOUB1 0x00002000 /* Double Word */
@@ -53,17 +54,22 @@
#ifndef ASSEMBLER
#include "sys/types.h"
+/* dmt_skip is never set by the cpu. It is used to
+ * mark 'known' transactions so that they don't get
+ * processed by data_access_emulation(). XXX smurph
+ */
struct dmt_reg {
- unsigned int :16,
- dmt_bo:1,
- dmt_das:1,
- dmt_doub1:1,
- dmt_lockbar:1,
- dmt_dreg:5,
- dmt_signed:1,
- dmt_en:4,
- dmt_write:1,
- dmt_valid:1;
+ unsigned int :15,
+ dmt_skip:1,
+ dmt_bo:1,
+ dmt_das:1,
+ dmt_doub1:1,
+ dmt_lockbar:1,
+ dmt_dreg:5,
+ dmt_signed:1,
+ dmt_en:4,
+ dmt_write:1,
+ dmt_valid:1;
};
#endif
diff --git a/sys/arch/mvme88k/include/mvme188.h b/sys/arch/mvme88k/include/mvme188.h
index 3bb14c0ff5e..039e396c09a 100644
--- a/sys/arch/mvme88k/include/mvme188.h
+++ b/sys/arch/mvme88k/include/mvme188.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mvme188.h,v 1.2 2001/01/14 20:25:24 smurph Exp $ */
+/* $OpenBSD: mvme188.h,v 1.3 2001/02/01 03:38:18 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* All rights reserved.
@@ -63,14 +63,14 @@
#endif
-#define MVME188_EPROM U(0xFFC00000)
-#define MVME188_EPROM_SIZE U(0x00080000)
-#define MVME188_SRAM U(0xFFE00000)
-#define MVME188_SRAM_SIZE U(0x00020000)
-#define MVME188_UTILITY U(0xFF000000)
+#define MVME188_EPROM U(0xFFC00000)
+#define MVME188_EPROM_SIZE U(0x00080000)
+#define MVME188_SRAM U(0xFFE00000)
+#define MVME188_SRAM_SIZE U(0x00020000)
+#define MVME188_UTILITY U(0xFF000000)
#define MVME188_UTILITY_SIZE U(0x01000000)
-#define UTIL_START U(0xFFC00000) /* start of MVME188 utility space */
-#define UTIL_SIZE U(0x003FFFFF) /* size of MVME188 utility space */
+#define UTIL_START U(0xFFC00000) /* start of MVME188 utility space */
+#define UTIL_SIZE U(0x003FFFFF) /* size of MVME188 utility space */
/*
* MVME188 declarations for hardware level device registers and such.
@@ -78,24 +78,24 @@
/* base address for the interrupt control registers */
#define INTR_CONTROL_BASE U(0xfff84000)
-#define VMEA24SPACE U(0xEEC00000) /* VMEA24 master addr space (4 Meg) */
+#define VMEA24SPACE U(0xEEC00000) /* VMEA24 master addr space (4 Meg) */
/* per-processor interrupt enable registers */
-#define MVME188_IEN0 U(0xFFF84004) /* interrupt enable CPU 0 */
-#define MVME188_IEN1 U(0xFFF84008) /* interrupt enable CPU 1 */
-#define MVME188_IEN2 U(0xFFF84010) /* interrupt enable CPU 2 */
-#define MVME188_IEN3 U(0xFFF84020) /* interrupt enable CPU 3 */
+#define MVME188_IEN0 U(0xFFF84004) /* interrupt enable CPU 0 */
+#define MVME188_IEN1 U(0xFFF84008) /* interrupt enable CPU 1 */
+#define MVME188_IEN2 U(0xFFF84010) /* interrupt enable CPU 2 */
+#define MVME188_IEN3 U(0xFFF84020) /* interrupt enable CPU 3 */
/* same as above */
-#define IEN0_REG U(0xfff84004)
-#define IEN1_REG U(0xfff84008)
-#define IEN2_REG U(0xfff84010)
-#define IEN3_REG U(0xfff84020)
+#define IEN0_REG U(0xfff84004)
+#define IEN1_REG U(0xfff84008)
+#define IEN2_REG U(0xfff84010)
+#define IEN3_REG U(0xfff84020)
-#define IENALL_REG U(0xfff8403c)
+#define IENALL_REG U(0xfff8403c)
-#define MVME188_IST U(0xFFF84040) /* interrupt status register */
-#define IST_REG U(0xfff84040) /* same as above */
+#define MVME188_IST U(0xFFF84040) /* interrupt status register */
+#define IST_REG U(0xfff84040) /* same as above */
#define MVME188_SETSWI U(0xFFF84080) /* generate soft interrupt */
#define MVME188_CLRSWI U(0xFFF84084) /* reset soft interrupt */
@@ -103,82 +103,82 @@
#define MVME188_CLRINT U(0xFFF8408C) /* reset HW interrupt */
/* same as above */
-#define SETSWI_REG U(0xfff84080) /* SETSWI register addr */
-#define CLRSWI_REG U(0xfff84084) /* CLRSWI register addr */
-#define ISTATE_REG U(0xfff84088)
-#define CLRINT_REG U(0xfff8408C)
-
-#define MVME188_GCSR U(0xFFF86000) /* 188 global control and status reg */
-#define MVME188_UCSR U(0xFFF87000) /* 188 utility control and status reg */
-#define MVME188_BASAD U(0xFFF87004) /* 188 base address reg */
-#define MVME188_GLBRES U(0xFFF8700C) /* 188 global reset reg */
-
-#define GCSR_BASE U(0xfff86000)
-#define GLOBAL0 GCSR_BASE + 0x01
-#define GLOBAL1 GCSR_BASE + 0x03
-#define GLOBAL2 GCSR_BASE + 0x05
-#define GLOBAL3 GCSR_BASE + 0x07
-#define GLB0 U(0xfff86001)
-#define GLB1 U(0xfff86003)
-#define GLB2 U(0xfff86005)
-#define GLB3 U(0xfff86007)
-#define M188_SYSCON U(0x00000040)
-#define UCSR_REG U(0xfff87000)
-#define GLBRES_REG U(0xfff8700C)
-
-#define MVME188_CCSR U(0xFFF88000) /* 188 CPU board control status reg */
-#define MVME188_ERROR U(0xFFF88004) /* 188 Mbus fault reg */
-#define MVME188_PCNFA U(0xFFF88008) /* 188 Pbus A decoder reg */
-#define MVME188_PCNFB U(0xFFF8800C) /* 188 Pbus B decoder reg */
-#define MVME188_EXTAD U(0xFFF88010) /* 188 A24 master A24-A31 addr reg */
-#define MVME188_WHOAMI U(0xFFF88018) /* 188 whoami reg */
-#define MVME188_WMAD U(0xFFF88020) /* 188 write mbus addr decoder reg */
-#define MVME188_RMAD U(0xFFF88024) /* 188 read mbus addr decoder reg */
-#define MVME188_WVAD U(0xFFF88028) /* 188 write vmebus addr decoder reg */
-#define MVME188_RVAD U(0xFFF8802C) /* 188 read vmebus adds decoder reg */
+#define SETSWI_REG U(0xfff84080) /* SETSWI register addr */
+#define CLRSWI_REG U(0xfff84084) /* CLRSWI register addr */
+#define ISTATE_REG U(0xfff84088)
+#define CLRINT_REG U(0xfff8408C)
+
+#define MVME188_GCSR U(0xFFF86000) /* 188 global control and status reg */
+#define MVME188_UCSR U(0xFFF87000) /* 188 utility control and status reg */
+#define MVME188_BASAD U(0xFFF87004) /* 188 base address reg */
+#define MVME188_GLBRES U(0xFFF8700C) /* 188 global reset reg */
+
+#define GCSR_BASE U(0xfff86000)
+#define GLOBAL0 GCSR_BASE + 0x01
+#define GLOBAL1 GCSR_BASE + 0x03
+#define GLOBAL2 GCSR_BASE + 0x05
+#define GLOBAL3 GCSR_BASE + 0x07
+#define GLB0 U(0xfff86001)
+#define GLB1 U(0xfff86003)
+#define GLB2 U(0xfff86005)
+#define GLB3 U(0xfff86007)
+#define M188_SYSCON U(0x00000040)
+#define UCSR_REG U(0xfff87000)
+#define GLBRES_REG U(0xfff8700C)
+
+#define MVME188_CCSR U(0xFFF88000) /* 188 CPU board control status reg */
+#define MVME188_ERROR U(0xFFF88004) /* 188 Mbus fault reg */
+#define MVME188_PCNFA U(0xFFF88008) /* 188 Pbus A decoder reg */
+#define MVME188_PCNFB U(0xFFF8800C) /* 188 Pbus B decoder reg */
+#define MVME188_EXTAD U(0xFFF88010) /* 188 A24 master A24-A31 addr reg */
+#define MVME188_WHOAMI U(0xFFF88018) /* 188 whoami reg */
+#define MVME188_WMAD U(0xFFF88020) /* 188 write mbus addr decoder reg */
+#define MVME188_RMAD U(0xFFF88024) /* 188 read mbus addr decoder reg */
+#define MVME188_WVAD U(0xFFF88028) /* 188 write vmebus addr decoder reg */
+#define MVME188_RVAD U(0xFFF8802C) /* 188 read vmebus addr decoder reg */
/* duplicates of above */
-#define CCSR_REG U(0xfff88000)
-#define ERROR_REG U(0xfff88004) /* ERROR register addr */
-#define PCNFA_REG U(0xfff88008)
-#define PCNFB_REG U(0xfff8800c)
-#define EXTAD_REG U(0xfff88010)
-#define EXTAM_REG U(0xfff88014)
-#define WHOAMI_REG U(0xfff88018) /* WHOAMI register addr */
-#define WMAD_REG U(0xfff88020)
-#define RMAD_REG U(0xfff88024)
-#define WVAD_REG U(0xfff88028)
-#define RVAD_REG U(0xfff8802c)
-
-#define MAD_MDS 0x07 /* 188 MAD Device Select bits */
-
-#define VMEA24 0x5 /* Mbus addess decode select for VMEA24 */
-#define VADV 0x1 /* vmeaddres decode enable */
-#define VBDSELBIT 0 /* bit to enable vme slave response low true */
-#define VBDISABLE 0x1 /* VME BUS Disable */
-#define VSDBIT 1 /* bit number to enable snooping low true */
-#define VSDISABLE 0x2 /* VME Snoop Disable */
-#define VASPBIT 21 /* addr space 0 = A32, 1 = A24 bit */
-#define VASP 0x00200000 /* A24 VME address space */
-#define VPN 0x00400000 /* Page Number LSB */
-#define PAGECNT 0x400 /* number of (4 meg) pages to map */
+#define CCSR_REG U(0xfff88000)
+#define ERROR_REG U(0xfff88004) /* ERROR register addr */
+#define PCNFA_REG U(0xfff88008)
+#define PCNFB_REG U(0xfff8800c)
+#define EXTAD_REG U(0xfff88010)
+#define EXTAM_REG U(0xfff88014)
+#define WHOAMI_REG U(0xfff88018) /* WHOAMI register addr */
+#define WMAD_REG U(0xfff88020)
+#define RMAD_REG U(0xfff88024)
+#define WVAD_REG U(0xfff88028)
+#define RVAD_REG U(0xfff8802c)
+
+#define MAD_MDS 0x07 /* 188 MAD Device Select bits */
+
+#define VMEA24 0x5 /* Mbus address decode select for VMEA24 */
+#define VADV 0x1 /* vme address decode enable */
+#define VBDSELBIT 0 /* bit to enable vme slave response low true */
+#define VBDISABLE 0x1 /* VME BUS Disable */
+#define VSDBIT 1 /* bit number to enable snooping low true */
+#define VSDISABLE 0x2 /* VME Snoop Disable */
+#define VASPBIT 21 /* addr space 0 = A32, 1 = A24 bit */
+#define VASP 0x00200000 /* A24 VME address space */
+#define VPN 0x00400000 /* Page Number LSB */
+#define PAGECNT 0x400 /* number of (4 meg) pages to map */
#define UCSR_PWRUPBIT 0x4000 /* 188 UCSR powerup indicator */
#define UCSR_DRVSFBIT 0x2000 /* 188 UCSR Board system fail */
#define UCSR_BRIRQBIT 0x1000 /* 188 UCSR drives VME IRQ1 broadcast int */
-#define UCSR_ROBINBIT 0x800 /* 188 UCSR sel round robin VME arbiter mode */
-#define UCSR_BRLVBITS 0x600 /* 188 UCSR VME bus request level 0-3 */
-#define UCSR_RNEVERBIT 0x100 /* 188 UCSR VME bus never release once req'd */
-#define UCSR_RONRBIT 0x80 /* 188 UCSR VME bus req release on no request */
-#define UCSR_RWDBIT 0x40 /* 188 UCSR VME bus request release when done */
-#define UCSR_EARBTOBIT 0x20 /* 188 UCSR enable VME arbiter bus timeout */
+#define UCSR_ROBINBIT 0x800 /* 188 UCSR sel round robin VME arbiter mode */
+#define UCSR_BRLVBITS 0x600 /* 188 UCSR VME bus request level 0-3 */
+#define UCSR_RNEVERBIT 0x100 /* 188 UCSR VME bus never release once req'd */
+#define UCSR_RONRBIT 0x80 /* 188 UCSR VME bus req release on no request */
+#define UCSR_RWDBIT 0x40 /* 188 UCSR VME bus request release when done */
+#define UCSR_EARBTOBIT 0x20 /* 188 UCSR enable VME arbiter bus timeout */
/* MVME188 VMEbus data transfer timeout select */
-#define VTOSELBITS 0x18 /* 188 UCSR VMEbus timeout select bits */
-#define VTO32US 0x00 /* VMEbus timeout length - 32 MicroSec */
-#define VTO64US 0x01 /* VMEbus timeout length - 64 MicroSec */
-#define VTO128US 0x10 /* VMEbus timeout length - 128 MicroSec */
-#define VTODISABLE 0x18 /* VMEbus timeout length - disabled */
+#define VTOSELBITS 0x18 /* 188 UCSR VMEbus timeout select bits */
+#define VTO32US 0x00 /* VMEbus timeout length - 32 MicroSec */
+#define VTO64US 0x01 /* VMEbus timeout length - 64 MicroSec */
+#define VTO128US 0x10 /* VMEbus timeout length - 128 MicroSec */
+#define VTODISABLE 0x18 /* VMEbus timeout length - disabled */
/*
* processor dependend code section
@@ -207,18 +207,6 @@
#define INT_LEVEL 8 /* # of interrupt level + 1 */
-/*
- * masks and offsets for IST
- */
-#define HW_FAILURE_MASK U(0x60100000) /* HW failure bits */
-#define HW_FAILURE_ACF U(0x40000000) /* AC failure */
-#define HW_FAILURE_ABRTO U(0x20000000) /* Arbiter timeout */
-#define HW_FAILURE_SYSFAIL U(0x00100000) /* SYSFAIL asserted */
-
-#define SOFT_INTERRUPT_MASK U(0x0F00000F) /* software irq bits */
-#define VME_INTERRUPT_MASK U(0x00885450) /* vme irq bits */
-#define OBIO_INTERRUPT_MASK U(0xF0320100) /* on board I/O */
-
#define IEN_ABRT_LOG 31
#define IEN_CIOI_LOG 21
#define IEN_DTI_LOG 28
@@ -230,40 +218,45 @@
/* the following codes are the INT exception enable and status bits. */
/* Refer to MVME188 RISC Microcomputer User's Manual, 4-10. */
-#define ABRT_BIT U(0x80000000)
-#define ACF_BIT U(0x40000000)
-#define ARBTO_BIT U(0x20000000)
-#define DTI_BIT U(0x10000000)
-#define SWI7_BIT U(0x08000000)
-#define SWI6_BIT U(0x04000000)
-#define SWI5_BIT U(0x02000000)
-#define SWI4_BIT U(0x01000000)
-#define IRQ7_BIT U(0x00800000)
-#define CIOI_BIT U(0x00200000)
-#define SF_BIT U(0x00100000)
-#define IRQ6_BIT U(0x00080000)
-#define DI_BIT U(0x00020000)
-#define SIGHPI_BIT U(0x00010000)
-#define IRQ5_BIT U(0x00004000)
-#define IRQ4_BIT U(0x00001000)
-#define IRQ3_BIT U(0x00000400)
-#define LMI_BIT U(0x00000100)
-#define SIGLPI_BIT U(0x00000080)
-#define IRQ2_BIT U(0x00000040)
-#define IRQ1_BIT U(0x00000010)
-#define SWI3_BIT U(0x00000008)
-#define SWI2_BIT U(0x00000004)
-#define SWI1_BIT U(0x00000002)
-#define SWI0_BIT U(0x00000001)
-
-#define ABRT_BIT U(0x80000000)
-#define ACF_BIT U(0x40000000)
-#define ARBTO_BIT U(0x20000000)
-#define DTI_BIT U(0x10000000)
-#define CIOI_BIT U(0x00200000)
-#define SF_BIT U(0x00100000)
-#define DI_BIT U(0x00020000)
-#define LMI_BIT U(0x00000100)
+#define ABRT_BIT U(0x80000000) /* 31 */
+#define ACF_BIT U(0x40000000) /* 30 */
+#define ARBTO_BIT U(0x20000000) /* 29 */
+#define DTI_BIT U(0x10000000) /* 28 */
+#define SWI7_BIT U(0x08000000) /* 27 */
+#define SWI6_BIT U(0x04000000) /* 26 */
+#define SWI5_BIT U(0x02000000) /* 25 */
+#define SWI4_BIT U(0x01000000) /* 24 */
+#define IRQ7_BIT U(0x00800000) /* 23 */
+#define CIOI_BIT U(0x00200000) /* 21 */
+#define SF_BIT U(0x00100000) /* 20 */
+#define IRQ6_BIT U(0x00080000) /* 19 */
+#define DI_BIT U(0x00020000) /* 17 */
+#define SIGHPI_BIT U(0x00010000) /* 16 */
+#define IRQ5_BIT U(0x00004000) /* 14 */
+#define IRQ4_BIT U(0x00001000) /* 12 */
+#define IRQ3_BIT U(0x00000400) /* 10 */
+#define LMI_BIT U(0x00000100) /* 08 */
+#define SIGLPI_BIT U(0x00000080) /* 07 */
+#define IRQ2_BIT U(0x00000040) /* 06 */
+#define IRQ1_BIT U(0x00000010) /* 04 */
+#define SWI3_BIT U(0x00000008) /* 03 */
+#define SWI2_BIT U(0x00000004) /* 02 */
+#define SWI1_BIT U(0x00000002) /* 01 */
+#define SWI0_BIT U(0x00000001) /* 00 */
+
+/*
+ * masks and offsets for IST
+ * These are a combination of the above
+ */
+#define HW_FAILURE_MASK U(0xE0100000) /* hardware irq bits */
+#define SOFT_INTERRUPT_MASK U(0x0F00000F) /* software irq bits */
+#define VME_INTERRUPT_MASK U(0x00885450) /* vme irq bits */
+#define OBIO_INTERRUPT_MASK U(0x10330180) /* on board I/O */
+
+#define HW_FAILURE_ACF ACF_BIT /* AC failure */
+#define HW_FAILURE_ABRTO ARBTO_BIT /* Arbiter timeout */
+#define HW_FAILURE_SYSFAIL SF_BIT /* SYSFAIL asserted */
+#define HW_FAILURE_ABORT ABRT_BIT /* Abort pressed */
#define LVL7 (ABRT_BIT | ACF_BIT | IRQ7_BIT | SF_BIT)
#define LVL6 (IRQ6_BIT)
@@ -285,96 +278,96 @@
#define MASK_LVL_7 U(0x00000000) /* all ints disabled */
/* these are the various Z8536 CIO counter/timer registers */
-#define CIO_BASE U(0xfff83000)
-#define CIO_PORTC U(0xfff83000)
-#define CIO_PORTB U(0xfff83004)
-#define CIO_PORTA U(0xfff83008)
-#define CIO_CTRL U(0xfff8300c)
-
-#define CIO_MICR 0x00 /* Master interrupt control register */
-#define CIO_MICR_MIE 0x80
-#define CIO_MICR_DLC 0x40
-#define CIO_MICR_NV 0x20
-#define CIO_MICR_PAVIS 0x10
-#define CIO_MICR_PBVIS 0x08
-#define CIO_MICR_CTVIS 0x04
-#define CIO_MICR_RJA 0x02
-#define CIO_MICR_RESET 0x01
-
-#define CIO_MCCR 0x01 /* Master configuration control register */
-#define CIO_MCCR_PBE 0x80
-#define CIO_MCCR_CT1E 0x40
-#define CIO_MCCR_CT2E 0x20
-#define CIO_MCCR_CT3E 0x10
-#define CIO_MCCR_PLC 0x08
-#define CIO_MCCR_PAE 0x04
-
-#define CIO_CTMS1 0x1c /* Counter/timer mode specification #1 */
-#define CIO_CTMS2 0x1d /* Counter/timer mode specification #2 */
-#define CIO_CTMS3 0x1e /* Counter/timer mode specification #3 */
-#define CIO_CTMS_CSC 0x80 /* Continuous Single Cycle */
-#define CIO_CTMS_EOE 0x40 /* External Output Enable */
-#define CIO_CTMS_ECE 0x20 /* External Count Enable */
-#define CIO_CTMS_ETE 0x10 /* External Trigger Enable */
-#define CIO_CTMS_EGE 0x08 /* External Gate Enable */
-#define CIO_CTMS_REB 0x04 /* Retrigger Enable Bit */
-#define CIO_CTMS_PO 0x00 /* Pulse Output */
-#define CIO_CTMS_OSO 0x01 /* One Shot Output */
-#define CIO_CTMS_SWO 0x02 /* Square Wave Output */
-
-#define CIO_IVR 0x04 /* Interrupt vector register */
-
-#define CIO_CSR1 0x0a /* Command and status register CTC #1 */
-#define CIO_CSR2 0x0b /* Command and status register CTC #2 */
-#define CIO_CSR3 0x0c /* Command and status register CTC #3 */
-
-#define CIO_CT1MSB 0x16 /* CTC #1 Timer constant - MSB */
-#define CIO_CT1LSB 0x17 /* CTC #1 Timer constant - LSB */
-#define CIO_CT2MSB 0x18 /* CTC #2 Timer constant - MSB */
-#define CIO_CT2LSB 0x19 /* CTC #2 Timer constant - LSB */
-#define CIO_CT3MSB 0x1a /* CTC #3 Timer constant - MSB */
-#define CIO_CT3LSB 0x1b /* CTC #3 Timer constant - LSB */
-#define CIO_PDCA 0x23 /* Port A data direction control */
-#define CIO_PDCB 0x2b /* Port B data direction control */
-
-#define CIO_GCB 0x04 /* CTC Gate command bit */
-#define CIO_TCB 0x02 /* CTC Trigger command bit */
-#define CIO_IE 0xc0 /* CTC Interrupt enable (set) */
-#define CIO_CIP 0x20 /* CTC Clear interrupt pending */
-#define CIO_IP 0x20 /* CTC Interrupt pending */
+#define CIO_BASE U(0xfff83000)
+#define CIO_PORTC U(0xfff83000)
+#define CIO_PORTB U(0xfff83004)
+#define CIO_PORTA U(0xfff83008)
+#define CIO_CTRL U(0xfff8300c)
+
+#define CIO_MICR 0x00 /* Master interrupt control register */
+#define CIO_MICR_MIE 0x80
+#define CIO_MICR_DLC 0x40
+#define CIO_MICR_NV 0x20
+#define CIO_MICR_PAVIS 0x10
+#define CIO_MICR_PBVIS 0x08
+#define CIO_MICR_CTVIS 0x04
+#define CIO_MICR_RJA 0x02
+#define CIO_MICR_RESET 0x01
+
+#define CIO_MCCR 0x01 /* Master config control register */
+#define CIO_MCCR_PBE 0x80
+#define CIO_MCCR_CT1E 0x40
+#define CIO_MCCR_CT2E 0x20
+#define CIO_MCCR_CT3E 0x10
+#define CIO_MCCR_PLC 0x08
+#define CIO_MCCR_PAE 0x04
+
+#define CIO_CTMS1 0x1c /* Counter/timer mode specification #1 */
+#define CIO_CTMS2 0x1d /* Counter/timer mode specification #2 */
+#define CIO_CTMS3 0x1e /* Counter/timer mode specification #3 */
+#define CIO_CTMS_CSC 0x80 /* Continuous Single Cycle */
+#define CIO_CTMS_EOE 0x40 /* External Output Enable */
+#define CIO_CTMS_ECE 0x20 /* External Count Enable */
+#define CIO_CTMS_ETE 0x10 /* External Trigger Enable */
+#define CIO_CTMS_EGE 0x08 /* External Gate Enable */
+#define CIO_CTMS_REB 0x04 /* Retrigger Enable Bit */
+#define CIO_CTMS_PO 0x00 /* Pulse Output */
+#define CIO_CTMS_OSO 0x01 /* One Shot Output */
+#define CIO_CTMS_SWO 0x02 /* Square Wave Output */
+
+#define CIO_IVR 0x04 /* Interrupt vector register */
+
+#define CIO_CSR1 0x0a /* Command and status register CTC #1 */
+#define CIO_CSR2 0x0b /* Command and status register CTC #2 */
+#define CIO_CSR3 0x0c /* Command and status register CTC #3 */
+
+#define CIO_CT1MSB 0x16 /* CTC #1 Timer constant - MSB */
+#define CIO_CT1LSB 0x17 /* CTC #1 Timer constant - LSB */
+#define CIO_CT2MSB 0x18 /* CTC #2 Timer constant - MSB */
+#define CIO_CT2LSB 0x19 /* CTC #2 Timer constant - LSB */
+#define CIO_CT3MSB 0x1a /* CTC #3 Timer constant - MSB */
+#define CIO_CT3LSB 0x1b /* CTC #3 Timer constant - LSB */
+#define CIO_PDCA 0x23 /* Port A data direction control */
+#define CIO_PDCB 0x2b /* Port B data direction control */
+
+#define CIO_GCB 0x04 /* CTC Gate command bit */
+#define CIO_TCB 0x02 /* CTC Trigger command bit */
+#define CIO_IE 0xc0 /* CTC Interrupt enable (set) */
+#define CIO_CIP 0x20 /* CTC Clear interrupt pending */
+#define CIO_IP 0x20 /* CTC Interrupt pending */
/* these are the DART read registers */
-#define DART_BASE U(0xfff82000)
-#define DART_MRA U(0xfff82000) /* mode A */
-#define DART_SRA U(0xfff82004) /* status A */
-#define DART_RBA U(0xfff8200c) /* receive buffer A */
-#define DART_IPCR U(0xfff82010) /* input port change */
-#define DART_ISR U(0xfff82014) /* interrupt status */
-#define DART_CUR U(0xfff82018) /* count upper */
-#define DART_CLR U(0xfff8201c) /* count lower */
-#define DART_MR1B U(0xfff82020) /* mode B */
-#define DART_SRB U(0xfff82024) /* status B */
-#define DART_RBB U(0xfff8202c) /* receive buffer B */
-#define DART_IVR U(0xfff82030) /* interrupt vector */
-#define DART_INP U(0xfff82034) /* input port */
-#define DART_STARTC U(0xfff82038) /* start counter cmd */
-#define DART_STOPC U(0xfff8203c) /* stop counter cmd */
+#define DART_BASE U(0xfff82000)
+#define DART_MRA U(0xfff82000) /* mode A */
+#define DART_SRA U(0xfff82004) /* status A */
+#define DART_RBA U(0xfff8200c) /* receive buffer A */
+#define DART_IPCR U(0xfff82010) /* input port change */
+#define DART_ISR U(0xfff82014) /* interrupt status */
+#define DART_CUR U(0xfff82018) /* count upper */
+#define DART_CLR U(0xfff8201c) /* count lower */
+#define DART_MR1B U(0xfff82020) /* mode B */
+#define DART_SRB U(0xfff82024) /* status B */
+#define DART_RBB U(0xfff8202c) /* receive buffer B */
+#define DART_IVR U(0xfff82030) /* interrupt vector */
+#define DART_INP U(0xfff82034) /* input port */
+#define DART_STARTC U(0xfff82038) /* start counter cmd */
+#define DART_STOPC U(0xfff8203c) /* stop counter cmd */
/* these are the DART write registers */
-#define DART_CSRA U(0xfff82004) /* clock select A */
-#define DART_CRA U(0xfff82008) /* command A */
-#define DART_TBA U(0xfff8200c) /* transmit buffer A */
-#define DART_ACR U(0xfff82010) /* auxiliary control */
-#define DART_IMR U(0xfff82014) /* interrupt mask reg*/
-#define DART_CTUR U(0xfff82018) /* counter/timer MSB */
-#define DART_CTLR U(0xfff8201c) /* counter/timer LSB */
-#define DART_MRB U(0xfff82020) /* mode B */
-#define DART_CSRB U(0xfff82024) /* clock select B */
-#define DART_CRB U(0xfff82028) /* command B */
-#define DART_TBB U(0xfff8202c) /* transmit buffer B */
-#define DART_OPCR U(0xfff82034) /* output port config*/
-#define DART_OPRS U(0xfff82038) /* output port set */
-#define DART_OPRR U(0xfff8203c) /* output port reset */
+#define DART_CSRA U(0xfff82004) /* clock select A */
+#define DART_CRA U(0xfff82008) /* command A */
+#define DART_TBA U(0xfff8200c) /* transmit buffer A */
+#define DART_ACR U(0xfff82010) /* auxiliary control */
+#define DART_IMR U(0xfff82014) /* interrupt mask reg*/
+#define DART_CTUR U(0xfff82018) /* counter/timer MSB */
+#define DART_CTLR U(0xfff8201c) /* counter/timer LSB */
+#define DART_MRB U(0xfff82020) /* mode B */
+#define DART_CSRB U(0xfff82024) /* clock select B */
+#define DART_CRB U(0xfff82028) /* command B */
+#define DART_TBB U(0xfff8202c) /* transmit buffer B */
+#define DART_OPCR U(0xfff82034) /* output port config*/
+#define DART_OPRS U(0xfff82038) /* output port set */
+#define DART_OPRR U(0xfff8203c) /* output port reset */
#ifndef ASSEMBLER
@@ -392,7 +385,9 @@ extern volatile unsigned int *int_mask_reg[MAX_CPUS];
void block_obio_interrupt(unsigned mask);
void unblock_obio_interrupt(unsigned mask);
#endif
-#define M188_IACK U(0xFFF85000)
+
+#define M188_IACK U(0xFFF85000)
+#define M188_IVEC 0x40 /* vector returned upon MVME188 int */
#endif __MACHINE_MVME188_H__
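
For illustration only, and not part of this patch: a small sketch showing how the regrouped IST masks above can be used to classify a pending-interrupt word. The mask values are copied from the defines in mvme188.h; the classify() helper itself is hypothetical.

#include <stdio.h>

#define HW_FAILURE_MASK     0xE0100000u	/* hardware failure bits */
#define SOFT_INTERRUPT_MASK 0x0F00000Fu	/* software irq bits */
#define VME_INTERRUPT_MASK  0x00885450u	/* vme irq bits */
#define OBIO_INTERRUPT_MASK 0x10330180u	/* on board I/O */

static void
classify(unsigned ist)
{
	printf("IST 0x%08x:", ist);
	if (ist & HW_FAILURE_MASK)
		printf(" hw-failure");
	if (ist & SOFT_INTERRUPT_MASK)
		printf(" soft");
	if (ist & VME_INTERRUPT_MASK)
		printf(" vme");
	if (ist & OBIO_INTERRUPT_MASK)
		printf(" obio");
	printf("\n");
}

int
main(void)
{
	classify(0x00000010u);	/* IRQ1_BIT -> vme */
	classify(0x40000000u);	/* ACF_BIT  -> hw-failure */
	classify(0x00200000u);	/* CIOI_BIT -> obio */
	return 0;
}
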
diff --git a/sys/arch/mvme88k/include/mvme1x7.h b/sys/arch/mvme88k/include/mvme1x7.h
index c296c59bb98..3bf85c1f38c 100644
--- a/sys/arch/mvme88k/include/mvme1x7.h
+++ b/sys/arch/mvme88k/include/mvme1x7.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mvme1x7.h,v 1.2 2001/01/14 20:25:24 smurph Exp $ */
+/* $OpenBSD: mvme1x7.h,v 1.3 2001/02/01 03:38:18 smurph Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* Copyright (c) 1999 Steve Murphree, Jr.
@@ -65,39 +65,39 @@
#endif
#endif
-#define BUGROM_START U(0xFF800000) /* start of BUG PROM */
-#define BUGROM_SIZE U(0x003FFFFF) /* size of BUG PROM */
-#define SRAM_START U(0xFFE00000) /* start of sram used by bug */
-#define SRAM_SIZE U(0x0001FFFF) /* size of sram */
-#define OBIO_START U(0xFFF00000) /* start of local IO */
-#define OBIO_SIZE U(0x000EFFFF) /* size of obio space */
-#define UTIL_START U(0xFFC00000) /* start of MVME188 utility space */
-#define UTIL_SIZE U(0x003FFFFF) /* size of MVME188 utility space */
+#define BUGROM_START U(0xFF800000) /* start of BUG PROM */
+#define BUGROM_SIZE U(0x003FFFFF) /* size of BUG PROM */
+#define SRAM_START U(0xFFE00000) /* start of sram used by bug */
+#define SRAM_SIZE U(0x0001FFFF) /* size of sram */
+#define OBIO_START U(0xFFF00000) /* start of local IO */
+#define OBIO_SIZE U(0x000EFFFF) /* size of obio space */
+#define UTIL_START U(0xFFC00000) /* start of MVME188 utility space */
+#define UTIL_SIZE U(0x003FFFFF) /* size of MVME188 utility space */
#define INT_PRI_LEVEL U(0xFFF4203E) /* interrupt priority level */
#define INT_MASK_LEVEL U(0xFFF4203F) /* interrupt mask level */
#define LOCAL_IO_DEVS U(0xFFF00000) /* local IO devices */
-#define PCC2_ADDR U(0xFFF42000) /* PCCchip2 Regs */
-#define UTIL_ADDR U(0xFFC02000) /* PCCchip2 Regs */
-#define MEM_CTLR U(0xFFF43000) /* MEMC040 mem controller */
-#define SCC_ADDR U(0xFFF45000) /* Cirrus Chip */
-#define LANCE_ADDR U(0xFFF46000) /* 82596CA */
-#define SCSI_ADDR U(0xFFF47000) /* NCR 710 address */
-#define NCR710_SIZE U(0x00000040) /* NCR 710 size */
-#define MK48T08_ADDR U(0xFFFC0000) /* BBRAM, TOD */
+#define PCC2_ADDR U(0xFFF42000) /* PCCchip2 Regs */
+#define UTIL_ADDR U(0xFFC02000) /* PCCchip2 Regs */
+#define MEM_CTLR U(0xFFF43000) /* MEMC040 mem controller */
+#define SCC_ADDR U(0xFFF45000) /* Cirrus Chip */
+#define LANCE_ADDR U(0xFFF46000) /* 82596CA */
+#define SCSI_ADDR U(0xFFF47000) /* NCR 710 address */
+#define NCR710_SIZE U(0x00000040) /* NCR 710 size */
+#define MK48T08_ADDR U(0xFFFC0000) /* BBRAM, TOD */
-#define TOD_CAL_CTL U(0xFFFC1FF8) /* calendar control register */
-#define TOD_CAL_SEC U(0xFFFC1FF9) /* seconds */
-#define TOD_CAL_MIN U(0xFFFC1FFA) /* minutes */
-#define TOD_CAL_HOUR U(0xFFFC1FFB) /* hours */
-#define TOD_CAL_DOW U(0xFFFC1FFC) /* Day Of the Week */
-#define TOD_CAL_DAY U(0xFFFC1FFD) /* days */
-#define TOD_CAL_MON U(0xFFFC1FFE) /* months */
-#define TOD_CAL_YEAR U(0xFFFC1FFF) /* years */
+#define TOD_CAL_CTL U(0xFFFC1FF8) /* calendar control register */
+#define TOD_CAL_SEC U(0xFFFC1FF9) /* seconds */
+#define TOD_CAL_MIN U(0xFFFC1FFA) /* minutes */
+#define TOD_CAL_HOUR U(0xFFFC1FFB) /* hours */
+#define TOD_CAL_DOW U(0xFFFC1FFC) /* Day Of the Week */
+#define TOD_CAL_DAY U(0xFFFC1FFD) /* days */
+#define TOD_CAL_MON U(0xFFFC1FFE) /* months */
+#define TOD_CAL_YEAR U(0xFFFC1FFF) /* years */
-#define M187_IACK U(0xFFFE0000)
-#define M197_IACK U(0xFFF00100)
+#define M187_IACK U(0xFFFE0000)
+#define M197_IACK U(0xFFF00100)
#endif __MACHINE_MVME1X7_H__
diff --git a/sys/arch/mvme88k/include/param.h b/sys/arch/mvme88k/include/param.h
index fb169df5b33..3d6013ab19f 100644
--- a/sys/arch/mvme88k/include/param.h
+++ b/sys/arch/mvme88k/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.12 2001/01/14 20:25:24 smurph Exp $ */
+/* $OpenBSD: param.h,v 1.13 2001/02/01 03:38:18 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1988 University of Utah.
@@ -40,7 +40,7 @@
* from: Utah $Hdr: machparam.h 1.11 89/08/14$
*
* @(#)param.h 7.8 (Berkeley) 6/28/91
- * $Id: param.h,v 1.12 2001/01/14 20:25:24 smurph Exp $
+ * $Id: param.h,v 1.13 2001/02/01 03:38:18 smurph Exp $
*/
#ifndef _MACHINE_PARAM_H_
#define _MACHINE_PARAM_H_
@@ -59,20 +59,20 @@
* 88k.
*/
-#define ALIGNBYTES 15 /* 64 bit alignment */
-#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) & ~ALIGNBYTES)
-#define ALIGNED_POINTER(p,t) ((((u_long)(p)) & (sizeof(t)-1)) == 0)
+#define ALIGNBYTES 15 /* 64 bit alignment */
+#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) & ~ALIGNBYTES)
+#define ALIGNED_POINTER(p,t) ((((u_long)(p)) & (sizeof(t)-1)) == 0)
-#ifndef NBPG
-#define NBPG 4096 /* bytes/page */
+#ifndef NBPG
+#define NBPG 4096 /* bytes/page */
#endif /* NBPG */
-#define PGOFSET (NBPG-1) /* byte offset into page */
-#define PGSHIFT 12 /* LOG2(NBPG) */
-#define NPTEPG (NBPG/(sizeof(u_int)))
+#define PGOFSET (NBPG-1) /* byte offset into page */
+#define PGSHIFT 12 /* LOG2(NBPG) */
+#define NPTEPG (NBPG/(sizeof(u_int)))
-#define NBSEG (1<<22) /* bytes/segment */
-#define SEGOFSET (NBSEG-1)/* byte offset into segment */
-#define SEGSHIFT 22 /* LOG2(NBSEG) */
+#define NBSEG (1<<22) /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
/*
* 187 Bug uses the bottom 64k. We allocate ptes to map this into the
@@ -80,32 +80,32 @@
* past this 64k. How does this change KERNBASE? XXX
*/
-#define KERNBASE 0x0 /* start of kernel virtual */
-#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+#define KERNBASE 0x0 /* start of kernel virtual */
+#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
-#define DEV_BSIZE 512
-#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
-#define BLKDEV_IOSIZE 2048 /* Should this be changed? XXX */
-#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */
+#define DEV_BSIZE 512
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define BLKDEV_IOSIZE 2048 /* Should this be changed? XXX */
+#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */
-#define CLSIZE 1
-#define CLSIZELOG2 0
+#define CLSIZE 1
+#define CLSIZELOG2 0
/* NOTE: SSIZE, SINCR and UPAGES must be multiples of CLSIZE */
-#define SSIZE 1 /* initial stack size/NBPG */
-#define SINCR 1 /* increment of stack/NBPG */
-#define USPACE ctob(UPAGES)
+#define SSIZE 1 /* initial stack size/NBPG */
+#define SINCR 1 /* increment of stack/NBPG */
+#define USPACE ctob(UPAGES)
-#define UPAGES 4 /* pages of u-area */
-#define UADDR 0xEEE00000 /* address of u */
-#define UVPN (UADDR>>PGSHIFT) /* virtual page number of u */
-#define KERNELSTACK (UADDR+UPAGES*NBPG) /* top of kernel stack */
+#define UPAGES 8 /* pages of u-area */
+#define UADDR 0xEEE00000 /* address of u */
+#define UVPN (UADDR>>PGSHIFT) /* virtual page number of u */
+#define KERNELSTACK (UADDR+UPAGES*NBPG) /* top of kernel stack */
-#define PHYSIO_MAP_START 0xEEF00000
-#define PHYSIO_MAP_SIZE 0x00100000
-#define IOMAP_MAP_START 0xEF000000 /* VME etc */
-#define IOMAP_SIZE 0x018F0000
-#define NIOPMAP 32
+#define PHYSIO_MAP_START 0xEEF00000
+#define PHYSIO_MAP_SIZE 0x00100000
+#define IOMAP_MAP_START 0xEF000000 /* VME etc */
+#define IOMAP_SIZE 0x018F0000
+#define NIOPMAP 32
/*
* Constants related to network buffer management.
@@ -114,42 +114,42 @@
* clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
* of the hardware page size.
*/
-#define MSIZE 128 /* size of an mbuf */
-#define MCLSHIFT 11 /* convert bytes to m_buf clusters */
-#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */
-#define MCLOFSET (MCLBYTES - 1) /* offset within a m_buf cluster */
+#define MSIZE 128 /* size of an mbuf */
+#define MCLSHIFT 11 /* convert bytes to m_buf clusters */
+#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */
+#define MCLOFSET (MCLBYTES - 1) /* offset within a m_buf cluster */
-#ifndef NMBCLUSTERS
+#ifndef NMBCLUSTERS
#ifdef GATEWAY
-#define NMBCLUSTERS 1024 /* map size, max cluster allocation */
+#define NMBCLUSTERS 1024 /* map size, max cluster allocation */
#else
-#define NMBCLUSTERS 512 /* map size, max cluster allocation */
+#define NMBCLUSTERS 512 /* map size, max cluster allocation */
#endif
#endif
/*
* Size of kernel malloc arena in CLBYTES-sized logical pages
*/
-#ifndef NKMEMCLUSTERS
-#define NKMEMCLUSTERS (4096*1024/CLBYTES)
+#ifndef NKMEMCLUSTERS
+#define NKMEMCLUSTERS (4096*1024/CLBYTES)
#endif
#define MSGBUFSIZE 4096
/* pages ("clicks") to disk blocks */
-#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT))
-#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT))
-#define dtob(x) ((x)<<DEV_BSHIFT)
+#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT))
+#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT))
+#define dtob(x) ((x)<<DEV_BSHIFT)
/* pages to bytes */
-#define ctob(x) ((x)<<PGSHIFT)
+#define ctob(x) ((x)<<PGSHIFT)
/* bytes to pages */
-#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT)
+#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT)
-#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
+#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
((unsigned)(bytes) >> DEV_BSHIFT)
-#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
+#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
((unsigned)(db) << DEV_BSHIFT)
/*
@@ -158,13 +158,13 @@
* field from the disk label.
* For now though just use DEV_BSIZE.
*/
-#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE))
+#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE))
/*
* Mach derived conversion macros
*/
-#define mvme88k_btop(x) ((unsigned)(x) >> PGSHIFT)
-#define mvme88k_ptob(x) ((unsigned)(x) << PGSHIFT)
+#define mvme88k_btop(x) ((unsigned)(x) >> PGSHIFT)
+#define mvme88k_ptob(x) ((unsigned)(x) << PGSHIFT)
#include <machine/psl.h>
@@ -179,9 +179,10 @@ extern int cpumod;
/*
* Values for the cputyp variable.
*/
-#define CPU_187 0x187
-#define CPU_188 0x188
-#define CPU_197 0x197
+#define CPU_187 0x187
+#define CPU_188 0x188
+#define CPU_197 0x197
+#define CPU_8120 0x8120
#endif /* !_MACHINE_PARAM_H_ */
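
For illustration only, and not part of this patch: a worked example of the param.h conversion macros with the values above, showing the effect of UPAGES growing from 4 to 8 — the u-area (USPACE = ctob(UPAGES)) doubles to 32768 bytes and the top of the kernel stack moves to UADDR + UPAGES*NBPG.

#include <stdio.h>

#define NBPG    4096
#define PGSHIFT 12
#define UPAGES  8
#define UADDR   0xEEE00000u

#define ctob(x) ((x) << PGSHIFT)				/* pages -> bytes */
#define btoc(x) (((unsigned)(x) + (NBPG - 1)) >> PGSHIFT)	/* bytes -> pages */

int
main(void)
{
	printf("USPACE      = %u bytes\n", (unsigned)ctob(UPAGES));	/* 32768 */
	printf("KERNELSTACK = 0x%08x\n", UADDR + UPAGES * NBPG);	/* 0xEEE08000 */
	printf("btoc(10000) = %u pages\n", btoc(10000));		/* 3 */
	return 0;
}
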
diff --git a/sys/arch/mvme88k/include/reg.h b/sys/arch/mvme88k/include/reg.h
index 00c3ae9060f..160fb875b71 100644
--- a/sys/arch/mvme88k/include/reg.h
+++ b/sys/arch/mvme88k/include/reg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: reg.h,v 1.6 2001/01/12 07:29:27 smurph Exp $ */
+/* $OpenBSD: reg.h,v 1.7 2001/02/01 03:38:18 smurph Exp $ */
/*
* Copyright (c) 1999 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -78,7 +78,7 @@ struct reg {
unsigned isr; /* MVME197 */
unsigned ilar; /* MVME197 */
unsigned ipar; /* MVME197 */
- unsigned pad; /* alignment */
+ unsigned cpu; /* cpu number */
};
struct fpreg {
diff --git a/sys/arch/mvme88k/include/trap.h b/sys/arch/mvme88k/include/trap.h
index ea39c46f055..681804c94aa 100644
--- a/sys/arch/mvme88k/include/trap.h
+++ b/sys/arch/mvme88k/include/trap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.h,v 1.6 2001/01/14 20:25:24 smurph Exp $ */
+/* $OpenBSD: trap.h,v 1.7 2001/02/01 03:38:18 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1992 Carnegie Mellon University
@@ -34,43 +34,43 @@
* Trap type values
*/
-#define T_RESADFLT 0 /* reserved addressing fault */
-#define T_PRIVINFLT 1 /* privileged instruction fault */
-#define T_RESOPFLT 2 /* reserved operand fault */
+#define T_RESADFLT 0 /* reserved addressing fault */
+#define T_PRIVINFLT 1 /* privileged instruction fault */
+#define T_RESOPFLT 2 /* reserved operand fault */
/* End of known constants */
-#define T_INSTFLT 3 /* instruction access exception */
-#define T_DATAFLT 4 /* data access exception */
-#define T_MISALGNFLT 5 /* misaligned access exception */
-#define T_ILLFLT 6 /* unimplemented opcode exception */
-#define T_BNDFLT 7 /* bounds check violation exception */
-#define T_ZERODIV 8 /* illegal divide exception */
-#define T_OVFFLT 9 /* integer overflow exception */
-#define T_ERRORFLT 10 /* error exception */
-#define T_FPEPFLT 11 /* floating point precise exception */
-#define T_FPEIFLT 12 /* floating point imprecise exception */
-#define T_ASTFLT 13 /* software trap */
+#define T_INSTFLT 3 /* instruction access exception */
+#define T_DATAFLT 4 /* data access exception */
+#define T_MISALGNFLT 5 /* misaligned access exception */
+#define T_ILLFLT 6 /* unimplemented opcode exception */
+#define T_BNDFLT 7 /* bounds check violation exception */
+#define T_ZERODIV 8 /* illegal divide exception */
+#define T_OVFFLT 9 /* integer overflow exception */
+#define T_ERRORFLT 10 /* error exception */
+#define T_FPEPFLT 11 /* floating point precise exception */
+#define T_FPEIFLT 12 /* floating point imprecise exception */
+#define T_ASTFLT 13 /* software trap */
#if DDB
-#define T_KDB_ENTRY 14 /* force entry to kernel debugger */
-#define T_KDB_BREAK 15 /* break point hit */
-#define T_KDB_TRACE 16 /* trace */
+#define T_KDB_ENTRY 14 /* force entry to kernel debugger */
+#define T_KDB_BREAK 15 /* break point hit */
+#define T_KDB_TRACE 16 /* trace */
#endif /* DDB */
-#define T_UNKNOWNFLT 17 /* unknown exception */
-#define T_SIGTRAP 18 /* generate SIGTRAP */
-#define T_SIGSYS 19 /* generate SIGSYS */
-#define T_STEPBPT 20 /* special breakpoint for single step */
-#define T_USERBPT 21 /* user set breakpoint (for debugger) */
-#define T_SYSCALL 22 /* Syscall */
-#define T_NON_MASK 23 /* MVME197 Non-Maskable Interrupt */
+#define T_UNKNOWNFLT 17 /* unknown exception */
+#define T_SIGTRAP 18 /* generate SIGTRAP */
+#define T_SIGSYS 19 /* generate SIGSYS */
+#define T_STEPBPT 20 /* special breakpoint for single step */
+#define T_USERBPT 21 /* user set breakpoint (for debugger) */
+#define T_SYSCALL 22 /* Syscall */
+#define T_NON_MASK 23 /* MVME197 Non-Maskable Interrupt */
#if DDB
-#define T_KDB_WATCH 24 /* watchpoint hit */
+#define T_KDB_WATCH 24 /* watchpoint hit */
#endif /* DDB */
-#define T_197_READ 25 /* MVME197 Data Read Miss (Software Table Searches) */
-#define T_197_WRITE 26 /* MVME197 Data Write Miss (Software Table Searches) */
-#define T_197_INST 27 /* MVME197 Inst ATC Miss (Software Table Searches) */
-#define T_INT 28 /* interrupt exception */
-#define T_USER 29 /* user mode fault */
+#define T_197_READ 25 /* MVME197 Data Read Miss (Software Table Searches) */
+#define T_197_WRITE 26 /* MVME197 Data Write Miss (Software Table Searches) */
+#define T_197_INST 27 /* MVME197 Inst ATC Miss (Software Table Searches) */
+#define T_INT 28 /* interrupt exception */
+#define T_USER 29 /* user mode fault */
#endif __MACHINE_TRAP_H__
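
For illustration only, and not part of this patch: a printable-name table for a few of the T_* codes above, the kind of lookup a trap handler's diagnostic path might use. The values are copied from trap.h; the trap_name() helper is hypothetical.

#include <stdio.h>

#define T_INSTFLT    3
#define T_DATAFLT    4
#define T_MISALGNFLT 5
#define T_ZERODIV    8
#define T_INT        28

static const char *
trap_name(int type)
{
	switch (type) {
	case T_INSTFLT:		return "instruction access exception";
	case T_DATAFLT:		return "data access exception";
	case T_MISALGNFLT:	return "misaligned access exception";
	case T_ZERODIV:		return "illegal divide exception";
	case T_INT:		return "interrupt exception";
	default:		return "unknown exception";
	}
}

int
main(void)
{
	printf("type %d: %s\n", T_DATAFLT, trap_name(T_DATAFLT));
	return 0;
}
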
diff --git a/sys/arch/mvme88k/mvme88k/cmmu.c b/sys/arch/mvme88k/mvme88k/cmmu.c
index 8c8d8df0d50..34cc27390a6 100644
--- a/sys/arch/mvme88k/mvme88k/cmmu.c
+++ b/sys/arch/mvme88k/mvme88k/cmmu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cmmu.c,v 1.6 2000/03/03 00:54:53 todd Exp $ */
+/* $OpenBSD: cmmu.c,v 1.7 2001/02/01 03:38:19 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -241,23 +241,23 @@ cpu_configuration_print(int master)
void
cmmu_init(void)
{
- /* init the lock */
- simple_lock_init(&cmmu_cpu_lock);
+ /* init the lock */
+ simple_lock_init(&cmmu_cpu_lock);
- switch (cputyp) {
+ switch (cputyp) {
#if defined(MVME187) || defined(MVME188)
- case CPU_187:
- case CPU_188:
- m18x_cmmu_init();
- break;
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_init();
+ break;
#endif /* defined(MVME187) || defined(MVME188) */
#ifdef MVME197
- case CPU_197:
- m197_cmmu_init();
- break;
+ case CPU_197:
+ m197_cmmu_init();
+ break;
#endif /* MVME197 */
- }
- return;
+ }
+ return;
}
/*
@@ -464,22 +464,24 @@ cmmu_remote_set_sapr(unsigned cpu, unsigned ap)
void
cmmu_set_uapr(unsigned ap)
{
- CMMU_LOCK;
- switch (cputyp) {
+ register s = splhigh();
+ CMMU_LOCK;
+ switch (cputyp) {
#if defined(MVME187) || defined(MVME188)
- case CPU_187:
- case CPU_188:
- m18x_cmmu_set_uapr(ap);
- break;
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_set_uapr(ap);
+ break;
#endif /* defined(MVME187) || defined(MVME188) */
#ifdef MVME197
- case CPU_197:
- m197_cmmu_set_uapr(ap);
- break;
+ case CPU_197:
+ m197_cmmu_set_uapr(ap);
+ break;
#endif /* MVME197 */
- }
- CMMU_UNLOCK;
- return;
+ }
+ CMMU_UNLOCK;
+ splx(s);
+ return;
}
/*
@@ -590,27 +592,29 @@ cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
*/
void
cmmu_pmap_activate(
- unsigned cpu,
- unsigned uapr,
- batc_template_t i_batc[BATC_MAX],
- batc_template_t d_batc[BATC_MAX])
-{
- CMMU_LOCK;
- switch (cputyp) {
-#if defined(MVME187) || defined(MVME188)
- case CPU_187:
- case CPU_188:
- m18x_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
- break;
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX])
+{
+ register s = splhigh();
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
+ break;
#endif /* defined(MVME187) || defined(MVME188) */
#ifdef MVME197
- case CPU_197:
- m197_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
- break;
+ case CPU_197:
+ m197_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
+ break;
#endif /* MVME197 */
- }
- CMMU_UNLOCK;
- return;
+ }
+ CMMU_UNLOCK;
+ splx(s);
+ return;
}
/**
diff --git a/sys/arch/mvme88k/mvme88k/eh.S b/sys/arch/mvme88k/mvme88k/eh.S
index 0b357b5438c..ac358533514 100644
--- a/sys/arch/mvme88k/mvme88k/eh.S
+++ b/sys/arch/mvme88k/mvme88k/eh.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: eh.S,v 1.9 2000/12/28 21:21:24 smurph Exp $ */
+/* $OpenBSD: eh.S,v 1.10 2001/02/01 03:38:19 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -239,8 +239,20 @@
#define INTSTACK 0 /* To make interupts use their own stack */
- text
- align 8
+ data
+ align 4
+sbadcpupanic:
+ string "eh.S: bad cpu number in FLAGS\n"
+
+ text
+ align 8
+
+Lbadcpupanic:
+ or.u r2, r0, hi16(sbadcpupanic)
+ or r2, r2, lo16(sbadcpupanic)
+ bsr _panic
+
+ align 8
/***************************************************************************
***************************************************************************
@@ -274,7 +286,7 @@
** (which is pointed-to by r31).
**/
-
+#if defined(MVME187) || defined (MVME188)
#define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK) ; \
xcr FLAGS, FLAGS, SR1 ; \
FLAG_PRECHECK ; \
@@ -293,6 +305,9 @@
/* TMP2 now free -- use to set EF_VECTOR */ ; \
or TMP2, r0, NUM ; \
st TMP2, r31, REG_OFF(EF_VECTOR) ; \
+ /* TMP3 also free -- use to set last_vector */ ; \
+ or.u TMP3, r0, hi16(_last_vector) ; \
+ st TMP2, TMP3, lo16(_last_vector) ; \
; \
/* Clear any bits in the SSBR (held in TMP) */ ; \
/* SSBR_STUFF may be empty, though. */ ; \
@@ -304,7 +319,9 @@
; \
/* All general regs free -- do any debugging */ ; \
PREP_DEBUG(BIT, NAME)
+#endif /* defined(MVME187) || defined (MVME188) */
+#ifdef MVME197
#define PREP2(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK); \
xcr FLAGS, FLAGS, SR1 ; \
FLAG_PRECHECK ; \
@@ -334,6 +351,7 @@
; \
/* All general regs free -- do any debugging */ ; \
PREP_DEBUG(BIT, NAME)
+#endif /* MVME197 */
/* Some defines for use with PREP() */
#define No_SSBR_Stuff /* empty */
@@ -405,9 +423,9 @@ LABEL(_eh_debug) word 0x00000000
#define DONE(num) br return_from_exception_handler
#endif
-
+#if defined(MVME187) || defined (MVME188)
/*#########################################################################*/
-/*#### THE ACTUAL EXCEPTION HANDLER ENTRY POINTS ##########################*/
+/*#### THE ACTUAL EXCEPTION HANDLER ENTRY POINTS for MVME18x ##############*/
/*#########################################################################*/
/* unknown exception handler */
@@ -451,7 +469,7 @@ LABEL(_unimplemented_handler)
DONE(DEBUG_UNIMPLEMENTED_BIT)
/*
- * Some versions of the chip have * a bug whereby false privilege
+ * Some versions of the chip have a bug whereby false privilege
* violation exceptions are raised. If the valid bit in the SXIP is clear,
* it is false. If so, just return. The code before PREP handles this....
*/
@@ -659,10 +677,11 @@ LABEL(_error_handler)
ldcr r10, SR3
st r10, r31, REG_OFF(EF_FPHS2)
- /* error vector is zippo numero el'zeroooo */
- st r0, r31, REG_OFF(EF_VECTOR)
+ /* error vector is 10 */
+ or r10, r0, 10
+ st r10, r31, REG_OFF(EF_VECTOR)
-#ifdef MVME188
+#if 0 /* MVME188 */
#define IST_REG 0xfff84040 /* interrupt status addr */
/* check if it's a mvme188 */
or.u r10, r0, hi16(_cputyp)
@@ -876,8 +895,10 @@ _LABEL(ignore_data_exception)
1:
/* the following jumps to "badaddr__return_nonzero" in below */
- NOP
RTE
+#endif /* defined(MVME187) || defined (MVME188) */
+
+#ifdef MVME197
/*
* This is part of baddadr (below).
*/
@@ -908,6 +929,8 @@ _LABEL(m197_ignore_data_exception)
/* the following jumps to "badaddr__return_nonzero" in below */
NOP
RTE
+#endif /* MVME197 */
+
/*
* extern boolean_t badaddr(unsigned addr, unsigned len)
@@ -1036,6 +1059,7 @@ _LABEL(badaddr__return)
******************************************************************************
*/
+#if defined(MVME187) || defined (MVME188)
LABEL(setup_phase_one)
/***************** REGISTER STATUS BLOCK ***********************\
@@ -1175,47 +1199,59 @@ _LABEL(pickup_stack)
/*FALLTHROUGH */
_LABEL(have_pcb)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: free *
- * r1: free *
- * FLAGS: CPU status flags *
- * r31: our exception frame *
- * Valid in the exception frame: *
- * Exception-time r1, r31, FLAGS. *
- * Exception SR3, if appropriate. *
- *************************************************** *
- * immediate goal: *
- * Save the shadow registers that need to be saved to *
- * the exception frame. *
- \***************************************************************/
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * r1: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * Save the shadow registers that need to be saved to *
+ * the exception frame. *
+ \***************************************************************/
stcr TMP, SR3 /* free up TMP, TMP2, TMP3 */
SAVE_TMP2
SAVE_TMP3
- /* save some exception-time registers to the exception frame */
+ /* save some exception-time registers to the exception frame */
ldcr TMP, EPSR
st TMP, r31, REG_OFF(EF_EPSR)
ldcr TMP3, SNIP
st TMP3, r31, REG_OFF(EF_SNIP)
ldcr TMP2, SFIP
st TMP2, r31, REG_OFF(EF_SFIP)
+ /* get and store the cpu number */
+ extu TMP, FLAGS, FLAG_CPU_FIELD_WIDTH<0> /* TMP = cpu# */
+ st TMP, r31, REG_OFF(EF_CPU)
- /*
- * Save Pbus fault status register from data and inst CMMU.
- */
+ /*
+ * Save Pbus fault status register from data and inst CMMU.
+ */
#ifdef MVME188
/* check if it's a mvme188 */
or.u TMP, r0, hi16(_cputyp)
ld TMP2, TMP, lo16(_cputyp)
cmp TMP, TMP2, 0x188
- bb1 ne, TMP, 4f
- ldcr TMP, SR1
- mak TMP, TMP, FLAG_CPU_FIELD_WIDTH<2> /* TMP = cpu# */
- cmp TMP2, TMP, 0x0 /* CPU0 ? */
- bb1 ne, TMP2, 1f
+ bb1 ne, TMP, 5f
+
+ extu TMP, FLAGS, FLAG_CPU_FIELD_WIDTH<0> /* TMP = cpu# */
+ cmp TMP2, TMP, 0x0 /* CPU0 ? */
+ bb1 eq, TMP2, 1f
+ cmp TMP2, TMP, 0x1 /* CPU1 ? */
+ bb1 eq, TMP2, 2f
+ cmp TMP2, TMP, 0x2 /* CPU2 ? */
+ bb1 eq, TMP2, 3f
+ cmp TMP2, TMP, 0x3 /* CPU3 ? */
+ bb1 eq, TMP2, 4f
+ /* Arrrrg! bad cpu# */
+ br Lbadcpupanic
+1:
/* must be CPU0 */
or.u TMP, r0, hi16(VME_CMMU_I0)
ld TMP2, TMP, lo16(VME_CMMU_I0) + 0x108
@@ -1224,10 +1260,8 @@ _LABEL(have_pcb)
ld TMP2, TMP, lo16(VME_CMMU_D0) + 0x108
st TMP2, r31, REG_OFF(EF_DPFSR)
br pfsr_done
-1:
- cmp TMP2, TMP, 0x1 /* CPU1 ? */
- bb1 ne, TMP2, 2f
- /* must be CPU1 */
+2:
+ /* must be CPU1 */
or.u TMP, r0, hi16(VME_CMMU_I1)
ld TMP2, TMP, lo16(VME_CMMU_I1) + 0x108
st TMP2, r31, REG_OFF(EF_IPFSR)
@@ -1235,10 +1269,8 @@ _LABEL(have_pcb)
ld TMP2, TMP, lo16(VME_CMMU_D1) + 0x108
st TMP2, r31, REG_OFF(EF_DPFSR)
br pfsr_done
-2:
- cmp TMP2, TMP, 0x2 /* CPU2 ? */
- bb1 ne, TMP2, 3f
- /* must be CPU2 */
+3:
+ /* must be CPU2 */
or.u TMP, r0, hi16(VME_CMMU_I2)
ld TMP2, TMP, lo16(VME_CMMU_I2) + 0x108
st TMP2, r31, REG_OFF(EF_IPFSR)
@@ -1246,8 +1278,8 @@ _LABEL(have_pcb)
ld TMP2, TMP, lo16(VME_CMMU_D2) + 0x108
st TMP2, r31, REG_OFF(EF_DPFSR)
br pfsr_done
-3:
- /* must be CPU3 */
+4:
+ /* must be CPU3 */
or.u TMP, r0, hi16(VME_CMMU_I3)
ld TMP2, TMP, lo16(VME_CMMU_I3) + 0x108
st TMP2, r31, REG_OFF(EF_IPFSR)
@@ -1255,7 +1287,7 @@ _LABEL(have_pcb)
ld TMP2, TMP, lo16(VME_CMMU_D3) + 0x108
st TMP2, r31, REG_OFF(EF_DPFSR)
br pfsr_done
-4:
+5:
#endif /* MVME188 */
/* it's a single processor SBC */
or.u TMP, r0, hi16(SBC_CMMU_I)
@@ -1387,6 +1419,8 @@ _LABEL(DMT_check_finished)
ldcr r1, SR2
jmp r1 /* return to allow the handler to clear more SSBR bits */
+#endif /* defined(MVME187) || defined (MVME188) */
+
/************************************************************************/
/************************************************************************/
@@ -1581,7 +1615,7 @@ _LABEL(misaligned_double)
/************************************************************************/
/************************************************************************/
-
+#if defined(MVME187) || defined (MVME188)
LABEL(setup_phase_two)
/***************** REGISTER STATUS BLOCK ***********************\
@@ -1834,6 +1868,7 @@ exception_handler_has_ksp: global exception_handler_has_ksp
_LABEL(return_to_calling_exception_handler)
jmp r14 /* loaded above */
+#endif /* defined(MVME187) || defined (MVME188) */
/*
@@ -2021,26 +2056,31 @@ LABEL(oops2)
/* clear the dmt0 word in the E.F. */
st r0, FPTR, REG_OFF(EF_DSR)
2:
+#endif /* MVME197 */
/*
* If the saved ipl is 0, then call dosoftint() to process soft
* interrupts.
* If returning to user land, look for ASTs
*/
-#endif /* MVME197 */
LABEL(_check_ast)
ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if ints off */
ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
bcnd ne0, r2, 1f /* can't do softint's */
- bsr.n _setipl
+
+ subu r31, r31, 32
+ bsr.n _setipl
or r2,r0,1
+ addu r31, r31, 32
bsr _dosoftint
/* is this needed? we are going to restore the ipl below XXX nivas */
+ subu r31, r31, 32
bsr.n _setipl
or r2,r0,0 /* ints are enabled */
- /* at ipl 0 now */
+ addu r31, r31, 32
+ /* at ipl 0 now */
1:
ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
bb1 PSR_SUPERVISOR_MODE_BIT, r2, no_ast /*skip if in system mode */
@@ -2076,15 +2116,17 @@ _LABEL(no_ast)
stcr r1, PSR
/* now ready to return....*/
-
+
+ ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
+ bsr.n _setipl
+ subu r31, r31, 40
+ addu r31, r31, 40
+
/*
* Transfer the frame pointer to r31, since we no longer need a stack.
* No page faults here, and interrupts are disabled.
*/
- ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
- bsr _setipl
-
or r31, r0, FPTR
/* restore r1 later */
ld.d r2 , r31, GENREG_OFF(2)
@@ -2797,7 +2839,7 @@ LABEL(m197_setup_phase_two)
* Another exception (or exceptions) may be raised in *
* this, which is why FLAG_ENABLING_FPU is set in SR1. *
\***************************************************************/
-
+ NOP
RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */
_LABEL(m197_fpu_enable)
@@ -2959,5 +3001,4 @@ m197_exception_handler_has_ksp: global m197_exception_handler_has_ksp
_LABEL(m197_return_to_calling_exception_handler)
jmp r14 /* loaded above */
-#endif
-
+#endif
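
For illustration only, and not part of this patch: a C rendering of the per-CPU dispatch the reworked setup_phase_one performs — extract the CPU number from the low bits of FLAGS (the assembly uses extu ... FLAG_CPU_FIELD_WIDTH<0>) and select that CPU's CMMU register block, panicking on an impossible value. FLAG_CPU_FIELD_WIDTH is assumed here to be 2 bits, enough for the four supported CPUs.

#include <stdio.h>
#include <stdlib.h>

#define FLAG_CPU_FIELD_WIDTH 2	/* assumption: 2 bits -> CPUs 0..3 */
#define MAX_CPUS 4

static void
panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	exit(1);
}

static int
cpu_number(unsigned flags)
{
	/* C equivalent of: extu rD, FLAGS, FLAG_CPU_FIELD_WIDTH<0> */
	return flags & ((1u << FLAG_CPU_FIELD_WIDTH) - 1);
}

int
main(void)
{
	unsigned flags = 0x2;	/* pretend we are CPU 2 */
	int cpu = cpu_number(flags);

	if (cpu < 0 || cpu >= MAX_CPUS)
		panic("eh.S: bad cpu number in FLAGS");
	printf("using CMMU register block for CPU %d\n", cpu);
	return 0;
}
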
diff --git a/sys/arch/mvme88k/mvme88k/genassym.c b/sys/arch/mvme88k/mvme88k/genassym.c
index 0f09828b1dd..ca1228dd17d 100644
--- a/sys/arch/mvme88k/mvme88k/genassym.c
+++ b/sys/arch/mvme88k/mvme88k/genassym.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: genassym.c,v 1.6 2001/01/12 07:29:26 smurph Exp $ */
+/* $OpenBSD: genassym.c,v 1.7 2001/02/01 03:38:20 smurph Exp $ */
/*
* Copyright (c) 1982, 1990 The Regents of the University of California.
* All rights reserved.
@@ -32,7 +32,7 @@
* SUCH DAMAGE.
*
* @(#)genassym.c 7.8 (Berkeley) 5/7/91
- * $Id: genassym.c,v 1.6 2001/01/12 07:29:26 smurph Exp $
+ * $Id: genassym.c,v 1.7 2001/02/01 03:38:20 smurph Exp $
*/
#ifndef KERNEL
@@ -136,32 +136,33 @@ main()
pair("EF_DMT2", int_offset_of_element(ss->dmt2));
pair("EF_DMD2", int_offset_of_element(ss->dmd2));
pair("EF_DMA2", int_offset_of_element(ss->dma2));
- pair("EF_FPECR", int_offset_of_element(ss->fpecr));
+ pair("EF_FPECR",int_offset_of_element(ss->fpecr));
pair("EF_FPCR", int_offset_of_element(ss->fpcr)); /* MVME197 */
pair("EF_FPSR", int_offset_of_element(ss->fpsr)); /* MVME197 */
- pair("EF_FPHS1", int_offset_of_element(ss->fphs1));
- pair("EF_FPLS1", int_offset_of_element(ss->fpls1));
- pair("EF_FPHS2", int_offset_of_element(ss->fphs2));
- pair("EF_FPLS2", int_offset_of_element(ss->fpls2));
+ pair("EF_FPHS1",int_offset_of_element(ss->fphs1));
+ pair("EF_FPLS1",int_offset_of_element(ss->fpls1));
+ pair("EF_FPHS2",int_offset_of_element(ss->fphs2));
+ pair("EF_FPLS2",int_offset_of_element(ss->fpls2));
pair("EF_FPPT", int_offset_of_element(ss->fppt));
pair("EF_FPRH", int_offset_of_element(ss->fprh));
pair("EF_FPRL", int_offset_of_element(ss->fprl));
pair("EF_FPIT", int_offset_of_element(ss->fpit));
- pair("EF_VECTOR", int_offset_of_element(ss->vector));
+ pair("EF_VECTOR",int_offset_of_element(ss->vector));
pair("EF_MASK", int_offset_of_element(ss->mask));
pair("EF_MODE", int_offset_of_element(ss->mode));
pair("EF_RET", int_offset_of_element(ss->scratch1));
pair("EF_IPFSR",int_offset_of_element(ss->ipfsr));
pair("EF_DPFSR",int_offset_of_element(ss->dpfsr));
- pair("EF_DSR",int_offset_of_element(ss->dsr)); /* MVME197 */
- pair("EF_DLAR",int_offset_of_element(ss->dlar)); /* MVME197 */
- pair("EF_DPAR",int_offset_of_element(ss->dpar)); /* MVME197 */
- pair("EF_ISR",int_offset_of_element(ss->dsr)); /* MVME197 */
- pair("EF_ILAR",int_offset_of_element(ss->ilar)); /* MVME197 */
- pair("EF_IPAR",int_offset_of_element(ss->ipar)); /* MVME197 */
- pair("EF_SRX",int_offset_of_element(ss->dpfsr));
- pair("EF_NREGS", sizeof(*ss)/sizeof(int));
+ pair("EF_DSR", int_offset_of_element(ss->dsr)); /* MVME197 */
+ pair("EF_DLAR", int_offset_of_element(ss->dlar)); /* MVME197 */
+ pair("EF_DPAR", int_offset_of_element(ss->dpar)); /* MVME197 */
+ pair("EF_ISR", int_offset_of_element(ss->dsr)); /* MVME197 */
+ pair("EF_ILAR", int_offset_of_element(ss->ilar)); /* MVME197 */
+ pair("EF_IPAR", int_offset_of_element(ss->ipar)); /* MVME197 */
+ pair("EF_SRX", int_offset_of_element(ss->dpfsr));
+ pair("EF_CPU", int_offset_of_element(ss->cpu)); /* cpu number */
+ pair("EF_NREGS",sizeof(*ss)/sizeof(int));
/* end MVME197 only */
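
For illustration only, and not part of this patch: the genassym pattern in miniature — compute a structure member's offset at build time and print it as an assembler-visible constant, which is how the new EF_CPU slot becomes reachable from eh.S. The struct layout below is a placeholder, not the real exception frame, and pair() here simply prints a #define-style line.

#include <stdio.h>
#include <stddef.h>

struct frame {			/* placeholder layout */
	unsigned vector;
	unsigned mask;
	unsigned cpu;		/* the new per-frame cpu number slot */
};

static void
pair(const char *name, int val)
{
	printf("#define %s %d\n", name, val);
}

int
main(void)
{
	pair("EF_VECTOR", (int)(offsetof(struct frame, vector) / sizeof(int)));
	pair("EF_CPU",    (int)(offsetof(struct frame, cpu)    / sizeof(int)));
	return 0;
}
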
diff --git a/sys/arch/mvme88k/mvme88k/locore.S b/sys/arch/mvme88k/mvme88k/locore.S
index 7b102554934..11a6f286436 100644
--- a/sys/arch/mvme88k/mvme88k/locore.S
+++ b/sys/arch/mvme88k/mvme88k/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.9 1999/09/27 19:13:22 smurph Exp $ */
+/* $OpenBSD: locore.S,v 1.10 2001/02/01 03:38:20 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -331,7 +331,7 @@ _LABEL(master_start)
* can set interrupt_stack[cpu_number()] = _intstack
*/
ldcr r10, SR1
- mak r10, r10, FLAG_CPU_FIELD_WIDTH<0> /* r10 <-- CPU# */
+ extu r10, r10, FLAG_CPU_FIELD_WIDTH<0> /* r10 <-- CPU# */
/* figure interrupt_stack[cpu_number()] */
or.u r11, r0, hi16(_interrupt_stack)
@@ -410,10 +410,6 @@ _m197_vector_list: /* references memory BELOW this line */
_m197_vector_list_end:
word END_OF_VECTOR_LIST
#endif /* MVME197 */
- .align 4096 /* Vector table is a page aligned list */
-LABEL(_vector_table) /* new vector table location, was addr 0 */
- space (0x1000) /* 16K */
-
.align 4096 /* SDT (segment descriptor table */
global _kernel_sdt
_kernel_sdt:
diff --git a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
index e4784270152..e308358ec7b 100644
--- a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
+++ b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_asm_routines.S,v 1.9 2001/01/14 20:25:25 smurph Exp $ */
+/* $OpenBSD: locore_asm_routines.S,v 1.10 2001/02/01 03:38:20 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1992 Carnegie Mellon University
@@ -223,9 +223,10 @@ LABEL(lockpause)
#undef simple_unlock
ENTRY(simple_lock_init)
st r0, r2, 0 /* init */
- ldcr r2, SR1 /* extract cpu number*/
- clr r2, r2, 0<FLAG_CPU_FIELD_WIDTH>
- mask r2, r2, 3 /* play it safe */
+
+ ldcr r2, SR1
+ extu r2, r2, FLAG_CPU_FIELD_WIDTH<0> /* r2 = cpu# */
+ mask r2, r2, 3 /* play it safe */
or.u r3, r0, hi16(lockinit)
or r3, r3, lo16(lockinit)
ld r4, r3[r2]
@@ -256,9 +257,9 @@ ENTRY(simple_lock)
xmem r3, r2, r0
bcnd ne0, r3, 1f
#if 0
- ldcr r5, SR1 /* extract cpu number */
- clr r5, r5, 0<FLAG_CPU_FIELD_WIDTH>
- mask r5, r5, 3 /* play it safe */
+ ldcr r5, SR1
+ extu r5, r5, FLAG_CPU_FIELD_WIDTH<0> /* r5 = cpu# */
+ mask r5, r5, 3 /* play it safe */
or.u r3, r0, hi16(lockuse)
or r3, r3, lo16(lockuse)
ld r4, r3[r5]
@@ -309,9 +310,10 @@ ENTRY(db_simple_lock)
or r10, r0, 1
xmem r10, r2, r0
bcnd ne0, r10, db_simple_lock_watch
- ldcr r2, SR1 /* extract cpu number*/
- clr r2, r2, 0<FLAG_CPU_FIELD_WIDTH>
- mask r2, r2, 3 /* play it safe*/
+
+ ldcr r2, SR1
+ extu r2, r2, FLAG_CPU_FIELD_WIDTH<0> /* r2 = cpu# */
+ mask r2, r2, 3 /* play it safe */
or.u r3, r0, hi16(lockuse)
or r3, r3, lo16(lockuse)
ld r4, r3[r2]
@@ -380,9 +382,9 @@ ENTRY(db_simple_lock_held)
*/
ENTRY(simple_lock_pause)
- ldcr r2, SR1 /* extract cpu number*/
- clr r2, r2, FLAG_CPU_FIELD_WIDTH
- mask r2, r2, 3 /* play it safe */
+ ldcr r2, SR1
+ extu r2, r2, FLAG_CPU_FIELD_WIDTH<0> /* r2 = cpu# */
+ mask r2, r2, 3 /* play it safe */
or.u r3, r0, hi16(lockpause)
or r3, r3, lo16(lockpause)
ld r4, r3[r2]
@@ -1138,10 +1140,10 @@ _LABEL(copyout_byte_only)
NOP
NOP
bcnd ne0, LEN, 1b
-# else
+#else
bcnd.n ne0, LEN, 1b
st.b.usr r5, DEST, LEN
-# endif
+#endif
2: or r2, r0, r0 /* successful return */
br .Lcodone
@@ -2471,7 +2473,7 @@ ENTRY(cpu_number)
#endif /* MVME188 */
1:
jmp.n r1
- clr r2, r2, 28<FLAG_CPU_FIELD_WIDTH>/* clears all but the CPU num */
+ extu r2, r2, FLAG_CPU_FIELD_WIDTH<0> /* r2 = cpu# */
#endif
/*************************************************************************
@@ -2514,12 +2516,11 @@ ENTRY(set_cpu_number)
stcr r4, PSR
jmp r1
- 1: /* bad cpu number*/
+1: /* bad cpu number*/
or.u r2, r0, hi16(1f)
bsr.n _panic
or r2, r2, lo16(1f)
- 1: string "set_cpu_number: bad CPU number\0"
+1: string "set_cpu_number: bad CPU number\0"
align 4
/* will not return */
-
diff --git a/sys/arch/mvme88k/mvme88k/locore_c_routines.c b/sys/arch/mvme88k/mvme88k/locore_c_routines.c
index 654dc5ee87a..069d4aee24a 100644
--- a/sys/arch/mvme88k/mvme88k/locore_c_routines.c
+++ b/sys/arch/mvme88k/mvme88k/locore_c_routines.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_c_routines.c,v 1.7 2001/01/14 20:25:25 smurph Exp $ */
+/* $OpenBSD: locore_c_routines.c,v 1.8 2001/02/01 03:38:20 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -30,14 +30,14 @@
*****************************************************************RCS**/
/* This file created by Omron Corporation, 1990. */
-#include <machine/cpu_number.h> /* cpu_number() */
-#include <machine/board.h> /* m188 bit defines */
-#include <machine/m88100.h> /* DMT_VALID */
-#include <assym.s> /* EF_NREGS, etc. */
-#include <machine/asm.h> /* END_OF_VECTOR_LIST, etc. */
-#include <machine/asm_macro.h> /* enable/disable interrupts */
+#include <machine/cpu_number.h> /* cpu_number() */
+#include <machine/board.h> /* m188 bit defines */
+#include <machine/m88100.h> /* DMT_VALID */
+#include <assym.s> /* EF_NREGS, etc. */
+#include <machine/asm.h> /* END_OF_VECTOR_LIST, etc. */
+#include <machine/asm_macro.h> /* enable/disable interrupts */
#ifdef DDB
- #include <ddb/db_output.h> /* db_printf() */
+ #include <ddb/db_output.h> /* db_printf() */
#endif /* DDB */
@@ -64,14 +64,14 @@ extern u_char *int_mask_level; /* in machdep.c */
extern unsigned master_cpu; /* in cmmu.c */
static struct {
- unsigned char offset;
- unsigned char size;
+ unsigned char offset;
+ unsigned char size;
} dmt_en_info[16] =
{
- {0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
- {1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
- {0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
- {0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
+ {0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
+ {1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
};
#if DATA_DEBUG
@@ -89,49 +89,50 @@ static char *bytes[] =
#define DAE_DEBUG(stuff)
#endif
+#if defined(MVME187) || defined(MVME188)
void
dae_print(unsigned *eframe)
{
- register int x;
- register struct dmt_reg *dmtx;
- register unsigned dmax, dmdx;
- register unsigned v, reg;
- static char *bytes[] =
- {
- "____", "___x", "__x_", "__xx",
- "_x__", "_x_x", "_xx_", "_xxx",
- "x___", "x__x", "x_x_", "x_xx",
- "xx__", "xx_x", "xxx_", "xxxx",
- };
-
- if (!(eframe[EF_DMT0] & DMT_VALID))
- return;
-
- for (x = 0; x < 3; x++) {
- dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
-
- if (!dmtx->dmt_valid)
- continue;
-
- dmdx = eframe[EF_DMD0+x*3];
- dmax = eframe[EF_DMA0+x*3];
-
- if (dmtx->dmt_write)
- printf("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
- x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
- dmdx, dmax, bytes[dmtx->dmt_en],
- dmtx->dmt_doub1 ? "double": "not double",
- dmtx->dmt_lockbar ? "xmem": "not xmem");
- else
- printf("[DMT%d=%x: ld.%c r%d <- %x as [%s] %s %s]\n",
- x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
- dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
- dmtx->dmt_doub1 ? "double": "not double",
- dmtx->dmt_lockbar ? "xmem": "not xmem");
-
- }
+ register int x;
+ register struct dmt_reg *dmtx;
+ register unsigned dmax, dmdx;
+ register unsigned v, reg;
+ static char *bytes[] =
+ {
+ "____", "___x", "__x_", "__xx",
+ "_x__", "_x_x", "_xx_", "_xxx",
+ "x___", "x__x", "x_x_", "x_xx",
+ "xx__", "xx_x", "xxx_", "xxxx",
+ };
+
+ if (!(eframe[EF_DMT0] & DMT_VALID))
+ return;
+
+ for (x = 0; x < 3; x++) {
+ dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
+
+ if (!dmtx->dmt_valid)
+ continue;
+
+ dmdx = eframe[EF_DMD0+x*3];
+ dmax = eframe[EF_DMA0+x*3];
+
+ if (dmtx->dmt_write)
+ printf("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmdx, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ else
+ printf("[DMT%d=%x: ld.%c r%d <- %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+
+ }
}
-#if defined(MVME187) || defined(MVME188)
+
void data_access_emulation(unsigned *eframe)
{
register int x;
@@ -139,13 +140,14 @@ void data_access_emulation(unsigned *eframe)
register unsigned dmax, dmdx;
register unsigned v, reg;
- if (!(eframe[EF_DMT0] & DMT_VALID))
+ dmtx = (struct dmt_reg *)&eframe[EF_DMT0];
+ if (!dmtx->dmt_valid && !dmtx->dmt_skip)
return;
-
+
for (x = 0; x < 3; x++) {
dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
- if (!dmtx->dmt_valid)
+ if (!dmtx->dmt_valid || dmtx->dmt_skip)
continue;
dmdx = eframe[EF_DMD0+x*3];
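[Editor's sketch, not part of the patch] The change above makes both dae_print() and data_access_emulation() walk the three DMT transaction slots and now also skip entries flagged dmt_skip. A simplified model of that scan is below; the dmt_slot layout is a stand-in, not the real struct dmt_reg from m88100.h.

#include <stdio.h>

struct dmt_slot {                       /* hypothetical flattened view of DMTx */
        int valid;                      /* transaction recorded */
        int skip;                       /* already handled, per this change */
        int write;                      /* store (vs. load) */
        unsigned data;                  /* DMDx */
        unsigned addr;                  /* DMAx */
};

static void
scan_dmt(struct dmt_slot slot[3])
{
        int x;

        for (x = 0; x < 3; x++) {
                /* the patched loop ignores entries marked "skip" as well */
                if (!slot[x].valid || slot[x].skip)
                        continue;
                printf("DMT%d: %s 0x%08x @ 0x%08x\n", x,
                    slot[x].write ? "st" : "ld", slot[x].data, slot[x].addr);
        }
}

int
main(void)
{
        struct dmt_slot s[3] = {
                { 1, 0, 1, 0xdeadbeef, 0x1000 },
                { 1, 1, 0, 0,          0x2000 },        /* skipped */
                { 0, 0, 0, 0,          0 },
        };

        scan_dmt(s);
        return 0;
}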
@@ -313,72 +315,71 @@ typedef struct {
* so don't call any other functions!
* XXX clean this - nivas
*/
-void vector_init(
- m88k_exception_vector_area *vector,
- unsigned *vector_init_list)
+void
+vector_init(m88k_exception_vector_area *vector, unsigned *vector_init_list)
{
- unsigned num;
- unsigned vec;
+ unsigned num;
+ unsigned vec;
#if defined(MVME187) || defined(MVME188)
- extern void sigsys(), sigtrap(), stepbpt(), userbpt();
- extern void syscall_handler();
+ extern void sigsys(), sigtrap(), stepbpt(), userbpt();
+ extern void syscall_handler();
#endif /* defined(MVME187) || defined(MVME188) */
#ifdef MVME197
- extern void m197_sigsys(), m197_sigtrap(), m197_stepbpt(), m197_userbpt();
- extern void m197_syscall_handler();
+ extern void m197_sigsys(), m197_sigtrap(), m197_stepbpt(), m197_userbpt();
+ extern void m197_syscall_handler();
#endif /* MVME197 */
-
- for (num = 0; (vec = vector_init_list[num]) != END_OF_VECTOR_LIST; num++) {
- if (vec != PREDEFINED_BY_ROM)
- SET_VECTOR(num, to, vec);
- asm ("or r0, r0, r0");
- asm ("or r0, r0, r0");
- asm ("or r0, r0, r0");
- asm ("or r0, r0, r0");
- }
- switch (cputyp) {
+ for (num = 0; (vec = vector_init_list[num]) != END_OF_VECTOR_LIST; num++) {
+ if (vec != PREDEFINED_BY_ROM)
+ SET_VECTOR(num, to, vec);
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ }
+
+ switch (cputyp) {
#ifdef MVME197
- case CPU_197:
- while (num < 496){
- SET_VECTOR(num, to, m197_sigsys);
- num++;
- }
- num++; /* skip 496, BUG ROM vector */
- SET_VECTOR(450, to, m197_syscall_handler);
-
- while (num <= SIGSYS_MAX)
- SET_VECTOR(num++, to, m197_sigsys);
-
- while (num <= SIGTRAP_MAX)
- SET_VECTOR(num++, to, m197_sigtrap);
-
- SET_VECTOR(504, to, m197_stepbpt);
- SET_VECTOR(511, to, m197_userbpt);
- break;
+ case CPU_197:
+ while (num < 496) {
+ SET_VECTOR(num, to, m197_sigsys);
+ num++;
+ }
+ num++; /* skip 496, BUG ROM vector */
+ SET_VECTOR(450, to, m197_syscall_handler);
+
+ while (num <= SIGSYS_MAX)
+ SET_VECTOR(num++, to, m197_sigsys);
+
+ while (num <= SIGTRAP_MAX)
+ SET_VECTOR(num++, to, m197_sigtrap);
+
+ SET_VECTOR(504, to, m197_stepbpt);
+ SET_VECTOR(511, to, m197_userbpt);
+ break;
#endif /* MVME197 */
#if defined(MVME187) || defined(MVME188)
- case CPU_187:
- case CPU_188:
- while (num < 496){
- SET_VECTOR(num, to, sigsys);
- num++;
- }
- num++; /* skip 496, BUG ROM vector */
-
- SET_VECTOR(450, to, syscall_handler);
+ case CPU_187:
+ case CPU_188:
+ while (num < 496) {
+ SET_VECTOR(num, to, sigsys);
+ num++;
+ }
+ num++; /* skip 496, BUG ROM vector */
- while (num <= SIGSYS_MAX)
- SET_VECTOR(num++, to, sigsys);
+ SET_VECTOR(450, to, syscall_handler);
- while (num <= SIGTRAP_MAX)
- SET_VECTOR(num++, to, sigtrap);
+ while (num <= SIGSYS_MAX)
+ SET_VECTOR(num++, to, sigsys);
- SET_VECTOR(504, to, stepbpt);
- SET_VECTOR(511, to, userbpt);
- break;
+ while (num <= SIGTRAP_MAX)
+ SET_VECTOR(num++, to, sigtrap);
+
+ SET_VECTOR(504, to, stepbpt);
+ SET_VECTOR(511, to, userbpt);
+ break;
#endif /* defined(MVME187) || defined(MVME188) */
- }
+ }
}
#ifdef MVME188
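[Editor's sketch, not part of the patch] The reindented vector_init() above fills the exception vector table in three steps: explicit entries from the init list (stopping at a sentinel, leaving ROM-owned slots alone), a default "bad trap" handler for everything else, and a handful of fixed slots (450 syscall, 496 BUG ROM, 504 step breakpoint, 511 user breakpoint). The sketch below collapses the separate SIGSYS/SIGTRAP ranges into one default for brevity; the sentinel values and handler ids are placeholders.

#include <stdio.h>

#define NVECS                   512
#define END_OF_VECTOR_LIST      0xffffffffu     /* assumed sentinel */
#define PREDEFINED_BY_ROM       0xfffffffeu     /* assumed marker */

enum { UNSET, SIGSYS, SYSCALL, STEPBPT, USERBPT };

int
main(void)
{
        static const unsigned init_list[] = {
                0x1000, PREDEFINED_BY_ROM, 0x1100, END_OF_VECTOR_LIST
        };
        unsigned vector[NVECS] = { 0 };
        unsigned num, vec;

        /* 1. explicit entries from the init list */
        for (num = 0; (vec = init_list[num]) != END_OF_VECTOR_LIST; num++)
                if (vec != PREDEFINED_BY_ROM)
                        vector[num] = vec;      /* ROM slots left alone */

        /* 2. default handler up to the BUG ROM vector */
        while (num < 496)
                vector[num++] = SIGSYS;
        num++;                                  /* 496 belongs to the BUG ROM */
        vector[450] = SYSCALL;
        while (num < 504)
                vector[num++] = SIGSYS;

        /* 3. fixed debugger slots */
        vector[504] = STEPBPT;
        vector[511] = USERBPT;

        printf("vector[450]=%u vector[504]=%u vector[511]=%u\n",
            vector[450], vector[504], vector[511]);
        return 0;
}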
@@ -387,17 +388,16 @@ unsigned int m188_curspl[MAX_CPUS] = {0,0,0,0};
unsigned int blocked_interrupts_mask;
unsigned int int_mask_val[INT_LEVEL] = {
- MASK_LVL_0,
- MASK_LVL_1,
- MASK_LVL_2,
- MASK_LVL_3,
- MASK_LVL_4,
- MASK_LVL_5,
- MASK_LVL_6,
- MASK_LVL_7
+ MASK_LVL_0,
+ MASK_LVL_1,
+ MASK_LVL_2,
+ MASK_LVL_3,
+ MASK_LVL_4,
+ MASK_LVL_5,
+ MASK_LVL_6,
+ MASK_LVL_7
};
-
/*
* return next safe spl to reenable interrupts.
*/
@@ -406,236 +406,258 @@ safe_level(mask, curlevel)
unsigned mask;
unsigned curlevel;
{
- register int i;
-
- for (i = curlevel; i < 8; i++)
- if (! (int_mask_val[i] & mask))
- return i;
- printf("safe_level: no safe level for mask 0x%08x level %d found\n",
- mask, curlevel);
- panic("safe_level");
+ register int i;
+
+ for (i = curlevel; i < 8; i++)
+ if (! (int_mask_val[i] & mask))
+ return i;
+ printf("safe_level: no safe level for mask 0x%08x level %d found\n",
+ mask, curlevel);
+ panic("safe_level");
}
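[Editor's sketch, not part of the patch] safe_level() above scans int_mask_val[] for the first interrupt level at which the given source is no longer enabled. The standalone model below uses made-up mask values; the real MASK_LVL_* constants come from mvme188.h.

#include <stdio.h>

static const unsigned int_mask_val[8] = {       /* example masks only */
        0xffffffff, 0x7fffffff, 0x3fffffff, 0x1fffffff,
        0x0fffffff, 0x07ffffff, 0x03ffffff, 0x00000000
};

static int
safe_level(unsigned mask, unsigned curlevel)
{
        int i;

        /* first level whose enable mask no longer contains this source */
        for (i = (int)curlevel; i < 8; i++)
                if (!(int_mask_val[i] & mask))
                        return i;
        return -1;      /* the kernel panics here instead */
}

int
main(void)
{
        printf("safe level = %d\n", safe_level(0x04000000, 0));
        return 0;
}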
void
setlevel(int level)
{
- m88k_psr_type psr;
- register unsigned int mask;
- register int cpu = cpu_number();
-
- mask = int_mask_val[level];
+ m88k_psr_type psr;
+ register unsigned int mask;
+ register int cpu = cpu_number();
+ if (level > 7) {
+ panic("setlevel: bad level 0x%x", level);
+ }
+ mask = int_mask_val[level];
- if (cpu != master_cpu)
- mask &= SLAVE_MASK;
+ if (cpu != master_cpu)
+ mask &= SLAVE_MASK;
- mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
-
- mask &= ~blocked_interrupts_mask;
+#if 0
+ mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
- *int_mask_reg[cpu] = mask;
- int_mask_shadow[cpu] = mask;
+ mask &= ~blocked_interrupts_mask;
+#endif
- m188_curspl[cpu] = level;
+ *int_mask_reg[cpu] = mask;
+#if 0
+ int_mask_shadow[cpu] = mask;
+#endif
+ m188_curspl[cpu] = level;
}
#ifdef DDB
void
db_setlevel(int level)
{
- m88k_psr_type psr;
- register unsigned int mask;
- register int cpu = cpu_number();
-
- mask = int_mask_val[level];
+ m88k_psr_type psr;
+ register unsigned int mask;
+ register int cpu = cpu_number();
- if (cpu != master_cpu)
- mask &= SLAVE_MASK;
+ mask = int_mask_val[level];
- mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
+ if (cpu != master_cpu)
+ mask &= SLAVE_MASK;
+#if 0
+ mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
- mask &= ~blocked_interrupts_mask;
+ mask &= ~blocked_interrupts_mask;
+#endif
- *int_mask_reg[cpu] = mask;
- int_mask_shadow[cpu] = mask;
+ *int_mask_reg[cpu] = mask;
+#if 0
+ int_mask_shadow[cpu] = mask;
+#endif
- m188_curspl[cpu] = level;
+ m188_curspl[cpu] = level;
}
#endif /* DDB */
-void block_obio_interrupt(unsigned mask)
+void
+block_obio_interrupt(unsigned mask)
{
- blocked_interrupts_mask |= mask;
+ blocked_interrupts_mask |= mask;
}
-void unblock_obio_interrupt(unsigned mask)
+void
+unblock_obio_interrupt(unsigned mask)
{
- blocked_interrupts_mask |= ~mask;
+ blocked_interrupts_mask |= ~mask;
}
#endif /* MVME188 */
-unsigned spl(void)
+unsigned
+spl(void)
{
- unsigned curspl;
- m88k_psr_type psr; /* proccessor status register */
- int cpu = 0;
+ unsigned curspl;
+ m88k_psr_type psr; /* proccessor status register */
+ int cpu = 0;
- psr = disable_interrupts_return_psr();
- switch (cputyp) {
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
#ifdef MVME188
- case CPU_188:
- cpu = cpu_number();
- curspl = m188_curspl[cpu];
- break;
+ case CPU_188:
+ cpu = cpu_number();
+ curspl = m188_curspl[cpu];
+ break;
#endif /* MVME188 */
#if defined(MVME187) || defined(MVME197)
- case CPU_187:
- case CPU_197:
- curspl = *int_mask_level;
- break;
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ break;
#endif /* defined(MVME187) || defined(MVME197) */
- default:
- panic("spl: Can't determine cpu type!");
- }
- set_psr(psr);
- return curspl;
+ default:
+ panic("spl: Can't determine cpu type!");
+ }
+ set_psr(psr);
+ return curspl;
}
#if DDB
-unsigned db_spl(void)
+unsigned
+db_spl(void)
{
- unsigned curspl;
- m88k_psr_type psr; /* proccessor status register */
- int cpu = 0;
+ unsigned curspl;
+ m88k_psr_type psr; /* proccessor status register */
+ int cpu = 0;
- psr = disable_interrupts_return_psr();
- switch (cputyp) {
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
#ifdef MVME188
- case CPU_188:
- cpu = cpu_number();
- curspl = m188_curspl[cpu];
- break;
+ case CPU_188:
+ cpu = cpu_number();
+ curspl = m188_curspl[cpu];
+ break;
#endif /* MVME188 */
#if defined(MVME187) || defined(MVME197)
- case CPU_187:
- case CPU_197:
- curspl = *int_mask_level;
- break;
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ break;
#endif /* defined(MVME187) || defined(MVME197) */
- default:
- panic("db_spl: Can't determine cpu type!");
- }
- set_psr(psr);
- return curspl;
+ default:
+ panic("db_spl: Can't determine cpu type!");
+ }
+ set_psr(psr);
+ return curspl;
}
#endif /* DDB */
-unsigned getipl(void)
+unsigned
+getipl(void)
{
- return (spl());
+ return (spl());
}
#if DDB
-unsigned db_getipl(void)
+unsigned
+db_getipl(void)
{
- return (db_spl());
+ return (db_spl());
}
#endif /* DDB */
-unsigned setipl(unsigned level)
+unsigned
+setipl(unsigned level)
{
- unsigned curspl;
- m88k_psr_type psr; /* proccessor status register */
- int cpu = 0;
-
- psr = disable_interrupts_return_psr();
- switch (cputyp) {
+ unsigned curspl;
+ m88k_psr_type psr; /* proccessor status register */
+ int cpu = 0;
+ if (level > 7) {
+ level = 0; /* assume this for the time being */
+ }
+
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
#ifdef MVME188
- case CPU_188:
- cpu = cpu_number();
- curspl = m188_curspl[cpu];
- setlevel(level);
- break;
+ case CPU_188:
+ cpu = cpu_number();
+ curspl = m188_curspl[cpu];
+ setlevel(level);
+ break;
#endif /* MVME188 */
#if defined(MVME187) || defined(MVME197)
- case CPU_187:
- case CPU_197:
- curspl = *int_mask_level;
- *int_mask_level = level;
- break;
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ *int_mask_level = level;
+ break;
#endif /* defined(MVME187) || defined(MVME197) */
- default:
- panic("setipl: Can't determine cpu type!");
- }
-
- flush_pipeline();
-
- /* The flush pipeline is required to make sure the above write gets
- * through the data pipe and to the hardware; otherwise, the next
- * bunch of instructions could execute at the wrong spl protection
- */
- set_psr(psr);
- return curspl;
+ default:
+ panic("setipl: Can't determine cpu type!");
+ }
+
+ flush_pipeline();
+
+ /* The flush pipeline is required to make sure the above write gets
+ * through the data pipe and to the hardware; otherwise, the next
+ * bunch of instructions could execute at the wrong spl protection
+ */
+ set_psr(psr);
+ return curspl;
}
#ifdef DDB
-unsigned db_setipl(unsigned level)
+unsigned
+db_setipl(unsigned level)
{
- unsigned curspl;
- m88k_psr_type psr; /* proccessor status register */
- int cpu = 0;
+ unsigned curspl;
+ m88k_psr_type psr; /* proccessor status register */
+ int cpu = 0;
- psr = disable_interrupts_return_psr();
- switch (cputyp) {
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
#ifdef MVME188
- case CPU_188:
- cpu = cpu_number();
- curspl = m188_curspl[cpu];
- db_setlevel(level);
- break;
+ case CPU_188:
+ cpu = cpu_number();
+ curspl = m188_curspl[cpu];
+ db_setlevel(level);
+ break;
#endif /* MVME188 */
#if defined(MVME187) || defined(MVME197)
- case CPU_187:
- case CPU_197:
- curspl = *int_mask_level;
- *int_mask_level = level;
- break;
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ *int_mask_level = level;
+ break;
#endif /* defined(MVME187) || defined(MVME197) */
- default:
- panic("db_setipl: Can't determine cpu type!");
- }
-
- flush_pipeline();
-
- /* The flush pipeline is required to make sure the above write gets
- * through the data pipe and to the hardware; otherwise, the next
- * bunch of instructions could execute at the wrong spl protection
- */
- set_psr(psr);
- return curspl;
+ default:
+ panic("db_setipl: Can't determine cpu type!");
+ }
+
+ flush_pipeline();
+
+ /* The flush pipeline is required to make sure the above write gets
+ * through the data pipe and to the hardware; otherwise, the next
+ * bunch of instructions could execute at the wrong spl protection
+ */
+ set_psr(psr);
+ return curspl;
}
#endif /* DDB */
#if NCPUS > 1
- #include <sys/simplelock.h>
+#include <sys/simplelock.h>
void
simple_lock_init(lkp)
__volatile struct simplelock *lkp;
{
- lkp->lock_data = 0;
+ lkp->lock_data = 0;
}
-int test_and_set(lock)
+int
+test_and_set(lock)
__volatile int *lock;
{
-/*
- int oldlock = *lock;
- if (*lock == 0) {
- *lock = 1;
- return 0;
- }
-*/
- return *lock;
- *lock = 1;
- return 0;
+#if 0
+ int oldlock = *lock;
+ if (*lock == 0) {
+ *lock = 1;
+ return 0;
+ }
+#else
+ return *lock;
+ *lock = 1;
+ return 0;
+#endif
}
#endif
+
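[Editor's sketch, not part of the patch] Note that test_and_set() above is still a stub: the "return *lock" makes the two statements after it unreachable, so the lock is never actually taken. A real m88k implementation would use the xmem instruction; the sketch below only illustrates the intended semantics in portable C using a GCC/Clang builtin.

#include <stdio.h>

static volatile int lock;

static int
test_and_set(volatile int *lkp)
{
        /* atomically set *lkp to 1 and return its previous value */
        return __sync_lock_test_and_set(lkp, 1);
}

int
main(void)
{
        printf("first  try: %d\n", test_and_set(&lock));        /* 0: got it */
        printf("second try: %d\n", test_and_set(&lock));        /* 1: busy */
        return 0;
}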
diff --git a/sys/arch/mvme88k/mvme88k/m18x_cmmu.c b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c
index b55fe7027f8..5087e44680f 100644
--- a/sys/arch/mvme88k/mvme88k/m18x_cmmu.c
+++ b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: m18x_cmmu.c,v 1.4 2001/01/12 07:29:26 smurph Exp $ */
+/* $OpenBSD: m18x_cmmu.c,v 1.5 2001/02/01 03:38:20 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -73,9 +73,9 @@
#define CMMU_DEBUG 1
#ifdef DEBUG
- #define DB_CMMU 0x4000 /* MMU debug */
-unsigned int debuglevel = 0;
- #define dprintf(_L_,_X_) { if (debuglevel & (_L_)) { unsigned int psr = disable_interrupts_return_psr(); printf("%d: ", cpu_number()); printf _X_; set_psr(psr); } }
+ #define DB_CMMU 0x4000 /* MMU debug */
+unsigned int m18x_debuglevel = 0;
+ #define dprintf(_L_,_X_) { if (m18x_debuglevel & (_L_)) { unsigned int psr = disable_interrupts_return_psr(); printf("%d: ", cpu_number()); printf _X_; set_psr(psr); } }
#else
#define dprintf(_L_,_X_)
#endif
@@ -263,19 +263,19 @@ struct board_config {
{ 1, 4, 8}, /* 4P128 - 4P512 */
{ 1, 2, 8}, /* 2P128 - 2P512 */
{ 1, 1, 8}, /* 1P128 - 1P512 */
- { -1, -1, -1},
- { -1, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1},
{ 1, 2, 4}, /* 2P64 - 2P256 */
{ 1, 1, 4}, /* 1P64 - 1P256 */
- { -1, -1, -1},
- { -1, -1, -1},
- { -1, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1},
{ 1, 1, 2}, /* 1P32 - 1P128 */
- { -1, -1, -1},
- { -1, -1, -1},
- { -1, -1, -1},
- { -1, -1, -1},
- { -1, -1, -1}
+ { 0, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1},
+ { 0, -1, -1}
};
/*
@@ -286,22 +286,22 @@ struct cmmu cmmu[MAX_CMMUS] =
{
/* addr cpu mode access
alive addr mask */
- {(void *)VME_CMMU_I0, -1, INST_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_I0, -1, INST_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_D0, -1, DATA_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_I1, -1, INST_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_D1, -1, DATA_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_I2, -1, INST_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_D2, -1, DATA_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_I3, -1, INST_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0},
+ CMMU_DEAD, 0, 0},
{(void *)VME_CMMU_D3, -1, DATA_CMMU, CMMU_ACS_BOTH,
- CMMU_DEAD, 0, 0}
+ CMMU_DEAD, 0, 0}
};
struct cpu_cmmu {
@@ -341,8 +341,8 @@ m18x_setup_board_config(void)
case CPU_188:
whoami = (volatile unsigned long *)MVME188_WHOAMI;
vme188_config = (*whoami & 0xf0) >> 4;
- dprintf(DB_CMMU,("m18x_setup_board_config: WHOAMI @ 0x%08x holds value 0x%08x\n",
- whoami, *whoami));
+ dprintf(DB_CMMU,("m18x_setup_board_config: WHOAMI @ 0x%08x holds value 0x%08x vme188_config = %d\n",
+ whoami, *whoami, vme188_config));
max_cpus = bd_config[vme188_config].ncpus;
max_cmmus = bd_config[vme188_config].ncmmus;
break;
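[Editor's sketch, not part of the patch] m18x_setup_board_config() above derives the MVME188 configuration index from the high nibble of the WHOAMI register and uses it to look up the CPU/CMMU counts. The model below keeps only the populated table rows from the patch; the first struct field is taken to be a "supported" flag and the register value is a made-up example.

#include <stdio.h>

struct board_config {
        int supported, ncpus, ncmmus;
};

static const struct board_config bd_config[16] = {
        [0]  = { 1, 4, 8 },     /* 4P128 - 4P512 */
        [1]  = { 1, 2, 8 },     /* 2P128 - 2P512 */
        [2]  = { 1, 1, 8 },     /* 1P128 - 1P512 */
        [5]  = { 1, 2, 4 },     /* 2P64  - 2P256 */
        [6]  = { 1, 1, 4 },     /* 1P64  - 1P256 */
        [10] = { 1, 1, 2 },     /* 1P32  - 1P128 */
};

int
main(void)
{
        unsigned long whoami = 0x60;            /* example register value */
        int cfg = (whoami & 0xf0) >> 4;         /* -> 6: 1P64 - 1P256 */

        printf("config %d: %d cpu(s), %d cmmu(s)\n",
            cfg, bd_config[cfg].ncpus, bd_config[cfg].ncmmus);
        return 0;
}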
@@ -791,7 +791,7 @@ m18x_cmmu_init(void)
cpu_cmmu[cpu].pair[INST_CMMU] = cpu_cmmu[cpu].pair[DATA_CMMU] = 0;
}
- for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++)
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++){
if (m18x_cmmu_alive(cmmu_num)) {
id.cpupid = cmmu[cmmu_num].cmmu_regs->idr;
@@ -864,10 +864,10 @@ m18x_cmmu_init(void)
cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_SUPER_ALL;
cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_USER_ALL;
}
-
- /*
- * Enable snooping...
- */
+ }
+ /*
+ * Enable snooping...
+ */
for (cpu = 0; cpu < max_cpus; cpu++) {
if (!cpu_sets[cpu])
continue;
@@ -1867,10 +1867,8 @@ union batcu {
((LINE) == 1 ? (UNION).field.vv1 : \
((LINE) == 0 ? (UNION).field.vv0 : ~0))))
-
#undef VEQR_ADDR
#define VEQR_ADDR 0
-
/*
* Show (for debugging) how the given CMMU translates the given ADDRESS.
* If cmmu == -1, the data cmmu for the current cpu is used.
@@ -1942,11 +1940,12 @@ m18x_cmmu_show_translation(
#endif /* 0 */
{
if (cmmu_num == -1) {
- if (cpu_cmmu[0].pair[DATA_CMMU] == 0) {
+ int cpu = cpu_number();
+ if (cpu_cmmu[cpu].pair[DATA_CMMU] == 0) {
db_printf("ack! can't figure my own data cmmu number.\n");
return;
}
- cmmu_num = cpu_cmmu[0].pair[DATA_CMMU] - cmmu;
+ cmmu_num = cpu_cmmu[cpu].pair[DATA_CMMU] - cmmu;
if (verbose_flag)
db_printf("The data cmmu for cpu#%d is cmmu#%d.\n",
0, cmmu_num);
diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c
index fac6a04c108..900b52e8701 100644
--- a/sys/arch/mvme88k/mvme88k/machdep.c
+++ b/sys/arch/mvme88k/mvme88k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.24 2001/01/14 20:25:25 smurph Exp $ */
+/* $OpenBSD: machdep.c,v 1.25 2001/02/01 03:38:21 smurph Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -123,7 +123,7 @@ void m88100_Xfp_precise(void);
void m88110_Xfp_precise(void);
void setupiackvectors(void);
-unsigned char *ivec[] = {
+volatile unsigned char *ivec[] = {
(unsigned char *)0xFFFE0003, /* not used, no such thing as int 0 */
(unsigned char *)0xFFFE0007,
(unsigned char *)0xFFFE000B,
@@ -132,6 +132,7 @@ unsigned char *ivec[] = {
(unsigned char *)0xFFFE0017,
(unsigned char *)0xFFFE001B,
(unsigned char *)0xFFFE001F,
+ (unsigned char *)0x00000000,
};
#ifdef MVME188
@@ -277,7 +278,10 @@ int bootcngetc __P((dev_t));
extern void nullcnpollc __P((dev_t, int));
#define bootcnpollc nullcnpollc
static struct consdev bootcons = {
- NULL, NULL, bootcngetc, bootcnputc,
+ (void (*))NULL,
+ (void (*))NULL,
+ bootcngetc,
+ (void (*))bootcnputc,
bootcnpollc, makedev(14,0), 1};
void cmmu_init(void);
/*
@@ -296,7 +300,7 @@ consinit()
cn_tab = NULL;
cninit();
-#if defined (DDB)
+#if defined(DDB)
kdb_init();
if (boothowto & RB_KDB)
Debugger();
@@ -317,7 +321,6 @@ size_memory(void)
#define PATTERN 0x5a5a5a5a
#define STRIDE (4*1024) /* 4k at a time */
#define Roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
-#if 1
/*
* count it up.
*/
@@ -348,9 +351,9 @@ size_memory(void)
break;
*look = save;
}
-#else
- look = (unsigned int *)0x03FFF000; /* temp hack to fake 32Meg on MVME188 */
-#endif
+ if ((look > (unsigned int *)0x01FFF000) && (cputyp == CPU_188)) {
+ look = (unsigned int *)0x01FFF000; /* temp hack to fake 32Meg on MVME188 */
+ }
physmem = btoc(trunc_page((unsigned)look)); /* in pages */
return (trunc_page((unsigned)look));
}
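[Editor's sketch, not part of the patch] size_memory() above probes physical memory in STRIDE-sized steps: save a word, write PATTERN, read it back, restore, and stop at the first location that does not hold the pattern. The userland model below runs the same loop over a heap buffer so it is safe to execute (and therefore never hits the failure case).

#include <stdio.h>
#include <stdlib.h>

#define PATTERN 0x5a5a5a5a
#define STRIDE  (4 * 1024)

static size_t
size_memory_model(unsigned char *base, size_t limit)
{
        unsigned char *p;

        for (p = base; p + sizeof(unsigned) <= base + limit; p += STRIDE) {
                unsigned *look = (unsigned *)p;
                unsigned save = *look;

                *look = PATTERN;
                if (*look != PATTERN) {         /* nothing responds here */
                        *look = save;
                        break;
                }
                *look = save;
        }
        return (size_t)(p - base);
}

int
main(void)
{
        size_t limit = 64 * 1024;
        unsigned char *mem = malloc(limit);

        if (mem == NULL)
                return 1;
        printf("probed %zu bytes\n", size_memory_model(mem, limit));
        free(mem);
        return 0;
}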
@@ -397,15 +400,6 @@ identifycpu()
printf("\nModel: %s\n", cpu_model);
}
-/* The following two functions assume UPAGES == 4 */
-#if UPAGES != 4
- #error "UPAGES changed?"
-#endif
-
-#if USPACE != (UPAGES * NBPG)
- #error "USPACE changed?"
-#endif
-
/*
* Setup u area ptes for u area double mapping.
*/
@@ -528,7 +522,29 @@ cpu_startup()
printf("uarea_pages %x: UADDR not free\n", uarea_pages);
panic("bad UADDR");
}
+
+ /*
+ * Grab the BUGROM space that we hardwired in pmap_bootstrap
+ */
+ bugromva = BUGROM_START;
+#if defined(UVM)
+ uvm_map(kernel_map, (vaddr_t *)&bugromva, BUGROM_SIZE,
+ NULL, UVM_UNKNOWN_OFFSET,UVM_MAPFLAG(UVM_PROT_NONE,
+ UVM_PROT_NONE,
+ UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0));
+#else
+ vm_map_find(kernel_map, vm_object_allocate(BUGROM_SIZE), 0,
+ (vm_offset_t *)&bugromva, BUGROM_SIZE, TRUE);
+#endif
+ if (bugromva != BUGROM_START) {
+ printf("bugromva %x: BUGROM not free\n", bugromva);
+ panic("bad bugromva");
+ }
+ /*
+ * Grab machine dependant memory spaces
+ */
switch (cputyp) {
#ifdef MVME187
case CPU_187:
@@ -536,27 +552,8 @@ cpu_startup()
#ifdef MVME197
case CPU_197:
#endif
-#if defined(MVME187) || defined(MVME197)
- /*
- * Grab the BUGROM space that we hardwired in pmap_bootstrap
- */
- bugromva = BUGROM_START;
-
-#if defined(UVM)
- uvm_map(kernel_map, (vaddr_t *)&bugromva, BUGROM_SIZE,
- NULL, UVM_UNKNOWN_OFFSET,UVM_MAPFLAG(UVM_PROT_NONE,
- UVM_PROT_NONE,
- UVM_INH_NONE,
- UVM_ADV_NORMAL, 0));
-#else
- vm_map_find(kernel_map, vm_object_allocate(BUGROM_SIZE), 0,
- (vm_offset_t *)&bugromva, BUGROM_SIZE, TRUE);
-#endif
- if (bugromva != BUGROM_START) {
- printf("bugromva %x: BUGROM not free\n", bugromva);
- panic("bad bugromva");
- }
+#if defined(MVME187) || defined(MVME197)
/*
* Grab the SRAM space that we hardwired in pmap_bootstrap
*/
@@ -1361,9 +1358,8 @@ setupiackvectors()
#ifdef MAP_VEC
extern vm_offset_t iomap_mapin(vm_offset_t, vm_size_t, boolean_t);
#endif
-
/*
- * map a page in for phys address 0xfffe0000 and set the
+ * map a page in for phys address 0xfffe0000 (M187) and set the
* addresses for various levels.
*/
switch (cputyp) {
@@ -1383,6 +1379,16 @@ setupiackvectors()
#else
vaddr = (u_char *)M188_IACK;
#endif
+ ivec[0] = vaddr; /* We dont use level 0 */
+ ivec[1] = vaddr + 0x04;
+ ivec[2] = vaddr + 0x08;
+ ivec[3] = vaddr + 0x0c;
+ ivec[4] = vaddr + 0x10;
+ ivec[5] = vaddr + 0x14;
+ ivec[6] = vaddr + 0x18;
+ ivec[7] = vaddr + 0x1c;
+ ivec[8] = vaddr + 0x20; /* for self inflicted interrupts */
+ *ivec[8] = M188_IVEC; /* supply a vector for m188ih */
break;
#endif /* MVME188 */
#ifdef MVME197
@@ -1400,14 +1406,19 @@ setupiackvectors()
#ifdef DEBUG
printf("interrupt ACK address mapped at 0x%x\n", vaddr);
#endif
- ivec[0] = vaddr + 0x03;
- ivec[1] = vaddr + 0x07;
- ivec[2] = vaddr + 0x0b;
- ivec[3] = vaddr + 0x0f;
- ivec[4] = vaddr + 0x13;
- ivec[5] = vaddr + 0x17;
- ivec[6] = vaddr + 0x1b;
- ivec[7] = vaddr + 0x1f;
+
+#if defined(MVME187) || defined(MVME197)
+ if (cputyp != CPU_188) {
+ ivec[0] = vaddr + 0x03; /* We dont use level 0 */
+ ivec[1] = vaddr + 0x07;
+ ivec[2] = vaddr + 0x0b;
+ ivec[3] = vaddr + 0x0f;
+ ivec[4] = vaddr + 0x13;
+ ivec[5] = vaddr + 0x17;
+ ivec[6] = vaddr + 0x1b;
+ ivec[7] = vaddr + 0x1f;
+ }
+#endif
}
/* gets an interrupt stack for slave processors */
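[Editor's sketch, not part of the patch] setupiackvectors() above now fills two different interrupt-acknowledge layouts: on the MVME188 each level gets a longword IACK location (base + level*4, plus a ninth slot for self-inflicted interrupts), while the MVME187/197 path keeps the byte-wide reads at base + 0x03, 0x07, ... 0x1f. The base address below is a placeholder for M188_IACK or the mapped 0xfffe0000 page.

#include <stdio.h>

int
main(void)
{
        unsigned long vaddr = 0xfff85000UL;     /* stand-in for M188_IACK */
        int lvl;

        /* MVME188: one longword per level, ninth slot for soft interrupts */
        for (lvl = 0; lvl <= 8; lvl++)
                printf("m188 ivec[%d] = 0x%lx\n", lvl, vaddr + lvl * 0x04);

        /* MVME187/197: byte reads at +0x03, +0x07, ... +0x1f */
        for (lvl = 0; lvl <= 7; lvl++)
                printf("m1x7 ivec[%d] = 0x%lx\n", lvl, vaddr + lvl * 0x04 + 0x03);
        return 0;
}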
@@ -1467,10 +1478,12 @@ int start, end;
/* Sanity check! */
if (start < 0 || end > 255 || start > end)
- return (-1);
- for (vec = start; vec < end; --vec)
+ panic("intr_findvec(): bad parameters");
+ for (vec = start; vec < end; --vec){
if (intr_handlers[vec] == (struct intrhand *)0)
return (vec);
+ }
+ printf("intr_findvec(): uh oh....\n", vec);
return (-1);
}
@@ -1486,7 +1499,7 @@ intr_establish(int vec, struct intrhand *ihand)
if (vec < 0 || vec > 255) {
#if DIAGNOSTIC
- panic("intr_establish: vec (%x) not between 0 and 0xff",
+ panic("intr_establish: vec (0x%x) not between 0x00 and 0xff",
vec);
#endif /* DIAGNOSTIC */
return (INTR_EST_BADVEC);
@@ -1495,7 +1508,7 @@ intr_establish(int vec, struct intrhand *ihand)
if (intr = intr_handlers[vec]) {
if (intr->ih_ipl != ihand->ih_ipl) {
#if DIAGNOSTIC
- panic("intr_establish: there are other handlers with vec (%x) at ipl %x, but you want it at %x",
+ panic("intr_establish: there are other handlers with vec (0x%x) at ipl %x, but you want it at %x",
intr->ih_ipl, vec, ihand->ih_ipl);
#endif /* DIAGNOSTIC */
return (INTR_EST_BADIPL);
@@ -1530,21 +1543,26 @@ intr_establish(int vec, struct intrhand *ihand)
*/
/* Hard coded vector table for onboard devices. */
-unsigned obio_vec[32] = {SYSCV_ABRT,SYSCV_ACF,0,SYSCV_TIMER1,0,0,0,0,
- 0,0,SYSCV_TIMER2,SYSCV_SYSF,0,0,SYSCV_SCC,0,
+
+unsigned obio_vec[32] = {
0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,SYSCV_SCC,0,0,SYSCV_SYSF,SYSCV_TIMER2,0,0,
+ 0,0,0,0,SYSCV_TIMER1,0,SYSCV_ACF,SYSCV_ABRT,
};
+
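[Editor's sketch, not part of the patch] The rewritten obio_vec[] above is indexed directly by the interrupt-status bit number that ff1() returns, which is what the new m188_ext_int() below relies on: take the highest pending bit, map on-board sources through obio_vec[], and fetch VMEbus vectors with an IACK read, masking off the bus-error bit. In the sketch below the source masks, the ff1() model and the fake IACK result are assumptions, and the SYSCV_* values are placeholders.

#include <stdio.h>

#define OBIO_MASK       0xf0000000u     /* assumed on-board source bits */
#define VME_MASK        0x000000feu     /* assumed VMEbus level bits */
#define VME_VECTOR_MASK 0x1ffu          /* as in the #defines above */
#define VME_BERR_MASK   0x100u

static int
ff1(unsigned x)
{
        int n;

        for (n = 31; n >= 0; n--)       /* modeled on the m88k ff1 insn */
                if (x & (1u << n))
                        return n;
        return 32;
}

static const unsigned obio_vec_model[32] = {
        [28] = 0x41 /* SYSCV_TIMER1 */, [31] = 0x42 /* SYSCV_ABRT */,
};

int
main(void)
{
        unsigned cur_mask = 0x10000002; /* timer + one VME level pending */
        int intbit = ff1(cur_mask);
        unsigned vec;

        if (OBIO_MASK & (1u << intbit)) {
                vec = obio_vec_model[intbit];
                printf("on-board source, bit %d -> vec 0x%x\n", intbit, vec);
        } else if (VME_MASK & (1u << intbit)) {
                unsigned iack = 0x05a;  /* pretend IACK read result */

                vec = iack & VME_VECTOR_MASK;
                if (vec & VME_BERR_MASK)
                        printf("VME IACK timed out\n");
                else
                        printf("VME source, bit %d -> vec 0x%x\n", intbit, vec);
        }
        return 0;
}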
#define GET_MASK(cpu, val) *int_mask_reg[cpu] & (val)
+#define VME_VECTOR_MASK 0x1ff /* mask into VIACK register */
+#define VME_BERR_MASK 0x100 /* timeout during VME IACK cycle */
void
m188_ext_int(u_int v, struct m88100_saved_state *eframe)
{
- register int cpu = 0; /*cpu_number();*/
+ register int cpu = cpu_number();
register unsigned int cur_mask;
register unsigned int level, old_spl;
register struct intrhand *intr;
- int ret, intnum;
+ int ret, intbit;
unsigned vec;
cur_mask = ISR_GET_CURRENT_MASK(cpu);
@@ -1589,12 +1607,14 @@ m188_ext_int(u_int v, struct m88100_saved_state *eframe)
;
}
- setipl((u_char)level);
-
if (level > 7 || (char)level < 0) {
panic("int level (%x) is not between 0 and 7", level);
}
+ setipl(level);
+
+ enable_interrupt();
+
/* generate IACK and get the vector */
/*
@@ -1603,59 +1623,51 @@ m188_ext_int(u_int v, struct m88100_saved_state *eframe)
* XXX smurph
*/
- intnum = ff1(cur_mask);
- if (intnum & OBIO_INTERRUPT_MASK) {
- vec = obio_vec[intnum];
- if (vec = 0) {
- printf("unknown onboard interrupt: mask = 0x%b\n", 1 << intnum, IST_STRING);
+ /* find the first bit set in the current mask */
+ intbit = ff1(cur_mask);
+ if (OBIO_INTERRUPT_MASK & (1 << intbit)) {
+ if (guarded_access(ivec[level], 4, &vec) == EFAULT) {
+ printf("Unable to get vector for this vmebus interrupt (level %x)\n", level);
+ goto out_m188;
+ }
+ vec = obio_vec[intbit];
+ if (vec == 0) {
+ printf("unknown onboard interrupt: mask = 0x%b\n", 1 << intbit, IST_STRING);
panic("m188_ext_int");
}
- } else if (intnum & HW_FAILURE_MASK) {
- vec = obio_vec[intnum];
- if (vec = 0) {
- printf("unknown hadware failure: mask = 0x%b\n", 1 << intnum, IST_STRING);
+ } else if (HW_FAILURE_MASK & (1 << intbit)) {
+ vec = obio_vec[intbit];
+ if (vec == 0) {
+ printf("unknown hadware failure: mask = 0x%b\n", 1 << intbit, IST_STRING);
panic("m188_ext_int");
}
- } else if (intnum & VME_INTERRUPT_MASK) {
- asm volatile("tb1 0, r0, 0");
- if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
+ } else if (VME_INTERRUPT_MASK & (1 << intbit)) {
+ if (guarded_access(ivec[level], 4, &vec) == EFAULT) {
printf("Unable to get vector for this vmebus interrupt (level %x)\n", level);
goto out_m188;
}
- } else {
- printf("unknown interrupt: mask = 0x%b\n", 1 << intnum, IST_STRING);
- panic("m188_ext_int");
- }
-#if 0
- if (cur_mask & ABRT_BIT) { /* abort button interrupt */
- vec = 110;
- } else if (cur_mask & DTI_BIT) { /* interval timer interrupt */
- vec = SYSCV_TIMER1;
- } else if (cur_mask & CIOI_BIT) { /* statistics timer interrupt */
- vec = SYSCV_TIMER2;
- } else if (cur_mask & DI_BIT) { /* duart interrupt */
- vec = SYSCV_SCC;
- } else { /* vmebus interrupt */
- asm volatile("tb1 0, r0, 0");
- if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
- printf("Unable to get vector for this vmebus interrupt (level %x)\n", level);
+ vec &= VME_VECTOR_MASK;
+ if (vec & VME_BERR_MASK) {
+ printf("m188_ext_int: vme vec timeout\n");
goto out_m188;
}
+ if (vec == 0) {
+ printf("unknown vme interrupt: mask = 0x%b\n", 1 << intbit, IST_STRING);
+ panic("m188_ext_int");
+ }
+ } else {
+ printf("unknown interrupt: level = %d intbit = 0x%x mask = 0x%b\n",
+ level, intbit, 1 << intbit, IST_STRING);
+ panic("m188_ext_int");
}
-#endif
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
if (vec > 0xFF) {
- panic("interrupt vector %x greater than 255", vec);
+ panic("m188_ext_int: interrupt vector 0x%x greater than 255!\nlevel = %d iack = 0x%x\n",
+ vec, level, ivec[level]);
}
-#if 0
- enable_interrupt(); /* should we ?? */
-#endif
if ((intr = intr_handlers[vec]) == 0)
- printf("Spurious interrupt (level %x and vec %x)\n", level, vec);
-
+ printf("Spurious interrupt: level = %d vec = 0x%x, intbit = %d mask = 0x%b\n",
+ level, vec, intbit, 1 << intbit, IST_STRING);
/*
* Walk through all interrupt handlers in the chain for the
* given vector, calling each handler in turn, till some handler
@@ -1669,17 +1681,22 @@ m188_ext_int(u_int v, struct m88100_saved_state *eframe)
if (ret)
break;
}
- if (ret == 0)
- printf("Unclaimed interrupt (level %x and vec %x)\n", level, vec);
+ if (ret == 0) {
+ printf("Unclaimed interrupt: level = %d vec = 0x%x, intbit = %d mask = 0x%b\n",
+ level, vec, intbit, 1 << intbit, IST_STRING);
+ break;
+ }
+#if 0
+ disable_interrupt();
+#endif
} while (cur_mask = ISR_GET_CURRENT_MASK(cpu));
-
/*
* process any remaining data access exceptions before
* returning to assembler
*/
+out_m188:
disable_interrupt();
- out_m188:
if (eframe->dmt0 & DMT_VALID) {
trap(T_DATAFLT, eframe);
data_access_emulation(eframe);
@@ -1802,7 +1819,7 @@ sbc_ext_int(u_int v, struct m88100_saved_state *eframe)
*/
disable_interrupt();
- out:
+out:
if (cputyp != CPU_197) {
if (eframe->dmt0 & DMT_VALID) {
trap(T_DATAFLT, eframe);
@@ -1818,7 +1835,7 @@ sbc_ext_int(u_int v, struct m88100_saved_state *eframe)
*/
setipl((u_char)mask);
- beatit:
+beatit:
return;
}
#endif /* defined(MVME187) || defined(MVME197) */
@@ -2165,11 +2182,11 @@ regdump(struct trapframe *f)
printf("fprh %x ", f->fprh);
printf("fprl %x ", f->fprl);
printf("fpit %x\n", f->fpit);
- printf("vector %x ", f->vector);
+ printf("vector %d ", f->vector);
printf("mask %x ", f->mask);
printf("mode %x ", f->mode);
printf("scratch1 %x ", f->scratch1);
- printf("pad %x\n", f->pad);
+ printf("cpu %x\n", f->cpu);
}
#endif
#ifdef MVME197
@@ -2186,11 +2203,11 @@ regdump(struct trapframe *f)
printf("isr %x ", f->isr);
printf("ilar %x ", f->ilar);
printf("ipar %x\n", f->ipar);
- printf("vector %x ", f->vector);
+ printf("vector %d ", f->vector);
printf("mask %x ", f->mask);
printf("mode %x ", f->mode);
printf("scratch1 %x ", f->scratch1);
- printf("pad %x\n", f->pad);
+ printf("cpu %x\n", f->cpu);
}
#endif
#ifdef MVME188
@@ -2322,13 +2339,13 @@ mvme_bootstrap(void)
#endif
avail_start = first_addr;
avail_end = last_addr;
-#ifdef DEBUG
- printf("MVME%x boot: memory from 0x%x to 0x%x\n", cputyp, avail_start, avail_end);
-#endif
/*
* Steal MSGBUFSIZE at the top of physical memory for msgbuf
*/
avail_end -= m88k_round_page(MSGBUFSIZE);
+#ifdef DEBUG
+ printf("MVME%x boot: memory from 0x%x to 0x%x\n", cputyp, avail_start, avail_end);
+#endif
pmap_bootstrap((vm_offset_t)M88K_TRUNC_PAGE((unsigned)&kernelstart) /* = loadpt */,
&avail_start, &avail_end, &virtual_avail,
&virtual_end);
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index 0b3ca8e17d5..62a8d52c07e 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.18 2001/01/14 20:25:25 smurph Exp $ */
+/* $OpenBSD: pmap.c,v 1.19 2001/02/01 03:38:21 smurph Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
@@ -101,19 +101,20 @@ extern int max_cpus;
* conditional debugging
*/
+ #define CD_NONE 0x00
#define CD_NORM 0x01
- #define CD_FULL 0x02
+ #define CD_FULL 0x02
- #define CD_ACTIVATE 0x0000004 /* _pmap_activate */
+ #define CD_ACTIVATE 0x0000004 /* pmap_activate */
#define CD_KMAP 0x0000008 /* pmap_expand_kmap */
#define CD_MAP 0x0000010 /* pmap_map */
#define CD_MAPB 0x0000020 /* pmap_map_batc */
- #define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
+ #define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
#define CD_BOOT 0x0000080 /* pmap_bootstrap */
#define CD_INIT 0x0000100 /* pmap_init */
- #define CD_CREAT 0x0000200 /* pmap_create */
+ #define CD_CREAT 0x0000200 /* pmap_create */
#define CD_FREE 0x0000400 /* pmap_free_tables */
- #define CD_DESTR 0x0000800 /* pmap_destroy */
+ #define CD_DESTR 0x0000800 /* pmap_destroy */
#define CD_RM 0x0001000 /* pmap_remove */
#define CD_RMAL 0x0002000 /* pmap_remove_all */
#define CD_COW 0x0004000 /* pmap_copy_on_write */
@@ -126,14 +127,11 @@ extern int max_cpus;
#define CD_IMOD 0x0200000 /* pmap_is_modified */
#define CD_CREF 0x0400000 /* pmap_clear_reference */
#define CD_PGMV 0x0800000 /* pagemove */
- #define CD_CHKPV 0x1000000 /* check_pv_list */
- #define CD_CHKPM 0x2000000 /* check_pmap_consistency */
+ #define CD_CHKPV 0x1000000 /* check_pv_list */
+ #define CD_CHKPM 0x2000000 /* check_pmap_consistency */
#define CD_CHKM 0x4000000 /* check_map */
#define CD_ALL 0x0FFFFFC
-int pmap_con_dbg = CD_NORM;
-/*
-int pmap_con_dbg = CD_FULL| CD_NORM | CD_PROT | CD_BOOT | CD_CHKPV | CD_CHKPM | CD_CHKM;
-int pmap_con_dbg = CD_NORM;*/
+int pmap_con_dbg = CD_NONE;
#else
#define STATIC static
@@ -170,11 +168,19 @@ STATIC kpdt_entry_t kpdt_free;
/*
* Size of kernel page tables for mapping onboard IO space.
*/
-#if defined(MVME188) && !(defined(MVME187) || defined(MVME197))
-#define OBIO_PDT_SIZE 0
+#if defined(MVME188)
+#define M188_PDT_SIZE (M88K_BTOP(UTIL_SIZE) * sizeof(pt_entry_t))
#else
-#define OBIO_PDT_SIZE ((cputyp == CPU_188) ? 0 : (M88K_BTOP(OBIO_SIZE) * sizeof(pt_entry_t)))
-#endif
+#define M188_PDT_SIZE 0
+#endif
+
+#if (defined(MVME187) || defined(MVME197))
+#define M1x7_PDT_SIZE (M88K_BTOP(OBIO_SIZE) * sizeof(pt_entry_t))
+#else
+#define M1x7_PDT_SIZE 0
+#endif
+
+#define OBIO_PDT_SIZE ((cputyp == CPU_188) ? M188_PDT_SIZE : M1x7_PDT_SIZE)
#define MAX_KERNEL_PDT_SIZE (KERNEL_PDT_SIZE + OBIO_PDT_SIZE)
/*
@@ -396,154 +402,24 @@ extern vm_offset_t obiova;
void
flush_atc_entry(long users, vm_offset_t va, int kernel)
{
- register int cpu;
- long tusers = users;
+ register int cpu;
+ long tusers = users;
#if 0
- if (ff1(tusers) > 4) { /* can't be more than 4 */
- printf("ff1 users = %d!\n", ff1(tusers));
- panic("bogus amount of users!!!");
- }
+ if (ff1(tusers) > 4) { /* can't be more than 4 */
+ printf("ff1 users = %d!\n", ff1(tusers));
+ panic("bogus amount of users!!!");
+ }
#endif
- while ((cpu = ff1(tusers)) != 32) {
- if (cpu_sets[cpu]) { /* just checking to make sure */
- cmmu_flush_remote_tlb(cpu, kernel, va, M88K_PGBYTES);
- }
- tusers &= ~(1 << cpu);
- }
+ while ((cpu = ff1(tusers)) != 32) {
+ if (cpu_sets[cpu]) { /* just checking to make sure */
+ cmmu_flush_remote_tlb(cpu, kernel, va, M88K_PGBYTES);
+ }
+ tusers &= ~(1 << cpu);
+ }
}
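[Editor's sketch, not part of the patch] flush_atc_entry() above peels one CPU bit at a time off the pmap's user set with ff1() until the set is empty, flushing the remote TLB entry for each. The model below mirrors that loop; ff1() is again modeled on the m88k instruction (first set bit, 32 when the operand is zero).

#include <stdio.h>

static int
ff1(unsigned x)
{
        int n;

        for (n = 31; n >= 0; n--)
                if (x & (1u << n))
                        return n;
        return 32;                      /* no bit set */
}

int
main(void)
{
        unsigned users = 0x0000000b;    /* cpus 0, 1 and 3 use this pmap */
        int cpu;

        while ((cpu = ff1(users)) != 32) {
                printf("flush TLB entry on cpu %d\n", cpu);
                users &= ~(1u << cpu);
        }
        return 0;
}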
/*
- * Routine: _PMAP_ACTIVATE
- *
- * Author: N. Sugai
- *
- * Function:
- * Binds the given physical map to the given processor.
- *
- * Parameters:
- * pmap pointer to pmap structure
- * p pointer to proc structure
- * cpu CPU number
- *
- * If the specified pmap is not kernel_pmap, this routine makes arp
- * template and stores it into UAPR (user area pointer register) in the
- * CMMUs connected to the specified CPU.
- *
- * If kernel_pmap is specified, only flushes the TLBs mapping kernel
- * virtual space, in the CMMUs connected to the specified CPU.
- *
- * NOTE:
- * All of the code of this function extracted from macro PMAP_ACTIVATE
- * to make debugging easy. Accordingly, PMAP_ACTIVATE simlpy call
- * _pmap_activate.
- *
- */
-#if 0
-void
-_pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu)
-{
- apr_template_t apr_data;
- int n;
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) pmap 0x%x\n", curproc, (unsigned)pmap);
-#endif
-
- if (pmap != kernel_pmap) {
- /*
- * Lock the pmap to put this cpu in its active set.
- */
- simple_lock(&pmap->lock);
-
- apr_data.bits = 0;
- apr_data.field.st_base = M88K_BTOP(pmap->sdt_paddr);
- apr_data.field.wt = 0;
- apr_data.field.g = 1;
- apr_data.field.ci = 0;
- apr_data.field.te = 1;
-#ifdef notyet
- #ifdef OMRON_PMAP
- /*
- * cmmu_pmap_activate will set the uapr and the batc entries, then
- * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE
- * BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE FLUSHED AS WELL.
- */
- cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc);
- for (n = 0; n < BATC_MAX; n++)
- *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
- #else
- cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
- #endif
-#endif /* notyet */
- /*
- * I am forcing it to not program the BATC at all. pmap.c module
- * needs major, major cleanup. XXX nivas
- */
- cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
-
- /*
- * Mark that this cpu is using the pmap.
- */
- SETBIT_CPUSET(my_cpu, &(pmap->cpus_using));
-
- simple_unlock(&pmap->lock);
-
- } else {
-
- /*
- * kernel_pmap must be always active.
- */
-
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc);
-#endif
-
- }
-} /* _pmap_activate */
-#endif
-
-/*
- * Routine: _PMAP_DEACTIVATE
- *
- * Author: N. Sugai
- *
- * Function:
- * Unbinds the given physical map to the given processor.
- *
- * Parameters:
- * pmap pointer to pmap structure
- * th pointer to thread structure
- * cpu CPU number
- *
- * _pmap_deactive simply clears the cpus_using field in given pmap structure.
- *
- * NOTE:
- * All of the code of this function extracted from macro PMAP_DEACTIVATE
- * to make debugging easy. Accordingly, PMAP_DEACTIVATE simlpy call
- * _pmap_deactivate.
- *
- */
-#if 0
-void
-_pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu)
-{
- if (pmap != kernel_pmap) {
-
- /*
- * we expect the spl is already raised to sched level.
- */
- simple_lock(&pmap->lock);
- CLRBIT_CPUSET(my_cpu, &(pmap->cpus_using));
- simple_unlock(&pmap->lock);
- }
-}
-#endif
-/*
* Author: Joe Uemura
* Convert machine-independent protection code to M88K protection bits.
*
@@ -558,12 +434,12 @@ _pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu)
STATIC unsigned int
m88k_protection(pmap_t map, vm_prot_t prot)
{
- pte_template_t p;
+ pte_template_t p;
- p.bits = 0;
- p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
+ p.bits = 0;
+ p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
- return (p.bits);
+ return (p.bits);
} /* m88k_protection */
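[Editor's sketch, not part of the patch] m88k_protection() above reduces the VM protection code to the single write-protect bit of the 88200 PTE: anything without VM_PROT_WRITE becomes read-only. The bit position used in this model is illustrative, not the real pte layout.

#include <stdio.h>

#define VM_PROT_READ    0x1
#define VM_PROT_WRITE   0x2

#define PTE_WP          0x4             /* assumed write-protect bit */

static unsigned
m88k_protection_model(unsigned prot)
{
        return (prot & VM_PROT_WRITE) ? 0 : PTE_WP;
}

int
main(void)
{
        printf("r--: %#x\n", m88k_protection_model(VM_PROT_READ));
        printf("rw-: %#x\n", m88k_protection_model(VM_PROT_READ | VM_PROT_WRITE));
        return 0;
}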
@@ -589,31 +465,33 @@ m88k_protection(pmap_t map, vm_prot_t prot)
* SDT_VALID
* PDT_IDX
*/
+vm_offset_t va_tmp = (vm_offset_t)0xDEADBEEF;
pt_entry_t *
pmap_pte(pmap_t map, vm_offset_t virt)
{
- sdt_entry_t *sdt;
-
- /*XXX will this change if physical memory is not contiguous? */
- /* take a look at PDTIDX XXXnivas */
- if (map == PMAP_NULL)
- panic("pmap_pte: pmap is NULL");
+ sdt_entry_t *sdt;
- sdt = SDTENT(map,virt);
+ /*XXX will this change if physical memory is not contiguous? */
+ /* take a look at PDTIDX XXXnivas */
+ if (map == PMAP_NULL)
+ panic("pmap_pte: pmap is NULL");
- /*
- * Check whether page table is exist or not.
- */
- if (!SDT_VALID(sdt))
- return (PT_ENTRY_NULL);
- else
- return ((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) +
- PDTIDX(virt));
+ sdt = SDTENT(map,virt);
+ if (virt == va_tmp) {
+ printf("check sdt @ 0x%x\n", sdt);
+ }
+ /*
+ * Check whether page table is exist or not.
+ */
+ if (!SDT_VALID(sdt))
+ return (PT_ENTRY_NULL);
+ else
+ return ((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) +
+ PDTIDX(virt));
} /* pmap_pte */
-
/*
* Routine: PMAP_EXPAND_KMAP (internal)
*
@@ -653,36 +531,36 @@ pmap_pte(pmap_t map, vm_offset_t virt)
STATIC pt_entry_t *
pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
{
- int aprot;
- sdt_entry_t *sdt;
- kpdt_entry_t kpdt_ent;
- pmap_t map = kernel_pmap;
+ int aprot;
+ sdt_entry_t *sdt;
+ kpdt_entry_t kpdt_ent;
+ pmap_t map = kernel_pmap;
#if DEBUG
- if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
- printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
+ if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
+ printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
#endif
- aprot = m88k_protection (map, prot);
+ aprot = m88k_protection (map, prot);
- /* segment table entry derivate from map and virt. */
- sdt = SDTENT(map, virt);
- if (SDT_VALID(sdt))
- panic("pmap_expand_kmap: segment table entry VALID");
+ /* segment table entry derivate from map and virt. */
+ sdt = SDTENT(map, virt);
+ if (SDT_VALID(sdt))
+ panic("pmap_expand_kmap: segment table entry VALID");
- kpdt_ent = kpdt_free;
- if (kpdt_ent == KPDT_ENTRY_NULL) {
- printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
- return (PT_ENTRY_NULL);
- }
- kpdt_free = kpdt_free->next;
+ kpdt_ent = kpdt_free;
+ if (kpdt_ent == KPDT_ENTRY_NULL) {
+ printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
+ return (PT_ENTRY_NULL);
+ }
+ kpdt_free = kpdt_free->next;
- ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
- (unsigned)(kpdt_ent->phys) = 0;
- (unsigned)(kpdt_ent->next) = 0;
+ ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
+ (unsigned)(kpdt_ent->phys) = 0;
+ (unsigned)(kpdt_ent->next) = 0;
- return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
+ return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
}/* pmap_expand_kmap() */
/*
@@ -728,63 +606,63 @@ void m197_load_patc(int, vm_offset_t, vm_offset_t, int);
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
- int aprot;
- unsigned npages;
- unsigned num_phys_pages;
- unsigned cmode;
- pt_entry_t *pte;
- pte_template_t template;
- static unsigned i = 0;
- /*
- * cache mode is passed in the top 16 bits.
- * extract it from there. And clear the top
- * 16 bits from prot.
- */
- cmode = (prot & 0xffff0000) >> 16;
- prot &= 0x0000ffff;
+ int aprot;
+ unsigned npages;
+ unsigned num_phys_pages;
+ unsigned cmode;
+ pt_entry_t *pte;
+ pte_template_t template;
+ static unsigned i = 0;
+ /*
+ * cache mode is passed in the top 16 bits.
+ * extract it from there. And clear the top
+ * 16 bits from prot.
+ */
+ cmode = (prot & 0xffff0000) >> 16;
+ prot &= 0x0000ffff;
#if DEBUG
- if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
- printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
- curproc, start, end, virt, prot, cmode);
+ if ((pmap_con_dbg & (CD_MAP | CD_NORM)) == (CD_MAP | CD_NORM))
+ printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
+ curproc, start, end, virt, prot, cmode);
#endif
- if (start > end)
- panic("pmap_map: start greater than end address");
+ if (start > end)
+ panic("pmap_map: start greater than end address");
- aprot = m88k_protection (kernel_pmap, prot);
+ aprot = m88k_protection (kernel_pmap, prot);
- template.bits = M88K_TRUNC_PAGE(start) | aprot | cmode | DT_VALID;
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | cmode | DT_VALID;
- npages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+ npages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
- for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
+ for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic ("pmap_map: Cannot allocate pte table");
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map: Cannot allocate pte table");
#ifdef DEBUG
- if (pmap_con_dbg & CD_MAP)
- if (pte->dtype)
- printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+ if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
+ if (pte->dtype)
+ printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
-
- *pte = template.pte;
+ *pte = template.pte;
#ifdef MVME197
- /* hack for MVME197 */
- if (cputyp == CPU_197) {
- if (i < 32) {
- m197_load_patc(i, virt, (vm_offset_t)template.bits, 1);
- i++;
- }
- }
+ /* hack for MVME197 */
+ if (cputyp == CPU_197) {
+ if (i < 32) {
+ m197_load_patc(i, virt,
+ (vm_offset_t)template.bits, 1);
+ i++;
+ }
+ }
#endif
- virt += M88K_PGBYTES;
- template.bits += M88K_PGBYTES;
- }
+ virt += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ }
- return (virt);
+ return (virt);
} /* pmap_map() */
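[Editor's sketch, not part of the patch] The core of pmap_map() above is simple: build one template PTE from the starting physical address, then advance both the virtual address and the template one page at a time, writing the template into each PTE slot. The constants and addresses below are illustrative only.

#include <stdio.h>

#define PGBYTES         4096u
#define DT_VALID        0x1u
#define TRUNC_PAGE(x)   ((x) & ~(PGBYTES - 1))
#define ROUND_PAGE(x)   TRUNC_PAGE((x) + PGBYTES - 1)

int
main(void)
{
        unsigned virt = 0x10000, start = 0x200000, end = 0x203000;
        unsigned aprot = 0, cmode = 0;
        unsigned template = TRUNC_PAGE(start) | aprot | cmode | DT_VALID;
        unsigned npages = (ROUND_PAGE(end) - TRUNC_PAGE(start)) / PGBYTES;
        unsigned n;

        for (n = npages; n > 0; n--) {
                printf("map va 0x%05x -> pte 0x%08x\n", virt, template);
                virt += PGBYTES;
                template += PGBYTES;
        }
        return 0;
}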
@@ -835,100 +713,99 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
*/
vm_offset_t
pmap_map_batc(vm_offset_t virt, vm_offset_t start, vm_offset_t end,
- vm_prot_t prot, unsigned cmode)
+ vm_prot_t prot, unsigned cmode)
{
- int aprot;
- unsigned num_phys_pages;
- vm_offset_t phys;
- pt_entry_t *pte;
- pte_template_t template;
- batc_template_t batctmp;
- register int i;
+ int aprot;
+ unsigned num_phys_pages;
+ vm_offset_t phys;
+ pt_entry_t *pte;
+ pte_template_t template;
+ batc_template_t batctmp;
+ register int i;
#if DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf ("(pmap_map_batc :%x) phys address from %x to %x mapped at virtual %x, prot %x\n", curproc,
- start, end, virt, prot);
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ printf ("(pmap_map_batc :%x) phys address from %x to %x mapped at virtual %x, prot %x\n", curproc,
+ start, end, virt, prot);
#endif
- if (start > end)
- panic("pmap_map_batc: start greater than end address");
+ if (start > end)
+ panic("pmap_map_batc: start greater than end address");
- aprot = m88k_protection (kernel_pmap, prot);
- template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
- phys = start;
- batctmp.bits = 0;
- batctmp.field.sup = 1; /* supervisor */
- batctmp.field.wt = template.pte.wt; /* write through */
- batctmp.field.g = template.pte.g; /* global */
- batctmp.field.ci = template.pte.ci; /* cache inhibit */
- batctmp.field.wp = template.pte.prot; /* protection */
- batctmp.field.v = 1; /* valid */
+ aprot = m88k_protection (kernel_pmap, prot);
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
+ phys = start;
+ batctmp.bits = 0;
+ batctmp.field.sup = 1; /* supervisor */
+ batctmp.field.wt = template.pte.wt; /* write through */
+ batctmp.field.g = template.pte.g; /* global */
+ batctmp.field.ci = template.pte.ci; /* cache inhibit */
+ batctmp.field.wp = template.pte.prot; /* protection */
+ batctmp.field.v = 1; /* valid */
- num_phys_pages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+ num_phys_pages = M88K_BTOP(M88K_ROUND_PAGE(end) -
+ M88K_TRUNC_PAGE(start));
- while (num_phys_pages > 0) {
+ while (num_phys_pages > 0) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
- num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
+ num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
#endif
- if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
- num_phys_pages >= BATC_BLKBYTES/M88K_PGBYTES &&
- batc_used < BATC_MAX ) {
-
- /*
- * map by BATC
- */
- batctmp.field.lba = M88K_BTOBLK(virt);
- batctmp.field.pba = M88K_BTOBLK(phys);
-
- for ( i = 0; i < max_cpus; i++)
- if (cpu_sets[i])
- cmmu_set_pair_batc_entry(i, batc_used, batctmp.bits);
-
- batc_entry[batc_used] = batctmp.field;
-
+ if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
+ num_phys_pages >= BATC_BLKBYTES/M88K_PGBYTES &&
+ batc_used < BATC_MAX ) {
+ /*
+ * map by BATC
+ */
+ batctmp.field.lba = M88K_BTOBLK(virt);
+ batctmp.field.pba = M88K_BTOBLK(phys);
+
+ for ( i = 0; i < max_cpus; i++)
+ if (cpu_sets[i])
+ cmmu_set_pair_batc_entry(i, batc_used,
+ batctmp.bits);
+ batc_entry[batc_used] = batctmp.field;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
- printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
- }
- if (pmap_con_dbg & CD_MAPB) {
-
- for (i = 0; i < BATC_BLKBYTES; i += M88K_PGBYTES ) {
- pte = pmap_pte(kernel_pmap, virt+i);
- if (pte->dtype)
- printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
- }
- }
+ if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
+ printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
+ }
+ if (pmap_con_dbg & CD_MAPB) {
+
+ for (i = 0; i < BATC_BLKBYTES; i += M88K_PGBYTES ) {
+ pte = pmap_pte(kernel_pmap, virt+i);
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
+ }
+ }
#endif
- batc_used++;
- virt += BATC_BLKBYTES;
- phys += BATC_BLKBYTES;
- template.pte.pfn = M88K_BTOP(phys);
- num_phys_pages -= BATC_BLKBYTES/M88K_PGBYTES;
- continue;
- }
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic ("pmap_map_batc: Cannot allocate pte table");
+ batc_used++;
+ virt += BATC_BLKBYTES;
+ phys += BATC_BLKBYTES;
+ template.pte.pfn = M88K_BTOP(phys);
+ num_phys_pages -= BATC_BLKBYTES/M88K_PGBYTES;
+ continue;
+ }
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map_batc: Cannot allocate pte table");
#ifdef DEBUG
- if (pmap_con_dbg & CD_MAPB)
- if (pte->dtype)
- printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+ if (pmap_con_dbg & CD_MAPB)
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
- *pte = template.pte;
- virt += M88K_PGBYTES;
- phys += M88K_PGBYTES;
- template.bits += M88K_PGBYTES;
- num_phys_pages--;
- }
+ *pte = template.pte;
+ virt += M88K_PGBYTES;
+ phys += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ num_phys_pages--;
+ }
- return (M88K_ROUND_PAGE(virt));
+ return (M88K_ROUND_PAGE(virt));
} /* pmap_map_batc() */
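[Editor's sketch, not part of the patch] pmap_map_batc() above prefers a block address translation (BATC) entry whenever both addresses sit on a block boundary, at least one full block remains, and a BATC slot is free; otherwise it falls back to ordinary page table entries. The decision loop is modeled below; the block size and slot count are stated as assumptions.

#include <stdio.h>

#define PGBYTES         4096u
#define BATC_BLKBYTES   (512 * 1024u)   /* assumed 512KB blocks */
#define BATC_MAX        8
#define BLK_ALIGNED(a)  (((a) & (BATC_BLKBYTES - 1)) == 0)

int
main(void)
{
        unsigned virt = 0x00000000, phys = 0x01000000;
        unsigned npages = 260;          /* a bit over two blocks */
        unsigned batc_used = 0;

        while (npages > 0) {
                if (BLK_ALIGNED(virt) && BLK_ALIGNED(phys) &&
                    npages >= BATC_BLKBYTES / PGBYTES &&
                    batc_used < BATC_MAX) {
                        printf("BATC %u: va 0x%08x -> pa 0x%08x\n",
                            batc_used++, virt, phys);
                        virt += BATC_BLKBYTES;
                        phys += BATC_BLKBYTES;
                        npages -= BATC_BLKBYTES / PGBYTES;
                        continue;
                }
                /* page-by-page fallback: advance one page */
                virt += PGBYTES;
                phys += PGBYTES;
                npages--;
        }
        printf("finished with %u BATC entr%s used\n",
            batc_used, batc_used == 1 ? "y" : "ies");
        return 0;
}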
@@ -967,71 +844,73 @@ pmap_map_batc(vm_offset_t virt, vm_offset_t start, vm_offset_t end,
void
pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
{
- int spl, spl_sav;
- pt_entry_t *pte;
- vm_offset_t va;
- int kflush;
- int cpu;
- register unsigned users;
- register pte_template_t opte;
+ int spl, spl_sav;
+ pt_entry_t *pte;
+ vm_offset_t va;
+ int kflush;
+ int cpu;
+ register unsigned users;
+ register pte_template_t opte;
#ifdef DEBUG
- if ( mode & CACHE_MASK ) {
- printf("(cache_ctrl) illegal mode %x\n",mode);
- return;
- }
- if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
- }
+ if ( mode & CACHE_MASK ) {
+ printf("(cache_ctrl) illegal mode %x\n",mode);
+ return;
+ }
+ if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
+ printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
+ }
#endif /* DEBUG */
- if ( pmap == PMAP_NULL ) {
- panic("pmap_cache_ctrl: pmap is NULL");
- }
-
- PMAP_LOCK(pmap, spl);
-
- /*
- *
- */
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- for (va = s; va < e; va += M88K_PGBYTES) {
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- continue;
+ if ( pmap == PMAP_NULL ) {
+ panic("pmap_cache_ctrl: pmap is NULL");
+ }
+
+ PMAP_LOCK(pmap, spl);
+
+ /*
+ *
+ */
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ for (va = s; va < e; va += M88K_PGBYTES) {
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ continue;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
- }
+ if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
+ printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
+ }
#endif /* DEBUG */
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- * XXX
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
-
- /*
- * Data cache should be copied back and invalidated.
- */
- for (cpu=0; cpu<max_cpus; cpu++)
- if (cpu_sets[cpu])
- /*cmmu_flush_remote_data_cache(cpu, M88K_PTOB(pte->pfn),M88K_PGBYTES);*/
- cmmu_flush_remote_cache(cpu, M88K_PTOB(pte->pfn), M88K_PGBYTES);
-
- }
-
- PMAP_UNLOCK(pmap, spl);
+ /*
+ * Invalidate pte temporarily to avoid being written back
+ * the modified bit and/or the reference bit by other cpu.
+ * XXX
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+
+ /*
+ * Data cache should be copied back and invalidated.
+ */
+ for (cpu=0; cpu<max_cpus; cpu++)
+ if (cpu_sets[cpu])
+ /*cmmu_flush_remote_data_cache(cpu,
+ M88K_PTOB(pte->pfn),M88K_PGBYTES);*/
+ cmmu_flush_remote_cache(cpu, M88K_PTOB(pte->pfn),
+ M88K_PGBYTES);
+
+ }
+
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_cache_ctrl */
@@ -1085,143 +964,142 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
void
pmap_bootstrap(vm_offset_t load_start, /* IN */
- vm_offset_t *phys_start, /* IN/OUT */
- vm_offset_t *phys_end, /* IN */
- vm_offset_t *virt_start, /* OUT */
- vm_offset_t *virt_end) /* OUT */
+ vm_offset_t *phys_start, /* IN/OUT */
+ vm_offset_t *phys_end, /* IN */
+ vm_offset_t *virt_start, /* OUT */
+ vm_offset_t *virt_end) /* OUT */
{
- kpdt_entry_t kpdt_virt;
- sdt_entry_t *kmap;
- vm_offset_t vaddr,
- virt,
- kpdt_phys,
- s_text,
- e_text,
- kernel_pmap_size,
- etherpa;
- apr_template_t apr_data;
- pt_entry_t *pte;
- int i;
- u_long foo;
- pmap_table_t ptable;
- extern char *kernelstart, *etext;
- extern char *kernel_sdt;
- extern void cmmu_go_virt(void);
+ kpdt_entry_t kpdt_virt;
+ sdt_entry_t *kmap;
+ vm_offset_t vaddr,
+ virt,
+ kpdt_phys,
+ s_text,
+ e_text,
+ kernel_pmap_size,
+ etherpa;
+ apr_template_t apr_data;
+ pt_entry_t *pte;
+ int i;
+ pmap_table_t ptable;
+ extern char *kernelstart, *etext;
+ extern char *kernel_sdt;
+ extern void cmmu_go_virt(void);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) {
- printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) {
+ printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
+ }
#endif
- ptes_per_vm_page = PAGE_SIZE >> M88K_PGSHIFT;
- if (ptes_per_vm_page == 0) {
- panic("pmap_bootstrap: VM page size < MACHINE page size");
- }
- if (!PAGE_ALIGNED(load_start)) {
- panic("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x", load_start);
- }
-
- simple_lock_init(&kernel_pmap->lock);
-
- /*
- * Allocate the kernel page table from the front of available
- * physical memory,
- * i.e. just after where the kernel image was loaded.
- */
- /*
- * The calling sequence is
- * ...
- * pmap_bootstrap(&kernelstart,...)
- * kernelstart is the first symbol in the load image.
- * We link the kernel such that &kernelstart == 0x10000 (size of
- * BUG ROM)
- * The expression (&kernelstart - load_start) will end up as
- * 0, making *virt_start == *phys_start, giving a 1-to-1 map)
- */
-
- *phys_start = M88K_ROUND_PAGE(*phys_start);
- *virt_start = *phys_start +
- (M88K_TRUNC_PAGE((unsigned)&kernelstart) - load_start);
-
- /*
- * Initialize kernel_pmap structure
- */
- kernel_pmap->ref_count = 1;
- kernel_pmap->cpus_using = 0;
- kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start);
- kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start);
- kmapva = *virt_start;
+ ptes_per_vm_page = PAGE_SIZE >> M88K_PGSHIFT;
+ if (ptes_per_vm_page == 0) {
+ panic("pmap_bootstrap: VM page size < MACHINE page size");
+ }
+ if (!PAGE_ALIGNED(load_start)) {
+ panic("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x", load_start);
+ }
+
+ simple_lock_init(&kernel_pmap->lock);
+
+ /*
+ * Allocate the kernel page table from the front of available
+ * physical memory,
+ * i.e. just after where the kernel image was loaded.
+ */
+ /*
+ * The calling sequence is
+ * ...
+ * pmap_bootstrap(&kernelstart,...)
+ * kernelstart is the first symbol in the load image.
+ * We link the kernel such that &kernelstart == 0x10000 (size of
+ * BUG ROM)
+ * The expression (&kernelstart - load_start) will end up as
+	 *	0, making *virt_start == *phys_start, giving a 1-to-1 map.
+ */
+
+ *phys_start = M88K_ROUND_PAGE(*phys_start);
+ *virt_start = *phys_start +
+ (M88K_TRUNC_PAGE((unsigned)&kernelstart) - load_start);
+
+ /*
+ * Initialize kernel_pmap structure
+ */
+ kernel_pmap->ref_count = 1;
+ kernel_pmap->cpus_using = 0;
+ kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start);
+ kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start);
+ kmapva = *virt_start;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("kernel_pmap->sdt_paddr = %x\n",kernel_pmap->sdt_paddr);
- printf("kernel_pmap->sdt_vaddr = %x\n",kernel_pmap->sdt_vaddr);
- }
- /* init double-linked list of pmap structure */
- kernel_pmap->next = kernel_pmap;
- kernel_pmap->prev = kernel_pmap;
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kernel_pmap->sdt_paddr = %x\n",kernel_pmap->sdt_paddr);
+ printf("kernel_pmap->sdt_vaddr = %x\n",kernel_pmap->sdt_vaddr);
+ }
+ /* init double-linked list of pmap structure */
+ kernel_pmap->next = kernel_pmap;
+ kernel_pmap->prev = kernel_pmap;
#endif
- /*
- * Reserve space for segment table entries.
- * One for the regular segment table and one for the shadow table
- * The shadow table keeps track of the virtual address of page
- * tables. This is used in virtual-to-physical address translation
- * functions. Remember, MMU cares only for physical addresses of
- * segment and page table addresses. For kernel page tables, we
- * really don't need this virtual stuff (since the kernel will
- * be mapped 1-to-1) but for user page tables, this is required.
- * Just to be consistent, we will maintain the shadow table for
- * kernel pmap also.
- */
-
- kernel_pmap_size = 2*SDT_SIZE;
+ /*
+ * Reserve space for segment table entries.
+ * One for the regular segment table and one for the shadow table
+ * The shadow table keeps track of the virtual address of page
+ * tables. This is used in virtual-to-physical address translation
+ * functions. Remember, MMU cares only for physical addresses of
+ * segment and page table addresses. For kernel page tables, we
+ * really don't need this virtual stuff (since the kernel will
+ * be mapped 1-to-1) but for user page tables, this is required.
+ * Just to be consistent, we will maintain the shadow table for
+ * kernel pmap also.
+ */
+
+ kernel_pmap_size = 2*SDT_SIZE;
#ifdef DEBUG
- printf("kernel segment table from 0x%x to 0x%x\n", kernel_pmap->sdt_vaddr,
- kernel_pmap->sdt_vaddr + kernel_pmap_size);
+ printf("kernel segment table from 0x%x to 0x%x\n", kernel_pmap->sdt_vaddr,
+ kernel_pmap->sdt_vaddr + kernel_pmap_size);
#endif
- /* save pointers to where page table entries start in physical memory */
- kpdt_phys = (*phys_start + kernel_pmap_size);
- kpdt_virt = (kpdt_entry_t)(*virt_start + kernel_pmap_size);
- kernel_pmap_size += MAX_KERNEL_PDT_SIZE;
- *phys_start += kernel_pmap_size;
- *virt_start += kernel_pmap_size;
-
- /* init all segment and page descriptor to zero */
- bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size);
+ /* save pointers to where page table entries start in physical memory */
+ kpdt_phys = (*phys_start + kernel_pmap_size);
+ kpdt_virt = (kpdt_entry_t)(*virt_start + kernel_pmap_size);
+ kernel_pmap_size += MAX_KERNEL_PDT_SIZE;
+ *phys_start += kernel_pmap_size;
+ *virt_start += kernel_pmap_size;
+
+ /* init all segment and page descriptor to zero */
+ bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size);
#ifdef DEBUG
- printf("kernel page table to 0x%x\n", kernel_pmap->sdt_vaddr + kernel_pmap_size);
+ printf("kernel page table to 0x%x\n", kernel_pmap->sdt_vaddr + kernel_pmap_size);
#endif
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("kpdt_phys = %x\n",kpdt_phys);
- printf("kpdt_virt = %x\n",kpdt_virt);
- printf("end of kpdt at (virt)0x%08x ; (phys)0x%08x\n",
- *virt_start,*phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kpdt_phys = %x\n",kpdt_phys);
+ printf("kpdt_virt = %x\n",kpdt_virt);
+ printf("end of kpdt at (virt)0x%08x ; (phys)0x%08x\n",
+ *virt_start,*phys_start);
+ }
#endif
- /*
- * init the kpdt queue
- */
- kpdt_free = kpdt_virt;
- for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) {
- kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
- kpdt_virt->phys = kpdt_phys;
- kpdt_virt = kpdt_virt->next;
- kpdt_phys += PDT_SIZE;
- }
- kpdt_virt->next = KPDT_ENTRY_NULL; /* terminate the list */
-
- /*
- * Map the kernel image into virtual space
- */
-
- s_text = load_start; /* paddr of text */
- e_text = load_start + ((unsigned)&etext -
- M88K_TRUNC_PAGE((unsigned)&kernelstart));
- /* paddr of end of text section*/
- e_text = M88K_ROUND_PAGE(e_text);
+ /*
+ * init the kpdt queue
+ */
+ kpdt_free = kpdt_virt;
+ for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) {
+ kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
+ kpdt_virt->phys = kpdt_phys;
+ kpdt_virt = kpdt_virt->next;
+ kpdt_phys += PDT_SIZE;
+ }
+ kpdt_virt->next = KPDT_ENTRY_NULL; /* terminate the list */
+
+ /*
+ * Map the kernel image into virtual space
+ */
+
+ s_text = load_start; /* paddr of text */
+ e_text = load_start + ((unsigned)&etext -
+ M88K_TRUNC_PAGE((unsigned)&kernelstart));
+ /* paddr of end of text section*/
+ e_text = M88K_ROUND_PAGE(e_text);
#ifdef OMRON_PMAP
#define PMAPER pmap_map
@@ -1229,148 +1107,148 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */
#define PMAPER pmap_map_batc
#endif
-#if 1 /* defined(MVME187) || defined (MVME197) */
- /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
- if (cputyp != CPU_188) { /* != CPU_188 */
- vaddr = PMAPER(
- 0,
- 0,
- 0x10000,
- (VM_PROT_WRITE | VM_PROT_READ)|(CACHE_INH <<16));
- assert(vaddr == M88K_TRUNC_PAGE((unsigned)&kernelstart));
- }
-#endif /* defined(MVME187) || defined (MVME197) */
-
- vaddr = PMAPER(
- (vm_offset_t)M88K_TRUNC_PAGE(((unsigned)&kernelstart)),
- s_text,
- e_text,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL<<16)); /* shouldn't it be RO? XXX*/
-
- vaddr = PMAPER(
- vaddr,
- e_text,
- (vm_offset_t)kmap,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL << 16));
-
- /*
- * Map system segment & page tables - should be cache inhibited?
- * 88200 manual says that CI bit is driven on the Mbus while accessing
- * the translation tree. I don't think we need to map it CACHE_INH
- * here...
- */
- if (kmapva != vaddr) {
+ /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
+ vaddr = PMAPER(
+ 0,
+ 0,
+ 0x10000,
+ (VM_PROT_WRITE | VM_PROT_READ)|(CACHE_INH <<16));
+
+ assert(vaddr == M88K_TRUNC_PAGE((unsigned)&kernelstart));
+
+ /* map the kernel text read only */
+ vaddr = PMAPER(
+ (vm_offset_t)M88K_TRUNC_PAGE(((unsigned)&kernelstart)),
+ s_text,
+ e_text,
+ (VM_PROT_WRITE | VM_PROT_READ)|(CACHE_GLOBAL<<16)); /* shouldn't it be RO? XXX*/
+
+ vaddr = PMAPER(
+ vaddr,
+ e_text,
+ (vm_offset_t)kmap,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL << 16));
+ /*
+ * Map system segment & page tables - should be cache inhibited?
+ * 88200 manual says that CI bit is driven on the Mbus while accessing
+ * the translation tree. I don't think we need to map it CACHE_INH
+ * here...
+ */
+ if (kmapva != vaddr) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("(pmap_bootstrap) correcting vaddr\n");
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("(pmap_bootstrap) correcting vaddr\n");
+ }
#endif
- while (vaddr < (*virt_start - kernel_pmap_size))
- vaddr = M88K_ROUND_PAGE(vaddr + 1);
- }
- vaddr = PMAPER(
- vaddr,
- (vm_offset_t)kmap,
- *phys_start,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
-
- if (vaddr != *virt_start) {
+ while (vaddr < (*virt_start - kernel_pmap_size))
+ vaddr = M88K_ROUND_PAGE(vaddr + 1);
+ }
+ vaddr = PMAPER(
+ vaddr,
+ (vm_offset_t)kmap,
+ *phys_start,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
+
+ if (vaddr != *virt_start) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("1:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
- *virt_start, *phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("1:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
+ *virt_start, *phys_start);
+ }
#endif
- *virt_start = vaddr;
- *phys_start = round_page(*phys_start);
- }
+ *virt_start = vaddr;
+ *phys_start = round_page(*phys_start);
+ }
#if defined(MVME187) || defined (MVME197)
- /*
- * Get ethernet buffer - need etherlen bytes physically contiguous.
- * 1 to 1 mapped as well???. There is actually a bug in the macros
- * used by the 1x7 ethernet driver. Remove this when that is fixed.
- * XXX -nivas
- */
- if (cputyp != CPU_188) { /* != CPU_188 */
- *phys_start = vaddr;
- etherlen = ETHERPAGES * NBPG;
- etherbuf = (void *)vaddr;
-
- vaddr = PMAPER(
- vaddr,
- *phys_start,
- *phys_start + etherlen,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
-
- *virt_start += etherlen;
- *phys_start += etherlen;
-
- if (vaddr != *virt_start) {
+ /*
+ * Get ethernet buffer - need etherlen bytes physically contiguous.
+ * 1 to 1 mapped as well???. There is actually a bug in the macros
+ * used by the 1x7 ethernet driver. Remove this when that is fixed.
+ * XXX -nivas
+ */
+ if (cputyp != CPU_188) { /* != CPU_188 */
+ *phys_start = vaddr;
+ etherlen = ETHERPAGES * NBPG;
+ etherbuf = (void *)vaddr;
+
+ vaddr = PMAPER(
+ vaddr,
+ *phys_start,
+ *phys_start + etherlen,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
+
+ *virt_start += etherlen;
+ *phys_start += etherlen;
+
+ if (vaddr != *virt_start) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("2:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
- *virt_start, *phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("2:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
+ *virt_start, *phys_start);
+ }
#endif
- *virt_start = vaddr;
- *phys_start = round_page(*phys_start);
- }
- }
+ *virt_start = vaddr;
+ *phys_start = round_page(*phys_start);
+ }
+ }
#endif /* defined(MVME187) || defined (MVME197) */
- *virt_start = round_page(*virt_start);
- *virt_end = VM_MAX_KERNEL_ADDRESS;
+ *virt_start = round_page(*virt_start);
+ *virt_end = VM_MAX_KERNEL_ADDRESS;
- /*
- * Map a few more pages for phys routines and debugger.
- */
+ /*
+ * Map a few more pages for phys routines and debugger.
+ */
- phys_map_vaddr1 = round_page(*virt_start);
- phys_map_vaddr2 = phys_map_vaddr1 + PAGE_SIZE * max_cpus;
+ phys_map_vaddr1 = round_page(*virt_start);
+ phys_map_vaddr2 = phys_map_vaddr1 + PAGE_SIZE * max_cpus;
- /*
- * To make 1:1 mapping of virt:phys, throw away a few phys pages.
- * XXX what is this? nivas
- */
+ /*
+ * To make 1:1 mapping of virt:phys, throw away a few phys pages.
+ * XXX what is this? nivas
+ */
- *phys_start += 2 * PAGE_SIZE * max_cpus;
- *virt_start += 2 * PAGE_SIZE * max_cpus;
+ *phys_start += 2 * PAGE_SIZE * max_cpus;
+ *virt_start += 2 * PAGE_SIZE * max_cpus;
- /*
- * Map all IO space 1-to-1. Ideally, I would like to not do this
- * but have va for the given IO address dynamically allocated. But
- * on the 88200, 2 of the BATCs are hardwired to map the IO space
- * 1-to-1; I decided to map the rest of the IO space 1-to-1.
- * And bug ROM & the SRAM need to be mapped 1-to-1 if we ever want to
- * execute bug system calls after the MMU has been turned on.
- * OBIO should be mapped cache inhibited.
- */
+ /*
+ * Map all IO space 1-to-1. Ideally, I would like to not do this
+ * but have va for the given IO address dynamically allocated. But
+ * on the 88200, 2 of the BATCs are hardwired to map the IO space
+ * 1-to-1; I decided to map the rest of the IO space 1-to-1.
+ * And bug ROM & the SRAM need to be mapped 1-to-1 if we ever want to
+ * execute bug system calls after the MMU has been turned on.
+ * OBIO should be mapped cache inhibited.
+ */
- ptable = pmap_table_build(avail_end); /* see pmap_table.c for details */
+ ptable = pmap_table_build(0); /* see pmap_table.c for details */
#ifdef DEBUG
- printf("pmap_bootstrap: -> pmap_table_build\n");
-#endif
- for ( ; ptable->size != 0xffffffffU; ptable++)
- if (ptable->size) {
- /*
- * size-1, 'cause pmap_map rounds up to next pagenumber
- */
- PMAPER(ptable->virt_start,
- ptable->phys_start,
- ptable->phys_start + (ptable->size - 1),
- ptable->prot|(ptable->cacheability << 16));
- }
-
- /*
- * Allocate all the submaps we need. Note that SYSMAP just allocates
- * kernel virtual address with no physical backing memory. The idea
- * is physical memory will be mapped at this va before using that va.
- * This means that if different physcal pages are going to be mapped
- * at different times, we better do a tlb flush before using it -
- * else we will be referencing the wrong page.
- */
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("pmap_bootstrap: -> pmap_table_build\n");
+ }
+#endif
+
+ for ( ; ptable->size != 0xffffffffU; ptable++)
+ if (ptable->size) {
+ /*
+ * size-1, 'cause pmap_map rounds up to next pagenumber
+ */
+ PMAPER(ptable->virt_start,
+ ptable->phys_start,
+ ptable->phys_start + (ptable->size - 1),
+ ptable->prot|(ptable->cacheability << 16));
+ }
+
+ /*
+ * Allocate all the submaps we need. Note that SYSMAP just allocates
+ * kernel virtual address with no physical backing memory. The idea
+ * is physical memory will be mapped at this va before using that va.
+	 * This means that if different physical pages are going to be mapped
+ * at different times, we better do a tlb flush before using it -
+ * else we will be referencing the wrong page.
+ */
#define SYSMAP(c, p, v, n) \
({ \
@@ -1380,97 +1258,95 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */
virt += ((n)*NBPG); \
})
- virt = *virt_start;
+ virt = *virt_start;
- SYSMAP(caddr_t, vmpte , vmmap, 1);
- SYSMAP(struct msgbuf *, msgbufmap ,msgbufp, btoc(MSGBUFSIZE));
+ SYSMAP(caddr_t, vmpte , vmmap, 1);
+ SYSMAP(struct msgbuf *, msgbufmap ,msgbufp, btoc(MSGBUFSIZE));
- vmpte->pfn = -1;
- vmpte->dtype = DT_INVALID;
+ vmpte->pfn = -1;
+ vmpte->dtype = DT_INVALID;
- *virt_start = virt;
+ *virt_start = virt;
- /*
- * Set translation for UPAGES at UADDR. The idea is we want to
- * have translations set up for UADDR. Later on, the ptes for
- * for this address will be set so that kstack will refer
- * to the u area. Make sure pmap knows about this virtual
- * address by doing vm_findspace on kernel_map.
- */
+ /*
+ * Set translation for UPAGES at UADDR. The idea is we want to
+	 * have translations set up for UADDR. Later on, the ptes
+	 * for this address will be set so that kstack will refer
+ * to the u area. Make sure pmap knows about this virtual
+ * address by doing vm_findspace on kernel_map.
+ */
- for (i = 0, virt = UADDR; i < UPAGES; i++, virt += PAGE_SIZE) {
+ for (i = 0, virt = UADDR; i < UPAGES; i++, virt += PAGE_SIZE) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("setting up mapping for Upage %d @ %x\n", i, virt);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("setting up mapping for Upage %d @ %x\n", i, virt);
+ }
#endif
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
- }
-
- /*
- * Switch to using new page tables
- */
-
- apr_data.bits = 0;
- apr_data.field.st_base = M88K_BTOP(kernel_pmap->sdt_paddr);
- apr_data.field.wt = 1;
- apr_data.field.g = 1;
- apr_data.field.ci = 0;
- apr_data.field.te = 1; /* Translation enable */
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
+ }
+
+ /*
+ * Switch to using new page tables
+ */
+
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(kernel_pmap->sdt_paddr);
+ apr_data.field.wt = 1;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1; /* Translation enable */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- void show_apr(unsigned value);
- show_apr(apr_data.bits);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ void show_apr(unsigned value);
+ show_apr(apr_data.bits);
+ }
#endif
- /* Invalidate entire kernel TLB. */
+ /* Invalidate entire kernel TLB. */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("invalidating tlb %x\n", apr_data.bits);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("invalidating tlb %x\n", apr_data.bits);
+ }
#endif
- for (i = 0; i < MAX_CPUS; i++)
- if (cpu_sets[i]) {
- /* Invalidate entire kernel TLB. */
- cmmu_flush_remote_tlb(i, 1, 0, -1);
+ for (i = 0; i < MAX_CPUS; i++)
+ if (cpu_sets[i]) {
+ /* Invalidate entire kernel TLB. */
+ cmmu_flush_remote_tlb(i, 1, 0, -1);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("After cmmu_flush_remote_tlb()\n");
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("After cmmu_flush_remote_tlb()\n");
+ }
#endif
- /* still physical */
- /*
- * Set valid bit to DT_INVALID so that the very first pmap_enter()
- * on these won't barf in pmap_remove_range().
- */
- pte = pmap_pte(kernel_pmap, phys_map_vaddr1);
- pte->pfn = -1;
- pte->dtype = DT_INVALID;
- pte = pmap_pte(kernel_pmap, phys_map_vaddr2);
- pte->dtype = DT_INVALID;
- pte->pfn = -1;
- /* Load supervisor pointer to segment table. */
- cmmu_remote_set_sapr(i, apr_data.bits);
+ /* still physical */
+ /*
+ * Set valid bit to DT_INVALID so that the very first
+ * pmap_enter() on these won't barf in
+ * pmap_remove_range().
+ */
+ pte = pmap_pte(kernel_pmap, phys_map_vaddr1);
+ pte->pfn = -1;
+ pte->dtype = DT_INVALID;
+ pte = pmap_pte(kernel_pmap, phys_map_vaddr2);
+ pte->dtype = DT_INVALID;
+ pte->pfn = -1;
+ /* Load supervisor pointer to segment table. */
+ cmmu_remote_set_sapr(i, apr_data.bits);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("After cmmu_remote_set_sapr()\n");
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("After cmmu_remote_set_sapr()\n");
+ }
#endif
- SETBIT_CPUSET(i, &kernel_pmap->cpus_using);
- /* Load supervisor pointer to segment table. */
- }
+ SETBIT_CPUSET(i, &kernel_pmap->cpus_using);
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("running virtual - avail_next 0x%x\n", *phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("running virtual - avail_next 0x%x\n", *phys_start);
+ }
#endif
- avail_next = *phys_start;
-
- return;
-
+ avail_next = *phys_start;
+ return;
} /* pmap_bootstrap() */
/*
@@ -1489,22 +1365,22 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */
void *
pmap_bootstrap_alloc(int size)
{
- register void *mem;
-
- size = round_page(size);
- mem = (void *)virtual_avail;
- virtual_avail = pmap_map(virtual_avail, avail_start,
- avail_start + size,
- VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
- avail_start += size;
+ register void *mem;
+
+ size = round_page(size);
+ mem = (void *)virtual_avail;
+ virtual_avail = pmap_map(virtual_avail, avail_start,
+ avail_start + size,
+ VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
+ avail_start += size;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("pmap_bootstrap_alloc: size %x virtual_avail %x avail_start %x\n",
- size, virtual_avail, avail_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("pmap_bootstrap_alloc: size %x virtual_avail %x avail_start %x\n",
+ size, virtual_avail, avail_start);
+ }
#endif
- bzero((void *)mem, size);
- return (mem);
+ bzero((void *)mem, size);
+ return (mem);
}
#endif /* !defined(MACHINE_NEW_NONCONTIG) */
@@ -1554,147 +1430,143 @@ pmap_bootstrap_alloc(int size)
void
pmap_init(void)
{
- register long npages;
- register vm_offset_t addr;
- register vm_size_t s;
- register int i;
- struct pv_entry *pv;
- char *attr;
- struct simplelock *lock;
- int bank;
+ register long npages;
+ register vm_offset_t addr;
+ register vm_size_t s;
+ register int i;
+ struct pv_entry *pv;
+ char *attr;
+ struct simplelock *lock;
+ int bank;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
- printf("pmap_init()\n");
+ if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
+ printf("pmap_init()\n");
#endif
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+ for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
+ npages += vm_physmem[bank].end - vm_physmem[bank].start;
- /*
- * Allocate memory for the pv_head_table and its lock bits,
- * the modify bit array, and the pte_page table.
- */
- for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
- npages += vm_physmem[bank].end - vm_physmem[bank].start;
-
- s = PV_TABLE_SIZE(npages); /* pv_list */
- s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */
- s += npages * sizeof(char); /* pmap_modify_list */
+ s = PV_TABLE_SIZE(npages); /* pv_list */
+ s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */
+ s += npages * sizeof(char); /* pmap_modify_list */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
- printf("(pmap_init) nbr of managed pages = %x\n", npages);
- printf("(pmap_init) size of pv_list = %x\n",
- npages * sizeof(struct pv_entry));
- }
+ if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
+ printf("(pmap_init) nbr of managed pages = %x\n", npages);
+ printf("(pmap_init) size of pv_list = %x\n",
+ npages * sizeof(struct pv_entry));
+ }
#endif
- s = round_page(s);
+ s = round_page(s);
#if defined(UVM)
- addr = (vaddr_t)uvm_km_zalloc(kernel_map, s);
+ addr = (vaddr_t)uvm_km_zalloc(kernel_map, s);
#else
- addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+ addr = (vm_offset_t)kmem_alloc(kernel_map, s);
#endif
- pv_head_table = (pv_entry_t)addr;
- addr += PV_TABLE_SIZE(npages);
-
- /*
- * Assume that 'simple_lock' is used to lock pv_lock_table
- */
- pv_lock_table = (struct simplelock *)addr; /* XXX */
- addr += PV_LOCK_TABLE_SIZE(npages);
-
- pmap_modify_list = (char *)addr;
-
- /*
- * Initialize pv_lock_table
- */
- for (i = 0; i < npages; i++)
- simple_lock_init(&(pv_lock_table[i]));
-
- /*
- * Now that the pv, attribute, and lock tables have been allocated,
- * assign them to the memory segments.
- */
- pv = pv_head_table;
- lock = pv_lock_table;
- attr = pmap_modify_list;
- for (bank = 0; bank < vm_nphysseg; bank++) {
- npages = vm_physmem[bank].end - vm_physmem[bank].start;
- vm_physmem[bank].pmseg.pvent = pv;
- vm_physmem[bank].pmseg.attrs = attr;
- vm_physmem[bank].pmseg.plock = lock;
- pv += npages;
- lock += npages;
- attr += npages;
- }
-
- pmap_initialized = TRUE;
-
+ pv_head_table = (pv_entry_t)addr;
+ addr += PV_TABLE_SIZE(npages);
+
+ /*
+ * Assume that 'simple_lock' is used to lock pv_lock_table
+ */
+ pv_lock_table = (struct simplelock *)addr; /* XXX */
+ addr += PV_LOCK_TABLE_SIZE(npages);
+
+ pmap_modify_list = (char *)addr;
+
+ /*
+ * Initialize pv_lock_table
+ */
+ for (i = 0; i < npages; i++)
+ simple_lock_init(&(pv_lock_table[i]));
+
+ /*
+ * Now that the pv, attribute, and lock tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_head_table;
+ lock = pv_lock_table;
+ attr = pmap_modify_list;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npages = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ vm_physmem[bank].pmseg.plock = lock;
+ pv += npages;
+ lock += npages;
+ attr += npages;
+ }
+ pmap_initialized = TRUE;
} /* pmap_init() */
#else
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
- register long npages;
- register vm_offset_t addr;
- register vm_size_t s;
- register int i;
- vm_size_t pvl_table_size;
+ register long npages;
+ register vm_offset_t addr;
+ register vm_size_t s;
+ register int i;
+ vm_size_t pvl_table_size;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
- printf("(pmap_init) phys_start %x phys_end %x\n", phys_start, phys_end);
+ if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
+ printf("(pmap_init) phys_start %x phys_end %x\n", phys_start, phys_end);
#endif
- /*
- * Allocate memory for the pv_head_table and its lock bits,
- * the modify bit array, and the pte_page table.
- */
- npages = atop(phys_end - phys_start);
- s = PV_TABLE_SIZE(npages); /* pv_list */
- s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */
- s += npages * sizeof(char); /* pmap_modify_list */
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+ npages = atop(phys_end - phys_start);
+ s = PV_TABLE_SIZE(npages); /* pv_list */
+ s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */
+ s += npages * sizeof(char); /* pmap_modify_list */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
- printf("(pmap_init) nbr of managed pages = %x\n", npages);
- printf("(pmap_init) size of pv_list = %x\n",
- npages * sizeof(struct pv_entry));
- }
+ if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
+ printf("(pmap_init) nbr of managed pages = %x\n", npages);
+ printf("(pmap_init) size of pv_list = %x\n",
+ npages * sizeof(struct pv_entry));
+ }
#endif
-
- s = round_page(s);
- addr = (vm_offset_t)kmem_alloc(kernel_map, s);
-
- pv_head_table = (pv_entry_t)addr;
- addr += PV_TABLE_SIZE(npages);
-
- /*
- * Assume that 'simple_lock' is used to lock pv_lock_table
- */
- pv_lock_table = (struct simplelock *)addr; /* XXX */
- addr += PV_LOCK_TABLE_SIZE(npages);
-
- pmap_modify_list = (char *)addr;
-
- /*
- * Initialize pv_lock_table
- */
- for (i = 0; i < npages; i++)
- simple_lock_init(&(pv_lock_table[i]));
-
- /*
- * Only now, when all of the data structures are allocated,
- * can we set pmap_phys_start and pmap_phys_end. If we set them
- * too soon, the kmem_alloc above will blow up when it causes
- * a call to pmap_enter, and pmap_enter tries to manipulate the
- * (not yet existing) pv_list.
- */
- pmap_phys_start = phys_start;
- pmap_phys_end = phys_end;
-
- pmap_initialized = TRUE;
+ s = round_page(s);
+ addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+
+ pv_head_table = (pv_entry_t)addr;
+ addr += PV_TABLE_SIZE(npages);
+
+ /*
+ * Assume that 'simple_lock' is used to lock pv_lock_table
+ */
+ pv_lock_table = (struct simplelock *)addr; /* XXX */
+ addr += PV_LOCK_TABLE_SIZE(npages);
+
+ pmap_modify_list = (char *)addr;
+
+ /*
+ * Initialize pv_lock_table
+ */
+ for (i = 0; i < npages; i++)
+ simple_lock_init(&(pv_lock_table[i]));
+
+ /*
+ * Only now, when all of the data structures are allocated,
+ * can we set pmap_phys_start and pmap_phys_end. If we set them
+ * too soon, the kmem_alloc above will blow up when it causes
+ * a call to pmap_enter, and pmap_enter tries to manipulate the
+ * (not yet existing) pv_list.
+ */
+ pmap_phys_start = phys_start;
+ pmap_phys_end = phys_end;
+
+ pmap_initialized = TRUE;
} /* pmap_init() */
#endif
@@ -1738,35 +1610,32 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
void
pmap_zero_page(vm_offset_t phys)
{
- vm_offset_t srcva;
- pte_template_t template ;
- unsigned int i;
- unsigned int spl_sav;
- int my_cpu;
- pt_entry_t *srcpte;
-
- my_cpu = cpu_number();
- srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE));
- srcpte = pmap_pte(kernel_pmap, srcva);
-
- for (i = 0; i < ptes_per_vm_page; i++, phys += M88K_PGBYTES) {
- template.bits = M88K_TRUNC_PAGE(phys)
- | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
- | DT_VALID | CACHE_GLOBAL;
-
-
- spl_sav = splimp();
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
- splx(spl_sav);
- bzero (srcva, M88K_PGBYTES);
- /* force the data out */
- cmmu_flush_remote_data_cache(my_cpu,phys, M88K_PGBYTES);
- }
-
+ vm_offset_t srcva;
+ pte_template_t template;
+ unsigned int i;
+ unsigned int spl_sav;
+ int my_cpu;
+ pt_entry_t *srcpte;
+
+ my_cpu = cpu_number();
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+
+ for (i = 0; i < ptes_per_vm_page; i++, phys += M88K_PGBYTES) {
+ template.bits = M88K_TRUNC_PAGE(phys)
+ | m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
+ | DT_VALID | CACHE_GLOBAL;
+
+ spl_sav = splimp();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+ splx(spl_sav);
+ bzero (srcva, M88K_PGBYTES);
+ /* force the data out */
+ cmmu_flush_remote_data_cache(my_cpu,phys, M88K_PGBYTES);
+ }
} /* pmap_zero_page() */
-
/*
* Routine: PMAP_CREATE
*
@@ -1797,119 +1666,129 @@ pmap_zero_page(vm_offset_t phys)
pmap_t
pmap_create(vm_size_t size)
{
- pmap_t p;
+ pmap_t p;
- /*
- * A software use-only map doesn't even need a map.
- */
- if (size != 0)
- return (PMAP_NULL);
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+ if (size != 0)
+ return (PMAP_NULL);
- CHECK_PMAP_CONSISTENCY("pmap_create");
+ CHECK_PMAP_CONSISTENCY("pmap_create");
- p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
- if (p == PMAP_NULL) {
- panic("pmap_create: cannot allocate a pmap");
- }
+ p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
+ if (p == PMAP_NULL) {
+ panic("pmap_create: cannot allocate a pmap");
+ }
- bzero(p, sizeof(*p));
- pmap_pinit(p);
- return (p);
+ bzero(p, sizeof(*p));
+ pmap_pinit(p);
+ return (p);
} /* pmap_create() */
void
pmap_pinit(pmap_t p)
{
- pmap_statistics_t stats;
- sdt_entry_t *segdt;
- int i;
+ pmap_statistics_t stats;
+ sdt_entry_t *segdt;
+ int i, spl;
+ unsigned int s;
+ vm_offset_t addr;
+ sdt_entry_t *sdt;
+ pt_entry_t *pte;
+ pte_template_t template;
+ int aprot;
+
+ /*
+ * Allocate memory for *actual* segment table and *shadow* table.
+ */
+ s = M88K_ROUND_PAGE(2 * SDT_SIZE);
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
+ printf("(pmap_create :%x) need %d pages for sdt\n",
+ curproc, atop(s));
+ }
+#endif
- /*
- * Allocate memory for *actual* segment table and *shadow* table.
- */
#if defined(UVM)
- segdt = (sdt_entry_t *)uvm_km_zalloc(kernel_map, 2 * SDT_SIZE);
+ segdt = (sdt_entry_t *)uvm_km_zalloc(kernel_map, s);
#else
- segdt = (sdt_entry_t *)kmem_alloc(kernel_map, 2 * SDT_SIZE);
+ segdt = (sdt_entry_t *)kmem_alloc(kernel_map, s);
#endif
- if (segdt == NULL)
- panic("pmap_create: kmem_alloc failure");
-
-#if 0
- /* maybe, we can use bzero to zero out the segdt. XXX nivas */
- bzero(segdt, 2 * SDT_SIZE);
-#endif /* 0 */
- /* use pmap zero page to zero it out */
- pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt));
- if (PAGE_SIZE == SDT_SIZE) /* only got half */
- pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt+PAGE_SIZE));
- if (PAGE_SIZE < 2*SDT_SIZE) /* get remainder */
- bzero((vm_offset_t)segdt+PAGE_SIZE, (2*SDT_SIZE)-PAGE_SIZE);
-
- /*
- * Initialize pointer to segment table both virtual and physical.
- */
- p->sdt_vaddr = segdt;
- p->sdt_paddr = (sdt_entry_t *)pmap_extract(kernel_pmap,(vm_offset_t)segdt);
-
- if (!PAGE_ALIGNED(p->sdt_paddr)) {
- printf("pmap_create: std table = %x\n",(int)p->sdt_paddr);
- panic("pmap_create: sdt_table not aligned on page boundary");
- }
+ if (segdt == NULL)
+ panic("pmap_create: kmem_alloc failure");
+
+ /* use pmap zero page to zero it out */
+ addr = (vm_offset_t)segdt;
+ for (i=0; i<atop(s); i++) {
+ pmap_zero_page(pmap_extract(kernel_pmap, addr));
+ addr += PAGE_SIZE;
+ }
+
+ /*
+ * Initialize pointer to segment table both virtual and physical.
+ */
+ p->sdt_vaddr = segdt;
+ p->sdt_paddr = (sdt_entry_t *)pmap_extract(kernel_pmap,(vm_offset_t)segdt);
+
+ if (!PAGE_ALIGNED(p->sdt_paddr)) {
+ printf("pmap_create: std table = %x\n",(int)p->sdt_paddr);
+ panic("pmap_create: sdt_table not aligned on page boundary");
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
- printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
- curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
- }
+ if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
+ printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
+ curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
+ }
#endif
-#if notneeded
- /*
- * memory for page tables should be CACHE DISABLED?
- */
- pmap_cache_ctrl(kernel_pmap,
- (vm_offset_t)segdt,
- (vm_offset_t)segdt+ (SDT_SIZE*2),
- CACHE_INH);
-#endif
- /*
- * Initialize SDT_ENTRIES.
- */
- /*
- * There is no need to clear segment table, since kmem_alloc would
- * provides us clean pages.
- */
-
- /*
- * Initialize pmap structure.
- */
- p->ref_count = 1;
- simple_lock_init(&p->lock);
- p->cpus_using = 0;
+ if (cputyp == CPU_188) {
+ /*
+ * memory for page tables should be CACHE DISABLED on MVME188
+ */
+ pmap_cache_ctrl(kernel_pmap,
+ (vm_offset_t)segdt,
+ (vm_offset_t)segdt+ (SDT_SIZE*2),
+ CACHE_INH);
+ }
+ /*
+ * Initialize SDT_ENTRIES.
+ */
+ /*
+ * There is no need to clear segment table, since kmem_alloc would
+	 * provide us with clean pages.
+ */
+
+ /*
+ * Initialize pmap structure.
+ */
+ p->ref_count = 1;
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
#ifdef OMRON_PMAP
- /* initialize block address translation cache */
- for (i = 0; i < BATC_MAX; i++) {
- p->i_batc[i].bits = 0;
- p->d_batc[i].bits = 0;
- }
+ /* initialize block address translation cache */
+ for (i = 0; i < BATC_MAX; i++) {
+ p->i_batc[i].bits = 0;
+ p->d_batc[i].bits = 0;
+ }
#endif
- /*
- * Initialize statistics.
- */
- stats = &p->stats;
- stats->resident_count = 0;
- stats->wired_count = 0;
+ /*
+ * Initialize statistics.
+ */
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
#ifdef DEBUG
- /* link into list of pmaps, just after kernel pmap */
- p->next = kernel_pmap->next;
- p->prev = kernel_pmap;
- kernel_pmap->next = p;
- p->next->prev = p;
+ /* link into list of pmaps, just after kernel pmap */
+ p->next = kernel_pmap->next;
+ p->prev = kernel_pmap;
+ kernel_pmap->next = p;
+ p->next->prev = p;
#endif
} /* pmap_pinit() */
@@ -1938,70 +1817,68 @@ pmap_pinit(pmap_t p)
* ranges represented by the table group sizes(PDT_TABLE_GROUP_VA_SPACE).
*
*/
-
STATIC void
pmap_free_tables(pmap_t pmap)
{
- unsigned long sdt_va; /* outer loop index */
- sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
- pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- unsigned int i,j;
+ unsigned long sdt_va; /* outer loop index */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ unsigned int i,j;
#if DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
- printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
+ if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
+ printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
#endif
- sdttbl = pmap->sdt_vaddr; /* addr of segment table */
-
- /*
- This contortion is here instead of the natural loop
- because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
- */
-
- i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- if ( j < 1024 ) j++;
-
- /* Segment table Loop */
- for ( ; i < j; i++) {
- sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
- if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+ /*
+ This contortion is here instead of the natural loop
+ because of integer overflow/wraparound if VM_MAX_USER_ADDRESS
+ is near 0xffffffff
+ */
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
+
+ /* Segment table Loop */
+ for ( ; i < j; i++) {
+ sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
+ if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_free_tables :%x) free page table = 0x%x\n", curproc, gdttbl);
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ printf("(pmap_free_tables :%x) free page table = 0x%x\n",
+ curproc, gdttbl);
#endif
- PT_FREE(gdttbl);
- }
-
- } /* Segment Loop */
+ PT_FREE(gdttbl);
+ }
+ } /* Segment Loop */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_free_tables :%x) free segment table = 0x%x\n", curproc, sdttbl);
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ printf("(pmap_free_tables :%x) free segment table = 0x%x\n",
+ curproc, sdttbl);
#endif
- /*
- * Freeing both *actual* and *shadow* segment tables
- */
+ /*
+ * Freeing both *actual* and *shadow* segment tables
+ */
#if defined(UVM)
- uvm_km_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
+ uvm_km_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
#else
- kmem_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
+ kmem_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
#endif
} /* pmap_free_tables() */
-
void
pmap_release(register pmap_t p)
{
- pmap_free_tables(p);
+ pmap_free_tables(p);
#ifdef DBG
- if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
- printf("(pmap_destroy :%x) ref_count = 0\n", curproc);
- /* unlink from list of pmap structs */
- p->prev->next = p->next;
- p->next->prev = p->prev;
+ if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+ printf("(pmap_destroy :%x) ref_count = 0\n", curproc);
+ /* unlink from list of pmap structs */
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
#endif
}
@@ -2033,30 +1910,30 @@ pmap_release(register pmap_t p)
void
pmap_destroy(pmap_t p)
{
- register int c, s;
+ register int c, s;
- if (p == PMAP_NULL) {
+ if (p == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
- printf("(pmap_destroy :%x) pmap is NULL\n", curproc);
+ if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+ printf("(pmap_destroy :%x) pmap is NULL\n", curproc);
#endif
- return;
- }
+ return;
+ }
- if (p == kernel_pmap) {
- panic("pmap_destroy: Attempt to destroy kernel pmap");
- }
+ if (p == kernel_pmap) {
+ panic("pmap_destroy: Attempt to destroy kernel pmap");
+ }
- CHECK_PMAP_CONSISTENCY("pmap_destroy");
+ CHECK_PMAP_CONSISTENCY("pmap_destroy");
- PMAP_LOCK(p, s);
- c = --p->ref_count;
- PMAP_UNLOCK(p, s);
+ PMAP_LOCK(p, s);
+ c = --p->ref_count;
+ PMAP_UNLOCK(p, s);
- if (c == 0) {
- pmap_release(p);
- free((caddr_t)p,M_VMPMAP);
- }
+ if (c == 0) {
+ pmap_release(p);
+ free((caddr_t)p,M_VMPMAP);
+ }
} /* pmap_destroy() */
@@ -2079,17 +1956,16 @@ pmap_destroy(pmap_t p)
void
pmap_reference(pmap_t p)
{
- int s;
+ int s;
- if (p != PMAP_NULL) {
- PMAP_LOCK(p, s);
- p->ref_count++;
- PMAP_UNLOCK(p, s);
- }
+ if (p != PMAP_NULL) {
+ PMAP_LOCK(p, s);
+ p->ref_count++;
+ PMAP_UNLOCK(p, s);
+ }
} /* pmap_reference */
-
/*
* Routine: PMAP_REMOVE_RANGE (internal)
*
@@ -2147,153 +2023,153 @@ pmap_reference(pmap_t p)
STATIC void
pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
{
- int pfi;
- int pfn;
- int num_removed = 0,
- num_unwired = 0;
- register int i;
- pt_entry_t *pte;
- pv_entry_t prev, cur;
- pv_entry_t pvl;
- vm_offset_t pa, va, tva;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (e < s)
- panic("pmap_remove_range: end < start");
-
- /*
- * Pmap has been locked by pmap_remove.
- */
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * Loop through the range in vm_page_size increments.
- * Do not assume that either start or end fail on any
- * kind of page boundary (though this may be true!?).
- */
-
- CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
-
- for (va = s; va < e; va += PAGE_SIZE) {
-
- sdt_entry_t *sdt;
-
- sdt = SDTENT(pmap,va);
-
- if (!SDT_VALID(sdt)) {
- va &= SDT_MASK; /* align to segment */
- if (va <= e - (1<<SDT_SHIFT))
- va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
- else /* wrap around */
- break;
- continue;
- }
-
- pte = pmap_pte(pmap,va);
-
- if (!PDT_VALID(pte)) {
- continue; /* no page mapping */
- }
-
- num_removed++;
-
- if (pte->wired)
- num_unwired++;
-
- pfn = pte->pfn;
- pa = M88K_PTOB(pfn);
-
- if (PMAP_MANAGED(pa)) {
- LOCK_PVH(pa);
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- pvl = PA_TO_PVH(pa);
- CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
-
- if (pvl->pmap == PMAP_NULL)
- panic("pmap_remove_range: null pv_list");
-
- if (pvl->va == va && pvl->pmap == pmap) {
-
- /*
- * Hander is the pv_entry. Copy the next one
- * to hander and free the next one (we can't
- * free the hander)
- */
- cur = pvl->next;
- if (cur != PV_ENTRY_NULL) {
- *pvl = *cur;
- free((caddr_t)cur, M_VMPVENT);
- } else {
- pvl->pmap = PMAP_NULL;
- }
-
- } else {
-
- for (prev = pvl; (cur = prev->next) != PV_ENTRY_NULL; prev = cur) {
- if (cur->va == va && cur->pmap == pmap) {
- break;
- }
- }
- if (cur == PV_ENTRY_NULL) {
- printf("pmap_remove_range: looking for VA "
- "0x%x (pa 0x%x) PV list at 0x%x\n", va, pa, (unsigned)pvl);
- panic("pmap_remove_range: mapping not in pv_list");
- }
-
- prev->next = cur->next;
- free((caddr_t)cur, M_VMPVENT);
- }
-
- CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
- UNLOCK_PVH(pa);
-
- } /* if PAGE_MANAGED */
-
- /*
- * For each pte in vm_page (NOTE: vm_page, not
- * M88K (machine dependent) page !! ), reflect
- * modify bits to pager and zero (invalidate,
- * remove) the pte entry.
- */
- tva = va;
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(pte);
- flush_atc_entry(users, tva, kflush);
-
- if (opte.pte.modified) {
- if (IS_VM_PHYSADDR(pa)) {
- vm_page_set_modified(PHYS_TO_VM_PAGE(opte.bits & M88K_PGMASK));
- }
- /* keep track ourselves too */
- if (PMAP_MANAGED(pa))
- SET_ATTRIB(pa, 1);
- }
- pte++;
- tva += M88K_PGBYTES;
- }
-
- } /* end for ( va = s; ...) */
-
- /*
- * Update the counts
- */
- pmap->stats.resident_count -= num_removed;
- pmap->stats.wired_count -= num_unwired;
+ int pfn;
+ int num_removed = 0;
+ int num_unwired = 0;
+ register int i;
+ pt_entry_t *pte;
+ pv_entry_t prev, cur;
+ pv_entry_t pvl;
+ vm_offset_t pa, va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (e < s)
+ panic("pmap_remove_range: end < start");
+
+ /*
+ * Pmap has been locked by pmap_remove.
+ */
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * Loop through the range in vm_page_size increments.
+	 * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true!?).
+ */
+
+ CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
+
+ for (va = s; va < e; va += PAGE_SIZE) {
+
+ sdt_entry_t *sdt;
+
+ sdt = SDTENT(pmap,va);
+
+ if (!SDT_VALID(sdt)) {
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
+ continue;
+ }
+
+ pte = pmap_pte(pmap,va);
+
+ if (!PDT_VALID(pte)) {
+ continue; /* no page mapping */
+ }
+
+ num_removed++;
+
+ if (pte->wired)
+ num_unwired++;
+
+ pfn = pte->pfn;
+ pa = M88K_PTOB(pfn);
+
+ if (PMAP_MANAGED(pa)) {
+ LOCK_PVH(pa);
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ pvl = PA_TO_PVH(pa);
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
+
+ if (pvl->pmap == PMAP_NULL) {
+ panic("pmap_remove_range: null pv_list");
+ }
+
+ if (pvl->va == va && pvl->pmap == pmap) {
+
+ /*
+			 * The header is the pv_entry. Copy the next one
+			 * to the header and free the next one (we can't
+			 * free the header)
+ */
+ cur = pvl->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ } else {
+ pvl->pmap = PMAP_NULL;
+ }
+
+ } else {
+
+ for (prev = pvl; (cur = prev->next) != PV_ENTRY_NULL; prev = cur) {
+ if (cur->va == va && cur->pmap == pmap) {
+ break;
+ }
+ }
+ if (cur == PV_ENTRY_NULL) {
+ printf("pmap_remove_range: looking for VA "
+ "0x%x (pa 0x%x) PV list at 0x%x\n", va, pa, (unsigned)pvl);
+ panic("pmap_remove_range: mapping not in pv_list");
+ }
+
+ prev->next = cur->next;
+ free((caddr_t)cur, M_VMPVENT);
+ }
+
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
+ UNLOCK_PVH(pa);
+
+ } /* if PAGE_MANAGED */
+
+ /*
+ * For each pte in vm_page (NOTE: vm_page, not
+ * M88K (machine dependent) page !! ), reflect
+ * modify bits to pager and zero (invalidate,
+ * remove) the pte entry.
+ */
+ tva = va;
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate pte temporarily to avoid the modified bit
+			 * and/or the reference bit being written back by other cpu.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(users, tva, kflush);
+
+ if (opte.pte.modified) {
+ if (IS_VM_PHYSADDR(pa)) {
+ vm_page_set_modified(PHYS_TO_VM_PAGE(opte.bits & M88K_PGMASK));
+ }
+ /* keep track ourselves too */
+ if (PMAP_MANAGED(pa))
+ SET_ATTRIB(pa, 1);
+ }
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+
+ } /* end for ( va = s; ...) */
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
} /* pmap_remove_range */
@@ -2322,28 +2198,26 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
void
pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
{
- int spl;
-
- if (map == PMAP_NULL) {
- return;
- }
+ int spl;
+ if (map == PMAP_NULL) {
+ return;
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
- printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
+ if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
+ printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
#endif
- CHECK_PAGE_ALIGN(s, "pmap_remove start addr");
+ CHECK_PAGE_ALIGN(s, "pmap_remove start addr");
- if (s>e)
- panic("pmap_remove: start greater than end address");
+ if (s > e)
+ panic("pmap_remove: start greater than end address");
- PMAP_LOCK(map, spl);
- pmap_remove_range(map, s, e);
- PMAP_UNLOCK(map, spl);
+ PMAP_LOCK(map, spl);
+ pmap_remove_range(map, s, e);
+ PMAP_UNLOCK(map, spl);
} /* pmap_remove() */
-
/*
* Routine: PMAP_REMOVE_ALL
*
@@ -2388,116 +2262,113 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
void
pmap_remove_all(vm_offset_t phys)
{
- pv_entry_t pvl, cur;
- register pt_entry_t *pte;
- int pfi;
- register int i;
- register vm_offset_t va;
- register pmap_t pmap;
- int spl;
- int dbgcnt = 0;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
- /* not a managed page. */
+ pv_entry_t pvl, cur;
+ register pt_entry_t *pte;
+ register int i;
+ register vm_offset_t va;
+ register pmap_t pmap;
+ int spl;
+ int dbgcnt = 0;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+ /* not a managed page. */
#ifdef DEBUG
- if (pmap_con_dbg & CD_RMAL)
- printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
+ if (pmap_con_dbg & CD_RMAL)
+ printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
#endif
- return;
- }
-
- SPLVM(spl);
-
- /*
- * Walk down PV list, removing all mappings.
- * We have to do the same work as in pmap_remove_pte_page
- * since that routine locks the pv_head. We don't have
- * to lock the pv_head, since we have the entire pmap system.
- */
- remove_all_Retry:
-
- pvl = PA_TO_PVH(phys);
- CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
- LOCK_PVH(phys);
-
- /*
- * Loop for each entry on the pv list
- */
- while ((pmap = pvl->pmap) != PMAP_NULL) {
- va = pvl->va;
- if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
- goto remove_all_Retry;
- }
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
-
- /*
- * Do a few consistency checks to make sure
- * the PV list and the pmap are in synch.
- */
- if (pte == PT_ENTRY_NULL) {
- printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
- (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
- panic("pmap_remove_all: pte NULL");
- }
- if (!PDT_VALID(pte))
- panic("pmap_remove_all: pte invalid");
- if (M88K_PTOB(pte->pfn) != phys)
- panic("pmap_remove_all: pte doesn't point to page");
- if (pte->wired)
- panic("pmap_remove_all: removing a wired page");
-
- pmap->stats.resident_count--;
-
- if ((cur = pvl->next) != PV_ENTRY_NULL) {
- *pvl = *cur;
- free((caddr_t)cur, M_VMPVENT);
- } else
- pvl->pmap = PMAP_NULL;
-
- /*
- * Reflect modified pages to pager.
- */
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(pte);
- flush_atc_entry(users, va, kflush);
-
- if (opte.pte.modified) {
- vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys));
- /* keep track ourselves too */
- SET_ATTRIB(phys, 1);
- }
- pte++;
- va += M88K_PGBYTES;
- }
-
- /*
- * Do not free any page tables,
- * leaves that for when VM calls pmap_collect().
- */
-
- simple_unlock(&pmap->lock);
- dbgcnt++;
- }
- CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
-
- UNLOCK_PVH(phys);
- SPLX(spl);
+ return;
+ }
+ SPLVM(spl);
+ /*
+ * Walk down PV list, removing all mappings.
+ * We have to do the same work as in pmap_remove_pte_page
+ * since that routine locks the pv_head. We don't have
+	 * to lock the pv_head, since we have the entire pmap system locked.
+ */
+remove_all_Retry:
+
+ pvl = PA_TO_PVH(phys);
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
+ LOCK_PVH(phys);
+
+ /*
+ * Loop for each entry on the pv list
+ */
+ while ((pmap = pvl->pmap) != PMAP_NULL) {
+ va = pvl->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto remove_all_Retry;
+ }
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Do a few consistency checks to make sure
+ * the PV list and the pmap are in synch.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
+ (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
+ panic("pmap_remove_all: pte NULL");
+ }
+ if (!PDT_VALID(pte))
+ panic("pmap_remove_all: pte invalid");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_remove_all: pte doesn't point to page");
+ if (pte->wired)
+ panic("pmap_remove_all: removing a wired page");
+
+ pmap->stats.resident_count--;
+
+ if ((cur = pvl->next) != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ } else
+ pvl->pmap = PMAP_NULL;
+
+ /*
+ * Reflect modified pages to pager.
+ */
+ for (i = ptes_per_vm_page; i>0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the modified
+ * bit and/or the reference bit being written back
+ * by other cpu.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(users, va, kflush);
+
+ if (opte.pte.modified) {
+ vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys));
+ /* keep track ourselves too */
+ SET_ATTRIB(phys, 1);
+ }
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ /*
+ * Do not free any page tables,
+		 * leave that for when VM calls pmap_collect().
+ */
+
+ simple_unlock(&pmap->lock);
+ dbgcnt++;
+ }
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
+
+ UNLOCK_PVH(phys);
+ SPLX(spl);
} /* pmap_remove_all() */
@@ -2530,103 +2401,100 @@ pmap_remove_all(vm_offset_t phys)
STATIC void
pmap_copy_on_write(vm_offset_t phys)
{
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- register int i;
- int spl, spl_sav;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ register int i;
+ int spl, spl_sav;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DEBUG
- if (pmap_con_dbg & CD_CMOD)
- printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys);
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys);
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
copy_on_write_Retry:
- pv_e = PA_TO_PVH(phys);
- CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
- LOCK_PVH(phys);
- if (pv_e->pmap == PMAP_NULL) {
-
+ pv_e = PA_TO_PVH(phys);
+ CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
+ LOCK_PVH(phys);
+
+ if (pv_e->pmap == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
- printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return; /* no mappings */
+ }
+
+ /*
+ * Run down the list of mappings to this physical page,
+ * disabling write privileges on each one.
+ */
+
+ while (pv_e != PV_ENTRY_NULL) {
+ pmap_t pmap;
+ vm_offset_t va;
+
+ pmap = pv_e->pmap;
+ va = pv_e->va;
- UNLOCK_PVH(phys);
- SPLX(spl);
-
- return; /* no mappings */
- }
-
- /*
- * Run down the list of mappings to this physical page,
- * disabling write privileges on each one.
- */
-
- while (pv_e != PV_ENTRY_NULL) {
- pmap_t pmap;
- vm_offset_t va;
-
- pmap = pv_e->pmap;
- va = pv_e->va;
-
- if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
- goto copy_on_write_Retry;
- }
-
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * Check for existing and valid pte
- */
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_copy_on_write: pte from pv_list not in map");
- if (!PDT_VALID(pte))
- panic("pmap_copy_on_write: invalid pte");
- if (M88K_PTOB(pte->pfn) != phys)
- panic("pmap_copy_on_write: pte doesn't point to page");
-
- /*
- * Flush TLBs of which cpus using pmap.
- */
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = M88K_RO;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
-
- simple_unlock(&pmap->lock);
- pv_e = pv_e->next;
- }
- CHECK_PV_LIST(phys, PA_TO_PVH(phys), "pmap_copy_on_write");
-
- UNLOCK_PVH(phys);
- SPLX(spl);
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto copy_on_write_Retry;
+ }
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * Check for existing and valid pte
+ */
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_copy_on_write: pte from pv_list not in map");
+ if (!PDT_VALID(pte))
+ panic("pmap_copy_on_write: invalid pte");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_copy_on_write: pte doesn't point to page");
+
+ /*
+		 * Flush the TLBs of the cpus using this pmap.
+ */
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the modified
+ * bit and/or the reference bit being written back
+ * by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+ simple_unlock(&pmap->lock);
+ pv_e = pv_e->next;
+ }
+ CHECK_PV_LIST(phys, PA_TO_PVH(phys), "pmap_copy_on_write");
+
+ UNLOCK_PVH(phys);
+ SPLX(spl);
} /* pmap_copy_on_write */
@@ -2660,99 +2528,90 @@ copy_on_write_Retry:
void
pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
{
- pte_template_t maprot;
- unsigned ap;
- int spl, spl_sav;
- register int i;
- pt_entry_t *pte;
- vm_offset_t va, tva;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
- return;
- if ((prot & VM_PROT_READ) == 0) {
- pmap_remove(pmap, s, e);
- return;
- }
-
- if (s > e)
- panic("pmap_protect: start grater than end address");
-
- maprot.bits = m88k_protection(pmap, prot);
- ap = maprot.pte.prot;
-
- PMAP_LOCK(pmap, spl);
-
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- CHECK_PAGE_ALIGN(s, "pmap_protect");
-
- /*
- * Loop through the range in vm_page_size increment.
- * Do not assume that either start or end fall on any
- * kind of page boundary (though this may be true ?!).
- */
- for (va = s; va <= e; va += PAGE_SIZE) {
-
- pte = pmap_pte(pmap, va);
-
- if (pte == PT_ENTRY_NULL) {
-
- va &= SDT_MASK; /* align to segment */
- if (va <= e - (1<<SDT_SHIFT))
- va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
- else /* wrap around */
- break;
+ pte_template_t maprot;
+ unsigned ap;
+ int spl, spl_sav;
+ register int i;
+ pt_entry_t *pte;
+ vm_offset_t va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
+ return;
+ if ((prot & VM_PROT_READ) == 0) {
+ pmap_remove(pmap, s, e);
+ return;
+ }
+
+ if (s > e)
+		panic("pmap_protect: start greater than end address");
+
+ maprot.bits = m88k_protection(pmap, prot);
+ ap = maprot.pte.prot;
+ PMAP_LOCK(pmap, spl);
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ CHECK_PAGE_ALIGN(s, "pmap_protect");
+
+ /*
+ * Loop through the range in vm_page_size increment.
+ * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true ?!).
+ */
+ for (va = s; va <= e; va += PAGE_SIZE) {
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL) {
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
#endif
- continue;
- }
+ continue;
+ }
- if (!PDT_VALID(pte)) {
+ if (!PDT_VALID(pte)) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
#endif
- continue; /* no page mapping */
- }
+ continue; /* no page mapping */
+ }
#if 0
- printf("(pmap_protect :%x) pte good\n", curproc);
+ printf("(pmap_protect :%x) pte good\n", curproc);
#endif
-
- tva = va;
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = ap;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, tva, kflush);
- splx(spl_sav);
- pte++;
- tva += M88K_PGBYTES;
- }
- }
-
- PMAP_UNLOCK(pmap, spl);
-
+ tva = va;
+ for (i = ptes_per_vm_page; i>0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the
+ * modified bit and/or the reference bit being
+ * written back by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = ap;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, tva, kflush);
+ splx(spl_sav);
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+ }
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_protect() */
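A note on the segment-skip arithmetic kept in the loop above: when pmap_pte() finds no page table for va, the code aligns va down to its segment and advances to the segment's last page, so that the loop's va += PAGE_SIZE lands on the start of the next segment. The sketch below recomputes that with placeholder SDT_SHIFT/PAGE_SIZE values; the real constants come from the mvme88k headers.

/*
 * Sketch of the segment-skip arithmetic; SDT_SHIFT and PAGE_SIZE are
 * placeholder values, not the real mvme88k definitions.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u
#define SDT_SHIFT 22				/* assume 4MB segments */
#define SDT_MASK  (~((1u << SDT_SHIFT) - 1))

int
main(void)
{
	unsigned int va = 0x0040a000u;		/* somewhere inside a segment */

	va &= SDT_MASK;				/* align to segment start */
	va += (1u << SDT_SHIFT) - PAGE_SIZE;	/* last page of this segment */
	/* the loop's "va += PAGE_SIZE" then starts the next segment */
	printf("next iteration begins at 0x%08x\n", va + PAGE_SIZE);
	return (0);
}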
-
-
/*
* Routine: PMAP_EXPAND
*
@@ -2800,116 +2659,100 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
STATIC void
pmap_expand(pmap_t map, vm_offset_t v)
{
- int i,
- spl;
- vm_offset_t pdt_vaddr,
- pdt_paddr;
-
- sdt_entry_t *sdt;
- pt_entry_t *pte;
- vm_offset_t pmap_extract();
-
- if (map == PMAP_NULL) {
- panic("pmap_expand: pmap is NULL");
- }
+ int i, spl;
+ vm_offset_t pdt_vaddr, pdt_paddr;
+ sdt_entry_t *sdt;
+ pt_entry_t *pte;
+ vm_offset_t pmap_extract();
+
+ if (map == PMAP_NULL) {
+ panic("pmap_expand: pmap is NULL");
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
- printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
+ if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
+ printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
#endif
- CHECK_PAGE_ALIGN (v, "pmap_expand");
+ CHECK_PAGE_ALIGN (v, "pmap_expand");
- /*
- * Handle kernel pmap in pmap_expand_kmap().
- */
- if (map == kernel_pmap) {
- PMAP_LOCK(map, spl);
- if (pmap_expand_kmap(v, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
- panic ("pmap_expand: Cannot allocate kernel pte table");
- PMAP_UNLOCK(map, spl);
+ /*
+ * Handle kernel pmap in pmap_expand_kmap().
+ */
+ if (map == kernel_pmap) {
+ PMAP_LOCK(map, spl);
+ if (pmap_expand_kmap(v, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
+ panic ("pmap_expand: Cannot allocate kernel pte table");
+ PMAP_UNLOCK(map, spl);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_EXP | CD_FULL)) == (CD_EXP | CD_FULL))
- printf("(pmap_expand :%x) kernel_pmap\n", curproc);
+ if ((pmap_con_dbg & (CD_EXP | CD_FULL)) == (CD_EXP | CD_FULL))
+ printf("(pmap_expand :%x) kernel_pmap\n", curproc);
#endif
- return;
- }
+ return;
+ }
- /* XXX */
-#ifdef MACH_KERNEL
- if (kmem_alloc_wired(kernel_map, &pdt_vaddr, PAGE_SIZE) != KERN_SUCCESS)
- panic("pmap_enter: kmem_alloc failure");
- pmap_zero_page(pmap_extract(kernel_pmap, pdt_vaddr));
-#else
+ /* XXX */
#if defined(UVM)
- pdt_vaddr = uvm_km_zalloc(kernel_map, PAGE_SIZE);
+ pdt_vaddr = uvm_km_zalloc(kernel_map, PAGE_SIZE);
#else
- pdt_vaddr = kmem_alloc (kernel_map, PAGE_SIZE);
-#endif
+ pdt_vaddr = kmem_alloc (kernel_map, PAGE_SIZE);
#endif
+ pdt_paddr = pmap_extract(kernel_pmap, pdt_vaddr);
- pdt_paddr = pmap_extract(kernel_pmap, pdt_vaddr);
-
-#if notneeded
- /*
- * the page for page tables should be CACHE DISABLED
- */
- pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
-#endif
+ if (cputyp == CPU_188) {
+ /*
+ * the page for page tables should be CACHE DISABLED on MVME188
+ */
+ pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
+ }
- PMAP_LOCK(map, spl);
+ PMAP_LOCK(map, spl);
- if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) {
- /*
- * Someone else caused us to expand
- * during our vm_allocate.
- */
- PMAP_UNLOCK(map, spl);
- /* XXX */
+ if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) {
+ /*
+ * Someone else caused us to expand
+ * during our vm_allocate.
+ */
+ PMAP_UNLOCK(map, spl);
+ /* XXX */
#if defined(UVM)
- uvm_km_free(kernel_map, pdt_vaddr, PAGE_SIZE);
+ uvm_km_free(kernel_map, pdt_vaddr, PAGE_SIZE);
#else
- kmem_free (kernel_map, pdt_vaddr, PAGE_SIZE);
+ kmem_free (kernel_map, pdt_vaddr, PAGE_SIZE);
#endif
-
+
#ifdef DEBUG
- if (pmap_con_dbg & CD_EXP)
- printf("(pmap_expand :%x) table has already allocated\n", curproc);
+ if (pmap_con_dbg & CD_EXP)
+ printf("(pmap_expand :%x) table has already been allocated\n", curproc);
#endif
- return;
- }
-
- /*
- * Apply a mask to V to obtain the vaddr of the beginning of
- * its containing page 'table group',i.e. the group of
- * page tables that fit eithin a single VM page.
- * Using that, obtain the segment table pointer that references the
- * first page table in the group, and initilize all the
- * segment table descriptions for the page 'table group'.
- */
- v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
-
- sdt = SDTENT(map,v);
-
- /*
- * Init each of the segment entries to point the freshly allocated
- * page tables.
- */
-
- for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
- ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
- sdt++;
- pdt_paddr += PDT_SIZE;
- pdt_vaddr += PDT_SIZE;
- }
-
- PMAP_UNLOCK(map, spl);
-
+ return;
+ }
+ /*
+ * Apply a mask to V to obtain the vaddr of the beginning of
+ * its containing page 'table group', i.e. the group of
+ * page tables that fit within a single VM page.
+ * Using that, obtain the segment table pointer that references the
+ * first page table in the group, and initialize all the
+ * segment table descriptors for the page 'table group'.
+ */
+ v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
+
+ sdt = SDTENT(map,v);
+
+ /*
+ * Init each of the segment entries to point the freshly allocated
+ * page tables.
+ */
+ for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
+ ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
+ sdt++;
+ pdt_paddr += PDT_SIZE;
+ pdt_vaddr += PDT_SIZE;
+ }
+ PMAP_UNLOCK(map, spl);
} /* pmap_expand() */
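The masking step in pmap_expand() above rounds v down to the start of its page 'table group', i.e. the virtual span covered by the page tables that share the freshly allocated VM page. The following sketch redoes that computation with assumed bit widths; the real LOG2_PDT_TABLE_GROUP_SIZE, PDT_BITS and PG_BITS values live in the pmap headers.

/*
 * Sketch of the table-group alignment; the bit widths below are
 * assumptions for the example, not the real mvme88k definitions.
 */
#include <stdio.h>

#define PG_BITS                   12	/* assume 4K pages */
#define PDT_BITS                  10	/* assume 1024 PTEs per page table */
#define LOG2_PDT_TABLE_GROUP_SIZE  2	/* assume 4 page tables per VM page */

int
main(void)
{
	unsigned int v = 0x0132f000u;
	unsigned int span = 1u << (LOG2_PDT_TABLE_GROUP_SIZE + PDT_BITS + PG_BITS);

	/* round v down to the start of its page 'table group' */
	v &= ~(span - 1);
	printf("group base 0x%08x, VA span 0x%08x bytes\n", v, span);
	return (0);
}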
-
-
/*
* Routine: PMAP_ENTER
*
@@ -2990,239 +2833,234 @@ pmap_expand(pmap_t map, vm_offset_t v)
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
- vm_prot_t prot, boolean_t wired,
- vm_prot_t access_type)
+ vm_prot_t prot, boolean_t wired,
+ vm_prot_t access_type)
{
- int ap;
- int spl, spl_sav;
- pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t old_pa;
- pte_template_t template;
- register int i;
- int pfi;
- pv_entry_t pvl;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (pmap == PMAP_NULL) {
- panic("pmap_enter: pmap is NULL");
- }
-
- CHECK_PAGE_ALIGN (va, "pmap_entry - VA");
- CHECK_PAGE_ALIGN (pa, "pmap_entry - PA");
-
- /*
- * Range check no longer use, since we use whole address space
- */
+ int ap;
+ int spl, spl_sav;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t old_pa;
+ pte_template_t template;
+ register int i;
+ pv_entry_t pvl;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_enter: pmap is NULL");
+ }
+
+ CHECK_PAGE_ALIGN (va, "pmap_entry - VA");
+ CHECK_PAGE_ALIGN (pa, "pmap_entry - PA");
+
+ /*
+ * Range check no longer used, since we use the whole address space
+ */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (pmap == kernel_pmap)
- printf ("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
- else
- printf ("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (pmap == kernel_pmap)
+ printf ("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
+ else
+ printf ("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
+ }
#endif
- ap = m88k_protection (pmap, prot);
+ ap = m88k_protection (pmap, prot);
- /*
- * Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
- * If we determine we need a pvlist entry, we will unlock
- * and allocate one. Then will retry, throwing away
- * the allocated entry later (if we no longer need it).
- */
- pv_e = PV_ENTRY_NULL;
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * zalloc may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+ * and allocate one. Then will retry, throwing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
Retry:
- PMAP_LOCK(pmap, spl);
-
- /*
- * Expand pmap to include this pte. Assume that
- * pmap is always expanded to include enough M88K
- * pages to map one VM page.
- */
- while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
- /*
- * Must unlock to expand the pmap.
- */
- PMAP_UNLOCK(pmap, spl);
- pmap_expand(pmap, va);
- PMAP_LOCK(pmap, spl);
- }
-
- /*
- * Special case if the physical page is already mapped
- * at this address.
- */
- old_pa = M88K_PTOB(pte->pfn);
- if (old_pa == pa) {
-
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * May be changing its wired attributes or protection
- */
-
- if (wired && !pte->wired)
- pmap->stats.wired_count++;
- else if (!wired && pte->wired)
- pmap->stats.wired_count--;
-
- if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
- else
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
- if (wired)
- template.pte.wired = 1;
-
- /*
- * If there is a same mapping, we have nothing to do.
- */
- if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
- || (pte->prot != template.pte.prot)) {
-
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- template.pte.modified = opte.pte.modified;
- *pte++ = template.pte;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- template.bits += M88K_PGBYTES;
- va += M88K_PGBYTES;
- }
- }
-
- } else { /* if ( pa == old_pa) */
-
- /*
- * Remove old mapping from the PV list if necessary.
- */
- if (old_pa != (vm_offset_t)-1) {
- /*
- * Invalidate the translation buffer,
- * then remove the mapping.
- */
+ PMAP_LOCK(pmap, spl);
+
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough M88K
+ * pages to map one VM page.
+ */
+ while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ /*
+ * Must unlock to expand the pmap.
+ */
+ PMAP_UNLOCK(pmap, spl);
+ pmap_expand(pmap, va);
+ PMAP_LOCK(pmap, spl);
+ }
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = M88K_PTOB(pte->pfn);
+ if (old_pa == pa) {
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * May be changing its wired attributes or protection
+ */
+
+ if (wired && !pte->wired)
+ pmap->stats.wired_count++;
+ else if (!wired && pte->wired)
+ pmap->stats.wired_count--;
+
+ if ((unsigned long)pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
+ if (wired)
+ template.pte.wired = 1;
+
+ /*
+ * If the mapping is unchanged, there is nothing to do.
+ */
+ if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
+ || (pte->prot != template.pte.prot)) {
+
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+ * Invalidate pte temporarily to avoid the modified bit
+ * and/or the reference bit being written back by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ template.pte.modified = opte.pte.modified;
+ *pte++ = template.pte;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ template.bits += M88K_PGBYTES;
+ va += M88K_PGBYTES;
+ }
+ }
+
+ } else { /* if ( pa == old_pa) */
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (old_pa != (vm_offset_t)-1) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
- phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
- PMAP_MANAGED(pa) ? 1 : 0);
- printf("pte %x pfn %x valid %x\n",
- pte, pte->pfn, pte->dtype);
- }
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
+ phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
+ PMAP_MANAGED(pa) ? 1 : 0);
+ printf("pte %x pfn %x valid %x\n",
+ pte, pte->pfn, pte->dtype);
+ }
+ }
#endif
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- flush_atc_entry(users, va, 1);
- } else {
- pmap_remove_range(pmap, va, va + PAGE_SIZE);
- }
- }
-
- if (PMAP_MANAGED(pa)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ flush_atc_entry(users, va, 1);
+ } else {
+ pmap_remove_range(pmap, va, va + PAGE_SIZE);
+ }
+ }
+
+ if (PMAP_MANAGED(pa)) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- printf("va 0x%x and managed pa 0x%x\n", va, pa);
- }
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ printf("va 0x%x and managed pa 0x%x\n", va, pa);
+ }
+ }
#endif
- /*
- * Enter the mappimg in the PV list for this
- * physical page.
- */
- LOCK_PVH(pa);
- pvl = PA_TO_PVH(pa);
- CHECK_PV_LIST (pa, pvl, "pmap_enter before");
-
- if (pvl->pmap == PMAP_NULL) {
-
- /*
- * No mappings yet
- */
- pvl->va = va;
- pvl->pmap = pmap;
- pvl->next = PV_ENTRY_NULL;
-
- } else {
+ /*
+ * Enter the mapping in the PV list for this
+ * physical page.
+ */
+ LOCK_PVH(pa);
+ pvl = PA_TO_PVH(pa);
+ CHECK_PV_LIST (pa, pvl, "pmap_enter before");
+
+ if (pvl->pmap == PMAP_NULL) {
+ /*
+ * No mappings yet
+ */
+ pvl->va = va;
+ pvl->pmap = pmap;
+ pvl->next = PV_ENTRY_NULL;
+
+ } else {
#ifdef DEBUG
- /*
- * check that this mapping is not already there
- */
- {
- pv_entry_t e = pvl;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap && e->va == va)
- panic ("pmap_enter: already in pv_list");
- e = e->next;
- }
- }
+ /*
+ * check that this mapping is not already there
+ */
+ {
+ pv_entry_t e = pvl;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == va)
+ panic("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
#endif
- /*
- * Add new pv_entry after header.
- */
- if (pv_e == PV_ENTRY_NULL) {
- UNLOCK_PVH(pa);
- PMAP_UNLOCK(pmap, spl);
- pv_e = (pv_entry_t) malloc(sizeof *pv_e, M_VMPVENT,
- M_NOWAIT);
- goto Retry;
- }
- pv_e->va = va;
- pv_e->pmap = pmap;
- pv_e->next = pvl->next;
- pvl->next = pv_e;
- /*
- * Remeber that we used the pvlist entry.
- */
- pv_e = PV_ENTRY_NULL;
- }
- UNLOCK_PVH(pa);
- }
-
- /*
- * And count the mapping.
- */
- pmap->stats.resident_count++;
- if (wired)
- pmap->stats.wired_count++;
-
- if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
- else
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
-
- if (wired)
- template.pte.wired = 1;
-
- DO_PTES (pte, template.bits);
-
- } /* if ( pa == old_pa ) ... else */
-
- PMAP_UNLOCK(pmap, spl);
-
- if (pv_e != PV_ENTRY_NULL)
- free((caddr_t) pv_e, M_VMPVENT);
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ UNLOCK_PVH(pa);
+ PMAP_UNLOCK(pmap, spl);
+ pv_e = (pv_entry_t) malloc(sizeof *pv_e,
+ M_VMPVENT,
+ M_NOWAIT);
+ goto Retry;
+ }
+ pv_e->va = va;
+ pv_e->pmap = pmap;
+ pv_e->next = pvl->next;
+ pvl->next = pv_e;
+ /*
+ * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ UNLOCK_PVH(pa);
+ }
-} /* pmap_enter */
+ /*
+ * And count the mapping.
+ */
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+ if ((unsigned long)pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
+ if (wired)
+ template.pte.wired = 1;
+ DO_PTES (pte, template.bits);
+
+ } /* if ( pa == old_pa ) ... else */
+
+ PMAP_UNLOCK(pmap, spl);
+
+ if (pv_e != PV_ENTRY_NULL)
+ free((caddr_t) pv_e, M_VMPVENT);
+
+} /* pmap_enter */
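The Retry label above is a common locking idiom: the pv entry must be allocated while the pmap is unlocked (the allocator may sleep or trigger pageout, which re-enters the pmap system), so the code drops the lock, allocates, retries the whole lookup, and frees the entry afterwards if the retry no longer needs it. Below is a stand-alone sketch of that idiom; the pthread mutex, list and malloc are stand-ins for the kernel's simple locks, pv lists and M_NOWAIT allocations.

/*
 * Stand-alone sketch of the allocate-outside-the-lock Retry idiom.
 */
#include <stdlib.h>
#include <pthread.h>

struct pv_entry {
	struct pv_entry *next;
};

static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pv_entry pv_header;		/* header for one page */
static int pv_header_used;

void
enter_mapping(void)
{
	struct pv_entry *pv_e = NULL;

retry:
	pthread_mutex_lock(&pmap_lock);
	if (pv_header_used && pv_e == NULL) {
		/* need a chained entry: drop the lock, allocate, retry */
		pthread_mutex_unlock(&pmap_lock);
		if ((pv_e = malloc(sizeof(*pv_e))) == NULL)
			return;			/* real code would recover */
		goto retry;
	}
	if (!pv_header_used) {
		pv_header_used = 1;		/* first mapping uses the header */
	} else {
		pv_e->next = pv_header.next;	/* link new entry after header */
		pv_header.next = pv_e;
		pv_e = NULL;			/* remember we consumed it */
	}
	pthread_mutex_unlock(&pmap_lock);
	if (pv_e != NULL)
		free(pv_e);			/* allocated but never needed */
}

int
main(void)
{
	enter_mapping();			/* uses the header */
	enter_mapping();			/* allocates a chained entry */
	return (0);
}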
/*
* Routine: pmap_change_wiring
@@ -3250,36 +3088,30 @@ Retry:
void
pmap_change_wiring(pmap_t map, vm_offset_t v, boolean_t wired)
{
- pt_entry_t *pte;
- int i;
- int spl;
+ pt_entry_t *pte;
+ int i;
+ int spl;
- PMAP_LOCK(map, spl);
+ PMAP_LOCK(map, spl);
- if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
- panic ("pmap_change_wiring: pte missing");
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic ("pmap_change_wiring: pte missing");
- if (wired && !pte->wired)
- /*
- * wiring mapping
- */
- map->stats.wired_count++;
+ if (wired && !pte->wired)
+ /* wiring mapping */
+ map->stats.wired_count++;
- else if (!wired && pte->wired)
- /*
- * unwired mapping
- */
- map->stats.wired_count--;
+ else if (!wired && pte->wired)
+ /* unwired mapping */
+ map->stats.wired_count--;
- for (i = ptes_per_vm_page; i>0; i--)
- (pte++)->wired = wired;
+ for (i = ptes_per_vm_page; i>0; i--)
+ (pte++)->wired = wired;
- PMAP_UNLOCK(map, spl);
+ PMAP_UNLOCK(map, spl);
} /* pmap_change_wiring() */
-
-
/*
* Routine: PMAP_EXTRACT
*
@@ -3310,45 +3142,41 @@ pmap_change_wiring(pmap_t map, vm_offset_t v, boolean_t wired)
vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
- register pt_entry_t *pte;
- register vm_offset_t pa;
- register int i;
- int spl;
-
- if (pmap == PMAP_NULL)
- panic("pmap_extract: pmap is NULL");
-
- /*
- * check BATC first
- */
- if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used-1; i > 0; i--)
- if (batc_entry[i].lba == M88K_BTOBLK(va)) {
- pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
- return (pa);
- }
-
- PMAP_LOCK(pmap, spl);
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else {
- if (PDT_VALID(pte))
- pa = M88K_PTOB(pte->pfn);
- else
- pa = (vm_offset_t) 0;
- }
-
- if (pa)
- pa |= (va & M88K_PGOFSET); /* offset within page */
-
- PMAP_UNLOCK(pmap, spl);
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ register int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_extract: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) |
+ (va & BATC_BLKMASK );
+ return (pa);
+ }
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
-#if 0
- printf("pmap_extract ret %x\n", pa);
-#endif /* 0 */
- return (pa);
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+ PMAP_UNLOCK(pmap, spl);
+ return (pa);
} /* pmap_extract() */
/*
@@ -3358,36 +3186,37 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
vm_offset_t
pmap_extract_unlocked(pmap_t pmap, vm_offset_t va)
{
- pt_entry_t *pte;
- vm_offset_t pa;
- int i;
-
- if (pmap == PMAP_NULL)
- panic("pmap_extract: pmap is NULL");
-
- /*
- * check BATC first
- */
- if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used-1; i > 0; i--)
- if (batc_entry[i].lba == M88K_BTOBLK(va)) {
- pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
- return (pa);
- }
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else {
- if (PDT_VALID(pte))
- pa = M88K_PTOB(pte->pfn);
- else
- pa = (vm_offset_t) 0;
- }
-
- if (pa)
- pa |= (va & M88K_PGOFSET); /* offset within page */
-
- return (pa);
+ pt_entry_t *pte;
+ vm_offset_t pa;
+ int i;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_extract: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) |
+ (va & BATC_BLKMASK );
+ return (pa);
+ }
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
+
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+
+ return (pa);
} /* pmap_extract_unlocked() */
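Both pmap_extract() variants above consult the BATC before walking the page tables: a block entry maps a whole naturally aligned block, so the physical address is simply the cached physical block number shifted up, OR'd with the offset within the block. The example below redoes that arithmetic; the 512KB block size is an assumption for illustration.

/*
 * Sketch of the BATC translation; the 512KB block size is assumed.
 */
#include <stdio.h>

#define BATC_BLKSHIFT 19			/* assume 512KB blocks */
#define BATC_BLKMASK  ((1u << BATC_BLKSHIFT) - 1)
#define BTOBLK(x)     ((x) >> BATC_BLKSHIFT)

int
main(void)
{
	unsigned int va  = 0x00123456u;		/* address to translate */
	unsigned int lba = BTOBLK(0x00100000u);	/* cached logical block */
	unsigned int pba = BTOBLK(0x04000000u);	/* cached physical block */

	if (BTOBLK(va) == lba) {
		unsigned int pa = (pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK);

		printf("va 0x%08x -> pa 0x%08x\n", va, pa);
	}
	return (0);
}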
@@ -3413,10 +3242,10 @@ pmap_extract_unlocked(pmap_t pmap, vm_offset_t va)
*/
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
+ vm_size_t len, vm_offset_t src_addr)
{
#ifdef lint
- dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
+ dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
#endif
@@ -3448,8 +3277,8 @@ void
pmap_update(void)
{
#ifdef DBG
- if ((pmap_con_dbg & (CD_UPD | CD_FULL)) == (CD_UPD | CD_FULL))
- printf("(pmap_update :%x) Called \n", curproc);
+ if ((pmap_con_dbg & (CD_UPD | CD_FULL)) == (CD_UPD | CD_FULL))
+ printf("(pmap_update :%x) Called \n", curproc);
#endif
}/* pmap_update() */
@@ -3499,126 +3328,137 @@ void
pmap_collect(pmap_t pmap)
{
- vm_offset_t sdt_va; /* outer loop index */
- vm_offset_t sdt_vt; /* end of segment */
- sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
- sdt_entry_t *sdtp; /* ptr to index into segment table */
- sdt_entry_t *sdt; /* ptr to index into segment table */
- pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
- pt_entry_t *gdtp; /* ptr to index into a page table */
- boolean_t found_gdt_wired; /* flag indicating a wired page exists in */
- /* a page table's address range */
- int spl;
- unsigned int i,j;
-
-
-
- if (pmap == PMAP_NULL) {
- panic("pmap_collect: pmap is NULL");
- }
- if (pmap == kernel_pmap) {
+ vm_offset_t sdt_va; /* outer loop index */
+ vm_offset_t sdt_vt; /* end of segment */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ sdt_entry_t *sdtp; /* ptr to index into segment table */
+ sdt_entry_t *sdt; /* ptr to index into segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
+ pt_entry_t *gdtp; /* ptr to index into a page table */
+ boolean_t found_gdt_wired; /* flag indicating a wired page exists
+ in a page table's address range */
+ int spl;
+ unsigned int i,j;
+
+
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_collect: pmap is NULL");
+ }
+ if (pmap == kernel_pmap) {
#ifdef MACH_KERNEL
- return;
+ return;
#else
- panic("pmap_collect attempted on kernel pmap");
+ panic("pmap_collect attempted on kernel pmap");
#endif
- }
+ }
- CHECK_PMAP_CONSISTENCY ("pmap_collect");
+ CHECK_PMAP_CONSISTENCY ("pmap_collect");
#ifdef DBG
- if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
- printf ("(pmap_collect :%x) pmap %x\n", curproc, pmap);
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ printf ("(pmap_collect :%x) pmap %x\n", curproc, pmap);
#endif
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- sdttbl = pmap->sdt_vaddr; /* addr of segment table */
- sdtp = sdttbl;
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+ sdtp = sdttbl;
- /*
- This contortion is here instead of the natural loop
- because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
- */
+ /*
+ This contortion is here instead of the natural loop
+ because of integer overflow/wraparound if VM_MAX_USER_ADDRESS
+ is near 0xffffffff
+ */
- i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- if ( j < 1024 ) j++;
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
- /* Segment table loop */
- for ( ; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE) {
- sdt_va = VM_MIN_USER_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
+ /* Segment table loop */
+ for ( ; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE) {
+ sdt_va = VM_MIN_USER_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
- gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va);
+ gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va);
- if (gdttbl == PT_ENTRY_NULL)
- continue; /* no maps in this range */
+ if (gdttbl == PT_ENTRY_NULL)
+ continue; /* no maps in this range */
- gdttblend = gdttbl + (PDT_ENTRIES * PDT_TABLE_GROUP_SIZE);
+ gdttblend = gdttbl + (PDT_ENTRIES * PDT_TABLE_GROUP_SIZE);
- /* scan page maps for wired pages */
- found_gdt_wired = FALSE;
- for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
- if (gdtp->wired) {
- found_gdt_wired = TRUE;
- break;
- }
- }
+ /* scan page maps for wired pages */
+ found_gdt_wired = FALSE;
+ for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
+ if (gdtp->wired) {
+ found_gdt_wired = TRUE;
+ break;
+ }
+ }
- if (found_gdt_wired)
- continue; /* can't free this range */
+ if (found_gdt_wired)
+ continue; /* can't free this range */
- /* figure out end of range. Watch for wraparound */
+ /* figure out end of range. Watch for wraparound */
- sdt_vt = sdt_va <= VM_MAX_USER_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
- sdt_va+PDT_TABLE_GROUP_VA_SPACE :
- VM_MAX_USER_ADDRESS;
+ sdt_vt = sdt_va <= VM_MAX_USER_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
+ sdt_va+PDT_TABLE_GROUP_VA_SPACE :
+ VM_MAX_USER_ADDRESS;
- /* invalidate all maps in this range */
- pmap_remove_range (pmap, (vm_offset_t)sdt_va, (vm_offset_t)sdt_vt);
+ /* invalidate all maps in this range */
+ pmap_remove_range (pmap, (vm_offset_t)sdt_va,(vm_offset_t)sdt_vt);
- /*
- * we can safely deallocated the page map(s)
- */
- for (sdt = sdtp; sdt < (sdtp+PDT_TABLE_GROUP_SIZE); sdt++) {
- ((sdt_entry_template_t *) sdt) -> bits = 0;
- ((sdt_entry_template_t *) sdt+SDT_ENTRIES) -> bits = 0;
- }
+ /*
+ * we can safely deallocate the page map(s)
+ */
+ for (sdt = sdtp; sdt < (sdtp+PDT_TABLE_GROUP_SIZE); sdt++) {
+ ((sdt_entry_template_t *) sdt) -> bits = 0;
+ ((sdt_entry_template_t *) sdt+SDT_ENTRIES) -> bits = 0;
+ }
- /*
- * we have to unlock before freeing the table, since PT_FREE
- * calls kmem_free or zfree, which will invoke another pmap routine
- */
- PMAP_UNLOCK(pmap, spl);
- PT_FREE(gdttbl);
- PMAP_LOCK(pmap, spl);
+ /*
+ * we have to unlock before freeing the table, since PT_FREE
+ * calls kmem_free or zfree, which will invoke another
+ * pmap routine
+ */
+ PMAP_UNLOCK(pmap, spl);
+ PT_FREE(gdttbl);
+ PMAP_LOCK(pmap, spl);
- } /* Segment table Loop */
+ } /* Segment table Loop */
- PMAP_UNLOCK(pmap, spl);
+ PMAP_UNLOCK(pmap, spl);
#ifdef DBG
- if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
- printf ("(pmap_collect :%x) done \n", curproc);
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ printf ("(pmap_collect :%x) done \n", curproc);
#endif
- CHECK_PMAP_CONSISTENCY("pmap_collect");
+ CHECK_PMAP_CONSISTENCY("pmap_collect");
} /* pmap collect() */
/*
- * Routine: PMAP_ACTIVATE
+ * Routine: PMAP_ACTIVATE
+ *
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ * In a mono-processor implementation the my_cpu
+ * argument is ignored, and the PMAP_ACTIVATE macro
+ * simply sets the MMU root pointer element of the PCB
+ * to the physical address of the segment descriptor table.
*
- * Function:
- * Binds the given physical map to the given
- * processor, and returns a hardware map description.
- * In a mono-processor implementation the my_cpu
- * argument is ignored, and the PMAP_ACTIVATE macro
- * simply sets the MMU root pointer element of the PCB
- * to the physical address of the segment descriptor table.
+ * Parameters:
+ * p pointer to proc structure
+ *
+ * Notes:
+ * If the specified pmap is not kernel_pmap, this routine builds an apr
+ * template and stores it into the UAPR (user area pointer register) in the
+ * CMMUs connected to the specified CPU.
+ *
+ * If kernel_pmap is specified, only flushes the TLBs mapping kernel
+ * virtual space, in the CMMUs connected to the specified CPU.
*
- * Parameters:
- * p pointer to proc structure
*/
void
pmap_activate(struct proc *p)
@@ -3630,7 +3470,7 @@ pmap_activate(struct proc *p)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) pmap 0x%x\n", p, (unsigned)pmap);
+ printf("(pmap_activate :%x) pmap 0x%x\n", p, (unsigned)pmap);
#endif
if (pmap != kernel_pmap) {
@@ -3648,11 +3488,13 @@ pmap_activate(struct proc *p)
#ifdef notyet
#ifdef OMRON_PMAP
/*
- * cmmu_pmap_activate will set the uapr and the batc entries, then
- * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE
- * BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE FLUSHED AS WELL.
+ * cmmu_pmap_activate will set the uapr and the batc entries,
+ * then flush the *USER* TLB. IF THE KERNEL WILL EVER CARE
+ * ABOUT THE BATC ENTRIES, THE SUPERVISOR TLBs SHOULD BE
+ * FLUSHED AS WELL.
*/
- cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc);
+ cmmu_pmap_activate(my_cpu, apr_data.bits,
+ pmap->i_batc, pmap->d_batc);
for (n = 0; n < BATC_MAX; n++)
*(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
#else
@@ -3681,24 +3523,24 @@ pmap_activate(struct proc *p)
#ifdef DEBUG
if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc);
+ printf("(pmap_activate :%x) called for kernel_pmap\n", curproc);
#endif
}
} /* pmap_activate() */
-
-
/*
- * Routine: PMAP_DEACTIVATE
+ * Routine: PMAP_DEACTIVATE
*
- * Function:
- * Unbinds the given physical map from the given processor,
- * i.e. the pmap i no longer is use on the processor.
- * In a mono-processor the PMAP_DEACTIVATE macro is null.
+ * Function:
+ * Unbinds the given physical map from the given processor,
+ * i.e. the pmap is no longer in use on the processor.
+ *
+ * Parameters:
+ * p pointer to proc structure
+ *
+ * pmap_deactivate simply clears the cpus_using field in the given pmap structure.
*
- * Parameters:
- * p pointer to proc structure
*/
void
pmap_deactivate(struct proc *p)
@@ -3717,21 +3559,6 @@ pmap_deactivate(struct proc *p)
}
} /* pmap_deactivate() */
-
-/*
- * Routine: PMAP_KERNEL
- *
- * Function:
- * Retruns a pointer to the kernel pmap.
- */
-#if 0 /* Now a macro XXX smurph */
-pmap_t
-pmap_kernel(void)
-{
- return (kernel_pmap);
-}/* pmap_kernel() */
-#endif
-
/*
* Routine: PMAP_COPY_PAGE
*
@@ -3763,46 +3590,49 @@ pmap_kernel(void)
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
- vm_offset_t dstva, srcva;
- unsigned int spl_sav;
- int i;
- int aprot;
- pte_template_t template;
- pt_entry_t *dstpte, *srcpte;
- int my_cpu = cpu_number();
-
- /*
- * Map source physical address.
- */
- aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
-
- srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE));
- dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
-
- srcpte = pmap_pte(kernel_pmap, srcva);
- dstpte = pmap_pte(kernel_pmap, dstva);
-
- for (i=0; i < ptes_per_vm_page; i++, src += M88K_PGBYTES, dst += M88K_PGBYTES) {
- template.bits = M88K_TRUNC_PAGE(src) | aprot | DT_VALID | CACHE_GLOBAL;
-
- /* do we need to write back dirty bits */
- spl_sav = splimp();
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
-
- /*
- * Map destination physical address.
- */
- template.bits = M88K_TRUNC_PAGE(dst) | aprot | CACHE_GLOBAL | DT_VALID;
- cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
- *dstpte = template.pte;
- splx(spl_sav);
-
- bcopy((void*)srcva, (void*)dstva, M88K_PGBYTES);
- /* flush source, dest out of cache? */
- cmmu_flush_remote_data_cache(my_cpu, src, M88K_PGBYTES);
- cmmu_flush_remote_data_cache(my_cpu, dst, M88K_PGBYTES);
- }
+ vm_offset_t dstva, srcva;
+ unsigned int spl_sav;
+ int i;
+ int aprot;
+ pte_template_t template;
+ pt_entry_t *dstpte, *srcpte;
+ int my_cpu = cpu_number();
+
+ /*
+ * Map source physical address.
+ */
+ aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE));
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ dstpte = pmap_pte(kernel_pmap, dstva);
+
+ for (i=0; i < ptes_per_vm_page; i++,
+ src += M88K_PGBYTES, dst += M88K_PGBYTES) {
+ template.bits = M88K_TRUNC_PAGE(src) | aprot |
+ DT_VALID | CACHE_GLOBAL;
+
+ /* do we need to write back dirty bits */
+ spl_sav = splimp();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ /*
+ * Map destination physical address.
+ */
+ template.bits = M88K_TRUNC_PAGE(dst) | aprot |
+ CACHE_GLOBAL | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+ splx(spl_sav);
+
+ bcopy((void*)srcva, (void*)dstva, M88K_PGBYTES);
+ /* flush source, dest out of cache? */
+ cmmu_flush_remote_data_cache(my_cpu, src, M88K_PGBYTES);
+ cmmu_flush_remote_data_cache(my_cpu, dst, M88K_PGBYTES);
+ }
} /* pmap_copy_page() */
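pmap_copy_page() above relies on per-CPU mapping windows: each CPU owns a page-sized slot at phys_map_vaddr1 (source) and phys_map_vaddr2 (destination), so concurrent copies never collide on the same kernel virtual address. A quick sketch of that address arithmetic follows; the window base addresses are invented for the example.

/*
 * Sketch of the per-CPU mapping windows; base addresses are invented.
 */
#include <stdio.h>

#define PAGE_SIZE       4096u
#define PHYS_MAP_VADDR1 0xffe00000u	/* hypothetical source window base */
#define PHYS_MAP_VADDR2 0xffe10000u	/* hypothetical destination window base */

int
main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)	/* the MVME188 supports up to 4 CPUs */
		printf("cpu%d: src window 0x%08x, dst window 0x%08x\n", cpu,
		    PHYS_MAP_VADDR1 + cpu * PAGE_SIZE,
		    PHYS_MAP_VADDR2 + cpu * PAGE_SIZE);
	return (0);
}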
@@ -3833,44 +3663,42 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
void
copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount)
{
- vm_offset_t dstva;
- pt_entry_t *dstpte;
- int copy_size,
- offset,
- aprot;
- unsigned int i;
- pte_template_t template;
-
- dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
- dstpte = pmap_pte(kernel_pmap, dstva);
- copy_size = M88K_PGBYTES;
- offset = dstpa - M88K_TRUNC_PAGE(dstpa);
- dstpa -= offset;
-
- aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
- while (bytecount > 0) {
- copy_size = M88K_PGBYTES - offset;
- if (copy_size > bytecount)
- copy_size = bytecount;
-
- /*
- * Map distation physical address.
- */
-
- for (i = 0; i < ptes_per_vm_page; i++) {
- template.bits = M88K_TRUNC_PAGE(dstpa) | aprot | CACHE_WT | DT_VALID;
- cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
- *dstpte = template.pte;
-
- dstva += offset;
- bcopy((void*)srcva, (void*)dstva, copy_size);
- srcva += copy_size;
- dstva += copy_size;
- dstpa += M88K_PGBYTES;
- bytecount -= copy_size;
- offset = 0;
- }
- }
+ vm_offset_t dstva;
+ pt_entry_t *dstpte;
+ int copy_size,
+ offset,
+ aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ dstpte = pmap_pte(kernel_pmap, dstva);
+ copy_size = M88K_PGBYTES;
+ offset = dstpa - M88K_TRUNC_PAGE(dstpa);
+ dstpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0) {
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+ /*
+ * Map destination physical address.
+ */
+ for (i = 0; i < ptes_per_vm_page; i++) {
+ template.bits = M88K_TRUNC_PAGE(dstpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+
+ dstva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcva += copy_size;
+ dstva += copy_size;
+ dstpa += M88K_PGBYTES;
+ bytecount -= copy_size;
+ offset = 0;
+ }
+ }
}
/*
@@ -3899,44 +3727,42 @@ copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount)
void
copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount)
{
- register vm_offset_t srcva;
- register pt_entry_t *srcpte;
- register int copy_size, offset;
- int aprot;
- unsigned int i;
- pte_template_t template;
-
- srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
- srcpte = pmap_pte(kernel_pmap, srcva);
- copy_size = M88K_PGBYTES;
- offset = srcpa - M88K_TRUNC_PAGE(srcpa);
- srcpa -= offset;
-
- aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
- while (bytecount > 0) {
- copy_size = M88K_PGBYTES - offset;
- if (copy_size > bytecount)
- copy_size = bytecount;
-
- /*
- * Map destnation physical address.
- */
-
- for (i=0; i < ptes_per_vm_page; i++) {
- template.bits = M88K_TRUNC_PAGE(srcpa) | aprot | CACHE_WT | DT_VALID;
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
-
- srcva += offset;
- bcopy((void*)srcva, (void*)dstva, copy_size);
- srcpa += M88K_PGBYTES;
- dstva += copy_size;
- srcva += copy_size;
- bytecount -= copy_size;
- offset = 0;
- /* cache flush source? */
- }
- }
+ register vm_offset_t srcva;
+ register pt_entry_t *srcpte;
+ register int copy_size, offset;
+ int aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ copy_size = M88K_PGBYTES;
+ offset = srcpa - M88K_TRUNC_PAGE(srcpa);
+ srcpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0) {
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+ /*
+ * Map source physical address.
+ */
+ for (i=0; i < ptes_per_vm_page; i++) {
+ template.bits = M88K_TRUNC_PAGE(srcpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ srcva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcpa += M88K_PGBYTES;
+ dstva += copy_size;
+ srcva += copy_size;
+ bytecount -= copy_size;
+ offset = 0;
+ /* cache flush source? */
+ }
+ }
}
/*
@@ -3962,10 +3788,10 @@ copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount)
*/
void
pmap_pageable(pmap_t pmap, vm_offset_t start, vm_offset_t end,
- boolean_t pageable)
+ boolean_t pageable)
{
#ifdef lint
- pmap++; start++; end++; pageable++;
+ pmap++; start++; end++; pageable++;
#endif
} /* pmap_pageable() */
@@ -3998,45 +3824,43 @@ pmap_pageable(pmap_t pmap, vm_offset_t start, vm_offset_t end,
void
pmap_redzone(pmap_t pmap, vm_offset_t va)
{
- pt_entry_t *pte;
- int spl, spl_sav;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- va = M88K_ROUND_PAGE(va);
- PMAP_LOCK(pmap, spl);
-
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte))
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = M88K_RO;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va +=M88K_PGBYTES;
- }
-
- PMAP_UNLOCK(pmap, spl);
-
-} /* pmap_redzone() */
+ pt_entry_t *pte;
+ int spl, spl_sav;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ va = M88K_ROUND_PAGE(va);
+ PMAP_LOCK(pmap, spl);
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+ if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte))
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+ * Invalidate pte temporarily to avoid the modified bit
+ * and/or the reference bit being written back by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va +=M88K_PGBYTES;
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+} /* pmap_redzone() */
/*
* Routine: PMAP_CLEAR_MODIFY
@@ -4069,94 +3893,87 @@ pmap_redzone(pmap_t pmap, vm_offset_t va)
void
pmap_clear_modify(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *pte;
- pmap_t pmap;
- int spl, spl_sav;
- vm_offset_t va;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ vm_offset_t va;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_CMOD)
- printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
clear_modify_Retry:
- pvl = PA_TO_PVH(phys);
- CHECK_PV_LIST (phys, pvl, "pmap_clear_modify");
- LOCK_PVH(phys);
+ pvl = PA_TO_PVH(phys);
+ CHECK_PV_LIST (phys, pvl, "pmap_clear_modify");
+ LOCK_PVH(phys);
- /* update correspoinding pmap_modify_list element */
- SET_ATTRIB(phys, 0);
+ /* update corresponding pmap_modify_list element */
+ SET_ATTRIB(phys, 0);
- if (pvl->pmap == PMAP_NULL) {
+ if (pvl->pmap == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
- printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return;
+ }
- UNLOCK_PVH(phys);
- SPLX(spl);
- return;
- }
-
- /* for each listed pmap, trun off the page modified bit */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- pmap = pvep->pmap;
- va = pvep->va;
- if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
- goto clear_modify_Retry;
- }
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_clear_modify: bad pv list entry.");
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- /* clear modified bit */
- opte.pte.modified = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
- simple_unlock(&pmap->lock);
- pvep = pvep->next;
- }
-
- UNLOCK_PVH(phys);
- SPLX(spl);
-
+ /* for each listed pmap, turn off the page modified bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto clear_modify_Retry;
+ }
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_modify: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the modified bit
+ * and/or the reference bit being written back by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ /* clear modified bit */
+ opte.pte.modified = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+ simple_unlock(&pmap->lock);
+ pvep = pvep->next;
+ }
+ UNLOCK_PVH(phys);
+ SPLX(spl);
} /* pmap_clear_modify() */
-
-
/*
* Routine: PMAP_IS_MODIFIED
*
@@ -4195,90 +4012,87 @@ clear_modify_Retry:
boolean_t
pmap_is_modified(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- int i;
- boolean_t modified_flag;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+ boolean_t modified_flag;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_IMOD)
- printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
+ if (pmap_con_dbg & CD_IMOD)
+ printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
#endif
- return (FALSE);
- }
+ return (FALSE);
+ }
- SPLVM(spl);
+ SPLVM(spl);
- pvl = PA_TO_PVH(phys);
- CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
-is_mod_Retry:
+ pvl = PA_TO_PVH(phys);
+ CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
+ is_mod_Retry:
- if ((boolean_t) PA_TO_ATTRIB(phys)) {
- /* we've already cached a modify flag for this page,
- no use looking further... */
+ if ((boolean_t) PA_TO_ATTRIB(phys)) {
+ /* we've already cached a modify flag for this page,
+ no use looking further... */
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
- printf("(pmap_is_modified :%x) already cached a modify flag for this page\n", curproc);
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) already cached a modify flag for this page\n", curproc);
#endif
- SPLX(spl);
- return (TRUE);
- }
- LOCK_PVH(phys);
-
- if (pvl->pmap == PMAP_NULL) {
- /* unmapped page - get info from page_modified array
- maintained by pmap_remove_range/ pmap_remove_all */
- modified_flag = (boolean_t) PA_TO_ATTRIB(phys);
+ SPLX(spl);
+ return (TRUE);
+ }
+ LOCK_PVH(phys);
+
+ if (pvl->pmap == PMAP_NULL) {
+ /* unmapped page - get info from page_modified array
+ maintained by pmap_remove_range/ pmap_remove_all */
+ modified_flag = (boolean_t) PA_TO_ATTRIB(phys);
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
- printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- UNLOCK_PVH(phys);
- SPLX(spl);
- return (modified_flag);
- }
-
- /* for each listed pmap, check modified bit for given page */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(phys);
- goto is_mod_Retry;
- }
-
- ptep = pmap_pte(pvep->pmap, pvep->va);
- if (ptep == PT_ENTRY_NULL) {
- printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
- panic("pmap_is_modified: bad pv list entry");
- }
- for (i = ptes_per_vm_page; i > 0; i--) {
- if (ptep->modified) {
- simple_unlock(&pvep->pmap->lock);
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (modified_flag);
+ }
+
+ /* for each listed pmap, check modified bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto is_mod_Retry;
+ }
+
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL) {
+ printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
+ panic("pmap_is_modified: bad pv list entry");
+ }
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->modified) {
+ simple_unlock(&pvep->pmap->lock);
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
- printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
+ if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
+ printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
#endif
- UNLOCK_PVH(phys);
- SPLX(spl);
- return (TRUE);
- }
- ptep++;
- }
- simple_unlock(&pvep->pmap->lock);
- pvep = pvep->next;
- }
-
- UNLOCK_PVH(phys);
- SPLX(spl);
- return (FALSE);
-
-} /* pmap_is_modified() */
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
+ pvep = pvep->next;
+ }
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (FALSE);
+} /* pmap_is_modified() */
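pmap_is_modified() above first checks a software attribute cached per managed page (PA_TO_ATTRIB), which pmap_clear_modify() resets via SET_ATTRIB and the remove paths set when a dirty mapping is torn down; only when that cache says "clean" does it walk the pv list and inspect each pte. The sketch below models that two-level check; the array, page count and helper names are stand-ins, not the kernel's structures.

/*
 * Model of the cached per-page "modified" attribute consulted above.
 */
#include <stdio.h>

#define NPAGES     16
#define PAGE_SHIFT 12

static unsigned char modify_attrib[NPAGES];	/* one flag per managed page */

/* remove path: latch the dirty bit before the mapping disappears */
static void
page_unmapped_dirty(unsigned int pa)
{
	modify_attrib[pa >> PAGE_SHIFT] = 1;
}

/* query path: the cached flag answers first */
static int
is_modified(unsigned int pa)
{
	if (modify_attrib[pa >> PAGE_SHIFT])
		return (1);
	/* otherwise the kernel walks the pv list checking pte->modified */
	return (0);
}

int
main(void)
{
	page_unmapped_dirty(0x3000);
	printf("page 0x3000 modified: %d\n", is_modified(0x3000));
	printf("page 0x5000 modified: %d\n", is_modified(0x5000));
	return (0);
}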
/*
 * Routine: PMAP_CLEAR_REFERENCE
@@ -4315,92 +4129,86 @@ is_mod_Retry:
void
pmap_clear_reference(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *pte;
- pmap_t pmap;
- int spl, spl_sav;
- vm_offset_t va;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ vm_offset_t va;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_CREF) {
- printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
- }
+ if (pmap_con_dbg & CD_CREF) {
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
+ }
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
-clear_reference_Retry:
- LOCK_PVH(phys);
- pvl = PA_TO_PVH(phys);
- CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
+ clear_reference_Retry:
+ LOCK_PVH(phys);
+ pvl = PA_TO_PVH(phys);
+ CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
- if (pvl->pmap == PMAP_NULL) {
+ if (pvl->pmap == PMAP_NULL) {
#ifdef DBG
- if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
- printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
+ if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
#endif
- UNLOCK_PVH(phys);
- SPLX(spl);
- return;
- }
-
- /* for each listed pmap, turn off the page refrenced bit */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- pmap = pvep->pmap;
- va = pvep->va;
- if (!simple_lock_try(&pmap->lock)) {
- UNLOCK_PVH(phys);
- goto clear_reference_Retry;
- }
- users = pmap->cpus_using;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_clear_reference: bad pv list entry.");
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- /* clear reference bit */
- opte.pte.pg_used = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
-
- simple_unlock(&pmap->lock);
- pvep = pvep->next;
- }
-
- UNLOCK_PVH(phys);
- SPLX(spl);
-
-} /* pmap_clear_reference() */
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return;
+ }
+ /* for each listed pmap, turn off the page referenced bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto clear_reference_Retry;
+ }
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_reference: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the modified bit
+ * and/or the reference bit being written back by other cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ /* clear reference bit */
+ opte.pte.pg_used = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+ simple_unlock(&pmap->lock);
+ pvep = pvep->next;
+ }
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+} /* pmap_clear_reference() */
/*
* Routine: PMAP_IS_REFERENCED
@@ -4435,59 +4243,59 @@ clear_reference_Retry:
* examined. If a used bit is found on, the function returns TRUE
* immediately (doesn't need to walk remainder of list).
*/
+
boolean_t
pmap_is_referenced(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- int i;
-
- if (!PMAP_MANAGED(phys))
- return (FALSE);
-
- SPLVM(spl);
-
- pvl = PA_TO_PVH(phys);
- CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
-
-is_ref_Retry:
-
- if (pvl->pmap == PMAP_NULL) {
- SPLX(spl);
- return (FALSE);
- }
-
- LOCK_PVH(phys);
-
- /* for each listed pmap, check used bit for given page */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(phys);
- goto is_ref_Retry;
- }
- ptep = pmap_pte(pvep->pmap, pvep->va);
- if (ptep == PT_ENTRY_NULL)
- panic("pmap_is_referenced: bad pv list entry.");
- for (i = ptes_per_vm_page; i > 0; i--) {
- if (ptep->pg_used) {
- simple_unlock(&pvep->pmap->lock);
- UNLOCK_PVH(phys);
- SPLX(spl);
- return (TRUE);
- }
- ptep++;
- }
- simple_unlock(&pvep->pmap->lock);
- pvep = pvep->next;
- }
-
- UNLOCK_PVH(phys);
- SPLX(spl);
- return (FALSE);
+ pv_entry_t pvl;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+
+ if (!PMAP_MANAGED(phys))
+ return (FALSE);
+
+ SPLVM(spl);
+
+ pvl = PA_TO_PVH(phys);
+ CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
+
+ is_ref_Retry:
+
+ if (pvl->pmap == PMAP_NULL) {
+ SPLX(spl);
+ return (FALSE);
+ }
+
+ LOCK_PVH(phys);
+
+ /* for each listed pmap, check used bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(phys);
+ goto is_ref_Retry;
+ }
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL)
+ panic("pmap_is_referenced: bad pv list entry.");
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->pg_used) {
+ simple_unlock(&pvep->pmap->lock);
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
+ pvep = pvep->next;
+ }
+
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (FALSE);
} /* pmap_is_referenced() */
/*
@@ -4519,30 +4327,28 @@ is_ref_Retry:
boolean_t
pmap_verify_free(vm_offset_t phys)
{
- pv_entry_t pv_h;
- int spl;
- boolean_t result;
+ pv_entry_t pv_h;
+ int spl;
+ boolean_t result;
- if (!pmap_initialized)
- return (TRUE);
+ if (!pmap_initialized)
+ return (TRUE);
- if (!PMAP_MANAGED(phys))
- return (FALSE);
+ if (!PMAP_MANAGED(phys))
+ return (FALSE);
- SPLVM(spl);
+ SPLVM(spl);
- pv_h = PA_TO_PVH(phys);
- LOCK_PVH(phys);
+ pv_h = PA_TO_PVH(phys);
+ LOCK_PVH(phys);
- result = (pv_h->pmap == PMAP_NULL);
- UNLOCK_PVH(phys);
- SPLX(spl);
-
- return (result);
+ result = (pv_h->pmap == PMAP_NULL);
+ UNLOCK_PVH(phys);
+ SPLX(spl);
+ return (result);
} /* pmap_verify_free */
-
/*
* Routine: PMAP_VALID_PAGE
*
@@ -4553,9 +4359,9 @@ boolean_t
pmap_valid_page(vm_offset_t p)
{
#ifdef lint
- p++;
+ p++;
#endif
- return (TRUE);
+ return (TRUE);
} /* pmap_valid_page() */
/*
@@ -4570,17 +4376,17 @@ pmap_valid_page(vm_offset_t p)
void
pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
{
- switch (prot) {
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- pmap_copy_on_write(phys);
- break;
- case VM_PROT_ALL:
- break;
- default:
- pmap_remove_all(phys);
- break;
- }
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_copy_on_write(phys);
+ break;
+ case VM_PROT_ALL:
+ break;
+ default:
+ pmap_remove_all(phys);
+ break;
+ }
}
#if FUTURE_MAYBE
@@ -4611,84 +4417,81 @@ pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
void
pagemove(vm_offset_t from, vm_offset_t to, int size)
{
- vm_offset_t pa;
- pt_entry_t *srcpte, *dstpte;
- int pfi;
- pv_entry_t pvl;
- int spl;
- int i;
- unsigned users;
- pte_template_t opte;
-
- PMAP_LOCK(kernel_pmap, spl);
-
- users = kernel_pmap->cpus_using;
-
- while (size > 0) {
-
- /*
- * check if the source addr is mapped
- */
- if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) {
- printf("pagemove: source vaddr 0x%x\n", from);
- panic("pagemove: Source addr not mapped");
- }
-
- /*
- *
- */
- if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL)
- if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE))
- == PT_ENTRY_NULL)
- panic("pagemove: Cannot allocate distination pte");
- /*
- *
- */
- if (dstpte->dtype == DT_VALID) {
- printf("pagemove: distination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte));
- panic("pagemove: Distination pte already valid");
- }
+ vm_offset_t pa;
+ pt_entry_t *srcpte, *dstpte;
+ pv_entry_t pvl;
+ int spl;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+
+ PMAP_LOCK(kernel_pmap, spl);
+
+ users = kernel_pmap->cpus_using;
+
+ while (size > 0) {
+
+ /*
+ * check if the source addr is mapped
+ */
+ if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) {
+ printf("pagemove: source vaddr 0x%x\n", from);
+ panic("pagemove: Source addr not mapped");
+ }
+
+ /*
+ *
+ */
+ if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL)
+ if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE))
+ == PT_ENTRY_NULL)
+				panic("pagemove: Cannot allocate destination pte");
+ /*
+ *
+ */
+ if (dstpte->dtype == DT_VALID) {
+			printf("pagemove: destination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte));
+			panic("pagemove: Destination pte already valid");
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
- printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
- if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
- printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
+ if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
+ printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
+ if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
+ printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
#endif /* DEBUG */
- /*
- * Update pv_list
- */
- pa = M88K_PTOB(srcpte->pfn);
- if (PMAP_MANAGED(pa)) {
- LOCK_PVH(pa);
- pvl = PA_TO_PVH(pa);
- CHECK_PV_LIST(pa, pvl, "pagemove");
- pvl->va = (vm_offset_t)to;
- UNLOCK_PVH(pa);
- }
-
- /*
- * copy pte
- */
- for (i = ptes_per_vm_page; i > 0; i--) {
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(srcpte);
- flush_atc_entry(users, from, 1);
- ((pte_template_t *)dstpte)->bits = opte.bits;
- from += M88K_PGBYTES;
- to += M88K_PGBYTES;
- srcpte++; dstpte++;
- }
- size -= PAGE_SIZE;
- }
-
- PMAP_UNLOCK(kernel_pmap, spl);
+ /*
+ * Update pv_list
+ */
+ pa = M88K_PTOB(srcpte->pfn);
+ if (PMAP_MANAGED(pa)) {
+ LOCK_PVH(pa);
+ pvl = PA_TO_PVH(pa);
+ CHECK_PV_LIST(pa, pvl, "pagemove");
+ pvl->va = (vm_offset_t)to;
+ UNLOCK_PVH(pa);
+ }
+ /*
+ * copy pte
+ */
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+ * Invalidate pte temporarily to avoid the modified bit
+			 * and/or the reference bit being written back by another cpu.
+ */
+ opte.bits = invalidate_pte(srcpte);
+ flush_atc_entry(users, from, 1);
+ ((pte_template_t *)dstpte)->bits = opte.bits;
+ from += M88K_PGBYTES;
+ to += M88K_PGBYTES;
+ srcpte++; dstpte++;
+ }
+ size -= PAGE_SIZE;
+ }
+ PMAP_UNLOCK(kernel_pmap, spl);
} /* pagemove */
#endif /* FUTURE_MAYBE */
@@ -4718,17 +4521,17 @@ pagemove(vm_offset_t from, vm_offset_t to, int size)
void
icache_flush(vm_offset_t pa)
{
- int i;
- int cpu = 0;
-
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- for (cpu=0; cpu<max_cpus; cpu++) {
- if (cpu_sets[cpu]) {
- cmmu_flush_remote_inst_cache(cpu, pa, M88K_PGBYTES);
- }
- }
- }
-
+ int i;
+ int cpu = 0;
+
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ for (cpu=0; cpu<max_cpus; cpu++) {
+ if (cpu_sets[cpu]) {
+ cmmu_flush_remote_inst_cache(cpu, pa,
+ M88K_PGBYTES);
+ }
+ }
+ }
} /* icache_flush */
/*
@@ -4752,21 +4555,21 @@ icache_flush(vm_offset_t pa)
void
pmap_dcache_flush(pmap_t pmap, vm_offset_t va)
{
- vm_offset_t pa;
- int i;
- int spl;
+ vm_offset_t pa;
+ int i;
+ int spl;
- if (pmap == PMAP_NULL)
- panic("pmap_dcache_flush: pmap is NULL");
+ if (pmap == PMAP_NULL)
+ panic("pmap_dcache_flush: pmap is NULL");
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- cmmu_flush_data_cache(pa, M88K_PGBYTES);
- }
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cmmu_flush_data_cache(pa, M88K_PGBYTES);
+ }
- PMAP_UNLOCK(pmap, spl);
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_dcache_flush */
@@ -4774,55 +4577,55 @@ pmap_dcache_flush(pmap_t pmap, vm_offset_t va)
STATIC void
cache_flush_loop(int mode, vm_offset_t pa, int size)
{
- int i;
- int ncpus;
- void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
-
- switch (mode) {
- default:
- panic("bad cache_flush_loop mode");
- return;
-
- case FLUSH_CACHE: /* All caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu_flush_remote_cache;
- break;
-
- case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu_flush_remote_inst_cache;
- break;
-
- case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
- ncpus = max_cpus;
- cfunc = cmmu_flush_remote_data_cache;
- break;
-
- case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_cache;
- break;
-
- case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_inst_cache;
- break;
-
- case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_data_cache;
- break;
- }
-
- if (ncpus == 1) {
- (*cfunc)(cpu_number(), pa, size);
- } else {
- for (i=0; i<max_cpus; i++) {
- if (cpu_sets[i]) {
- (*cfunc)(i, pa, size);
- }
- }
- }
+ int i;
+ int ncpus;
+ void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
+
+ switch (mode) {
+ default:
+ panic("bad cache_flush_loop mode");
+ return;
+
+ case FLUSH_CACHE: /* All caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+
+ case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+ }
+
+ if (ncpus == 1) {
+ (*cfunc)(cpu_number(), pa, size);
+ } else {
+ for (i=0; i<max_cpus; i++) {
+ if (cpu_sets[i]) {
+ (*cfunc)(i, pa, size);
+ }
+ }
+ }
}
/*
@@ -4832,31 +4635,31 @@ cache_flush_loop(int mode, vm_offset_t pa, int size)
void
pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode)
{
- vm_offset_t pa;
- vm_offset_t va;
- int i;
- int spl;
-
- if (pmap == PMAP_NULL)
- panic("pmap_dcache_flush: NULL pmap");
-
- /*
- * If it is more than a couple of pages, just blow the whole cache
- * because of the number of cycles involved.
- */
- if (bytes > 2*M88K_PGBYTES) {
- cache_flush_loop(mode, 0, -1);
- return;
- }
-
- PMAP_LOCK(pmap, spl);
- for (va = virt; bytes > 0; bytes -= M88K_PGBYTES,va += M88K_PGBYTES) {
- pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- cache_flush_loop(mode, pa, M88K_PGBYTES);
- }
- }
- PMAP_UNLOCK(pmap, spl);
+ vm_offset_t pa;
+ vm_offset_t va;
+ int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_cache_flush: NULL pmap");
+
+ /*
+ * If it is more than a couple of pages, just blow the whole cache
+ * because of the number of cycles involved.
+ */
+ if (bytes > 2*M88K_PGBYTES) {
+ cache_flush_loop(mode, 0, -1);
+ return;
+ }
+
+ PMAP_LOCK(pmap, spl);
+ for (va = virt; bytes > 0; bytes -= M88K_PGBYTES,va += M88K_PGBYTES) {
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cache_flush_loop(mode, pa, M88K_PGBYTES);
+ }
+ }
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_cache_flush */
#ifdef DEBUG
@@ -4900,48 +4703,48 @@ pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode)
STATIC void
check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
{
- pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t pa;
-
- if (pv_h != PA_TO_PVH(phys)) {
- printf("check_pv_list: incorrect pv_h supplied.\n");
- panic(who);
- }
-
- if (!PAGE_ALIGNED(phys)) {
- printf("check_pv_list: supplied phys addr not page aligned.\n");
- panic(who);
- }
-
- if (pv_h->pmap == PMAP_NULL) {
- if (pv_h->next != PV_ENTRY_NULL) {
- printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
- panic(who);
- } else return; /* proper empry lst */
- }
-
- pv_e = pv_h;
- while (pv_e != PV_ENTRY_NULL) {
- if (!PAGE_ALIGNED(pv_e->va)) {
- printf("check_pv_list: non-aligned VA in entry at 0x%x.\n", pv_e);
- panic(who);
- }
- /*
- * We can't call pmap_extract since it requires lock.
- */
- if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t)0;
- else
- pa = M88K_PTOB(pte->pfn) | (pv_e->va & M88K_PGOFSET);
-
- if (pa != phys) {
- printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
- panic(who);
- }
-
- pv_e = pv_e->next;
- }
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t pa;
+
+ if (pv_h != PA_TO_PVH(phys)) {
+ printf("check_pv_list: incorrect pv_h supplied.\n");
+ panic(who);
+ }
+
+ if (!PAGE_ALIGNED(phys)) {
+ printf("check_pv_list: supplied phys addr not page aligned.\n");
+ panic(who);
+ }
+
+ if (pv_h->pmap == PMAP_NULL) {
+ if (pv_h->next != PV_ENTRY_NULL) {
+ printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
+ panic(who);
+		} else return; /* proper empty list */
+ }
+
+ pv_e = pv_h;
+ while (pv_e != PV_ENTRY_NULL) {
+ if (!PAGE_ALIGNED(pv_e->va)) {
+ printf("check_pv_list: non-aligned VA in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+ /*
+ * We can't call pmap_extract since it requires lock.
+ */
+ if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t)0;
+ else
+ pa = M88K_PTOB(pte->pfn) | (pv_e->va & M88K_PGOFSET);
+
+ if (pa != phys) {
+ printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+
+ pv_e = pv_e->next;
+ }
} /* check_pv_list() */
@@ -4979,111 +4782,111 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
STATIC void
check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
{
- vm_offset_t va,
- old_va,
- phys;
- pv_entry_t pv_h,
- pv_e,
- saved_pv_e;
- pt_entry_t *ptep;
- boolean_t found;
- int loopcnt;
+ vm_offset_t va,
+ old_va,
+ phys;
+ pv_entry_t pv_h,
+ pv_e,
+ saved_pv_e;
+ pt_entry_t *ptep;
+ boolean_t found;
+ int loopcnt;
#if defined(MACHINE_NEW_NONCONTIG)
- int bank;
- unsigned npages;
+ int bank;
+ unsigned npages;
#endif
- /*
- * for each page in the address space, check to see if there's
- * a valid mapping. If so makes sure it's listed in the PV_list.
- */
+ /*
+ * for each page in the address space, check to see if there's
+ * a valid mapping. If so makes sure it's listed in the PV_list.
+ */
- if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
- printf("(check_map) checking map at 0x%x\n", map);
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) checking map at 0x%x\n", map);
- old_va = s;
- for (va = s; va < e; va += PAGE_SIZE) {
- /* check for overflow - happens if e=0xffffffff */
- if (va < old_va)
- break;
- else
- old_va = va;
+ old_va = s;
+ for (va = s; va < e; va += PAGE_SIZE) {
+ /* check for overflow - happens if e=0xffffffff */
+ if (va < old_va)
+ break;
+ else
+ old_va = va;
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2)
- /* don't try anything with these */
- continue;
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2)
+ /* don't try anything with these */
+ continue;
- ptep = pmap_pte(map, va);
+ ptep = pmap_pte(map, va);
- if (ptep == PT_ENTRY_NULL) {
- /* no page table, skip to next segment entry */
- va = SDT_NEXT(va)-PAGE_SIZE;
- continue;
- }
+ if (ptep == PT_ENTRY_NULL) {
+ /* no page table, skip to next segment entry */
+ va = SDT_NEXT(va)-PAGE_SIZE;
+ continue;
+ }
- if (!PDT_VALID(ptep))
- continue; /* no page mapping */
+ if (!PDT_VALID(ptep))
+ continue; /* no page mapping */
- phys = M88K_PTOB(ptep->pfn); /* pick up phys addr */
+ phys = M88K_PTOB(ptep->pfn); /* pick up phys addr */
- if (!PMAP_MANAGED(phys))
- continue; /* no PV list */
+ if (!PMAP_MANAGED(phys))
+ continue; /* no PV list */
- /* note: vm_page_startup allocates some memory for itself
- through pmap_map before pmap_init is run. However,
- it doesn't adjust the physical start of memory.
- So, pmap thinks those pages are managed - but they're
- not actually under it's control. So, the following
- conditional is a hack to avoid those addresses
- reserved by vm_page_startup */
- /* pmap_init also allocate some memory for itself. */
+ /* note: vm_page_startup allocates some memory for itself
+ through pmap_map before pmap_init is run. However,
+ it doesn't adjust the physical start of memory.
+ So, pmap thinks those pages are managed - but they're
+		   not actually under its control. So, the following
+ conditional is a hack to avoid those addresses
+ reserved by vm_page_startup */
+		/* pmap_init also allocates some memory for itself. */
#if defined(MACHINE_NEW_NONCONTIG)
- for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
- npages += vm_physmem[bank].end - vm_physmem[bank].start;
- if (map == kernel_pmap &&
- va < round_page((vm_offset_t)(pmap_modify_list + npages)))
- continue;
+ for (npages = 0, bank = 0; bank < vm_nphysseg; bank++)
+ npages += vm_physmem[bank].end - vm_physmem[bank].start;
+ if (map == kernel_pmap &&
+ va < round_page((vm_offset_t)(pmap_modify_list + npages)))
+ continue;
#else
- if (map == kernel_pmap &&
- va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start))))
- continue;
+ if (map == kernel_pmap &&
+ va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start))))
+ continue;
#endif
- pv_h = PA_TO_PVH(phys);
- found = FALSE;
-
- if (pv_h->pmap != PMAP_NULL) {
-
- loopcnt = 10000; /* loop limit */
- pv_e = pv_h;
- while (pv_e != PV_ENTRY_NULL) {
-
- if (loopcnt-- < 0) {
- printf("check_map: loop in PV list at PVH 0x%x (for phys 0x%x)\n", pv_h, phys);
- panic(who);
- }
-
- if (pv_e->pmap == map && pv_e->va == va) {
- if (found) {
- printf("check_map: Duplicate PV list entries at 0x%x and 0x%x in PV list 0x%x.\n", saved_pv_e, pv_e, pv_h);
- printf("check_map: for pmap 0x%x, VA 0x%x,phys 0x%x.\n", map, va, phys);
- panic(who);
- } else {
- found = TRUE;
- saved_pv_e = pv_e;
- }
- }
- pv_e = pv_e->next;
- }
- }
-
- if (!found) {
- printf("check_map: Mapping for pmap 0x%x VA 0x%x Phys 0x%x does not appear in PV list 0x%x.\n", map, va, phys, pv_h);
- }
- }
-
- if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
- printf("(check_map) done \n");
+ pv_h = PA_TO_PVH(phys);
+ found = FALSE;
+
+ if (pv_h->pmap != PMAP_NULL) {
+
+ loopcnt = 10000; /* loop limit */
+ pv_e = pv_h;
+ while (pv_e != PV_ENTRY_NULL) {
+
+ if (loopcnt-- < 0) {
+ printf("check_map: loop in PV list at PVH 0x%x (for phys 0x%x)\n", pv_h, phys);
+ panic(who);
+ }
+
+ if (pv_e->pmap == map && pv_e->va == va) {
+ if (found) {
+ printf("check_map: Duplicate PV list entries at 0x%x and 0x%x in PV list 0x%x.\n", saved_pv_e, pv_e, pv_h);
+ printf("check_map: for pmap 0x%x, VA 0x%x,phys 0x%x.\n", map, va, phys);
+ panic(who);
+ } else {
+ found = TRUE;
+ saved_pv_e = pv_e;
+ }
+ }
+ pv_e = pv_e->next;
+ }
+ }
+
+ if (!found) {
+ printf("check_map: Mapping for pmap 0x%x VA 0x%x Phys 0x%x does not appear in PV list 0x%x.\n", map, va, phys, pv_h);
+ }
+ }
+
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) done \n");
} /* check_map() */
@@ -5124,59 +4927,59 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
STATIC void
check_pmap_consistency(char *who)
{
- pmap_t p;
- int i;
- vm_offset_t phys;
- pv_entry_t pv_h;
- int spl;
+ pmap_t p;
+ int i;
+ vm_offset_t phys;
+ pv_entry_t pv_h;
+ int spl;
#ifdef MACHINE_NEW_NONCONTIG
- int bank;
- unsigned npages;
+ int bank;
+ unsigned npages;
#endif
- if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
- printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+ printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
- if (pv_head_table == PV_ENTRY_NULL) {
+ if (pv_head_table == PV_ENTRY_NULL) {
- printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
- return;
- }
+ printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
- p = kernel_pmap;
- check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who);
+ p = kernel_pmap;
+ check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who);
- /* run through all pmaps. check consistency of each one... */
- i = PMAP_MAX;
- for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) {
- if (i == 0) { /* can not read pmap list */
- printf("check_pmap_consistency: pmap strcut loop error.\n");
- panic(who);
- }
- check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who);
- }
+ /* run through all pmaps. check consistency of each one... */
+ i = PMAP_MAX;
+ for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) {
+ if (i == 0) { /* can not read pmap list */
+			printf("check_pmap_consistency: pmap struct loop error.\n");
+ panic(who);
+ }
+ check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who);
+ }
- /* run through all managed paes, check pv_list for each one */
+	/* run through all managed pages, check pv_list for each one */
#if defined(MACHINE_NEW_NONCONTIG)
- for (npages = 0, bank = 0; bank < vm_nphysseg; bank++){
- for (phys = ptoa(vm_physmem[bank].start); phys < ptoa(vm_physmem[bank].end); phys += PAGE_SIZE) {
- pv_h = PA_TO_PVH(phys);
- check_pv_list(phys, pv_h, who);
+ for (npages = 0, bank = 0; bank < vm_nphysseg; bank++) {
+ for (phys = ptoa(vm_physmem[bank].start); phys < ptoa(vm_physmem[bank].end); phys += PAGE_SIZE) {
+ pv_h = PA_TO_PVH(phys);
+ check_pv_list(phys, pv_h, who);
+ }
}
- }
#else
- for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) {
- pv_h = PA_TO_PVH(phys);
- check_pv_list(phys, pv_h, who);
- }
+ for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) {
+ pv_h = PA_TO_PVH(phys);
+ check_pv_list(phys, pv_h, who);
+ }
#endif /* defined(MACHINE_NEW_NONCONTIG) */
- SPLX(spl);
+ SPLX(spl);
- if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
- printf("check_pmap consistency (%s :%x): done.\n",who, curproc);
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+		printf("check_pmap_consistency (%s :%x): done.\n", who, curproc);
} /* check_pmap_consistency() */
#endif /* DEBUG */
@@ -5202,7 +5005,7 @@ check_pmap_consistency(char *who)
p->prot, \
p->dtype, \
p->wired, \
- p->modified, \
+ p->modified, \
p->pg_used);
/*
@@ -5225,63 +5028,63 @@ check_pmap_consistency(char *who)
void
pmap_print(pmap_t pmap)
{
- sdt_entry_t *sdtp;
- sdt_entry_t *sdtv;
- int i;
+ sdt_entry_t *sdtp;
+ sdt_entry_t *sdtv;
+ int i;
- printf("Pmap @ 0x%x:\n", (unsigned)pmap);
- sdtp = pmap->sdt_paddr;
- sdtv = pmap->sdt_vaddr;
- printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n",
- (unsigned)sdtp, (unsigned)sdtv,
- pmap->ref_count);
+ printf("Pmap @ 0x%x:\n", (unsigned)pmap);
+ sdtp = pmap->sdt_paddr;
+ sdtv = pmap->sdt_vaddr;
+ printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n",
+ (unsigned)sdtp, (unsigned)sdtv,
+ pmap->ref_count);
#ifdef statistics_not_yet_maintained
- printf(" statistics: pagesize %d: free_count %d; "
- "active_count %d; inactive_count %d; wire_count %d\n",
- pmap->stats.pagesize,
- pmap->stats.free_count,
- pmap->stats.active_count,
- pmap->stats.inactive_count,
- pmap->stats.wire_count);
-
- printf(" zero_fill_count %d; reactiveations %d; "
- "pageins %d; pageouts %d; faults %d\n",
- pmap->stats.zero_fill_count,
- pmap->stats.reactivations,
- pmap->stats.pageins,
- pmap->stats.pageouts,
- pmap->stats.fault);
-
- printf(" cow_faults %d, lookups %d, hits %d\n",
- pmap->stats.cow_faults,
- pmap->stats.loopups,
- pmap->stats.faults);
+ printf(" statistics: pagesize %d: free_count %d; "
+ "active_count %d; inactive_count %d; wire_count %d\n",
+ pmap->stats.pagesize,
+ pmap->stats.free_count,
+ pmap->stats.active_count,
+ pmap->stats.inactive_count,
+ pmap->stats.wire_count);
+
+	printf(" zero_fill_count %d; reactivations %d; "
+ "pageins %d; pageouts %d; faults %d\n",
+ pmap->stats.zero_fill_count,
+ pmap->stats.reactivations,
+ pmap->stats.pageins,
+ pmap->stats.pageouts,
+ pmap->stats.fault);
+
+ printf(" cow_faults %d, lookups %d, hits %d\n",
+ pmap->stats.cow_faults,
+ pmap->stats.loopups,
+ pmap->stats.faults);
#endif
- sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */
- sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */
- if (sdtp == (sdt_entry_t *)0)
- printf("Error in pmap - sdt_paddr is null.\n");
- else {
- int count = 0;
- printf(" Segment table at 0x%x (0x%x):\n",
- (unsigned)sdtp, (unsigned)sdtv);
- for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
- if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) {
- if (count != 0)
- printf("sdt entry %d skip !!\n", count);
- count = 0;
- printf(" (%x)phys: ", i);
- PRINT_SDT(sdtp);
- printf(" (%x)virt: ", i);
- PRINT_SDT(sdtv);
- } else
- count++;
- }
- if (count != 0)
- printf("sdt entry %d skip !!\n", count);
- }
+ sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */
+ sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */
+ if (sdtp == (sdt_entry_t *)0)
+ printf("Error in pmap - sdt_paddr is null.\n");
+ else {
+ int count = 0;
+ printf(" Segment table at 0x%x (0x%x):\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) {
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ count = 0;
+ printf(" (%x)phys: ", i);
+ PRINT_SDT(sdtp);
+ printf(" (%x)virt: ", i);
+ PRINT_SDT(sdtv);
+ } else
+ count++;
+ }
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ }
} /* pmap_print() */
@@ -5308,113 +5111,113 @@ pmap_print(pmap_t pmap)
void
pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format)
{
- sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */
- sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */
- pt_entry_t *ptep; /* ptr to pte table of physical page addresses */
-
- int i; /* table loop index */
- unsigned long prev_entry; /* keep track of value of previous table entry */
- int n_dup_entries; /* count contiguous duplicate entries */
-
- printf("Trace of virtual address 0x%08x. Pmap @ 0x%08x.\n",
- va, (unsigned)pmap);
-
- /*** SDT TABLES ***/
- /* get addrs of sdt tables */
- sdtp = (sdt_entry_t *)pmap->sdt_vaddr;
- sdtv = sdtp + SDT_ENTRIES;
-
- if (sdtp == SDT_ENTRY_NULL) {
- printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n");
- return;
- }
-
- n_dup_entries = 0;
- prev_entry = 0xFFFFFFFF;
-
- if (long_format) {
- printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n",
- (unsigned)sdtp, (unsigned)sdtv);
- for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
- if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits
- && SDTIDX(va) != i && i != SDT_ENTRIES-1) {
- n_dup_entries++;
- continue; /* suppress duplicate entry */
- }
- if (n_dup_entries != 0) {
- printf(" - %d duplicate entries skipped -\n",n_dup_entries);
- n_dup_entries = 0;
- }
- prev_entry = ((pte_template_t *)sdtp)->bits;
- if (SDTIDX(va) == i) {
- printf(" >> (%x)phys: ", i);
- } else {
- printf(" (%x)phys: ", i);
- }
- PRINT_SDT(sdtp);
- if (SDTIDX(va) == i) {
- printf(" >> (%x)virt: ", i);
- } else {
- printf(" (%x)virt: ", i);
- }
- PRINT_SDT(sdtv);
- } /* for */
- } else {
- /* index into both tables for given VA */
- sdtp += SDTIDX(va);
- sdtv += SDTIDX(va);
- printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n",
- SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv);
- printf(" phys: ");
- PRINT_SDT(sdtp);
- printf(" virt: ");
- PRINT_SDT(sdtv);
- }
-
- /*** PTE TABLES ***/
- /* get addrs of page (pte) table (no shadow table) */
-
- sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va);
+ sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */
+ sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */
+ pt_entry_t *ptep; /* ptr to pte table of physical page addresses */
+
+ int i; /* table loop index */
+ unsigned long prev_entry; /* keep track of value of previous table entry */
+ int n_dup_entries; /* count contiguous duplicate entries */
+
+ printf("Trace of virtual address 0x%08x. Pmap @ 0x%08x.\n",
+ va, (unsigned)pmap);
+
+ /*** SDT TABLES ***/
+ /* get addrs of sdt tables */
+ sdtp = (sdt_entry_t *)pmap->sdt_vaddr;
+ sdtv = sdtp + SDT_ENTRIES;
+
+ if (sdtp == SDT_ENTRY_NULL) {
+ printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+
+ if (long_format) {
+ printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits
+ && SDTIDX(va) != i && i != SDT_ENTRIES-1) {
+ n_dup_entries++;
+ continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)sdtp)->bits;
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)phys: ", i);
+ } else {
+ printf(" (%x)phys: ", i);
+ }
+ PRINT_SDT(sdtp);
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)virt: ", i);
+ } else {
+ printf(" (%x)virt: ", i);
+ }
+ PRINT_SDT(sdtv);
+ } /* for */
+ } else {
+ /* index into both tables for given VA */
+ sdtp += SDTIDX(va);
+ sdtv += SDTIDX(va);
+ printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n",
+ SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv);
+ printf(" phys: ");
+ PRINT_SDT(sdtp);
+ printf(" virt: ");
+ PRINT_SDT(sdtv);
+ }
+
+ /*** PTE TABLES ***/
+ /* get addrs of page (pte) table (no shadow table) */
+
+ sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va);
#ifdef DBG
- printf("*** DEBUG (sdtp) ");
- PRINT_SDT(sdtp);
+ printf("*** DEBUG (sdtp) ");
+ PRINT_SDT(sdtp);
#endif
- sdtv = sdtp + SDT_ENTRIES;
- ptep = (pt_entry_t *)(M88K_PTOB(sdtv->table_addr));
- if (sdtp->dtype != DT_VALID) {
- printf(" segment table entry invlid, trace stops.\n");
- return;
- }
-
- n_dup_entries = 0;
- prev_entry = 0xFFFFFFFF;
- if (long_format) {
- printf(" page table (ptes) at 0x%x\n", (unsigned)ptep);
- for (i = 0; i < PDT_ENTRIES; i++, ptep++) {
- if (prev_entry == ((pte_template_t *)ptep)->bits
- && PDTIDX(va) != i && i != PDT_ENTRIES-1) {
- n_dup_entries++;
- continue; /* suppress suplicate entry */
- }
- if (n_dup_entries != 0) {
- printf(" - %d duplicate entries skipped -\n",n_dup_entries);
- n_dup_entries = 0;
- }
- prev_entry = ((pte_template_t *)ptep)->bits;
- if (PDTIDX(va) == i) {
- printf(" >> (%x)pte: ", i);
- } else {
- printf(" (%x)pte: ", i);
- }
- PRINT_PDT(ptep);
- } /* for */
- } else {
- /* index into page table */
- ptep += PDTIDX(va);
- printf(" pte index 0x%x\n", PDTIDX(va));
- printf(" pte: ");
- PRINT_PDT(ptep);
- }
+ sdtv = sdtp + SDT_ENTRIES;
+ ptep = (pt_entry_t *)(M88K_PTOB(sdtv->table_addr));
+ if (sdtp->dtype != DT_VALID) {
+		printf(" segment table entry invalid, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+ if (long_format) {
+ printf(" page table (ptes) at 0x%x\n", (unsigned)ptep);
+ for (i = 0; i < PDT_ENTRIES; i++, ptep++) {
+ if (prev_entry == ((pte_template_t *)ptep)->bits
+ && PDTIDX(va) != i && i != PDT_ENTRIES-1) {
+ n_dup_entries++;
+				continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)ptep)->bits;
+ if (PDTIDX(va) == i) {
+ printf(" >> (%x)pte: ", i);
+ } else {
+ printf(" (%x)pte: ", i);
+ }
+ PRINT_PDT(ptep);
+ } /* for */
+ } else {
+ /* index into page table */
+ ptep += PDTIDX(va);
+ printf(" pte index 0x%x\n", PDTIDX(va));
+ printf(" pte: ");
+ PRINT_PDT(ptep);
+ }
} /* pmap_print_trace() */
/*
@@ -5427,43 +5230,43 @@ pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format)
boolean_t
pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type)
{
- pt_entry_t *pte;
- sdt_entry_t *sdt;
- int spl;
-
- PMAP_LOCK(pmap, spl);
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- }
-
- if (!PDT_VALID(pte)) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- }
-
- /*
- * Valid pte. If the transaction was a read, there is no way it
- * could have been a fault, so return true. For now, assume
- * that a write transaction could have caused a fault. We need
- * to check pte and sdt entries for write permission to really
- * tell.
- */
-
- if (type == VM_PROT_READ) {
- PMAP_UNLOCK(pmap, spl);
- return TRUE;
- } else {
- sdt = SDTENT(pmap,va);
- if (sdt->prot || pte->prot) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- } else {
- PMAP_UNLOCK(pmap, spl);
- return TRUE;
- }
- }
+ pt_entry_t *pte;
+ sdt_entry_t *sdt;
+ int spl;
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
+
+ if (!PDT_VALID(pte)) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
+
+ /*
+ * Valid pte. If the transaction was a read, there is no way it
+ * could have been a fault, so return true. For now, assume
+ * that a write transaction could have caused a fault. We need
+ * to check pte and sdt entries for write permission to really
+ * tell.
+ */
+
+ if (type == VM_PROT_READ) {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ } else {
+ sdt = SDTENT(pmap,va);
+ if (sdt->prot || pte->prot) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ } else {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ }
+ }
}
/* New functions to satisfy rpd - contributed by danner */
@@ -5471,25 +5274,25 @@ pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type)
void
pmap_virtual_space(vm_offset_t *startp, vm_offset_t *endp)
{
- *startp = virtual_avail;
- *endp = virtual_end;
+ *startp = virtual_avail;
+ *endp = virtual_end;
}
unsigned int
pmap_free_pages(void)
{
- return atop(avail_end - avail_next);
+ return atop(avail_end - avail_next);
}
boolean_t
pmap_next_page(vm_offset_t *addrp)
{
- if (avail_next == avail_end)
- return FALSE;
+ if (avail_next == avail_end)
+ return FALSE;
- *addrp = avail_next;
- avail_next += PAGE_SIZE;
- return TRUE;
+ *addrp = avail_next;
+ avail_next += PAGE_SIZE;
+ return TRUE;
}
#if USING_BATC
@@ -5499,58 +5302,58 @@ pmap_next_page(vm_offset_t *addrp)
*/
void
pmap_set_batc(
- pmap_t pmap,
- boolean_t data,
- int i,
- vm_offset_t va,
- vm_offset_t pa,
- boolean_t super,
- boolean_t wt,
- boolean_t global,
- boolean_t ci,
- boolean_t wp,
- boolean_t valid)
+ pmap_t pmap,
+ boolean_t data,
+ int i,
+ vm_offset_t va,
+ vm_offset_t pa,
+ boolean_t super,
+ boolean_t wt,
+ boolean_t global,
+ boolean_t ci,
+ boolean_t wp,
+ boolean_t valid)
{
- register batc_template_t batctmp;
-
- if (i < 0 || i > (BATC_MAX - 1)) {
- panic("pmap_set_batc: illegal batc number");
- /* bad number */
- return;
- }
-
- batctmp.field.lba = va >> 19;
- batctmp.field.pba = pa >> 19;
- batctmp.field.sup = super;
- batctmp.field.wt = wt;
- batctmp.field.g = global;
- batctmp.field.ci = ci;
- batctmp.field.wp = wp;
- batctmp.field.v = valid;
-
- if (data) {
- pmap->d_batc[i].bits = batctmp.bits;
- } else {
- pmap->i_batc[i].bits = batctmp.bits;
- }
+ register batc_template_t batctmp;
+
+ if (i < 0 || i > (BATC_MAX - 1)) {
+ panic("pmap_set_batc: illegal batc number");
+ /* bad number */
+ return;
+ }
+
+ batctmp.field.lba = va >> 19;
+ batctmp.field.pba = pa >> 19;
+ batctmp.field.sup = super;
+ batctmp.field.wt = wt;
+ batctmp.field.g = global;
+ batctmp.field.ci = ci;
+ batctmp.field.wp = wp;
+ batctmp.field.v = valid;
+
+ if (data) {
+ pmap->d_batc[i].bits = batctmp.bits;
+ } else {
+ pmap->i_batc[i].bits = batctmp.bits;
+ }
}
-void use_batc(
- task_t task,
- boolean_t data, /* for data-cmmu ? */
- int i, /* batc number */
- vm_offset_t va, /* virtual address */
- vm_offset_t pa, /* physical address */
- boolean_t s, /* for super-mode ? */
- boolean_t wt, /* is writethrough */
- boolean_t g, /* is global ? */
- boolean_t ci, /* is cache inhibited ? */
- boolean_t wp, /* is write-protected ? */
- boolean_t v) /* is valid ? */
+void
+use_batc(task_t task,
+ boolean_t data, /* for data-cmmu ? */
+ int i, /* batc number */
+ vm_offset_t va, /* virtual address */
+ vm_offset_t pa, /* physical address */
+ boolean_t s, /* for super-mode ? */
+ boolean_t wt, /* is writethrough */
+ boolean_t g, /* is global ? */
+ boolean_t ci, /* is cache inhibited ? */
+ boolean_t wp, /* is write-protected ? */
+ boolean_t v) /* is valid ? */
{
- pmap_t pmap;
- pmap = vm_map_pmap(task->map);
- pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
+ pmap_t pmap;
+ pmap = vm_map_pmap(task->map);
+ pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
}
#endif
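One note on the BATC packing in pmap_set_batc() above: lba and pba are simply the virtual and physical addresses shifted right by 19 bits, so each BATC entry names a 512KB-aligned (1 << 19 byte) block. The tiny self-contained computation below is added purely as an illustration with made-up example addresses; it is not part of the commit.

#include <stdio.h>

int
main(void)
{
	unsigned va = 0x00123456;	/* example virtual address */
	unsigned pa = 0x0fe00000;	/* example physical address */

	/* same derivation as batctmp.field.lba/pba in pmap_set_batc() */
	printf("lba = 0x%x, pba = 0x%x\n", va >> 19, pa >> 19);
	/* prints: lba = 0x2, pba = 0x1fc */
	return (0);
}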
@@ -5572,15 +5375,15 @@ void use_batc(
void
pmap_destroy_ranges(pmap_range_t *ranges)
{
- pmap_range_t this, next;
-
- this = *ranges;
- while (this != 0) {
- next = this->next;
- pmap_range_free(this);
- this = next;
- }
- *ranges = 0;
+ pmap_range_t this, next;
+
+ this = *ranges;
+ while (this != 0) {
+ next = this->next;
+ pmap_range_free(this);
+ this = next;
+ }
+ *ranges = 0;
}
/*
@@ -5589,15 +5392,15 @@ pmap_destroy_ranges(pmap_range_t *ranges)
boolean_t
pmap_range_lookup(pmap_range_t *ranges, vm_offset_t address)
{
- pmap_range_t range;
-
- for (range = *ranges; range != 0; range = range->next) {
- if (address < range->start)
- return FALSE;
- if (address < range->end)
- return TRUE;
- }
- return FALSE;
+ pmap_range_t range;
+
+ for (range = *ranges; range != 0; range = range->next) {
+ if (address < range->start)
+ return FALSE;
+ if (address < range->end)
+ return TRUE;
+ }
+ return FALSE;
}
/*
@@ -5607,52 +5410,52 @@ pmap_range_lookup(pmap_range_t *ranges, vm_offset_t address)
void
pmap_range_add(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
{
- pmap_range_t range, *prev;
+ pmap_range_t range, *prev;
- /* look for the start address */
+ /* look for the start address */
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start < range->start)
- break;
- if (start <= range->end)
- goto start_overlaps;
- }
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start < range->start)
+ break;
+ if (start <= range->end)
+ goto start_overlaps;
+ }
- /* start address is not present */
+ /* start address is not present */
- if ((range == 0) || (end < range->start)) {
- /* no overlap; allocate a new range */
+ if ((range == 0) || (end < range->start)) {
+ /* no overlap; allocate a new range */
- range = pmap_range_alloc();
- range->start = start;
- range->end = end;
- range->next = *prev;
- *prev = range;
- return;
- }
+ range = pmap_range_alloc();
+ range->start = start;
+ range->end = end;
+ range->next = *prev;
+ *prev = range;
+ return;
+ }
- /* extend existing range forward to start */
+ /* extend existing range forward to start */
- range->start = start;
+ range->start = start;
start_overlaps:
- assert((range->start <= start) && (start <= range->end));
+ assert((range->start <= start) && (start <= range->end));
- /* delete redundant ranges */
+ /* delete redundant ranges */
- while ((range->next != 0) && (range->next->start <= end)) {
- pmap_range_t old;
+ while ((range->next != 0) && (range->next->start <= end)) {
+ pmap_range_t old;
- old = range->next;
- range->next = old->next;
- range->end = old->end;
- pmap_range_free(old);
- }
+ old = range->next;
+ range->next = old->next;
+ range->end = old->end;
+ pmap_range_free(old);
+ }
- /* extend existing range backward to end */
+ /* extend existing range backward to end */
- if (range->end < end)
- range->end = end;
+ if (range->end < end)
+ range->end = end;
}
/*
@@ -5662,45 +5465,41 @@ start_overlaps:
void
pmap_range_remove(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
{
- pmap_range_t range, *prev;
-
- /* look for start address */
-
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start <= range->start)
- break;
- if (start < range->end) {
- if (end < range->end) {
- pmap_range_t new;
-
- /* split this range */
-
- new = pmap_range_alloc();
- new->next = range->next;
- new->start = end;
- new->end = range->end;
-
- range->next = new;
- range->end = start;
- return;
- }
-
- /* truncate this range */
-
- range->end = start;
- }
- }
+ pmap_range_t range, *prev;
+
+ /* look for start address */
+
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start <= range->start)
+ break;
+ if (start < range->end) {
+ if (end < range->end) {
+ pmap_range_t new;
+ /* split this range */
+ new = pmap_range_alloc();
+ new->next = range->next;
+ new->start = end;
+ new->end = range->end;
+
+ range->next = new;
+ range->end = start;
+ return;
+ }
+ /* truncate this range */
+ range->end = start;
+ }
+ }
- /* start address is not in the middle of a range */
+ /* start address is not in the middle of a range */
- while ((range != 0) && (range->end <= end)) {
- *prev = range->next;
- pmap_range_free(range);
- range = *prev;
- }
+ while ((range != 0) && (range->end <= end)) {
+ *prev = range->next;
+ pmap_range_free(range);
+ range = *prev;
+ }
- if ((range != 0) && (range->start < end))
- range->start = end;
+ if ((range != 0) && (range->start < end))
+ range->start = end;
}
#endif /* FUTURE_MAYBE */
diff --git a/sys/arch/mvme88k/mvme88k/pmap_table.c b/sys/arch/mvme88k/mvme88k/pmap_table.c
index 9f00f13ca18..41fe155a734 100644
--- a/sys/arch/mvme88k/mvme88k/pmap_table.c
+++ b/sys/arch/mvme88k/mvme88k/pmap_table.c
@@ -41,78 +41,69 @@
#define PAGE M88K_PGBYTES
#define SEG M88K_SGBYTES
-#define M188_UTILITY U(0xFF000000)
-#define M188_UTILITY_SIZE U(0x01000000)
-#if 0
#undef VEQR_ADDR
#define VEQR_ADDR 0
-#endif
-
/* phys_start, virt_start, size, prot, cacheability */
#ifdef MVME187
static pmap_table_entry m187_board_table[] = {
- { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
- { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
- { OBIO_START , OBIO_START , OBIO_SIZE , RW, CI},
- { 0 , 0 , 0xffffffff , 0 , 0},
+ { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
+ { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
+ { OBIO_START , OBIO_START , OBIO_SIZE , RW, CI},
+ { 0 , 0 , 0xffffffff , 0 , 0},
};
#endif
#ifdef MVME188
static pmap_table_entry m188_board_table[] = {
- { MVME188_UTILITY, MVME188_UTILITY, MVME188_UTILITY_SIZE, RW, CI},
- { 0 , VEQR_ADDR , 0/*filled in later*/, RW, CG},
- { 0 , 0 , 0/*filled in later*/, RW, CG},
- { 0 , 0 , 0xffffffff , 0, 0},
+ { MVME188_UTILITY, MVME188_UTILITY, MVME188_UTILITY_SIZE, RW, CI},
+ { 0 , 0 , 0xffffffff , 0, 0},
};
#endif
#ifdef MVME197
static pmap_table_entry m197_board_table[] = {
- { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
- { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
- { OBIO_START , OBIO_START , OBIO_SIZE , RW, CG},
- { 0 , 0 , 0xffffffff , 0 , 0},
+ { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
+ { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
+ { OBIO_START , OBIO_START , OBIO_SIZE , RW, CI},
+ { 0 , 0 , 0xffffffff , 0 , 0},
};
#endif
-pmap_table_t pmap_table_build(unsigned memory_size)
+pmap_table_t
+pmap_table_build(endoftext)
+unsigned endoftext;
{
- extern int kernelstart;
- unsigned int i;
- pmap_table_t bt, pbt;
+ extern int kernelstart;
+ unsigned int i;
+ pmap_table_t bt, pbt;
- switch (cputyp) {
+ switch (cputyp) {
#ifdef MVME187
- case CPU_187:
- bt = m187_board_table;
- break;
+ case CPU_187:
+ bt = m187_board_table;
+ break;
#endif
#ifdef MVME188
- case CPU_188:
- bt = m188_board_table;
- /* fill in the veqr map entry */
- m188_board_table[1].size = memory_size;
- m188_board_table[2].size = (unsigned)&kernelstart;
- break;
+ case CPU_188:
+ bt = m188_board_table;
+ break;
#endif
#ifdef MVME197
- case CPU_197:
- bt = m197_board_table;
- break;
+ case CPU_197:
+ bt = m197_board_table;
+ break;
#endif
- default:
- panic("pmap_table_build: Unknown CPU type.");
- /* NOT REACHED */
- }
-
- /* round off all entries to nearest segment */
- pbt = bt;
- for (i = 0; pbt->size != 0xffffffff; i++){
- if (pbt->size>0)
- pbt->size = (pbt->size + M88K_PGBYTES-1) & ~(M88K_PGBYTES-1);
- pbt++;
- }
+ default:
+ panic("pmap_table_build: Unknown CPU type.");
+ /* NOT REACHED */
+ }
- return bt;
+	/* round all entries up to the nearest page boundary */
+ pbt = bt;
+ for (i = 0; pbt->size != 0xffffffff; i++) {
+ if (pbt->size>0)
+ pbt->size = (pbt->size + M88K_PGBYTES-1) & ~(M88K_PGBYTES-1);
+ pbt++;
+ }
+ return bt;
}
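The size fixup loop in pmap_table_build() above is the usual power-of-two round-up idiom, (size + M88K_PGBYTES - 1) & ~(M88K_PGBYTES - 1). A standalone illustration follows; the 4096-byte page size is assumed only for the example and EX_PGBYTES is a made-up name, not an m88k header constant.

#include <stdio.h>

#define EX_PGBYTES 4096u	/* assumed example page size; must be a power of two */

static unsigned
round_to_page(unsigned size)
{
	return ((size + EX_PGBYTES - 1) & ~(EX_PGBYTES - 1));
}

int
main(void)
{
	printf("%u -> %u\n", 1u, round_to_page(1));		/* 1 -> 4096 */
	printf("%u -> %u\n", 4096u, round_to_page(4096));	/* unchanged */
	printf("%u -> %u\n", 4097u, round_to_page(4097));	/* 4097 -> 8192 */
	return (0);
}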
diff --git a/sys/arch/mvme88k/mvme88k/process_machdep.c b/sys/arch/mvme88k/mvme88k/process_machdep.c
index b69af52c741..d28305c2a11 100644
--- a/sys/arch/mvme88k/mvme88k/process_machdep.c
+++ b/sys/arch/mvme88k/mvme88k/process_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: process_machdep.c,v 1.7 1999/09/27 19:13:24 smurph Exp $ */
+/* $OpenBSD: process_machdep.c,v 1.8 2001/02/01 03:38:22 smurph Exp $ */
/*
* Copyright (c) 1993 The Regents of the University of California.
@@ -78,18 +78,18 @@
int
process_read_regs(p, regs)
- struct proc *p;
- struct reg *regs;
+struct proc *p;
+struct reg *regs;
{
-
- bcopy((caddr_t)USER_REGS(p), (caddr_t)regs, sizeof(struct reg));
+
+ bcopy((caddr_t)USER_REGS(p), (caddr_t)regs, sizeof(struct reg));
return (0);
}
int
process_write_regs(p, regs)
- struct proc *p;
- struct reg *regs;
+struct proc *p;
+struct reg *regs;
{
bcopy((caddr_t)regs, (caddr_t)USER_REGS(p), sizeof(struct reg));
return (0);
@@ -97,8 +97,8 @@ process_write_regs(p, regs)
int
process_sstep(p, sstep)
- struct proc *p;
- int sstep;
+struct proc *p;
+int sstep;
{
if (sstep)
cpu_singlestep(p);
@@ -107,29 +107,29 @@ process_sstep(p, sstep)
int
process_set_pc(p, addr)
- struct proc *p;
- caddr_t addr;
+struct proc *p;
+caddr_t addr;
{
struct reg *regs;
-
- regs = USER_REGS(p);
- regs->sxip = (u_int)addr;
- regs->snip = (u_int)addr + 4;
- /*
- p->p_md.md_tf->sxip = (u_int)addr;
- p->p_md.md_tf->snip = (u_int)addr + 4;
- */
+
+ regs = USER_REGS(p);
+ regs->sxip = (u_int)addr;
+ regs->snip = (u_int)addr + 4;
+ /*
+ p->p_md.md_tf->sxip = (u_int)addr;
+ p->p_md.md_tf->snip = (u_int)addr + 4;
+ */
return (0);
}
int
process_read_fpregs(p, regs)
-struct proc *p;
-struct fpreg *regs;
+struct proc *p;
+struct fpreg *regs;
{
#if 0
- extern struct fpstate initfpstate;
- struct fpstate *statep = &initfpstate;
+ extern struct fpstate initfpstate;
+ struct fpstate *statep = &initfpstate;
/* NOTE: struct fpreg == struct fpstate */
if (p->p_md.md_fpstate)
@@ -141,8 +141,8 @@ struct fpreg *regs;
int
process_write_fpregs(p, regs)
-struct proc *p;
-struct fpreg *regs;
+struct proc *p;
+struct fpreg *regs;
{
#if 0
if (p->p_md.md_fpstate == NULL)
diff --git a/sys/arch/mvme88k/mvme88k/trap.c b/sys/arch/mvme88k/mvme88k/trap.c
index d2d43a3b00e..a31eaa0b3be 100644
--- a/sys/arch/mvme88k/mvme88k/trap.c
+++ b/sys/arch/mvme88k/mvme88k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.11 2001/01/13 05:19:00 smurph Exp $ */
+/* $OpenBSD: trap.c,v 1.12 2001/02/01 03:38:22 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -100,29 +100,30 @@ unsigned traptrace = 0;
/* XXX MAJOR CLEANUP REQUIRED TO PORT TO BSD */
char *trap_type[] = {
- "Reset",
- "Interrupt Exception",
- "Instruction Access",
- "Data Access Exception",
- "Misaligned Access",
- "Unimplemented Opcode",
- "Privilege Violation"
- "Bounds Check Violation",
- "Illegal Integer Divide",
- "Integer Overflow",
- "Error Exception",
+ "Reset",
+ "Interrupt Exception",
+ "Instruction Access",
+ "Data Access Exception",
+ "Misaligned Access",
+ "Unimplemented Opcode",
+	"Privilege Violation",
+ "Bounds Check Violation",
+ "Illegal Integer Divide",
+ "Integer Overflow",
+ "Error Exception",
};
char *pbus_exception_type[] = {
- "Success (No Fault)",
- "unknown 1",
- "unknown 2",
- "Bus Error",
- "Segment Fault",
- "Page Fault",
- "Supervisor Violation",
- "Write Violation",
+ "Success (No Fault)",
+ "unknown 1",
+ "unknown 2",
+ "Bus Error",
+ "Segment Fault",
+ "Page Fault",
+ "Supervisor Violation",
+ "Write Violation",
};
+
extern ret_addr;
#define NSIR 8
void (*sir_routines[NSIR])();
@@ -134,546 +135,571 @@ int trap_types = sizeof trap_type / sizeof trap_type[0];
static inline void
userret(struct proc *p, struct m88100_saved_state *frame, u_quad_t oticks)
{
- int sig;
- int s;
-
- /* take pending signals */
- while ((sig = CURSIG(p)) != 0)
- postsig(sig);
- p->p_priority = p->p_usrpri;
-
- if (want_resched) {
- /*
- * Since we are curproc, clock will normally just change
- * our priority without moving us from one queue to another
- * (since the running process is not on a queue.)
- * If that happened after we put ourselves on the run queue
- * but before we switched, we might not be on the queue
- * indicated by our priority.
- */
- s = splstatclock();
- setrunqueue(p);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
- (void) splx(s);
- while ((sig = CURSIG(p)) != 0)
- postsig(sig);
- }
-
- /*
- * If profiling, charge recent system time to the trapped pc.
- */
- if (p->p_flag & P_PROFIL)
- addupc_task(p, frame->sxip & ~3,
- (int)(p->p_sticks - oticks));
-
- curpriority = p->p_priority;
+ int sig;
+ int s;
+
+ /* take pending signals */
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ p->p_priority = p->p_usrpri;
+
+ if (want_resched) {
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ (void) splx(s);
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL)
+ addupc_task(p, frame->sxip & ~3,(int)(p->p_sticks - oticks));
+ curpriority = p->p_priority;
}
void
panictrap(int type, struct m88100_saved_state *frame)
{
- static int panicing = 0;
-
- if (panicing++ == 0) {
- if (type == 2) { /* instruction exception */
- DEBUG_MSG("\nInstr access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->ipfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
- } else if (type == 3) { /* data access exception */
- DEBUG_MSG("\nData access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
- } else
- DEBUG_MSG("\ntrap type %d, v = %x, frame %x\n", type, frame->sxip & ~3, frame);
- regdump(frame);
- }
- if ((u_int)type < trap_types)
- panic(trap_type[type]);
- panic("trap");
- /*NOTREACHED*/
+ static int panicing = 0;
+
+ if (panicing++ == 0) {
+ if (type == 2) { /* instruction exception */
+ DEBUG_MSG("\nInstr access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->ipfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
+ } else if (type == 3) { /* data access exception */
+ DEBUG_MSG("\nData access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
+ } else
+ DEBUG_MSG("\ntrap type %d, v = %x, frame %x\n", type, frame->sxip & ~3, frame);
+ regdump(frame);
+ }
+ if ((u_int)type < trap_types)
+ panic(trap_type[type]);
+ panic("trap");
+ /*NOTREACHED*/
}
-
#if defined(MVME187) || defined(MVME188)
unsigned last_trap[4] = {0,0,0,0};
+unsigned last_vector = 0;
+
/*ARGSUSED*/
void
trap(unsigned type, struct m88100_saved_state *frame)
{
- struct proc *p;
- u_quad_t sticks = 0;
- vm_map_t map;
- vm_offset_t va;
- vm_prot_t ftype;
- int fault_type;
- u_long fault_code;
- unsigned nss, fault_addr;
- struct vmspace *vm;
- union sigval sv;
- int su = 0;
- int result;
- int sig = 0;
- unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
-
- extern vm_map_t kernel_map;
- extern int fubail(), subail();
- extern unsigned guarded_access_start;
- extern unsigned guarded_access_end;
- extern unsigned guarded_access_bad;
-
- if (type != last_trap[3]) {
- last_trap[0] = last_trap[1];
- last_trap[1] = last_trap[2];
- last_trap[2] = last_trap[3];
- last_trap[3] = type;
- }
+ struct proc *p;
+ u_quad_t sticks = 0;
+ vm_map_t map;
+ vm_offset_t va;
+ vm_prot_t ftype;
+ int fault_type;
+ u_long fault_code;
+ unsigned nss, fault_addr;
+ struct vmspace *vm;
+ union sigval sv;
+ int su = 0;
+ int result;
+ int sig = 0;
+ unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
+
+ extern vm_map_t kernel_map;
+ extern int fubail(), subail();
+ extern unsigned guarded_access_start;
+ extern unsigned guarded_access_end;
+ extern unsigned guarded_access_bad;
+
+ if (type != last_trap[3]) {
+ last_trap[0] = last_trap[1];
+ last_trap[1] = last_trap[2];
+ last_trap[2] = last_trap[3];
+ last_trap[3] = type;
+ }
#if defined(UVM)
- uvmexp.traps++;
+ uvmexp.traps++;
#else
- cnt.v_trap++;
+ cnt.v_trap++;
#endif
- if ((p = curproc) == NULL)
- p = &proc0;
-
- if (USERMODE(frame->epsr)) {
- sticks = p->p_sticks;
- type += T_USER;
- p->p_md.md_tf = frame; /* for ptrace/signals */
- fault_type = 0;
- fault_code = 0;
- }
-/* printf("trap 0x%x ", type); */
- switch (type) {
- default:
- panictrap(frame->vector, frame);
- /*NOTREACHED*/
+ if ((p = curproc) == NULL)
+ p = &proc0;
+
+ if (USERMODE(frame->epsr)) {
+ sticks = p->p_sticks;
+ type += T_USER;
+ p->p_md.md_tf = frame; /* for ptrace/signals */
+ fault_type = 0;
+ fault_code = 0;
+ }
+ switch (type) {
+ default:
+ panictrap(frame->vector, frame);
+ /*NOTREACHED*/
#if defined(DDB)
- case T_KDB_BREAK:
- /*FALLTHRU*/
- case T_KDB_BREAK+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
- case T_KDB_ENTRY:
- /*FALLTHRU*/
- case T_KDB_ENTRY+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
-
- #if 0
- case T_ILLFLT:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
- "error fault", (db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
- #endif /* 0 */
+ case T_KDB_BREAK:
+ /*FALLTHRU*/
+ case T_KDB_BREAK+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ case T_KDB_ENTRY:
+ /*FALLTHRU*/
+ case T_KDB_ENTRY+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+
+#if 0
+ case T_ILLFLT:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
+ "error fault", (db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+#endif /* 0 */
#endif /* DDB */
- case T_INT:
- case T_INT+T_USER:
- /* This function pointer is set in machdep.c
- It calls m188_ext_int or sbc_ext_int depending
- on the value of cputyp - smurph */
- (*mdfp.interrupt_func)(T_INT, frame);
- return;
-
- case T_MISALGNFLT:
- DEBUG_MSG("kernel misalgined "
- "access exception @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_INSTFLT:
- /* kernel mode instruction access fault.
- * Should never, never happen for a non-paged kernel.
- */
- DEBUG_MSG("kernel mode instruction "
- "page fault @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_DATAFLT:
- /* kernel mode data fault */
- /*
- * If the faulting address is in user space, handle it in
- * the context of the user process. Else, use kernel map.
- */
-
- if (type == T_DATAFLT) {
- fault_addr = frame->dma0;
- if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- } else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
- } else {
- fault_addr = frame->sxip & XIP_ADDR;
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
-
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* data fault on a kernel address... */
- if (frame->dmt0 & DMT_DAS)
- map = kernel_map;
-
- /*
- * We don't want to call vm_fault() if it is fuwintr() or
- * suwintr(). These routines are for copying from interrupt
- * context and vm_fault() can potentially sleep. You may
- * wonder if it isn't bad karma for an interrupt handler to
- * touch the current process. Indeed it is, but clock interrupt
- * does it while doing profiling. It is OK in that context.
- */
-
- if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
- p->p_addr->u_pcb.pcb_onfault == (int)subail)
- goto outtahere;
-
- /* data fault on the user address */
- if (type == T_DATAFLT && (frame->dmt0 & DMT_DAS) == 0) {
- type = T_DATAFLT + T_USER;
- goto user_fault;
- }
-
- /*
- * If it is a guarded access, bus error is OK.
- */
-
- if ((frame->dpfsr >> 16 & 0x7) == 0x3 && /* bus error */
- (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
- (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
-
- frame->snip = ((unsigned)&guarded_access_bad ) | FIP_V;
- frame->sfip = ((unsigned)&guarded_access_bad + 4) | FIP_V;
- frame->sxip = 0;
- frame->dmt0 = 0;/* XXX what about other trans. in data unit */
- frame->dpfsr = 0;
- return;
- }
-
- /*
- * On a segment or a page fault, call vm_fault() to resolve
- * the fault.
- */
- if ((frame->dpfsr >> 16 & 0x7) == 0x4 /* seg fault */
- || (frame->dpfsr >> 16 & 0x7) == 0x5) { /* page fault */
+ case T_INT:
+ case T_INT+T_USER:
+ /* This function pointer is set in machdep.c
+ It calls m188_ext_int or sbc_ext_int depending
+ on the value of cputyp - smurph */
+ (*mdfp.interrupt_func)(T_INT, frame);
+ return;
+
+ case T_MISALGNFLT:
+		DEBUG_MSG("kernel misaligned "
+ "access exception @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_INSTFLT:
+ /* kernel mode instruction access fault.
+ * Should never, never happen for a non-paged kernel.
+ */
+ DEBUG_MSG("kernel mode instruction "
+ "page fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_DATAFLT:
+ /* kernel mode data fault */
+ /*
+ * If the faulting address is in user space, handle it in
+ * the context of the user process. Else, use kernel map.
+ */
+
+ if (type == T_DATAFLT) {
+ fault_addr = frame->dma0;
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ } else {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+ } else {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* data fault on a kernel address... */
+ if (frame->dmt0 & DMT_DAS)
+ map = kernel_map;
+
+ /*
+ * We don't want to call vm_fault() if it is fuwintr() or
+ * suwintr(). These routines are for copying from interrupt
+ * context and vm_fault() can potentially sleep. You may
+ * wonder if it isn't bad karma for an interrupt handler to
+ * touch the current process. Indeed it is, but clock interrupt
+ * does it while doing profiling. It is OK in that context.
+ */
+
+ if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
+ p->p_addr->u_pcb.pcb_onfault == (int)subail)
+ goto outtahere;
+
+ /* data fault on the user address */
+ if (type == T_DATAFLT && (frame->dmt0 & DMT_DAS) == 0) {
+ type = T_DATAFLT + T_USER;
+ goto user_fault;
+ }
+#if 0
+ printf("\nKernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
+ ((frame->dpfsr >> 16) & 0x7),
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ fault_addr, frame, frame->cpu);
+#endif
+ /*
+ * If it is a guarded access, bus error is OK.
+ */
+ if ((frame->dpfsr >> 16 & 0x7) == 0x3 && /* bus error */
+ (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
+ (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
+
+ frame->snip = ((unsigned)&guarded_access_bad ) | FIP_V;
+ frame->sfip = ((unsigned)&guarded_access_bad + 4) | FIP_V;
+ frame->sxip = 0;
+ /* We sort of resolved the fault ourselves because
+			 * we know where it came from. [guarded_access()]
+ * But we must still think about the other possible
+ * transactions in dmt1 & dmt2. Mark dmt0 so that
+ * data_access_emulation skips it. XXX smurph
+ */
+ frame->dmt0 = DMT_SKIP;
+ frame->dpfsr = 0;
+ data_access_emulation(frame);
+ /* so data_access_emulation doesn't get called again. */
+ frame->dmt0 = 0;
+ return;
+ }
+ /*
+		 * If no fault is indicated, just return.
+ */
+ if ((frame->dpfsr >> 16 & 0x7) == 0x0) { /* no fault */
+ /*
+ * The fault was resolved. Call data_access_emulation
+ * to drain the data unit pipe line and reset dmt0
+ * so that trap won't get called again.
+ * For inst faults, back up the pipe line.
+ */
+ if (type == T_DATAFLT) {
+ /*
+ printf("calling data_access_emulation()\n");
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ } else {
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ return;
+ }
+
+ /*
+ * On a segment or a page fault, call vm_fault() to resolve
+ * the fault.
+ */
+ if ((unsigned)map & 3) {
+ printf("map is not word aligned! 0x%x\n", map);
+ Debugger();
+ }
+ if ((frame->dpfsr >> 16 & 0x7) == 0x4 /* seg fault */
+ || (frame->dpfsr >> 16 & 0x7) == 0x5) { /* page fault */
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
+ if (result == KERN_SUCCESS) {
+ /*
+ * We could resolve the fault. Call
+ * data_access_emulation to drain the data unit pipe
+ * line and reset dmt0 so that trap won't get called
+ * again. For inst faults, back up the pipe line.
+ */
+ if (type == T_DATAFLT) {
+ /*
+ printf("calling data_access_emulation()\n");
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ } else {
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ return;
+ }
+ }
+ /*
+ printf ("PBUS Fault %d (%s) va = 0x%x\n", ((frame->dpfsr >> 16) & 0x7),
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7], va);
+ */
+ /*
+		 * If the fault is still not resolved ...
+ */
+ if (!p->p_addr->u_pcb.pcb_onfault)
+ panictrap(frame->vector, frame);
+
+outtahere:
+ frame->snip = ((unsigned)p->p_addr->u_pcb.pcb_onfault ) | FIP_V;
+ frame->sfip = ((unsigned)p->p_addr->u_pcb.pcb_onfault + 4) | FIP_V;
+ frame->sxip = 0;
+ /* We sort of resolved the fault ourselves because
+ * we know where it came from. [fuwintr() or suwintr()]
+ * But we must still think about the other possible
+ * transactions in dmt1 & dmt2. Mark dmt0 so that
+ * data_access_emulation skips it. XXX smurph
+ */
+ frame->dmt0 = DMT_SKIP;
+ frame->dpfsr = 0;
+ data_access_emulation(frame);
+ /* so data_access_emulation doesn't get called again. */
+ frame->dmt0 = 0;
+ return;
+ case T_INSTFLT+T_USER:
+ /* User mode instruction access fault */
+ /* FALLTHRU */
+ case T_DATAFLT+T_USER:
+ user_fault:
+ if (type == T_INSTFLT+T_USER) {
+ fault_addr = frame->sxip & XIP_ADDR;
+ } else {
+ fault_addr = frame->dma0;
+ }
+#if 0
+ printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
+ ((frame->dpfsr >> 16) & 0x7),
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ fault_addr, frame, frame->cpu);
+#endif
-
- /*
- printf("vm_fault(map 0x%x, va 0x%x, ftype 0x%x, FALSE) -> %d (%s)\n",
- map, va, ftype, result,
- result ? "KERN_INVALID_ADDRESS" : "KERN_SUCCESS");
- */
- if (result == KERN_SUCCESS) {
- /*
- * We could resolve the fault. Call
- * data_access_emulation to drain the data unit pipe
- * line and reset dmt0 so that trap won't get called
- * again. For inst faults, back up the pipe line.
- */
- if (type == T_DATAFLT) {
- /*
- printf("calling data_access_emulation()\n");
- */
- data_access_emulation(frame);
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- } else {
- frame->sfip = frame->snip & ~FIP_E;
- frame->snip = frame->sxip & ~NIP_E;
- }
- return;
- }
- }
- /*
- printf ("PBUS Fault %d (%s) va = 0x%x\n", ((frame->dpfsr >> 16) & 0x7),
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7], va);
- */
- /*
- * if still the fault is not resolved ...
- */
- if (!p->p_addr->u_pcb.pcb_onfault)
- panictrap(frame->vector, frame);
-
- outtahere:
- frame->snip = ((unsigned)p->p_addr->u_pcb.pcb_onfault ) | FIP_V;
- frame->sfip = ((unsigned)p->p_addr->u_pcb.pcb_onfault + 4) | FIP_V;
- frame->sxip = 0;
- frame->dmt0 = 0; /* XXX what about other trans. in data unit */
- frame->dpfsr = 0;
- return;
- case T_INSTFLT+T_USER:
- /* User mode instruction access fault */
- /*FALLTHRU*/
- case T_DATAFLT+T_USER:
- user_fault:
-/* printf("\nUser Data access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
-*/
-
- if (type == T_INSTFLT+T_USER) {
- fault_addr = frame->sxip & XIP_ADDR;
- } else {
- fault_addr = frame->dma0;
- }
-
- if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- } else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
-
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* Call vm_fault() to resolve non-bus error faults */
- if ((frame->ipfsr >> 16 & 0x7) != 0x3 &&
- (frame->dpfsr >> 16 & 0x7) != 0x3) {
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ } else {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ if ((unsigned)map & 3) {
+ printf("map is not word aligned! 0x%x\n", map);
+ Debugger();
+ }
+ /* Call vm_fault() to resolve non-bus error faults */
+ if ((frame->ipfsr >> 16 & 0x7) != 0x3 &&
+ (frame->dpfsr >> 16 & 0x7) != 0x3) {
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
- frame->ipfsr = frame->dpfsr = 0;
- /*
- printf("vm_fault(map 0x%x, va 0x%x, ftype 0x%x, FALSE) -> %d (%s)\n",
- map, va, ftype, result,
- result ? "KERN_INVALID_ADDRESS" : "KERN_SUCCESS");
- */
- }
-
- if ((caddr_t)va >= vm->vm_maxsaddr) {
- if (result == KERN_SUCCESS) {
- nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
- if (nss > vm->vm_ssize)
- vm->vm_ssize = nss;
- } else if (result == KERN_PROTECTION_FAILURE)
- result = KERN_INVALID_ADDRESS;
- }
-
- if (result == KERN_SUCCESS) {
- if (type == T_DATAFLT+T_USER) {
- /*
- printf("calling data_access_emulation()\n");
- */
- /*
- * We could resolve the fault. Call
- * data_access_emulation to drain the data unit
- * pipe line and reset dmt0 so that trap won't
- * get called again.
- */
- data_access_emulation(frame);
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- } else {
- /* back up SXIP, SNIP clearing the the Error bit */
- frame->sfip = frame->snip & ~FIP_E;
- frame->snip = frame->sxip & ~NIP_E;
- }
- } else {
- sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
- fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
- : SEGV_MAPERR;
- }
- /*
- printf("sig == %d, fault_type == %d\n", sig, fault_type);
- */
- break;
-
- case T_MISALGNFLT+T_USER:
-/* DEBUG_MSG("T_MISALGNFLT\n");*/
- sig = SIGBUS;
- fault_type = BUS_ADRALN;
-/* panictrap(fault_type, frame);*/
- break;
-
- case T_PRIVINFLT+T_USER:
- case T_ILLFLT+T_USER:
- sig = SIGILL;
- break;
-
- case T_BNDFLT+T_USER:
- sig = SIGFPE;
- break;
- case T_ZERODIV+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTDIV;
- break;
- case T_OVFFLT+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTOVF;
- break;
-
- case T_FPEPFLT+T_USER:
- case T_FPEIFLT+T_USER:
- sig = SIGFPE;
- break;
-
- case T_SIGTRAP+T_USER:
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
-
- case T_STEPBPT+T_USER:
- /*
- * This trap is used by the kernel to support single-step
- * debugging (although any user could generate this trap
- * which should probably be handled differently). When a
- * process is continued by a debugger with the PT_STEP
- * function of ptrace (single step), the kernel inserts
- * one or two breakpoints in the user process so that only
- * one instruction (or two in the case of a delayed branch)
- * is executed. When this breakpoint is hit, we get the
- * T_STEPBPT trap.
- */
-
- {
- register unsigned va;
- unsigned instr;
- struct uio uio;
- struct iovec iov;
-
- /* compute address of break instruction */
- va = pc;
-
- /* read break instruction */
- instr = fuiword((caddr_t)pc);
+ frame->ipfsr = frame->dpfsr = 0;
+ }
+
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (result == KERN_SUCCESS) {
+ nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (result == KERN_PROTECTION_FAILURE)
+ result = KERN_INVALID_ADDRESS;
+ }
+
+ if (result == KERN_SUCCESS) {
+ if (type == T_DATAFLT+T_USER) {
+ /*
+ * We could resolve the fault. Call
+ * data_access_emulation to drain the data unit
+ * pipe line and reset dmt0 so that trap won't
+ * get called again.
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ } else {
+				/* back up SXIP, SNIP clearing the Error bit */
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ } else {
+ sig = result == KERN_PROTECTION_FAILURE ?
+ SIGBUS : SIGSEGV;
+ fault_type = result == KERN_PROTECTION_FAILURE ?
+ BUS_ADRERR : SEGV_MAPERR;
+ }
+ break;
+ case T_MISALGNFLT+T_USER:
+ sig = SIGBUS;
+ fault_type = BUS_ADRALN;
+ break;
+ case T_PRIVINFLT+T_USER:
+ case T_ILLFLT+T_USER:
+ sig = SIGILL;
+ break;
+ case T_BNDFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_ZERODIV+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTDIV;
+ break;
+ case T_OVFFLT+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTOVF;
+ break;
+ case T_FPEPFLT+T_USER:
+ case T_FPEIFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_SIGTRAP+T_USER:
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ case T_STEPBPT+T_USER:
+ /*
+ * This trap is used by the kernel to support single-step
+ * debugging (although any user could generate this trap
+ * which should probably be handled differently). When a
+ * process is continued by a debugger with the PT_STEP
+ * function of ptrace (single step), the kernel inserts
+ * one or two breakpoints in the user process so that only
+ * one instruction (or two in the case of a delayed branch)
+ * is executed. When this breakpoint is hit, we get the
+ * T_STEPBPT trap.
+ */
+ {
+ register unsigned va;
+ unsigned instr;
+ struct uio uio;
+ struct iovec iov;
+
+ /* compute address of break instruction */
+ va = pc;
+
+ /* read break instruction */
+ instr = fuiword((caddr_t)pc);
#if 0
- printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
- p->p_comm, p->p_pid, instr, pc,
- p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
- /* check and see if we got here by accident */
- if ((p->p_md.md_ss_addr != pc &&
- p->p_md.md_ss_taken_addr != pc) ||
- instr != SSBREAKPOINT) {
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
- }
- /* restore original instruction and clear BP */
- instr = p->p_md.md_ss_instr;
- va = p->p_md.md_ss_addr;
- if (va != 0) {
- iov.iov_base = (caddr_t)&instr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)va;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- procfs_domem(p, p, NULL, &uio);
- }
-
- /* branch taken instruction */
- instr = p->p_md.md_ss_taken_instr;
- va = p->p_md.md_ss_taken_addr;
- if (instr != 0) {
- iov.iov_base = (caddr_t)&instr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)va;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- procfs_domem(p, p, NULL, &uio);
- }
+ /* check and see if we got here by accident */
+ if ((p->p_md.md_ss_addr != pc &&
+ p->p_md.md_ss_taken_addr != pc) ||
+ instr != SSBREAKPOINT) {
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ }
+ /* restore original instruction and clear BP */
+ instr = p->p_md.md_ss_instr;
+ va = p->p_md.md_ss_addr;
+ if (va != 0) {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ procfs_domem(p, p, NULL, &uio);
+ }
+
+ /* branch taken instruction */
+ instr = p->p_md.md_ss_taken_instr;
+ va = p->p_md.md_ss_taken_addr;
+ if (instr != 0) {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ procfs_domem(p, p, NULL, &uio);
+ }
#if 1
- frame->sfip = frame->snip; /* set up next FIP */
- frame->snip = pc; /* set up next NIP */
- frame->snip |= 2; /* set valid bit */
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = pc; /* set up next NIP */
+ frame->snip |= 2; /* set valid bit */
#endif
- p->p_md.md_ss_addr = 0;
- p->p_md.md_ss_instr = 0;
- p->p_md.md_ss_taken_addr = 0;
- p->p_md.md_ss_taken_instr = 0;
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- }
- break;
-
- case T_USERBPT+T_USER:
- /*
- * This trap is meant to be used by debuggers to implement
- * breakpoint debugging. When we get this trap, we just
- * return a signal which gets caught by the debugger.
- */
- frame->sfip = frame->snip; /* set up the next FIP */
- frame->snip = frame->sxip; /* set up the next NIP */
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- break;
-
- case T_ASTFLT+T_USER:
+ p->p_md.md_ss_addr = 0;
+ p->p_md.md_ss_instr = 0;
+ p->p_md.md_ss_taken_addr = 0;
+ p->p_md.md_ss_taken_instr = 0;
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ }
+ break;
+
+ case T_USERBPT+T_USER:
+ /*
+ * This trap is meant to be used by debuggers to implement
+ * breakpoint debugging. When we get this trap, we just
+ * return a signal which gets caught by the debugger.
+ */
+ frame->sfip = frame->snip; /* set up the next FIP */
+ frame->snip = frame->sxip; /* set up the next NIP */
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+
+ case T_ASTFLT+T_USER:
#if defined(UVM)
- uvmexp.softs++;
+ uvmexp.softs++;
#else
- cnt.v_soft++;
+ cnt.v_soft++;
#endif
- want_ast = 0;
- if (p->p_flag & P_OWEUPC) {
- p->p_flag &= ~P_OWEUPC;
- ADDUPROF(p);
- }
- break;
- }
-
- /*
- * If trap from supervisor mode, just return
- */
- if (SYSTEMMODE(frame->epsr))
- return;
-
- if (sig) {
- sv.sival_int = fault_addr;
- trapsignal(p, sig, fault_code, fault_type, sv);
- /*
- * don't want multiple faults - we are going to
- * deliver signal.
- */
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- }
-
- userret(p, frame, sticks);
+ want_ast = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ break;
+ }
+
+ /*
+ * If trap from supervisor mode, just return
+ */
+ if (SYSTEMMODE(frame->epsr))
+ return;
+
+ if (sig) {
+ sv.sival_int = fault_addr;
+ trapsignal(p, sig, fault_code, fault_type, sv);
+ /*
+ * don't want multiple faults - we are going to
+ * deliver signal.
+ */
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ }
+
+ userret(p, frame, sticks);
}
#endif /* defined(MVME187) || defined(MVME188) */
/*ARGSUSED*/
@@ -681,911 +707,896 @@ trap(unsigned type, struct m88100_saved_state *frame)
void
trap2(unsigned type, struct m88100_saved_state *frame)
{
- struct proc *p;
- u_quad_t sticks = 0;
- vm_map_t map;
- vm_offset_t va;
- vm_prot_t ftype;
- int fault_type;
- u_long fault_code;
- unsigned nss, fault_addr;
- struct vmspace *vm;
- union sigval sv;
- int su = 0;
- int result;
- int sig = 0;
- unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
- unsigned dsr, isr, user = 0, write = 0, data = 0;
-
- extern vm_map_t kernel_map;
- extern int fubail(), subail();
- extern unsigned guarded_access_start;
- extern unsigned guarded_access_end;
- extern unsigned guarded_access_bad;
+ struct proc *p;
+ u_quad_t sticks = 0;
+ vm_map_t map;
+ vm_offset_t va;
+ vm_prot_t ftype;
+ int fault_type;
+ u_long fault_code;
+ unsigned nss, fault_addr;
+ struct vmspace *vm;
+ union sigval sv;
+ int su = 0;
+ int result;
+ int sig = 0;
+ unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
+ unsigned dsr, isr, user = 0, write = 0, data = 0;
+
+ extern vm_map_t kernel_map;
+ extern int fubail(), subail();
+ extern unsigned guarded_access_start;
+ extern unsigned guarded_access_end;
+ extern unsigned guarded_access_bad;
#if defined(UVM)
- uvmexp.traps++;
+ uvmexp.traps++;
#else
- cnt.v_trap++;
+ cnt.v_trap++;
#endif
-
- if ((p = curproc) == NULL)
- p = &proc0;
-
- if (USERMODE(frame->epsr)) {
- sticks = p->p_sticks;
- type += T_USER;
- p->p_md.md_tf = frame; /* for ptrace/signals */
- fault_type = 0;
- fault_code = 0;
- }
- printf("m197_trap 0x%x ", type);
- switch (type) {
- default:
- panictrap(frame->vector, frame);
- /*NOTREACHED*/
- case T_197_READ+T_USER:
- user = 1;
- case T_197_READ:
- va = (vm_offset_t) frame->dlar;
- /* if it was a user read, handle in context of the user */
- if ((frame->dsr & CMMU_DSR_SU) && !user) {
- map = kernel_map;
- } else {
- vm = p->p_vmspace;
- map = &vm->vm_map;
- }
- result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_DATA);
- if (result) {
- switch (result) {
- case 4: /* Seg Fault */
- frame->dsr |= CMMU_DSR_SI | CMMU_DSR_RW;
- break;
- case 5: /* Page Fault */
- frame->dsr |= CMMU_DSR_PI | CMMU_DSR_RW;
- break;
- case 6: /* Supervisor Violation */
- frame->dsr |= CMMU_DSR_SP | CMMU_DSR_RW;
- break;
- }
- /* table search failed and we are going to report a data fault */
- if (user) {
- type = T_DATAFLT+T_USER;
- goto m197_user_fault;
- } else {
- type = T_DATAFLT;
- goto m197_data_fault;
- }
- } else {
- return; /* PATC sucessfully loaded */
- }
- break;
- case T_197_WRITE+T_USER:
- user = 1;
- case T_197_WRITE:
- /* if it was a user read, handle in context of the user */
- if ((frame->dsr & CMMU_DSR_SU) && !user) {
- map = kernel_map;
- } else {
- vm = p->p_vmspace;
- map = &vm->vm_map;
- }
- va = (vm_offset_t) frame->dlar;
- result = m197_table_search(map->pmap, va, CMMU_WRITE, user, CMMU_DATA);
- if (result) {
- switch (result) {
- case 4: /* Seg Fault */
- frame->dsr |= CMMU_DSR_SI;
- break;
- case 5: /* Page Fault */
- frame->dsr |= CMMU_DSR_PI;
- break;
- case 6: /* Supervisor Violation */
- frame->dsr |= CMMU_DSR_SP;
- break;
- case 7: /* Write Violation */
- frame->dsr |= CMMU_DSR_WE;
- break;
- }
- /* table search failed and we are going to report a data fault */
- if (user) {
- type = T_DATAFLT+T_USER;
- goto m197_user_fault;
- } else {
- type = T_DATAFLT;
- goto m197_data_fault;
- }
- } else {
- return; /* PATC sucessfully loaded */
- }
- break;
- case T_197_INST+T_USER:
- user = 1;
- case T_197_INST:
- /* if it was a user read, handle in context of the user */
- if ((frame->isr & CMMU_ISR_SU) && !user) {
- map = kernel_map;
- } else {
- vm = p->p_vmspace;
- map = &vm->vm_map;
- }
- va = (vm_offset_t) frame->sxip;
- result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_INST);
- if (result) {
- switch (result) {
- case 4: /* Seg Fault */
- frame->isr |= CMMU_ISR_SI;
- break;
- case 5: /* Page Fault */
- frame->isr |= CMMU_ISR_PI;
- break;
- case 6: /* Supervisor Violation */
- frame->isr |= CMMU_ISR_SP;
- break;
- }
- /* table search failed and we are going to report a data fault */
- if (user) {
- type = T_INSTFLT+T_USER;
- goto m197_user_fault;
- } else {
- type = T_INSTFLT;
- goto m197_inst_fault;
- }
- } else {
- return; /* PATC sucessfully loaded */
- }
- break;
+
+ if ((p = curproc) == NULL)
+ p = &proc0;
+
+ if (USERMODE(frame->epsr)) {
+ sticks = p->p_sticks;
+ type += T_USER;
+ p->p_md.md_tf = frame; /* for ptrace/signals */
+ fault_type = 0;
+ fault_code = 0;
+ }
+ printf("m197_trap 0x%x ", type);
+ switch (type) {
+ default:
+ panictrap(frame->vector, frame);
+ /*NOTREACHED*/
+ case T_197_READ+T_USER:
+ user = 1;
+ case T_197_READ:
+ va = (vm_offset_t) frame->dlar;
+ /* if it was a user read, handle in context of the user */
+ if ((frame->dsr & CMMU_DSR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_DATA);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->dsr |= CMMU_DSR_SI | CMMU_DSR_RW;
+ break;
+ case 5: /* Page Fault */
+ frame->dsr |= CMMU_DSR_PI | CMMU_DSR_RW;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->dsr |= CMMU_DSR_SP | CMMU_DSR_RW;
+ break;
+ }
+ /* table search failed and we are going to report a data fault */
+ if (user) {
+ type = T_DATAFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_DATAFLT;
+ goto m197_data_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
+ case T_197_WRITE+T_USER:
+ user = 1;
+ case T_197_WRITE:
+		/* if it was a user write, handle in the context of the user */
+ if ((frame->dsr & CMMU_DSR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ va = (vm_offset_t) frame->dlar;
+ result = m197_table_search(map->pmap, va, CMMU_WRITE, user, CMMU_DATA);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->dsr |= CMMU_DSR_SI;
+ break;
+ case 5: /* Page Fault */
+ frame->dsr |= CMMU_DSR_PI;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->dsr |= CMMU_DSR_SP;
+ break;
+ case 7: /* Write Violation */
+ frame->dsr |= CMMU_DSR_WE;
+ break;
+ }
+ /* table search failed and we are going to report a data fault */
+ if (user) {
+ type = T_DATAFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_DATAFLT;
+ goto m197_data_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
+ case T_197_INST+T_USER:
+ user = 1;
+ case T_197_INST:
+ /* if it was a user read, handle in context of the user */
+ if ((frame->isr & CMMU_ISR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ va = (vm_offset_t) frame->sxip;
+ result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_INST);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->isr |= CMMU_ISR_SI;
+ break;
+ case 5: /* Page Fault */
+ frame->isr |= CMMU_ISR_PI;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->isr |= CMMU_ISR_SP;
+ break;
+ }
+			/* table search failed and we are going to report an instruction fault */
+ if (user) {
+ type = T_INSTFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_INSTFLT;
+ goto m197_inst_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
#if defined(DDB)
- case T_KDB_BREAK:
- /*FALLTHRU*/
- case T_KDB_BREAK+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
- case T_KDB_ENTRY:
- /*FALLTHRU*/
- case T_KDB_ENTRY+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
-
- #if 0
- case T_ILLFLT:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
- "error fault", (db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
- #endif /* 0 */
- #endif /* DDB */
- case T_ILLFLT:
- DEBUG_MSG("test trap "
- "page fault @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
+ case T_KDB_BREAK:
+ /*FALLTHRU*/
+ case T_KDB_BREAK+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ case T_KDB_ENTRY:
+ /*FALLTHRU*/
+ case T_KDB_ENTRY+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+
+#if 0
+ case T_ILLFLT:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
+ "error fault", (db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+#endif /* 0 */
+#endif /* DDB */
+ case T_ILLFLT:
+ DEBUG_MSG("test trap "
+ "page fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_MISALGNFLT:
+		DEBUG_MSG("kernel misaligned "
+ "access exception @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_INSTFLT:
+ m197_inst_fault:
+ /* kernel mode instruction access fault.
+ * Should never, never happen for a non-paged kernel.
+ */
+ DEBUG_MSG("kernel mode instruction "
+ "page fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
break;
- case T_MISALGNFLT:
- DEBUG_MSG("kernel misalgined "
- "access exception @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_INSTFLT:
-m197_inst_fault:
- /* kernel mode instruction access fault.
- * Should never, never happen for a non-paged kernel.
- */
- DEBUG_MSG("kernel mode instruction "
- "page fault @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_DATAFLT:
- /* kernel mode data fault */
- /*
- * If the faulting address is in user space, handle it in
- * the context of the user process. Else, use kernel map.
- */
-m197_data_fault:
- if (type == T_DATAFLT) {
- fault_addr = frame->dlar;
- if (frame->dsr & CMMU_DSR_RW) {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- } else {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- write = 1;
- }
- data = 1;
- } else {
- fault_addr = frame->sxip & XIP_ADDR;
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* data fault on a kernel address... */
- if (type == T_DATAFLT) {
- if (frame->dsr & CMMU_DSR_SU) {
- map = kernel_map;
- }
- }
-
- /*
- * We don't want to call vm_fault() if it is fuwintr() or
- * suwintr(). These routines are for copying from interrupt
- * context and vm_fault() can potentially sleep. You may
- * wonder if it isn't bad karma for an interrupt handler to
- * touch the current process. Indeed it is, but clock interrupt
- * does it while doing profiling. It is OK in that context.
- */
-
- if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
- p->p_addr->u_pcb.pcb_onfault == (int)subail)
- goto m197_outtahere;
-
- /* data fault on the user address */
- if (type == T_DATAFLT && (frame->dsr & CMMU_DSR_SU) == 0) {
- type = T_DATAFLT + T_USER;
- goto m197_user_fault;
- }
-
- /*
- * If it is a guarded access, bus error is OK.
- */
-
- if ((frame->dsr & CMMU_DSR_BE) && /* bus error */
- (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
- (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
- return;
- }
-
- /*
- * On a segment or a page fault, call vm_fault() to resolve
- * the fault.
- */
- result = m197_table_search(map->pmap, va, write, 1, data);
-/* todo
- switch (result) {
- case :
- }
-*/
- if (type == T_DATAFLT) {
- if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
- || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
+ case T_DATAFLT:
+ /* kernel mode data fault */
+ /*
+ * If the faulting address is in user space, handle it in
+ * the context of the user process. Else, use kernel map.
+ */
+ m197_data_fault:
+ if (type == T_DATAFLT) {
+ fault_addr = frame->dlar;
+ if (frame->dsr & CMMU_DSR_RW) {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ write = 1;
+ }
+ data = 1;
+ } else {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* data fault on a kernel address... */
+ if (type == T_DATAFLT) {
+ if (frame->dsr & CMMU_DSR_SU) {
+ map = kernel_map;
+ }
+ }
+
+ /*
+ * We don't want to call vm_fault() if it is fuwintr() or
+ * suwintr(). These routines are for copying from interrupt
+ * context and vm_fault() can potentially sleep. You may
+ * wonder if it isn't bad karma for an interrupt handler to
+ * touch the current process. Indeed it is, but clock interrupt
+ * does it while doing profiling. It is OK in that context.
+ */
+
+ if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
+ p->p_addr->u_pcb.pcb_onfault == (int)subail)
+ goto m197_outtahere;
+
+ /* data fault on the user address */
+ if (type == T_DATAFLT && (frame->dsr & CMMU_DSR_SU) == 0) {
+ type = T_DATAFLT + T_USER;
+ goto m197_user_fault;
+ }
+
+ /*
+ * If it is a guarded access, bus error is OK.
+ */
+
+ if ((frame->dsr & CMMU_DSR_BE) && /* bus error */
+ (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
+ (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
+ return;
+ }
+
+ /*
+ * On a segment or a page fault, call vm_fault() to resolve
+ * the fault.
+ */
+ result = m197_table_search(map->pmap, va, write, 1, data);
+#ifdef todo
+ switch (result) {
+ case :
+ }
+#endif
+ if (type == T_DATAFLT) {
+ if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
+ || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
- if (result == KERN_SUCCESS) {
- return;
- }
- }
- } else {
- if ((frame->isr & CMMU_ISR_SI) /* seg fault */
- || (frame->isr & CMMU_ISR_PI)) { /* page fault */
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ } else {
+ if ((frame->isr & CMMU_ISR_SI) /* seg fault */
+ || (frame->isr & CMMU_ISR_PI)) { /* page fault */
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
- if (result == KERN_SUCCESS) {
- return;
- }
- }
- }
-
- /*
- printf ("PBUS Fault %d (%s) va = 0x%x\n", ((frame->dpfsr >> 16) & 0x7),
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7], va);
- */
- /*
- * if still the fault is not resolved ...
- */
- if (!p->p_addr->u_pcb.pcb_onfault)
- panictrap(frame->vector, frame);
-
-m197_outtahere:
- frame->sxip = ((unsigned)p->p_addr->u_pcb.pcb_onfault);
- return;
- case T_INSTFLT+T_USER:
- /* User mode instruction access fault */
- /*FALLTHRU*/
- case T_DATAFLT+T_USER:
-m197_user_fault:
-/* printf("\nUser Data access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
-*/
-
- if (type == T_INSTFLT+T_USER) {
- fault_addr = frame->sxip & XIP_ADDR;
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- } else {
- fault_addr = frame->dlar;
- if (frame->dsr & CMMU_DSR_RW) {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- } else {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- }
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
-
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* Call vm_fault() to resolve non-bus error faults */
- if (type == T_DATAFLT+T_USER) {
- if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
- || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ }
+ /*
+		 * If the fault is still not resolved ...
+ */
+ if (!p->p_addr->u_pcb.pcb_onfault)
+ panictrap(frame->vector, frame);
+
+ m197_outtahere:
+ frame->sxip = ((unsigned)p->p_addr->u_pcb.pcb_onfault);
+ return;
+ case T_INSTFLT+T_USER:
+ /* User mode instruction access fault */
+ /*FALLTHRU*/
+ case T_DATAFLT+T_USER:
+ m197_user_fault:
+ if (type == T_INSTFLT+T_USER) {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ fault_addr = frame->dlar;
+ if (frame->dsr & CMMU_DSR_RW) {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ }
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+
+ /* Call vm_fault() to resolve non-bus error faults */
+ if (type == T_DATAFLT+T_USER) {
+ if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
+ || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
- if (result == KERN_SUCCESS) {
- return;
- }
- }
- } else {
- if ((frame->isr & CMMU_ISR_SI) /* seg fault */
- || (frame->isr & CMMU_ISR_PI)) { /* page fault */
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ } else {
+ if ((frame->isr & CMMU_ISR_SI) /* seg fault */
+ || (frame->isr & CMMU_ISR_PI)) { /* page fault */
#if defined(UVM)
- result = uvm_fault(map, va, 0, ftype);
+ result = uvm_fault(map, va, 0, ftype);
#else
- result = vm_fault(map, va, ftype, FALSE);
+ result = vm_fault(map, va, ftype, FALSE);
#endif
- if (result == KERN_SUCCESS) {
- return;
- }
- }
- }
-
- if ((caddr_t)va >= vm->vm_maxsaddr) {
- if (result == KERN_SUCCESS) {
- nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
- if (nss > vm->vm_ssize)
- vm->vm_ssize = nss;
- } else if (result == KERN_PROTECTION_FAILURE)
- result = KERN_INVALID_ADDRESS;
- }
-
- if (result != KERN_SUCCESS) {
- sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
- fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
- : SEGV_MAPERR;
- } else {
- return;
- }
- /*
- printf("sig == %d, fault_type == %d\n", sig, fault_type);
- */
- break;
-
- case T_MISALGNFLT+T_USER:
-/* DEBUG_MSG("T_MISALGNFLT\n");*/
- sig = SIGBUS;
- fault_type = BUS_ADRALN;
-/* panictrap(fault_type, frame);*/
- break;
-
- case T_PRIVINFLT+T_USER:
- case T_ILLFLT+T_USER:
- sig = SIGILL;
- break;
-
- case T_BNDFLT+T_USER:
- sig = SIGFPE;
- break;
- case T_ZERODIV+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTDIV;
- break;
- case T_OVFFLT+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTOVF;
- break;
-
- case T_FPEPFLT+T_USER:
- case T_FPEIFLT+T_USER:
- sig = SIGFPE;
- break;
-
- case T_SIGTRAP+T_USER:
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
-
- case T_STEPBPT+T_USER:
- /*
- * This trap is used by the kernel to support single-step
- * debugging (although any user could generate this trap
- * which should probably be handled differently). When a
- * process is continued by a debugger with the PT_STEP
- * function of ptrace (single step), the kernel inserts
- * one or two breakpoints in the user process so that only
- * one instruction (or two in the case of a delayed branch)
- * is executed. When this breakpoint is hit, we get the
- * T_STEPBPT trap.
- */
- #if 0
- frame->sfip = frame->snip; /* set up next FIP */
- frame->snip = frame->sxip; /* set up next NIP */
- break;
- #endif
- {
- register unsigned va;
- unsigned instr;
- struct uio uio;
- struct iovec iov;
-
- /* compute address of break instruction */
- va = pc;
-
- /* read break instruction */
- instr = fuiword((caddr_t)pc);
- #if 1
- printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
- p->p_comm, p->p_pid, instr, pc,
- p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
- #endif
- /* check and see if we got here by accident */
-/*
- if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) {
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
- }
-*/
- /* restore original instruction and clear BP */
- /*sig = suiword((caddr_t)pc, p->p_md.md_ss_instr);*/
- instr = p->p_md.md_ss_instr;
- if (instr == 0) {
- printf("Warning: can't restore instruction at %x: %x\n",
- p->p_md.md_ss_addr, p->p_md.md_ss_instr);
- } else {
- iov.iov_base = (caddr_t)&instr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)pc;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- }
-
- frame->sfip = frame->snip; /* set up next FIP */
- frame->snip = frame->sxip; /* set up next NIP */
- frame->snip |= 2; /* set valid bit */
- p->p_md.md_ss_addr = 0;
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- break;
- }
-
- case T_USERBPT+T_USER:
- /*
- * This trap is meant to be used by debuggers to implement
- * breakpoint debugging. When we get this trap, we just
- * return a signal which gets caught by the debugger.
- */
- frame->sfip = frame->snip; /* set up the next FIP */
- frame->snip = frame->sxip; /* set up the next NIP */
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- break;
-
- case T_ASTFLT+T_USER:
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ }
+
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (result == KERN_SUCCESS) {
+ nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (result == KERN_PROTECTION_FAILURE)
+ result = KERN_INVALID_ADDRESS;
+ }
+
+ if (result != KERN_SUCCESS) {
+ sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
+ fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
+ : SEGV_MAPERR;
+ } else {
+ return;
+ }
+ break;
+ case T_MISALGNFLT+T_USER:
+ sig = SIGBUS;
+ fault_type = BUS_ADRALN;
+ break;
+ case T_PRIVINFLT+T_USER:
+ case T_ILLFLT+T_USER:
+ sig = SIGILL;
+ break;
+ case T_BNDFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_ZERODIV+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTDIV;
+ break;
+ case T_OVFFLT+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTOVF;
+ break;
+ case T_FPEPFLT+T_USER:
+ case T_FPEIFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_SIGTRAP+T_USER:
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ case T_STEPBPT+T_USER:
+ /*
+ * This trap is used by the kernel to support single-step
+ * debugging (although any user could generate this trap
+ * which should probably be handled differently). When a
+ * process is continued by a debugger with the PT_STEP
+ * function of ptrace (single step), the kernel inserts
+ * one or two breakpoints in the user process so that only
+ * one instruction (or two in the case of a delayed branch)
+ * is executed. When this breakpoint is hit, we get the
+ * T_STEPBPT trap.
+ */
+#if 0
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = frame->sxip; /* set up next NIP */
+ break;
+#endif
+ {
+ register unsigned va;
+ unsigned instr;
+ struct uio uio;
+ struct iovec iov;
+
+ /* compute address of break instruction */
+ va = pc;
+
+ /* read break instruction */
+ instr = fuiword((caddr_t)pc);
+#if 1
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+#endif
+ /* check and see if we got here by accident */
+#ifdef notyet
+ if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) {
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ }
+#endif
+ /* restore original instruction and clear BP */
+ /*sig = suiword((caddr_t)pc, p->p_md.md_ss_instr);*/
+ instr = p->p_md.md_ss_instr;
+ if (instr == 0) {
+ printf("Warning: can't restore instruction at %x: %x\n",
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr);
+ } else {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)pc;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ }
+
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = frame->sxip; /* set up next NIP */
+ frame->snip |= 2; /* set valid bit */
+ p->p_md.md_ss_addr = 0;
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+ }
+ case T_USERBPT+T_USER:
+ /*
+ * This trap is meant to be used by debuggers to implement
+ * breakpoint debugging. When we get this trap, we just
+ * return a signal which gets caught by the debugger.
+ */
+ frame->sfip = frame->snip; /* set up the next FIP */
+ frame->snip = frame->sxip; /* set up the next NIP */
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+
+ case T_ASTFLT+T_USER:
#if defined(UVM)
- uvmexp.softs++;
+ uvmexp.softs++;
#else
- cnt.v_soft++;
+ cnt.v_soft++;
#endif
- want_ast = 0;
- if (p->p_flag & P_OWEUPC) {
- p->p_flag &= ~P_OWEUPC;
- ADDUPROF(p);
- }
- break;
- }
-
- /*
- * If trap from supervisor mode, just return
- */
- if (SYSTEMMODE(frame->epsr))
- return;
-
- if (sig) {
- sv.sival_int = fault_addr;
- trapsignal(p, sig, fault_code, fault_type, sv);
- /*
- * don't want multiple faults - we are going to
- * deliver signal.
- */
- frame->dsr = 0;
- }
- userret(p, frame, sticks);
+ want_ast = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ break;
+ }
+ /*
+ * If trap from supervisor mode, just return
+ */
+ if (SYSTEMMODE(frame->epsr))
+ return;
+
+ if (sig) {
+ sv.sival_int = fault_addr;
+ trapsignal(p, sig, fault_code, fault_type, sv);
+ /*
+ * don't want multiple faults - we are going to
+ * deliver signal.
+ */
+ frame->dsr = 0;
+ }
+ userret(p, frame, sticks);
}
#endif /* MVME197 */
+
void
test_trap2(int num, int m197)
{
- DEBUG_MSG("\n[test_trap (Good News[tm]) m197 = %d, vec = %d]\n", m197, num);
- bugreturn();
+ DEBUG_MSG("\n[test_trap (Good News[tm]) m197 = %d, vec = %d]\n", m197, num);
+ bugreturn();
}
void
test_trap(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[test_trap (Good News[tm]) frame 0x%08x]\n", frame);
- regdump((struct trapframe*)frame);
- bugreturn();
+ DEBUG_MSG("\n[test_trap (Good News[tm]) frame 0x%08x]\n", frame);
+ regdump((struct trapframe*)frame);
+ bugreturn();
}
+
void
error_fault(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[ERROR EXCEPTION (Bad News[tm]) frame 0x%08x]\n", frame);
- regdump((struct trapframe*)frame);
- DEBUG_MSG("trap trace %x -> %x -> %x -> %x\n", last_trap[0], last_trap[1], last_trap[2], last_trap[3]);
+ DEBUG_MSG("\n[ERROR EXCEPTION (Bad News[tm]) frame 0x%08x]\n", frame);
+ DEBUG_MSG("This is usually an exception within an exception. The trap\n");
+ DEBUG_MSG("frame shadow registers you are about to see are invalid.\n");
+	DEBUG_MSG("(read: totally useless) But R1 to R31 might be interesting.\n");
+ regdump((struct trapframe*)frame);
+#if defined(MVME187) || defined(MVME188)
+ DEBUG_MSG("trap trace %d -> %d -> %d -> %d ", last_trap[0], last_trap[1], last_trap[2], last_trap[3]);
+ DEBUG_MSG("last exception vector = %d\n", last_vector);
+#endif
#if DDB
- gimmeabreak();
- DEBUG_MSG("[you really can't restart after an error exception.]\n");
- gimmeabreak();
+ gimmeabreak();
+ DEBUG_MSG("You really can't restart after an error exception!\n");
+ gimmeabreak();
#endif /* DDB */
- bugreturn(); /* This gets us to Bug instead of a loop forever */
+ bugreturn(); /* This gets us to Bug instead of a loop forever */
}
void
error_reset(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[RESET EXCEPTION (Really Bad News[tm]) frame 0x%08x]\n", frame);
- DEBUG_MSG("This is usually caused by a branch to a NULL function pointer.\n");
- DEBUG_MSG("Use the debugger trace command to track it down.\n");
-
+ DEBUG_MSG("\n[RESET EXCEPTION (Really Bad News[tm]) frame 0x%08x]\n", frame);
+ DEBUG_MSG("This is usually caused by a branch to a NULL function pointer.\n");
+ DEBUG_MSG("e.g. jump to address 0. Use the debugger trace command to track it down.\n");
#if DDB
- gimmeabreak();
- DEBUG_MSG("[It's useless to restart after a reset exception. You might as well reboot.]\n");
- gimmeabreak();
+ gimmeabreak();
+ DEBUG_MSG("It's useless to restart after a reset exception! You might as well reboot.\n");
+ gimmeabreak();
#endif /* DDB */
- bugreturn(); /* This gets us to Bug instead of a loop forever */
+ bugreturn(); /* This gets us to Bug instead of a loop forever */
}
syscall(register_t code, struct m88100_saved_state *tf)
{
- register int i, nsys, *ap, nap;
- register struct sysent *callp;
- register struct proc *p;
- int error, new;
- struct args {
- int i[8];
- } args;
- int rval[2];
- u_quad_t sticks;
- extern struct pcb *curpcb;
+ register int i, nsys, *ap, nap;
+ register struct sysent *callp;
+ register struct proc *p;
+ int error, new;
+ struct args {
+ int i[8];
+ } args;
+ int rval[2];
+ u_quad_t sticks;
+ extern struct pcb *curpcb;
#if defined(UVM)
- uvmexp.syscalls++;
+ uvmexp.syscalls++;
#else
- cnt.v_syscall++;
+ cnt.v_syscall++;
#endif
- p = curproc;
+ p = curproc;
- callp = p->p_emul->e_sysent;
- nsys = p->p_emul->e_nsysent;
+ callp = p->p_emul->e_sysent;
+ nsys = p->p_emul->e_nsysent;
#ifdef DIAGNOSTIC
- if (USERMODE(tf->epsr) == 0)
- panic("syscall");
- if (curpcb != &p->p_addr->u_pcb)
- panic("syscall curpcb/ppcb");
- if (tf != (struct trapframe *)&curpcb->user_state)
- panic("syscall trapframe");
+ if (USERMODE(tf->epsr) == 0)
+ panic("syscall");
+ if (curpcb != &p->p_addr->u_pcb)
+ panic("syscall curpcb/ppcb");
+ if (tf != (struct trapframe *)&curpcb->user_state)
+ panic("syscall trapframe");
#endif
- sticks = p->p_sticks;
- p->p_md.md_tf = tf;
-
- /*
- * For 88k, all the arguments are passed in the registers (r2-r12)
- * For syscall (and __syscall), r2 (and r3) has the actual code.
- * __syscall takes a quad syscall number, so that other
- * arguments are at their natural alignments.
- */
- ap = &tf->r[2];
- nap = 6;
-
- switch (code) {
- case SYS_syscall:
- code = *ap++;
- nap--;
- break;
- case SYS___syscall:
- if (callp != sysent)
- break;
- code = ap[_QUAD_LOWWORD];
- ap += 2;
- nap -= 2;
- break;
- }
-
- /* Callp currently points to syscall, which returns ENOSYS. */
-
- if (code < 0 || code >= nsys)
- callp += p->p_emul->e_nosys;
- else {
- callp += code;
- i = callp->sy_argsize / sizeof(register_t);
- if (i > 8)
- panic("syscall nargs");
- /*
- * just copy them; syscall stub made sure all the
- * args are moved from user stack to registers.
- */
- bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
- }
+ sticks = p->p_sticks;
+ p->p_md.md_tf = tf;
+
+ /*
+ * For 88k, all the arguments are passed in the registers (r2-r12)
+ * For syscall (and __syscall), r2 (and r3) has the actual code.
+ * __syscall takes a quad syscall number, so that other
+ * arguments are at their natural alignments.
+ */
+ ap = &tf->r[2];
+ nap = 6;
+
+ switch (code) {
+ case SYS_syscall:
+ code = *ap++;
+ nap--;
+ break;
+ case SYS___syscall:
+ if (callp != sysent)
+ break;
+ code = ap[_QUAD_LOWWORD];
+ ap += 2;
+ nap -= 2;
+ break;
+ }
+
+ /* Callp currently points to syscall, which returns ENOSYS. */
+
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else {
+ callp += code;
+ i = callp->sy_argsize / sizeof(register_t);
+ if (i > 8)
+ panic("syscall nargs");
+ /*
+ * just copy them; syscall stub made sure all the
+ * args are moved from user stack to registers.
+ */
+ bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
+ }
#ifdef SYSCALL_DEBUG
- scdebug_call(p, code, args.i);
+ scdebug_call(p, code, args.i);
#endif
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSCALL))
- ktrsyscall(p, code, callp->sy_argsize, args.i);
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, callp->sy_argsize, args.i);
#endif
- rval[0] = 0;
- rval[1] = 0;
- error = (*callp->sy_call)(p, &args, rval);
- /*
- * system call will look like:
- * ld r10, r31, 32; r10,r11,r12 might be garbage.
- * ld r11, r31, 36
- * ld r12, r31, 40
- * or r13, r0, <code>
- * tb0 0, r0, <128> <- xip
- * br err <- nip
- * jmp r1 <- fip
- * err: or.u r3, r0, hi16(errno)
- * st r2, r3, lo16(errno)
- * subu r2, r0, 1
- * jmp r1
- *
- * So, when we take syscall trap, sxip/snip/sfip will be as
- * shown above.
- * Given this,
- * 1. If the system call returned 0, need to skip nip.
- * nip = fip, fip += 4
- * (doesn't matter what fip + 4 will be but we will never
- * execute this since jmp r1 at nip will change the execution flow.)
- * 2. If the system call returned an errno > 0, plug the value
- * in r2, and leave nip and fip unchanged. This will have us
- * executing "br err" on return to user space.
- * 3. If the system call code returned ERESTART,
- * we need to rexecute the trap instruction. Back up the pipe
- * line.
- * fip = nip, nip = xip
- * 4. If the system call returned EJUSTRETURN, don't need to adjust
- * any pointers.
- */
-
- if (error == 0) {
- /*
- * If fork succeeded and we are the child, our stack
- * has moved and the pointer tf is no longer valid,
- * and p is wrong. Compute the new trapframe pointer.
- * (The trap frame invariably resides at the
- * tippity-top of the u. area.)
- */
- p = curproc;
- tf = USER_REGS(p);
- tf->r[2] = rval[0];
- tf->r[3] = rval[1];
- tf->epsr &= ~PSR_C;
- tf->snip = tf->sfip & ~FIP_E;
- tf->sfip = tf->snip + 4;
- } else if (error > 0) {
- /* error != ERESTART && error != EJUSTRETURN*/
- tf->r[2] = error;
- tf->epsr |= PSR_C; /* fail */
- tf->snip = tf->snip & ~NIP_E;
- tf->sfip = tf->sfip & ~FIP_E;
- } else if (error == ERESTART) {
- /*
- * If (error == ERESTART), back up the pipe line. This
- * will end up reexecuting the trap.
- */
- tf->epsr &= ~PSR_C;
- tf->sfip = tf->snip & ~NIP_E;
- tf->snip = tf->sxip & ~NIP_E;
- } else {
- /* if (error == EJUSTRETURN), leave the ip's alone */
- tf->epsr &= ~PSR_C;
- }
+ rval[0] = 0;
+ rval[1] = 0;
+ error = (*callp->sy_call)(p, &args, rval);
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- xip
+ * br err <- nip
+ * jmp r1 <- fip
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+ * So, when we take syscall trap, sxip/snip/sfip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to skip nip.
+ * nip = fip, fip += 4
+ * (doesn't matter what fip + 4 will be but we will never
+ * execute this since jmp r1 at nip will change the execution flow.)
+ * 2. If the system call returned an errno > 0, plug the value
+ * in r2, and leave nip and fip unchanged. This will have us
+ * executing "br err" on return to user space.
+ * 3. If the system call code returned ERESTART,
+	 *     we need to re-execute the trap instruction. Back up the pipe
+ * line.
+ * fip = nip, nip = xip
+ * 4. If the system call returned EJUSTRETURN, don't need to adjust
+ * any pointers.
+ */
+
+ if (error == 0) {
+ /*
+ * If fork succeeded and we are the child, our stack
+ * has moved and the pointer tf is no longer valid,
+ * and p is wrong. Compute the new trapframe pointer.
+ * (The trap frame invariably resides at the
+ * tippity-top of the u. area.)
+ */
+ p = curproc;
+ tf = USER_REGS(p);
+ tf->r[2] = rval[0];
+ tf->r[3] = rval[1];
+ tf->epsr &= ~PSR_C;
+ tf->snip = tf->sfip & ~FIP_E;
+ tf->sfip = tf->snip + 4;
+ } else if (error > 0) {
+ /* error != ERESTART && error != EJUSTRETURN*/
+ tf->r[2] = error;
+ tf->epsr |= PSR_C; /* fail */
+ tf->snip = tf->snip & ~NIP_E;
+ tf->sfip = tf->sfip & ~FIP_E;
+ } else if (error == ERESTART) {
+ /*
+ * If (error == ERESTART), back up the pipe line. This
+ * will end up reexecuting the trap.
+ */
+ tf->epsr &= ~PSR_C;
+ tf->sfip = tf->snip & ~NIP_E;
+ tf->snip = tf->sxip & ~NIP_E;
+ } else {
+ /* if (error == EJUSTRETURN), leave the ip's alone */
+ tf->epsr &= ~PSR_C;
+ }
#ifdef SYSCALL_DEBUG
- scdebug_ret(p, code, error, rval);
+ scdebug_ret(p, code, error, rval);
#endif
- userret(p, tf, sticks);
+ userret(p, tf, sticks);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
- ktrsysret(p, code, error, rval[0]);
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, code, error, rval[0]);
#endif
}
 /* Instruction pointers operate differently on mc88110 */
m197_syscall(register_t code, struct m88100_saved_state *tf)
{
- register int i, nsys, *ap, nap;
- register struct sysent *callp;
- register struct proc *p;
- int error, new;
- struct args {
- int i[8];
- } args;
- int rval[2];
- u_quad_t sticks;
- extern struct pcb *curpcb;
+ register int i, nsys, *ap, nap;
+ register struct sysent *callp;
+ register struct proc *p;
+ int error, new;
+ struct args {
+ int i[8];
+ } args;
+ int rval[2];
+ u_quad_t sticks;
+ extern struct pcb *curpcb;
#if defined(UVM)
- uvmexp.syscalls++;
+ uvmexp.syscalls++;
#else
- cnt.v_syscall++;
+ cnt.v_syscall++;
#endif
- p = curproc;
+ p = curproc;
- callp = p->p_emul->e_sysent;
- nsys = p->p_emul->e_nsysent;
+ callp = p->p_emul->e_sysent;
+ nsys = p->p_emul->e_nsysent;
#ifdef DIAGNOSTIC
- if (USERMODE(tf->epsr) == 0)
- panic("syscall");
- if (curpcb != &p->p_addr->u_pcb)
- panic("syscall curpcb/ppcb");
- if (tf != (struct trapframe *)&curpcb->user_state)
- panic("syscall trapframe");
+ if (USERMODE(tf->epsr) == 0)
+ panic("syscall");
+ if (curpcb != &p->p_addr->u_pcb)
+ panic("syscall curpcb/ppcb");
+ if (tf != (struct trapframe *)&curpcb->user_state)
+ panic("syscall trapframe");
#endif
- sticks = p->p_sticks;
- p->p_md.md_tf = tf;
-
- /*
- * For 88k, all the arguments are passed in the registers (r2-r12)
- * For syscall (and __syscall), r2 (and r3) has the actual code.
- * __syscall takes a quad syscall number, so that other
- * arguments are at their natural alignments.
- */
- ap = &tf->r[2];
- nap = 6;
-
- switch (code) {
- case SYS_syscall:
- code = *ap++;
- nap--;
- break;
- case SYS___syscall:
- if (callp != sysent)
- break;
- code = ap[_QUAD_LOWWORD];
- ap += 2;
- nap -= 2;
- break;
- }
-
- /* Callp currently points to syscall, which returns ENOSYS. */
-
- if (code < 0 || code >= nsys)
- callp += p->p_emul->e_nosys;
- else {
- callp += code;
- i = callp->sy_argsize / sizeof(register_t);
- if (i > 8)
- panic("syscall nargs");
- /*
- * just copy them; syscall stub made sure all the
- * args are moved from user stack to registers.
- */
- bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
- }
+ sticks = p->p_sticks;
+ p->p_md.md_tf = tf;
+
+ /*
+ * For 88k, all the arguments are passed in the registers (r2-r12)
+ * For syscall (and __syscall), r2 (and r3) has the actual code.
+ * __syscall takes a quad syscall number, so that other
+ * arguments are at their natural alignments.
+ */
+ ap = &tf->r[2];
+ nap = 6;
+
+ switch (code) {
+ case SYS_syscall:
+ code = *ap++;
+ nap--;
+ break;
+ case SYS___syscall:
+ if (callp != sysent)
+ break;
+ code = ap[_QUAD_LOWWORD];
+ ap += 2;
+ nap -= 2;
+ break;
+ }
+
+ /* Callp currently points to syscall, which returns ENOSYS. */
+
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else {
+ callp += code;
+ i = callp->sy_argsize / sizeof(register_t);
+ if (i > 8)
+ panic("syscall nargs");
+ /*
+ * just copy them; syscall stub made sure all the
+ * args are moved from user stack to registers.
+ */
+ bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
+ }
#ifdef SYSCALL_DEBUG
- scdebug_call(p, code, args.i);
+ scdebug_call(p, code, args.i);
#endif
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSCALL))
- ktrsyscall(p, code, callp->sy_argsize, args.i);
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, callp->sy_argsize, args.i);
#endif
- rval[0] = 0;
- rval[1] = 0;
- error = (*callp->sy_call)(p, &args, rval);
- /*
- * system call will look like:
- * ld r10, r31, 32; r10,r11,r12 might be garbage.
- * ld r11, r31, 36
- * ld r12, r31, 40
- * or r13, r0, <code>
- * tb0 0, r0, <128> <- sxip
- * br err <- snip
- * jmp r1
- * err: or.u r3, r0, hi16(errno)
- * st r2, r3, lo16(errno)
- * subu r2, r0, 1
- * jmp r1
- *
- * So, when we take syscall trap, sxip/snip will be as
- * shown above.
- * Given this,
- * 1. If the system call returned 0, need to jmp r1.
- * sxip += 8
- * 2. If the system call returned an errno > 0, increment
- * sxip += 4 and plug the value in r2. This will have us
- * executing "br err" on return to user space.
- * 3. If the system call code returned ERESTART,
- * we need to rexecute the trap instruction. leave xip as is.
- * 4. If the system call returned EJUSTRETURN, just return.
- * sxip += 8
- */
-
- if (error == 0) {
- /*
- * If fork succeeded and we are the child, our stack
- * has moved and the pointer tf is no longer valid,
- * and p is wrong. Compute the new trapframe pointer.
- * (The trap frame invariably resides at the
- * tippity-top of the u. area.)
- */
- p = curproc;
- tf = USER_REGS(p);
- tf->r[2] = rval[0];
- tf->r[3] = rval[1];
- tf->epsr &= ~PSR_C;
- tf->sxip += 8;
- tf->sxip &= ~3;
- } else if (error > 0) {
- /* error != ERESTART && error != EJUSTRETURN*/
- tf->r[2] = error;
- tf->epsr |= PSR_C; /* fail */
- tf->sxip += 4;
- tf->sxip &= ~3;
- } else if (error == ERESTART) {
- /*
- * If (error == ERESTART), back up the pipe line. This
- * will end up reexecuting the trap.
- */
- tf->epsr &= ~PSR_C;
- } else {
- /* if (error == EJUSTRETURN) */
- tf->epsr &= ~PSR_C;
- tf->sxip += 8;
- tf->sxip &= ~3;
- }
+ rval[0] = 0;
+ rval[1] = 0;
+ error = (*callp->sy_call)(p, &args, rval);
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- sxip
+ * br err <- snip
+ * jmp r1
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+	 * So, when we take a syscall trap, sxip/snip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to jmp r1.
+ * sxip += 8
+ * 2. If the system call returned an errno > 0, increment
+ * sxip += 4 and plug the value in r2. This will have us
+ * executing "br err" on return to user space.
+ * 3. If the system call code returned ERESTART,
+	 *    we need to reexecute the trap instruction. Leave xip as is.
+ * 4. If the system call returned EJUSTRETURN, just return.
+ * sxip += 8
+ */
+
+ if (error == 0) {
+ /*
+ * If fork succeeded and we are the child, our stack
+ * has moved and the pointer tf is no longer valid,
+ * and p is wrong. Compute the new trapframe pointer.
+ * (The trap frame invariably resides at the
+ * tippity-top of the u. area.)
+ */
+ p = curproc;
+ tf = USER_REGS(p);
+ tf->r[2] = rval[0];
+ tf->r[3] = rval[1];
+ tf->epsr &= ~PSR_C;
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ } else if (error > 0) {
+		/* error != ERESTART && error != EJUSTRETURN */
+ tf->r[2] = error;
+ tf->epsr |= PSR_C; /* fail */
+ tf->sxip += 4;
+ tf->sxip &= ~3;
+ } else if (error == ERESTART) {
+ /*
+		 * If (error == ERESTART), back up the pipeline. This
+ * will end up reexecuting the trap.
+ */
+ tf->epsr &= ~PSR_C;
+ } else {
+ /* if (error == EJUSTRETURN) */
+ tf->epsr &= ~PSR_C;
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ }
#ifdef SYSCALL_DEBUG
- scdebug_ret(p, code, error, rval);
+ scdebug_ret(p, code, error, rval);
#endif
- userret(p, tf, sticks);
+ userret(p, tf, sticks);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
- ktrsysret(p, code, error, rval[0]);
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, code, error, rval[0]);
#endif
}
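
The error ladder above implements the return convention spelled out in the block comment: success clears the carry bit and skips the "br err" slot (sxip += 8), an errno greater than zero sets the carry bit and lands on it (sxip += 4), ERESTART leaves sxip alone so the trap instruction re-executes, and EJUSTRETURN only clears the carry bit before skipping ahead. The standalone sketch below mirrors those four cases; it is illustrative only, and the ERESTART/EJUSTRETURN values used are the conventional BSD kernel ones (-1 and -2), not anything taken from this tree.

/*
 * Illustrative sketch, not kernel code: map the sy_call() return value
 * to the PC adjustment and carry setting applied to the trapframe.
 */
#include <stdio.h>

#define ERESTART	(-1)	/* conventional BSD kernel values */
#define EJUSTRETURN	(-2)

struct sysret { unsigned pc_adjust; int carry; };

static struct sysret
syscall_epilogue(int error)
{
	struct sysret r = { 0, 0 };

	if (error == 0) {
		r.pc_adjust = 8;	/* skip "br err", take the "jmp r1" path */
	} else if (error > 0) {
		r.pc_adjust = 4;	/* fall into "br err" */
		r.carry = 1;		/* carry bit marks failure */
	} else if (error == ERESTART) {
		r.pc_adjust = 0;	/* re-execute the trap instruction */
	} else {			/* EJUSTRETURN */
		r.pc_adjust = 8;
	}
	return r;
}

int main(void)
{
	struct sysret ok = syscall_epilogue(0), fail = syscall_epilogue(22);

	printf("success: pc += %u, carry = %d\n", ok.pc_adjust, ok.carry);
	printf("failure (errno 22): pc += %u, carry = %d\n",
	    fail.pc_adjust, fail.carry);
	return 0;
}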
@@ -1596,24 +1607,24 @@ m197_syscall(register_t code, struct m88100_saved_state *tf)
void
child_return(struct proc *p)
{
- struct trapframe *tf;
-
- tf = USER_REGS(p);
- tf->r[2] = 0;
- tf->r[3] = 0;
- tf->epsr &= ~PSR_C;
- if (cputyp != CPU_197) {
- tf->snip = tf->sfip & ~3;
- tf->sfip = tf->snip + 4;
- } else {
- tf->sxip += 8;
- tf->sxip &= ~3;
- }
-
- userret(p, tf, p->p_sticks);
+ struct trapframe *tf;
+
+ tf = USER_REGS(p);
+ tf->r[2] = 0;
+ tf->r[3] = 0;
+ tf->epsr &= ~PSR_C;
+ if (cputyp != CPU_197) {
+ tf->snip = tf->sfip & ~3;
+ tf->sfip = tf->snip + 4;
+ } else {
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ }
+
+ userret(p, tf, p->p_sticks);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
- ktrsysret(p, SYS_fork, 0, 0);
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, SYS_fork, 0, 0);
#endif
}
@@ -1625,27 +1636,26 @@ allocate_sir(proc, arg)
void (*proc)();
void *arg;
{
- int bit;
-
- if (next_sir >= NSIR)
- panic("allocate_sir: none left");
- bit = next_sir++;
- sir_routines[bit] = proc;
- sir_args[bit] = arg;
- return (1 << bit);
+ int bit;
+
+ if (next_sir >= NSIR)
+ panic("allocate_sir: none left");
+ bit = next_sir++;
+ sir_routines[bit] = proc;
+ sir_args[bit] = arg;
+ return (1 << bit);
}
void
init_sir()
{
- extern void netintr();
+ extern void netintr();
- sir_routines[0] = netintr;
- sir_routines[1] = softclock;
- next_sir = 2;
+ sir_routines[0] = netintr;
+ sir_routines[1] = softclock;
+ next_sir = 2;
}
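
allocate_sir() above hands out one bit of a soft-interrupt mask per registered routine, and init_sir() reserves bits 0 and 1 for the network and softclock handlers. The standalone sketch below shows the same bit-allocation and dispatch pattern; the NSIR value, the dispatch loop and the sample handler are illustrative and not taken from this file (the real allocate_sir() panics when it runs out of bits).

/* Illustrative sketch of the soft-interrupt bit allocation pattern. */
#include <stdio.h>

#define NSIR	32			/* illustrative; not the kernel's value */

static void (*sir_routines[NSIR])(void *);
static void *sir_args[NSIR];
static int next_sir = 2;		/* bits 0 and 1 reserved (net, softclock) */

static unsigned
allocate_sir(void (*proc)(void *), void *arg)
{
	int bit;

	if (next_sir >= NSIR)
		return 0;		/* the kernel panics here instead */
	bit = next_sir++;
	sir_routines[bit] = proc;
	sir_args[bit] = arg;
	return 1U << bit;		/* caller keeps this mask bit */
}

static void
dispatch_sir(unsigned pending)		/* stand-in for the soft interrupt handler */
{
	int bit;

	for (bit = 0; bit < NSIR; bit++)
		if ((pending & (1U << bit)) && sir_routines[bit] != NULL)
			(*sir_routines[bit])(sir_args[bit]);
}

static void hello(void *arg) { printf("soft interrupt: %s\n", (char *)arg); }

int main(void)
{
	unsigned mask = allocate_sir(hello, "tty");

	dispatch_sir(mask);		/* in the kernel this runs when the SIR bit is posted */
	return 0;
}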
-
/************************************\
* User Single Step Debugging Support *
\************************************/
@@ -1653,41 +1663,41 @@ init_sir()
unsigned
ss_get_value(struct proc *p, unsigned addr, int size)
{
- struct uio uio;
- struct iovec iov;
- unsigned value;
-
- iov.iov_base = (caddr_t)&value;
- iov.iov_len = size;
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)addr;
- uio.uio_resid = size;
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_READ;
- uio.uio_procp = curproc;
- procfs_domem(curproc, p, NULL, &uio);
- return value;
+ struct uio uio;
+ struct iovec iov;
+ unsigned value;
+
+ iov.iov_base = (caddr_t)&value;
+ iov.iov_len = size;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = size;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_procp = curproc;
+ procfs_domem(curproc, p, NULL, &uio);
+ return value;
}
int
ss_put_value(struct proc *p, unsigned addr, unsigned value, int size)
{
- struct uio uio;
- struct iovec iov;
- int i;
-
- iov.iov_base = (caddr_t)&value;
- iov.iov_len = size;
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)addr;
- uio.uio_resid = size;
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- i = procfs_domem(curproc, p, NULL, &uio);
- return i;
+ struct uio uio;
+ struct iovec iov;
+ int i;
+
+ iov.iov_base = (caddr_t)&value;
+ iov.iov_len = size;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = size;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ i = procfs_domem(curproc, p, NULL, &uio);
+ return i;
}
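
ss_get_value() and ss_put_value() move a single word to or from the traced process by wrapping it in a one-element iovec and handing the uio to procfs_domem(). The userland sketch below shows the same single-iovec setup with readv(2) standing in for procfs_domem(); it is only an analogy for the descriptor construction, not kernel code, and error handling is omitted.

/* Single-iovec read, mirroring the setup in ss_get_value() above. */
#include <stdio.h>
#include <sys/uio.h>

static unsigned
get_word(int fd)
{
	struct iovec iov;
	unsigned value = 0;

	iov.iov_base = &value;		/* destination word */
	iov.iov_len = sizeof(value);
	(void)readv(fd, &iov, 1);	/* procfs_domem() plays this role in the kernel */
	return value;
}

int main(void)
{
	printf("read 0x%x from stdin\n", get_word(0));
	return 0;
}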
/*
@@ -1703,41 +1713,41 @@ ss_put_value(struct proc *p, unsigned addr, unsigned value, int size)
*/
unsigned
ss_branch_taken(
- unsigned inst,
- unsigned pc,
- unsigned (*func)(unsigned int, struct trapframe *),
- struct trapframe *func_data) /* 'opaque' */
+ unsigned inst,
+ unsigned pc,
+ unsigned (*func)(unsigned int, struct trapframe *),
+ struct trapframe *func_data) /* 'opaque' */
{
- /* check if br/bsr */
- if ((inst & 0xf0000000U) == 0xc0000000U) {
- /* signed 26 bit pc relative displacement, shift left two bits */
- inst = (inst & 0x03ffffffU)<<2;
- /* check if sign extension is needed */
- if (inst & 0x08000000U)
- inst |= 0xf0000000U;
- return pc + inst;
- }
-
- /* check if bb0/bb1/bcnd case */
- switch ((inst & 0xf8000000U)) {
- case 0xd0000000U: /* bb0 */
- case 0xd8000000U: /* bb1 */
- case 0xe8000000U: /* bcnd */
- /* signed 16 bit pc relative displacement, shift left two bits */
- inst = (inst & 0x0000ffffU)<<2;
- /* check if sign extension is needed */
- if (inst & 0x00020000U)
- inst |= 0xfffc0000U;
- return pc + inst;
- }
-
- /* check jmp/jsr case */
- /* check bits 5-31, skipping 10 & 11 */
- if ((inst & 0xfffff3e0U) == 0xf400c000U)
- return (*func)(inst & 0x1f, func_data); /* the register value */
-
- return 0; /* keeps compiler happy */
+ /* check if br/bsr */
+ if ((inst & 0xf0000000U) == 0xc0000000U) {
+ /* signed 26 bit pc relative displacement, shift left two bits */
+ inst = (inst & 0x03ffffffU)<<2;
+ /* check if sign extension is needed */
+ if (inst & 0x08000000U)
+ inst |= 0xf0000000U;
+ return pc + inst;
+ }
+
+ /* check if bb0/bb1/bcnd case */
+ switch ((inst & 0xf8000000U)) {
+ case 0xd0000000U: /* bb0 */
+ case 0xd8000000U: /* bb1 */
+ case 0xe8000000U: /* bcnd */
+ /* signed 16 bit pc relative displacement, shift left two bits */
+ inst = (inst & 0x0000ffffU)<<2;
+ /* check if sign extension is needed */
+ if (inst & 0x00020000U)
+ inst |= 0xfffc0000U;
+ return pc + inst;
+ }
+
+ /* check jmp/jsr case */
+ /* check bits 5-31, skipping 10 & 11 */
+ if ((inst & 0xfffff3e0U) == 0xf400c000U)
+ return (*func)(inst & 0x1f, func_data); /* the register value */
+
+ return 0; /* keeps compiler happy */
}
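
ss_branch_taken() above recovers the target of a taken branch by shifting the instruction's word displacement left two bits and sign-extending it (26 bits for br/bsr, 16 bits for bb0/bb1/bcnd), or by reading a register for jmp/jsr. The small test below reproduces the two sign-extension cases with the same masks; the sample instruction words and addresses are made up for illustration.

/* Compile-and-run check of the displacement decoding used above. */
#include <assert.h>
#include <stdint.h>

static uint32_t
br_target(uint32_t inst, uint32_t pc)	/* br/bsr: 26-bit word displacement */
{
	uint32_t d = (inst & 0x03ffffffU) << 2;

	if (d & 0x08000000U)
		d |= 0xf0000000U;	/* sign extend */
	return pc + d;
}

static uint32_t
bb_target(uint32_t inst, uint32_t pc)	/* bb0/bb1/bcnd: 16-bit word displacement */
{
	uint32_t d = (inst & 0x0000ffffU) << 2;

	if (d & 0x00020000U)
		d |= 0xfffc0000U;	/* sign extend */
	return pc + d;
}

int main(void)
{
	assert(br_target(0xc0000003U, 0x1000) == 0x100c);	/* br .+3 words */
	assert(bb_target(0xe800fffcU, 0x1000) == 0x0ff0);	/* bcnd .-4 words */
	return 0;
}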
/*
@@ -1748,37 +1758,35 @@ ss_branch_taken(
unsigned
ss_getreg_val(unsigned regno, struct trapframe *tf)
{
- if (regno == 0)
- return 0;
- else if (regno < 31)
- return tf->r[regno];
- else {
- panic("bad register number to ss_getreg_val.");
- return 0;/*to make compiler happy */
- }
+ if (regno == 0)
+ return 0;
+ else if (regno < 31)
+ return tf->r[regno];
+ else {
+ panic("bad register number to ss_getreg_val.");
+		return 0;	/* to make compiler happy */
+ }
}
boolean_t
ss_inst_branch(unsigned ins)
{
- /* check high five bits */
-
- switch (ins >> (32-5))
- {
- case 0x18: /* br */
- case 0x1a: /* bb0 */
- case 0x1b: /* bb1 */
- case 0x1d: /* bcnd */
- return TRUE;
- break;
- case 0x1e: /* could be jmp */
- if ((ins & 0xfffffbe0U) == 0xf400c000U)
- return TRUE;
- }
-
- return FALSE;
-}
+ /* check high five bits */
+
+ switch (ins >> (32-5)) {
+ case 0x18: /* br */
+ case 0x1a: /* bb0 */
+ case 0x1b: /* bb1 */
+ case 0x1d: /* bcnd */
+ return TRUE;
+ break;
+ case 0x1e: /* could be jmp */
+ if ((ins & 0xfffffbe0U) == 0xf400c000U)
+ return TRUE;
+ }
+ return FALSE;
+}
/* ss_inst_delayed - this instruction is followed by a delay slot. Could be
   br.n, bsr.n, bb0.n, bb1.n, bcnd.n, jmp.n or jsr.n */
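
ss_inst_branch() above picks out br/bb0/bb1/bcnd (and jmp) from the top five opcode bits; ss_inst_delayed(), whose body follows in the next hunk, uses the top six bits plus a jmp.n/jsr.n mask so that the ".n" delay-slot forms stand out. The check below exercises that six-bit test with the same masks; the sample instruction words are illustrative.

/* Compile-and-run check of the delay-slot classification. */
#include <assert.h>
#include <stdint.h>

static int
is_delayed(uint32_t ins)
{
	switch ((ins & 0xfc000000U) >> 26) {
	case 0x31:	/* br.n */
	case 0x33:	/* bsr.n */
	case 0x35:	/* bb0.n */
	case 0x37:	/* bb1.n */
	case 0x3b:	/* bcnd.n */
		return 1;
	}
	/* jmp.n/jsr.n: bit 10 marks the .n form; bit 11 and the register field are ignored */
	return (ins & 0xfffff7e0U) == 0xf400c400U;
}

int main(void)
{
	assert(is_delayed(0xc4000003U));	/* br.n */
	assert(!is_delayed(0xc0000003U));	/* br, no delay slot */
	assert(is_delayed(0xec00fffcU));	/* bcnd.n */
	return 0;
}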
@@ -1786,102 +1794,99 @@ ss_inst_branch(unsigned ins)
boolean_t
ss_inst_delayed(unsigned ins)
{
- /* check the br, bsr, bb0, bb1, bcnd cases */
- switch ((ins & 0xfc000000U)>>(32-6))
- {
- case 0x31: /* br */
- case 0x33: /* bsr */
- case 0x35: /* bb0 */
- case 0x37: /* bb1 */
- case 0x3b: /* bcnd */
- return TRUE;
- }
-
- /* check the jmp, jsr cases */
- /* mask out bits 0-4, bit 11 */
- return ((ins & 0xfffff7e0U) == 0xf400c400U) ? TRUE : FALSE;
+ /* check the br, bsr, bb0, bb1, bcnd cases */
+ switch ((ins & 0xfc000000U)>>(32-6)) {
+ case 0x31: /* br */
+ case 0x33: /* bsr */
+ case 0x35: /* bb0 */
+ case 0x37: /* bb1 */
+ case 0x3b: /* bcnd */
+ return TRUE;
+ }
+
+ /* check the jmp, jsr cases */
+ /* mask out bits 0-4, bit 11 */
+ return ((ins & 0xfffff7e0U) == 0xf400c400U) ? TRUE : FALSE;
}
unsigned
ss_next_instr_address(struct proc *p, unsigned pc, unsigned delay_slot)
{
- if (delay_slot == 0)
- return pc + 4;
- else
- {
- if (ss_inst_delayed(ss_get_value(p, pc, sizeof(int))))
- return pc + 4;
- else
- return pc;
- }
+ if (delay_slot == 0)
+ return pc + 4;
+ else {
+ if (ss_inst_delayed(ss_get_value(p, pc, sizeof(int))))
+ return pc + 4;
+ else
+ return pc;
+ }
}
int
cpu_singlestep(p)
register struct proc *p;
{
- register unsigned va;
- struct trapframe *sstf = USER_REGS(p); /*p->p_md.md_tf;*/
- unsigned pc, brpc;
- int i;
- int bpinstr = SSBREAKPOINT;
- unsigned curinstr;
- unsigned inst;
- struct uio uio;
- struct iovec iov;
-
- pc = PC_REGS(sstf);
- /*
- * User was stopped at pc, e.g. the instruction
- * at pc was not executed.
- * Fetch what's at the current location.
- */
- curinstr = ss_get_value(p, pc, sizeof(int));
-
- /* compute next address after current location */
- if (curinstr != 0) {
- if (ss_inst_branch(curinstr) || inst_call(curinstr) || inst_return(curinstr)) {
- brpc = ss_branch_taken(curinstr, pc, ss_getreg_val, sstf);
- if (brpc != pc) { /* self-branches are hopeless */
+ register unsigned va;
+ struct trapframe *sstf = USER_REGS(p); /*p->p_md.md_tf;*/
+ unsigned pc, brpc;
+ int i;
+ int bpinstr = SSBREAKPOINT;
+ unsigned curinstr;
+ unsigned inst;
+ struct uio uio;
+ struct iovec iov;
+
+ pc = PC_REGS(sstf);
+ /*
+	 * User was stopped at pc, i.e. the instruction
+ * at pc was not executed.
+ * Fetch what's at the current location.
+ */
+ curinstr = ss_get_value(p, pc, sizeof(int));
+
+ /* compute next address after current location */
+ if (curinstr != 0) {
+ if (ss_inst_branch(curinstr) || inst_call(curinstr) || inst_return(curinstr)) {
+ brpc = ss_branch_taken(curinstr, pc, ss_getreg_val, sstf);
+ if (brpc != pc) { /* self-branches are hopeless */
#if 0
- printf("SS %s (%d): next taken breakpoint set at %x\n",
- p->p_comm, p->p_pid, brpc);
+ printf("SS %s (%d): next taken breakpoint set at %x\n",
+ p->p_comm, p->p_pid, brpc);
#endif
- p->p_md.md_ss_taken_addr = brpc;
- p->p_md.md_ss_taken_instr = ss_get_value(p, brpc, sizeof(int));
- /* Store breakpoint instruction at the "next" location now. */
- i = ss_put_value(p, brpc, bpinstr, sizeof(int));
- if (i < 0) return (EFAULT);
- }
- }
- pc = ss_next_instr_address(p, pc, 0);
+ p->p_md.md_ss_taken_addr = brpc;
+ p->p_md.md_ss_taken_instr = ss_get_value(p, brpc, sizeof(int));
+ /* Store breakpoint instruction at the "next" location now. */
+ i = ss_put_value(p, brpc, bpinstr, sizeof(int));
+ if (i < 0) return (EFAULT);
+ }
+ }
+ pc = ss_next_instr_address(p, pc, 0);
#if 0
- printf("SS %s (%d): next breakpoint set at %x\n",
- p->p_comm, p->p_pid, pc);
+ printf("SS %s (%d): next breakpoint set at %x\n",
+ p->p_comm, p->p_pid, pc);
#endif
- } else {
- pc = PC_REGS(sstf) + 4;
+ } else {
+ pc = PC_REGS(sstf) + 4;
#if 0
- printf("SS %s (%d): next breakpoint set at %x\n",
- p->p_comm, p->p_pid, pc);
+ printf("SS %s (%d): next breakpoint set at %x\n",
+ p->p_comm, p->p_pid, pc);
#endif
- }
-
- if (p->p_md.md_ss_addr) {
- printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
- p->p_comm, p->p_pid, p->p_md.md_ss_addr, pc); /* XXX */
- return (EFAULT);
- }
+ }
- p->p_md.md_ss_addr = pc;
+ if (p->p_md.md_ss_addr) {
+ printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
+ p->p_comm, p->p_pid, p->p_md.md_ss_addr, pc); /* XXX */
+ return (EFAULT);
+ }
- /* Fetch what's at the "next" location. */
- p->p_md.md_ss_instr = ss_get_value(p, pc, sizeof(int));
+ p->p_md.md_ss_addr = pc;
- /* Store breakpoint instruction at the "next" location now. */
- i = ss_put_value(p, pc, bpinstr, sizeof(int));
+ /* Fetch what's at the "next" location. */
+ p->p_md.md_ss_instr = ss_get_value(p, pc, sizeof(int));
- if (i < 0) return (EFAULT);
- return (0);
-}
+ /* Store breakpoint instruction at the "next" location now. */
+ i = ss_put_value(p, pc, bpinstr, sizeof(int));
+ if (i < 0) return (EFAULT);
+ return (0);
+}
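
cpu_singlestep() above implements single-stepping without hardware assistance: it fetches the instruction at the stopped pc, plants a breakpoint at the fall-through successor and, when that instruction is a branch whose target differs from pc, a second one at the taken address, saving the overwritten words so the breakpoint handler can restore them. The self-contained sketch below walks that strategy over a fake text segment; SSBP, the peek/poke helpers and the br-only decoder are illustrative stand-ins for SSBREAKPOINT, ss_get_value()/ss_put_value() and ss_branch_taken().

/* Illustrative sketch of the two-breakpoint single-step strategy. */
#include <assert.h>
#include <stdint.h>

#define SSBP	0xf000d000U	/* placeholder trap word, not the kernel's SSBREAKPOINT */
#define NWORDS	16

static uint32_t text[NWORDS];	/* fake text segment, addressed by byte below */

static uint32_t peek(uint32_t a) { return text[a / 4]; }
static void poke(uint32_t a, uint32_t v) { text[a / 4] = v; }

/* br/bsr only, following the 0xc0000000 test used by ss_branch_taken() */
static int is_br(uint32_t i) { return (i & 0xf0000000U) == 0xc0000000U; }

static uint32_t
br_target(uint32_t i, uint32_t pc)
{
	uint32_t d = (i & 0x03ffffffU) << 2;

	if (d & 0x08000000U)
		d |= 0xf0000000U;	/* sign extend the word displacement */
	return pc + d;
}

struct bp { uint32_t addr, save; };

static struct bp
plant(uint32_t addr)			/* remember the original word, write the trap */
{
	struct bp b = { addr, peek(addr) };

	poke(addr, SSBP);
	return b;
}

int main(void)
{
	uint32_t pc = 16;		/* stopped at byte 16, word 4 */
	struct bp taken, next;

	text[4] = 0xc3fffffeU;		/* br .-2 words, i.e. back to byte 8 */
	assert(is_br(peek(pc)));
	taken = plant(br_target(peek(pc), pc));	/* breakpoint at the taken address */
	next = plant(pc + 4);			/* and at the fall-through word */
	assert(text[2] == SSBP && text[5] == SSBP);

	poke(taken.addr, taken.save);	/* what the trap handler restores later */
	poke(next.addr, next.save);
	assert(text[2] == 0 && text[5] == 0);
	return 0;
}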
diff --git a/sys/arch/mvme88k/stand/bugcrt/Makefile b/sys/arch/mvme88k/stand/bugcrt/Makefile
index 581ee9ab1fd..013119fabea 100644
--- a/sys/arch/mvme88k/stand/bugcrt/Makefile
+++ b/sys/arch/mvme88k/stand/bugcrt/Makefile
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile,v 1.7 2001/01/13 05:19:01 smurph Exp $
+# $OpenBSD: Makefile,v 1.8 2001/02/01 03:38:22 smurph Exp $
#
# DO NOT OPTIMIZE bugcrt (i.e. no "-O2")
#
@@ -22,7 +22,7 @@ stage1crt.o: crt.c
mv a.out ${.TARGET}
stage2crt.o: crt.c
- ${CC} ${CFLAGS} ${STAGE1_CFLAGS} -c ${.CURDIR}/crt.c -o ${.TARGET}
+ ${CC} ${CFLAGS} ${STAGE2_CFLAGS} -c ${.CURDIR}/crt.c -o ${.TARGET}
${LD} -x -r ${.TARGET}
mv a.out ${.TARGET}
diff --git a/sys/arch/mvme88k/stand/bugcrt/crt.c b/sys/arch/mvme88k/stand/bugcrt/crt.c
index 9b5c1debce4..7776e42b364 100644
--- a/sys/arch/mvme88k/stand/bugcrt/crt.c
+++ b/sys/arch/mvme88k/stand/bugcrt/crt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: crt.c,v 1.1 2001/01/13 05:19:01 smurph Exp $ */
+/* $OpenBSD: crt.c,v 1.2 2001/02/01 03:38:23 smurph Exp $ */
#include <sys/types.h>
#include <machine/prom.h>
@@ -24,7 +24,6 @@ start()
extern int edata, end;
struct mvmeprom_brdid *id, *mvmeprom_brdid();
-#ifdef STAGE1
/*
* This code enables the SFU1 and is used for single stage
* bootstraps or the first stage of a two stage bootstrap.
@@ -33,9 +32,8 @@ start()
*/
asm("| enable SFU1");
asm(" ldcr r25,cr1");
- asm(" xor r25,r25,0x8");
+ asm(" clr r25,r25,1<3>"); /* bit 3 is SFU1D */
asm(" stcr r25,cr1");
-#endif
bugargs.dev_lun = dev_lun;
bugargs.ctrl_lun = ctrl_lun;