author	Theo de Raadt <deraadt@cvs.openbsd.org>	1995-10-18 10:54:29 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	1995-10-18 10:54:29 +0000
commit	ebb22450a0f6bd0357a6727bae293b3ed7a8d5e2 (patch)
tree	03ceebb5ca61ef2d13f65051a6c5222e92816689
parent	77b048c5ca1ef345d036f981ff0b954c647efd7a (diff)
initial 88k import; code by nivas and based on mach luna88k
-rw-r--r--  sys/arch/mvme88k/Makefile  42
-rw-r--r--  sys/arch/mvme88k/compile/.keep_me  0
-rw-r--r--  sys/arch/mvme88k/conf/GENERIC  118
-rw-r--r--  sys/arch/mvme88k/conf/MYBOX  63
-rw-r--r--  sys/arch/mvme88k/conf/Makefile.m88k  178
-rw-r--r--  sys/arch/mvme88k/conf/files.m88k.newconf  101
-rw-r--r--  sys/arch/mvme88k/conf/std.m88k  7
-rw-r--r--  sys/arch/mvme88k/ddb/db_disasm.c  777
-rw-r--r--  sys/arch/mvme88k/ddb/db_interface.c  834
-rw-r--r--  sys/arch/mvme88k/ddb/db_sstep.c  256
-rw-r--r--  sys/arch/mvme88k/ddb/db_trace.c  1221
-rw-r--r--  sys/arch/mvme88k/dev/bugtty.c  490
-rw-r--r--  sys/arch/mvme88k/dev/clock.c  109
-rw-r--r--  sys/arch/mvme88k/dev/m88k/bugio.c  108
-rw-r--r--  sys/arch/mvme88k/dev/mb.c  87
-rw-r--r--  sys/arch/mvme88k/dev/pcc2.c  253
-rw-r--r--  sys/arch/mvme88k/dev/pcctwo.c  252
-rw-r--r--  sys/arch/mvme88k/include/ansi.h  73
-rw-r--r--  sys/arch/mvme88k/include/asm.h  141
-rw-r--r--  sys/arch/mvme88k/include/asm_macro.h  116
-rw-r--r--  sys/arch/mvme88k/include/assert.h  8
-rw-r--r--  sys/arch/mvme88k/include/assym.s  60
-rw-r--r--  sys/arch/mvme88k/include/autoconf.h  37
-rw-r--r--  sys/arch/mvme88k/include/board.h  78
-rw-r--r--  sys/arch/mvme88k/include/bug.h  12
-rw-r--r--  sys/arch/mvme88k/include/bugio.h  62
-rw-r--r--  sys/arch/mvme88k/include/cdefs.h  35
-rw-r--r--  sys/arch/mvme88k/include/cpu.h  125
-rw-r--r--  sys/arch/mvme88k/include/cpus.h  64
-rw-r--r--  sys/arch/mvme88k/include/db_machdep.h  169
-rw-r--r--  sys/arch/mvme88k/include/disklabel.h  94
-rw-r--r--  sys/arch/mvme88k/include/endian.h  89
-rw-r--r--  sys/arch/mvme88k/include/exception_vectors.h  167
-rw-r--r--  sys/arch/mvme88k/include/exec.h  327
-rw-r--r--  sys/arch/mvme88k/include/foo  124
-rw-r--r--  sys/arch/mvme88k/include/limits.h  85
-rw-r--r--  sys/arch/mvme88k/include/locore.h  301
-rw-r--r--  sys/arch/mvme88k/include/m88100.h  69
-rw-r--r--  sys/arch/mvme88k/include/m882xx.h  259
-rw-r--r--  sys/arch/mvme88k/include/mmu.h  306
-rw-r--r--  sys/arch/mvme88k/include/param.h  223
-rw-r--r--  sys/arch/mvme88k/include/pcb.h  141
-rw-r--r--  sys/arch/mvme88k/include/pcctworeg.h  146
-rw-r--r--  sys/arch/mvme88k/include/pmap.h  219
-rw-r--r--  sys/arch/mvme88k/include/pmap_table.h  44
-rw-r--r--  sys/arch/mvme88k/include/proc.h  58
-rw-r--r--  sys/arch/mvme88k/include/profile.h  71
-rw-r--r--  sys/arch/mvme88k/include/psl.h  97
-rw-r--r--  sys/arch/mvme88k/include/ptrace.h  54
-rw-r--r--  sys/arch/mvme88k/include/reg.h  47
-rw-r--r--  sys/arch/mvme88k/include/setjmp.h  7
-rw-r--r--  sys/arch/mvme88k/include/signal.h  45
-rw-r--r--  sys/arch/mvme88k/include/stdarg.h  176
-rw-r--r--  sys/arch/mvme88k/include/trap.h  86
-rw-r--r--  sys/arch/mvme88k/include/types.h  83
-rw-r--r--  sys/arch/mvme88k/include/va-m88k.h  85
-rw-r--r--  sys/arch/mvme88k/include/varargs.h  189
-rw-r--r--  sys/arch/mvme88k/include/vid.h  56
-rw-r--r--  sys/arch/mvme88k/include/vmparam.h  219
-rw-r--r--  sys/arch/mvme88k/m88k/TODO  4
-rw-r--r--  sys/arch/mvme88k/m88k/autoconf.c  204
-rw-r--r--  sys/arch/mvme88k/m88k/clock.c  483
-rw-r--r--  sys/arch/mvme88k/m88k/cmmu.c  1199
-rw-r--r--  sys/arch/mvme88k/m88k/conf.c  348
-rw-r--r--  sys/arch/mvme88k/m88k/continuation.s  238
-rw-r--r--  sys/arch/mvme88k/m88k/eh.S  1749
-rw-r--r--  sys/arch/mvme88k/m88k/exception_return.s  255
-rw-r--r--  sys/arch/mvme88k/m88k/genassym.c  151
-rw-r--r--  sys/arch/mvme88k/m88k/locore.S  496
-rw-r--r--  sys/arch/mvme88k/m88k/locore2.c  99
-rw-r--r--  sys/arch/mvme88k/m88k/locore_asm_routines.S  1668
-rw-r--r--  sys/arch/mvme88k/m88k/locore_c_routines.c  391
-rw-r--r--  sys/arch/mvme88k/m88k/m1x7_init.c  205
-rw-r--r--  sys/arch/mvme88k/m88k/m88100_fp.S  2463
-rw-r--r--  sys/arch/mvme88k/m88k/machdep.c  1360
-rw-r--r--  sys/arch/mvme88k/m88k/misc.s  64
-rw-r--r--  sys/arch/mvme88k/m88k/pmap.c  5538
-rw-r--r--  sys/arch/mvme88k/m88k/process.S  270
-rw-r--r--  sys/arch/mvme88k/m88k/process_machdep.c  155
-rw-r--r--  sys/arch/mvme88k/m88k/swapgeneric.c  237
-rw-r--r--  sys/arch/mvme88k/m88k/syscall.stub  29
-rw-r--r--  sys/arch/mvme88k/m88k/timerreg.h  8
-rw-r--r--  sys/arch/mvme88k/m88k/trap.c  608
-rw-r--r--  sys/arch/mvme88k/m88k/vm_machdep.c  289
-rw-r--r--  sys/arch/mvme88k/stand/Makefile  4
-rw-r--r--  sys/arch/mvme88k/stand/boot/Makefile  30
-rw-r--r--  sys/arch/mvme88k/stand/boot/boot.1  bin  0 -> 512 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/boot.2  bin  0 -> 4608 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/foo  bin  0 -> 512 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/foo.1  bin  0 -> 512 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/foo.2  bin  0 -> 4096 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/main.c  264
-rw-r--r--  sys/arch/mvme88k/stand/boot/wrtvid  bin  0 -> 7132 bytes
-rw-r--r--  sys/arch/mvme88k/stand/boot/wrtvid.c  108
-rw-r--r--  sys/arch/mvme88k/stand/bugcrt/Makefile  10
-rw-r--r--  sys/arch/mvme88k/stand/bugcrt/bugcrt.c  40
-rw-r--r--  sys/arch/mvme88k/stand/bugexec/Makefile  26
-rw-r--r--  sys/arch/mvme88k/stand/bugexec/hello  bin  0 -> 8608 bytes
-rw-r--r--  sys/arch/mvme88k/stand/bugexec/hello.c  54
-rw-r--r--  sys/arch/mvme88k/stand/bugexec/wrtos.c  64
-rw-r--r--  sys/arch/mvme88k/stand/bugexec/xyz  bin  0 -> 4512 bytes
-rw-r--r--  sys/arch/mvme88k/stand/include/bug.h  8
-rw-r--r--  sys/arch/mvme88k/stand/include/bugio.h  62
-rw-r--r--  sys/arch/mvme88k/stand/kerncrt/Makefile  9
-rw-r--r--  sys/arch/mvme88k/stand/kerncrt/kerncrt.c  11
-rw-r--r--  sys/arch/mvme88k/stand/libbug/Makefile  28
-rw-r--r--  sys/arch/mvme88k/stand/libbug/bugio.c  101
107 files changed, 29073 insertions, 0 deletions
diff --git a/sys/arch/mvme88k/Makefile b/sys/arch/mvme88k/Makefile
new file mode 100644
index 00000000000..e17d8971023
--- /dev/null
+++ b/sys/arch/mvme88k/Makefile
@@ -0,0 +1,42 @@
+# from: @(#)Makefile 7.3 (Berkeley) 6/9/91
+# $Id: Makefile,v 1.1 1995/10/18 10:54:18 deraadt Exp $
+S=${BSDSRCDIR}/sys
+COMM= $S/vm/*.[ch] $S/ufs/*.[ch] $S/sys/*.h $S/compat/sunos/*.[ch] \
+ $S/nfs/*.[ch] $S/netns/*.[ch] $S/netiso/*.[ch] \
+ $S/netiso/xebec/*.[ch] $S/netinet/*.[ch] $S/netccitt/*.[ch] \
+ $S/net/*.[ch] $S/miscfs/*/*.[ch] $S/kern/*.[ch] $S/dev/*.[ch] \
+ $S/scsi/*.[ch] $S/lib/libkern/m88k/*.[ch] $S/lib/libkern/*.[ch]
+
+# Makefile for m88k tags file
+
+all:
+ @echo "make tags or links only"
+
+TM88K= $S/arch/m88k/tags
+SM88K= $S/arch/m88k/m88k/*.[ch] $S/arch/m88k/include/*.h \
+ $S/arch/m88k/dev/*.[ch]
+AM88K= $S/arch/m88k/m88k/*.s
+
+# Directories in which to place m88k tags links
+DM88K= m88k dev include
+
+TAGS:
+ -etags -dt ${COMM} ${SM88K} ${AM88K}
+ egrep "^ENTRY(.*)|^ALTENTRY(.*)" ${AM88K} | \
+ sed "s;\([^:]*\):\([^(]*\)(\([^, )]*\)\(.*\);\3 \1 /^\2(\3\4$$/;" \
+ >> ${TM88K}/tags
+
+tags:
+ -ctags -dtf ${TM88K} ${COMM} ${SM88K}
+ egrep "^ENTRY(.*)|^ALTENTRY(.*)" ${AM88K} | \
+ sed "s;\([^:]*\):\([^(]*\)(\([^, )]*\)\(.*\);\3 \1 /^\2(\3\4$$/;" \
+ >> ${TM88K}
+ sort -o ${TM88K} ${TM88K}
+
+links:
+ -for i in ${DM88K}; do \
+ rm -f $$i/tags; rm -f $$i/TAGS ; \
+ ln -s ../tags $$i/tags; ln -s ../TAGS $$i/TAGS; done
+
+.include <bsd.prog.mk>
+
diff --git a/sys/arch/mvme88k/compile/.keep_me b/sys/arch/mvme88k/compile/.keep_me
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/sys/arch/mvme88k/compile/.keep_me
diff --git a/sys/arch/mvme88k/conf/GENERIC b/sys/arch/mvme88k/conf/GENERIC
new file mode 100644
index 00000000000..1b451e53073
--- /dev/null
+++ b/sys/arch/mvme88k/conf/GENERIC
@@ -0,0 +1,118 @@
+#
+# GENERIC MVME88K
+#
+# $Id: GENERIC,v 1.1 1995/10/18 10:54:18 deraadt Exp $
+#
+# This configuration file contains all possible options
+#
+
+include "std.mvme1x7"
+
+maxusers 8
+options TIMEZONE=300, DST=1
+
+#
+# processors this kernel should support
+#
+options "M88000" # support for 88K
+
+options SWAPPAGER # Pager for processes (Required)
+options VNODEPAGER # Pager for vnodes (Required)
+options DEVPAGER # Pager for devices (Required)
+
+#
+# Networking options
+#
+options INET # IP networking support (Required)
+#options ISO # ISO Networking support
+#options TPIP # ARGO TP networking support
+#options CCITT # CCITT X.25
+#options NS # Xerox XNS
+#options EON # ISO CLNL over IP
+#options GATEWAY # Packet forwarding
+#options DIRECTED_BROADCAST # Broadcast across subnets
+#options NSIP # XNS over IP
+
+#
+# File system related options
+#
+options QUOTA # Disk quotas for local disks
+options NFSSERVER # Network File System server side code
+options NFSCLIENT # Network File System client side code
+
+#
+# File systems
+#
+options FFS # Berkeley fast file system
+options MFS # Memory based filesystem
+options PROCFS # Process filesystem
+options KERNFS # Kernel parameter filesystem (Recommended)
+options FDESC # /dev/fd filesystem
+options NULLFS # Loopback filesystem
+options FIFO # FIFO operations on vnodes (Recommended)
+options "CD9660" # ISO 9660 file system, with Rock Ridge
+#options PORTAL # Portal filesystem
+#options MSDOSFS # MS-DOS filesystem
+
+
+#
+# Compatibility options for various existing systems
+#
+options "COMPAT_09" # compatability with older NetBSD release
+options "COMPAT_43" # 4.3 BSD compatible system calls
+options "TCP_COMPAT_42" # Use 4.2 BSD style TCP
+options "COMPAT_NOMID" # allow nonvalid machine id executables
+#options COMPAT_HPUX # HP300 compatibility
+
+#
+# Support for System V IPC facilities.
+#
+#options SYSVSHM # System V-like shared memory
+#options SYSVMSG # System V-like messages
+#options SYSVSEM # System V-like semaphores
+
+#
+# Support for various kernel options
+#
+options GENERIC # Mini-root boot support
+#options LKM # Loadable kernel modules
+options KTRACE # Add kernel tracing system call
+options DIAGNOSTIC # Add additional error checking code
+options "NKMEMCLUSTERS=256" # Size of kernel malloc area
+
+#
+# Misc. debuging options
+#
+options PANICWAIT # Require keystroke to dump/reboot
+options DEBUG # Add debugging statements
+#options DDB # Kernel debugger
+#options SYSCALL_DEBUG # debug all syscalls.
+#options SCSIDEBUG # Add SCSI debugging statements
+#options KGDB # Kernel debugger (KGDB) support
+#options PANICBUTTON # Forced crash via keypress (???)
+#
+# devices
+#
+m187le0 at mainbus0 # Ethernet
+m187tty0 at mainbus # tty
+
+# scsi stuff, all possible
+m187scsi0 at mainbus0
+bugscsi0 at mainbus0
+
+scsibus0 at m187scsi0
+scsibus1 at bugscsi0
+
+# each hard drive from low target to high
+# will configure to the next available sd unit number
+sd* at scsibus? target ? lun ? # scsi disks
+
+st* at scsibus? target ? lun ? # scsi tapes
+cd* at scsibus? target ? lun ? # scsi cd's
+
+pseudo-device sl # slip
+pseudo-device ppp # ppp
+pseudo-device pty 16 # pseudo terminals
+pseudo-device loop # network loopback
+
+config netbsd swap on generic
diff --git a/sys/arch/mvme88k/conf/MYBOX b/sys/arch/mvme88k/conf/MYBOX
new file mode 100644
index 00000000000..b6662c4c145
--- /dev/null
+++ b/sys/arch/mvme88k/conf/MYBOX
@@ -0,0 +1,63 @@
+# $Id: MYBOX,v 1.1 1995/10/18 10:54:18 deraadt Exp $
+
+include "std.m88k"
+
+maxusers 8
+options TIMEZONE=300, DST=1
+options BYTE_MSF
+options SWAPPAGER, VNODEPAGER, DEVPAGER
+#options INET
+options FFS, MFS, FDESC
+#options "COMPAT_42", "COMPAT_43"
+options GENERIC, KTRACE, DIAGNOSTIC, "NKMEMCLUSTERS=256"
+#options PANICWAIT, DEBUG, DDB
+options PANICWAIT, DEBUG, DDB
+
+#options "CD9660", PORTAL, MSDOSFS, PROCFS, NULLFS, FIFO, KERNFS
+#options NFSSERVER, NFSCLIENT
+#options SYSVSHM, SYSVMSG, SYSVSEM
+#options SYSCALL_DEBUG, SCSIDEBUG, KGDB
+
+# scsi stuff, all possible
+#m187le0 at mainbus0 # Ethernet
+#m187tty0 at mainbus0 # tty
+#ser0 at mainbus0 # tty
+#ser0 at pcc0 # tty
+
+bugtty0 at mainbus0 # bug tty
+
+# scsi stuff, all possible
+#m187scsi0 at mainbus0
+#bugscsi0 at mainbus0
+#bugscsi0 at pcc0
+
+#scsibus0 at m187scsi0
+#scsibus0 at bugscsi0
+#scsibus0 at scsi0
+#
+# compat.
+#
+#sd0 at scsibus? target 0 lun 0
+#sd1 at scsibus? target 1 lun 0
+#sd2 at scsibus? target 2 lun 0
+#sd3 at scsibus? target 3 lun 0
+#sd4 at scsibus? target 4 lun 0
+#sd5 at scsibus? target 5 lun 0
+#sd6 at scsibus? target 6 lun 0
+
+#
+# This is nicer; however, many setups expect sd units to refer to
+# scsi target numbers. If this is not the case, you can remove the
+# specific sdx lines above and each hard drive from low target to high
+# will configure to the next available sd unit number
+
+#sd* at scsibus? target ? lun ? # scsi disks
+#st* at scsibus? target ? lun ? # scsi tapes
+#cd* at scsibus? target ? lun ? # scsi cd's
+
+#pseudo-device sl # slip
+#pseudo-device ppp # ppp
+#pseudo-device pty 16 # pseudo terminals
+#pseudo-device loop # network loopback
+#
+config netbsd swap on generic
diff --git a/sys/arch/mvme88k/conf/Makefile.m88k b/sys/arch/mvme88k/conf/Makefile.m88k
new file mode 100644
index 00000000000..4443f54975b
--- /dev/null
+++ b/sys/arch/mvme88k/conf/Makefile.m88k
@@ -0,0 +1,178 @@
+# @(#)Makefile.hp300 7.10 (Berkeley) 6/27/91
+# $Id: Makefile.m88k,v 1.1 1995/10/18 10:54:19 deraadt Exp $
+#
+# Makefile for NetBSD
+#
+# This makefile is constructed from a machine description:
+# config machineid
+# Most changes should be made in the machine description
+# /sys/arch/m88k/conf/``machineid''
+# after which you should do
+# config machineid
+# Machine generic makefile changes should be made in
+# /sys/arch/m88k/conf/Makefile.m88k
+# after which config should be rerun for all machines of that type.
+#
+# N.B.: NO DEPENDENCIES ON FOLLOWING FLAGS ARE VISIBLE TO MAKEFILE
+# IF YOU CHANGE THE DEFINITION OF ANY OF THESE RECOMPILE EVERYTHING
+#
+# -DTRACE compile in kernel tracing hooks
+# -DQUOTA compile in file system quotas
+
+
+# DEBUG is set to -g by config if debugging is requested (config -g).
+# PROF is set to -pg by config if profiling is requested (config -p).
+AS= as ${DEBUG}
+CC= cc ${DEBUG}
+CPP= cpp
+LD= ld
+TOUCH= touch -f -c
+AWK= awk
+TR= tr -s
+
+# source tree is located via $S relative to the compilation directory
+S= ../../../..
+M88K= ../..
+
+INCLUDES= -I. -I$S/arch -I$S -I$S/sys
+.if defined(DESTDIR)
+INCLUDES+= -nostdinc -idirafter ${DESTDIR}/usr/include
+.endif
+COPTS= ${INCLUDES} ${IDENT} -DKERNEL -D_KERNEL -DGOOFYLDOFFSET=0x20 -Dm88k
+CFLAGS= -O ${COPTS} -fwritable-strings
+
+### find out what to use for libkern
+.include "$S/lib/libkern/Makefile.inc"
+.ifndef PROF
+LIBKERN= ${KERNLIB}
+.else
+LIBKERN= ${KERNLIB_PROF}
+.endif
+
+### find out what to use for libcompat
+.include "$S/compat/common/Makefile.inc"
+.ifndef PROF
+LIBCOMPAT= ${COMPATLIB}
+.else
+LIBCOMPAT= ${COMPATLIB_PROF}
+.endif
+
+# compile rules: rules are named ${TYPE}_${SUFFIX}${CONFIG_DEP}
+# where TYPE is NORMAL, DRIVER, or PROFILE; SUFFIX is the file suffix,
+# capitalized (e.g. C for a .c file), and CONFIG_DEP is _C if the file
+# is marked as config-dependent.
+
+NORMAL_C= ${CC} -c ${CFLAGS} ${PROF} $<
+NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
+
+DRIVER_C= ${CC} -c ${CFLAGS} ${PROF} $<
+DRIVER_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<
+
+PROFILE_C= ${CC} -S -c ${COPTS} $<; \
+ sed -e s/_mcount/mcount/ -e s/subrmcount/subr_mcount/ <$*.s | \
+ ${AS} -o $@; \
+ rm -f $*.s
+
+NORMAL_S= ${CPP} ${COPTS} $< | ${TR} '\\' '\012' | ${AS} -o $@
+NORMAL_S_C= ${CPP} ${COPTS} ${PARAM} $< | ${TR} '\\' '\012' | ${AS} -o $@
+
+%OBJS
+
+%CFILES
+
+# load lines for config "xxx" will be emitted as:
+# xxx: ${SYSTEM_DEP} swapxxx.o
+# ${SYSTEM_LD_HEAD}
+# ${SYSTEM_LD} swapxxx.o
+# ${SYSTEM_LD_TAIL}
+SYSTEM_OBJ= locore.o ${FPSP} vnode_if.o ${OBJS} param.o ioconf.o \
+ ${LIBKERN} ${LIBCOMPAT}
+SYSTEM_DEP= Makefile ${SYSTEM_OBJ}
+SYSTEM_LD_HEAD= @echo loading $@;rm -f $@
+SYSTEM_LD= -@if [ X${DEBUG} = X-g ]; \
+ then strip=-X; \
+ else strip=-x; \
+ fi; \
+ echo ${LD} $$strip -Ttext 10020 -o $@ ${SYSTEM_OBJ} vers.o; \
+ ${LD} $$strip -Ttext 10020 -o $@ ${SYSTEM_OBJ} libgcc.a vers.o
+SYSTEM_LD_TAIL= @size $@; chmod 755 $@; \
+ [ X${DEBUG} = X-g ] && { \
+ echo cp $@ $@.gdb; rm -f $@.gdb; cp $@ $@.gdb; \
+ echo strip -d $@; strip -d $@; } || true
+
+%LOAD
+
+vers.o: newvers
+
+newvers:
+ sh $S/conf/newvers.sh ${KERN_IDENT}
+ ${CC} $(CFLAGS) -c vers.c
+
+clean::
+ rm -f eddep *netbsd netbsd.gdb tags vnode_if.[ch] *.o locore.i \
+ [a-z]*.s [Ee]rrs errs linterrs makelinks
+
+lint: /tmp param.c
+ @lint -hbxn -DGENERIC -Dvolatile= ${COPTS} ${PARAM} -UKGDB \
+ ${M88K}/m88k/Locore.c ${CFILES} ${M88K}/m88k/swapgeneric.c \
+ ioconf.c param.c| \
+ grep -v 'struct/union .* never defined' | \
+ grep -v 'possible pointer alignment problem'
+
+locore.o: assym.s ${M88K}/m88k/eh.S ${M88K}/m88k/locore.S
+locore.o: machine/trap.h machine/psl.h machine/cpu.h
+ ${CPP} -DLOCORE ${COPTS} ${M88K}/m88k/locore.S | ${TR} '\\' '\012' | ${AS} -o locore.o
+
+# the following is necessary because autoconf.o depends on #if GENERIC
+autoconf.o: Makefile
+
+# the following are necessary because the files depend on the types of
+# hp cpu's included in the system configuration
+machdep.o sys_machdep.o pmap.o pmap_bootstrap.o trap.o dma.o: Makefile
+
+# depend on network or filesystem configuration
+uipc_proto.o vfs_conf.o locore.o: Makefile
+
+# depend on maxusers
+assym.s: Makefile
+
+assym.s: genassym
+ ./genassym >assym.s
+# cp assym.s ${.CURDIR}/../../include
+
+genassym:
+ ${CC} -static ${INCLUDES} ${IDENT} ${PARAM} -Dm88k \
+ -o genassym ${M88K}/m88k/genassym.c
+ rm genassym.o
+
+depend: assym.s param.c vnode_if.h
+ for f in ${CFILES} ioconf.c param.c ; \
+ do \
+ (mkdep -a ${COPTS} $${f}) \
+ done
+ mkdep -a -p ${INCLUDES} ${IDENT} ${PARAM} ${M88K}/m88k/genassym.c
+
+links:
+ egrep '#if' ${CFILES} | sed -f $S/conf/defines | \
+ sed -e 's/:.*//' -e 's/\.c/.o/' | sort -u > dontlink
+ echo ${CFILES} | tr -s ' ' '\12' | sed 's/\.c/.o/' | \
+ sort -u | comm -23 - dontlink | \
+ sed 's,../.*/\(.*.o\),rm -f \1;ln -s ../GENERIC/\1 \1,' > makelinks
+ sh makelinks && rm -f dontlink
+
+tags:
+ @echo "see $S/kern/Makefile for tags"
+
+ioconf.o: ioconf.c
+ ${CC} -c ${CFLAGS} ioconf.c
+
+param.c: $S/conf/param.c
+ rm -f param.c
+ cp $S/conf/param.c .
+
+param.o: param.c Makefile
+ ${CC} -c ${CFLAGS} ${PARAM} param.c
+vnode_if.c vnode_if.h: $S/kern/vnode_if.sh $S/kern/vnode_if.src
+ AWK="${AWK}" sh $S/kern/vnode_if.sh $S/kern/vnode_if.src
+
+%RULES
diff --git a/sys/arch/mvme88k/conf/files.m88k.newconf b/sys/arch/mvme88k/conf/files.m88k.newconf
new file mode 100644
index 00000000000..4cbf2e0773b
--- /dev/null
+++ b/sys/arch/mvme88k/conf/files.m88k.newconf
@@ -0,0 +1,101 @@
+maxusers 2 8 64
+
+device mainbus at root {}
+file arch/m88k/dev/mb.c
+
+# this should be removed after bringup
+
+device bugtty at mainbus: tty
+file arch/m88k/dev/bugtty.c bugtty needs-count
+
+device cpu at mainbus
+
+device pcc at mainbus {}
+file arch/m88k/dev/pcc2.c pcc
+
+device vme at mainbus {}
+
+device nvram at pcc
+file arch/m88k/dev/nvram.c nvram
+
+device clock at pcc
+file arch/m88k/dev/rtc.c clock
+
+#device ser at pcc: tty
+#file arch/m88k/dev/bugtty.c ser needs-count
+
+device bugscsi at pcc
+#file arch/m88k/dev/bugscsi.c bugscsi needs-flag
+
+device ether at pcc
+
+device scsi at pcc {}
+
+#define scsi {}
+
+device scsibus at scsi {target = -1, lun = -1}
+# adapter driver for 1x7
+
+device cd at scsibus: disk
+#file scsi/cd.c cd needs-flag
+device sd at scsibus: disk
+#file scsi/sd.c sd needs-flag
+device ch at scsibus: disk
+#file scsi/ch.c ch needs-flag
+device st at scsibus: tape
+#file scsi/st.c st needs-flag
+device su at scsibus: disk
+#file scsi/su.c su needs-flag
+device uk at scsibus: disk
+#file scsi/uk.c uk needs-flag
+
+# list of standard files
+file dev/cons.c ite ser tty
+file dev/cninit.c
+#file scsi/scsi_base.c scsi
+#file scsi/scsi_ioctl.c scsi
+#file scsi/scsiconf.c scsi
+#file arch/m88k/autoconf.c
+#file arch/m88k/conf.c
+##file arch/m88k/db_disasm.c
+##file arch/m88k/db_interface.c
+##file arch/m88k/db_trace.c
+#file arch/m88k/disksubr.c
+#file arch/m88k/in_cksum.c
+#file arch/m88k/machdep.c
+#file arch/m88k/mem.c
+##file arch/m88k/microtime.s
+##file arch/m88k/ns_cksum.c
+#file arch/m88k/pmap.c
+##file arch/m88k/process_machdep.c
+##file arch/m88k/random.s
+#file arch/m88k/sys_machdep.c
+#file arch/m88k/trap.c
+#file arch/m88k/vm_machdep.c
+#file arch/m88k/locore.S
+#file arch/m88k/
+file arch/m88k/dev/clock.c
+file arch/m88k/m88k/autoconf.c
+file arch/m88k/m88k/conf.c
+file arch/m88k/m88k/cmmu.c
+file arch/m88k/m88k/eh.S
+#file arch/m88k/m88k/genassym.c
+#file arch/m88k/m88k/locore.S
+file arch/m88k/m88k/locore2.c
+file arch/m88k/m88k/locore_asm_routines.S
+file arch/m88k/m88k/locore_c_routines.c
+file arch/m88k/m88k/m1x7_init.c
+file arch/m88k/m88k/m88100_fp.S
+file arch/m88k/m88k/machdep.c
+file arch/m88k/m88k/pmap.c
+file arch/m88k/m88k/process.S
+file arch/m88k/m88k/process_machdep.c
+file arch/m88k/m88k/trap.c
+file arch/m88k/m88k/vm_machdep.c
+
+file arch/m88k/ddb/db_disasm.c
+file arch/m88k/ddb/db_interface.c
+file arch/m88k/ddb/db_sstep.c
+file arch/m88k/ddb/db_trace.c
+
+file arch/m88k/dev/m88k/bugio.c
diff --git a/sys/arch/mvme88k/conf/std.m88k b/sys/arch/mvme88k/conf/std.m88k
new file mode 100644
index 00000000000..2b9a56abff4
--- /dev/null
+++ b/sys/arch/mvme88k/conf/std.m88k
@@ -0,0 +1,7 @@
+# standard mvme88k information
+# $Id: std.m88k,v 1.1 1995/10/18 10:54:18 deraadt Exp $
+machine m88k
+
+mainbus0 at root
+#pcc0 at mainbus0
+#scsi0 at pcc0
diff --git a/sys/arch/mvme88k/ddb/db_disasm.c b/sys/arch/mvme88k/ddb/db_disasm.c
new file mode 100644
index 00000000000..17733167572
--- /dev/null
+++ b/sys/arch/mvme88k/ddb/db_disasm.c
@@ -0,0 +1,777 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * m88k disassembler for use in ddb
+ */
+
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h> /* DB_STGY_PROC, db_printsym() */
+#include <ddb/db_access.h> /* db_get_value() */
+#include <ddb/db_output.h> /* db_printf() */
+
+static char *instwidth[4] = {
+ ".d", " ", ".h", ".b"
+};
+
+static char *condname[6] = {
+ "gt0 ", "eq0 ", "ge0 ", "lt0 ", "ne0 ", "le0 "
+};
+
+static char *ctrlreg[64] = {
+ "cr0(PID) ",
+ "cr1(PSR) ",
+ "cr2(EPSR) ",
+ "cr3(SSBR) ",
+ "cr4(SXIP) ",
+ "cr5(SNIP) ",
+ "cr6(SFIP) ",
+ "cr7(VBR) ",
+ "cr8(DMT0) ",
+ "cr9(DMD0) ",
+ "cr10(DMA0) ",
+ "cr11(DMT1) ",
+ "cr12(DMD1) ",
+ "cr13(DMA1) ",
+ "cr14(DMT2) ",
+ "cr15(DMD2) ",
+ "cr16(DMA2) ",
+ "cr17(SR0) ",
+ "cr18(SR1) ",
+ "cr19(SR2) ",
+ "cr20(SR3) ",
+ "fcr0(FPECR)",
+ "fcr1(FPHS1)",
+ "fcr2(FPLS1)",
+ "fcr3(FPHS2)",
+ "fcr4(FPLS2)",
+ "fcr5(FPPT) ",
+ "fcr6(FPRH) ",
+ "fcr7(FPRL) ",
+ "fcr8(FPIT) ",
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ "fcr62(FPSR)",
+ "fcr63(FPCR)"
+};
+#define printval(x) if (x<0) db_printf ("-0x%X", -x); else db_printf("0x%X",x)
+
+/* Handles immediate integer arithmetic instructions */
+static void
+oimmed(long inst, char *opcode, long iadr)
+{
+ register int Linst = inst & 0177777;
+ register int Hinst = inst >> 16;
+ register int H6inst = Hinst >> 10;
+ register int rs1 = Hinst & 037;
+ register int rd = (Hinst >> 5) & 037;
+
+ if ((H6inst > 017) && (H6inst < 030) && (H6inst & 01) == 1)
+ db_printf("\t%s.u", opcode);
+ else {
+ db_printf("\t%s", opcode);
+ db_printf(" ");
+ }
+ db_printf("\t\tr%-3d,r%-3d,", rd, rs1);
+ printval(Linst);
+}
+
+
+/* Handles instructions dealing with control registers */
+static void
+ctrlregs(long inst, char *opcode, long iadr)
+{
+ register int L6inst = (inst >> 11) & 037;
+ register int creg = (inst >> 5) & 077;
+ register int rd = (inst >> 21) & 037;
+ register int rs1 = (inst >> 16) & 037;
+
+ db_printf("\t%s", opcode);
+
+ if (L6inst == 010 || L6inst == 011)
+ db_printf("\t\tr%-3d,%s", rd, ctrlreg[creg]);
+ else
+ if (L6inst == 020 || L6inst == 021)
+ db_printf("\t\tr%-3d,%s", rs1, ctrlreg[creg]);
+ else
+ db_printf("\t\tr%-3d,r%-3d,%s", rd, rs1, ctrlreg[creg]);
+}
+
+
+static void
+printsod(int t)
+{
+ if (t == 0)
+ db_printf("s");
+ else
+ db_printf("d");
+}
+/* Handles floating point instructions */
+static void
+sindou(int inst, char *opcode, long iadr)
+{
+ register int rs2 = inst & 037;
+ register int td = (inst >> 5) & 03;
+ register int t2 = (inst >> 7) & 03;
+ register int t1 = (inst >> 9) & 03;
+ register int rs1 = (inst >> 16) & 037;
+ register int rd = (inst >> 21) & 037;
+ register int checkbits = (inst >> 11) & 037;
+
+ db_printf("\t%s.", opcode);
+ printsod(td);
+ if ((checkbits > 010 && checkbits < 014) || (checkbits == 04)) {
+ printsod(t2);
+ db_printf(" ");
+ if (checkbits == 012 || checkbits == 013)
+ db_printf("\t\tr%-3d,r%-3d", rd, rs2);
+ else
+ db_printf("\t\tr%-3d,r%-3d", rd, rs2);
+ } else {
+ printsod(t1);
+ printsod(t2);
+ db_printf("\t\tr%-3d,r%-3d,r%-3d", rd, rs1, rs2);
+ }
+}
+
+
+static void
+jump(long inst, char *opcode, long iadr)
+{
+ register int rs2 = inst & 037;
+ register int Nbit = (inst >> 10) & 01;
+
+ db_printf("\t%s", opcode);
+ if (Nbit == 1)
+ db_printf(".n");
+ else
+ db_printf(" ");
+ db_printf("\t\tr%-3d", rs2);
+}
+
+
+/* Handles ff1, ff0, tbnd and rte instructions */
+static void
+instset(long inst, char *opcode, long iadr)
+{
+ register int rs2 = inst & 037;
+ register int rs1 = (inst >> 16) & 037;
+ register int rd = (inst >> 21) & 037;
+ register int checkbits = (inst >> 10) & 077;
+ register int H6inst = (inst >> 26) & 077;
+
+ db_printf("\t%s", opcode);
+ if (H6inst == 076) {
+ db_printf("\t\tr%-3d,", rs1);
+ printval(inst & 0177777);
+ } else
+ if ((checkbits == 072) || (checkbits == 073))
+ db_printf("\t\tr%-3d,r%-3d", rd, rs2);
+ else
+ if (checkbits == 076)
+ db_printf("\t\tr%-3d,r%-3d", rs1, rs2);
+}
+
+static void
+symofset(int disp, int bit, int iadr)
+{
+ long addr;
+
+ if (disp & (1 << (bit - 1))) {
+ /* negative value */
+ addr = iadr + ((disp << 2) | (~0 << bit));
+ } else {
+ addr = iadr + (disp << 2);
+ }
+ db_printsym(addr, DB_STGY_PROC);
+ return;
+}
+
+static void
+obranch(int inst, char *opcode, long iadr)
+{
+ int cond = (inst >> 26) & 01;
+ int disp = inst & 0377777777;
+
+ if (cond == 0) {
+ db_printf("\t%s\t\t", opcode);
+ symofset(disp, 26, iadr);
+ } else {
+ db_printf("\t%s.n\t\t", opcode);
+ symofset(disp, 26, iadr);
+ }
+}
+
+
+/* Handles branch on conditions instructions */
+static void
+brcond(int inst, char *opcode, long iadr)
+{
+ int cond = (inst >> 26) & 1;
+ int match = (inst >> 21) & 037;
+ int rs = (inst >> 16) & 037;
+ int disp = (inst & 0177777);
+
+ if (cond == 0)
+ db_printf("\t%s\t\t", opcode);
+ else
+ db_printf("\t%s.n\t\t", opcode);
+ if (((inst >> 27) & 03) == 1)
+ switch (match) {
+ case 1:
+ db_printf("%s,", condname[0]);
+ break;
+ case 2:
+ db_printf("%s,", condname[1]);
+ break;
+ case 3:
+ db_printf("%s,", condname[2]);
+ break;
+ case 12:
+ db_printf("%s,", condname[3]);
+ break;
+ case 13:
+ db_printf("%s,", condname[4]);
+ break;
+ case 14:
+ db_printf("%s,", condname[5]);
+ break;
+ default:
+ printval(match);
+ db_printf(",");
+ }
+ else {
+ printval(match);
+ db_printf(",");
+ }
+
+ db_printf("r%-3d,", rs);
+ symofset(disp, 16, iadr);
+}
+
+
+static void
+otrap(int inst, char *opcode, long iadr)
+{
+ int vecno = inst & 0777;
+ int match = (inst >> 21) & 037;
+ int rs = (inst >> 16) & 037;
+
+ db_printf("\t%s\t", opcode);
+ if (((inst >> 12) & 017) == 0xe)
+ switch (match) {
+ case 1:
+ db_printf("%s,", condname[0]);
+ break;
+ case 2:
+ db_printf("%s,", condname[1]);
+ break;
+ case 3:
+ db_printf("%s,", condname[2]);
+ break;
+ case 12:
+ db_printf("%s,", condname[3]);
+ break;
+ case 13:
+ db_printf("%s,", condname[4]);
+ break;
+ case 14:
+ db_printf("%s,", condname[5]);
+ break;
+ default:
+ printval(match);
+ db_printf(",");
+ }
+ else {
+ printval(match);
+ db_printf(",");
+ }
+ db_printf("\tr%-3d,", rs);
+ printval(vecno);
+}
+
+
+/* Handles 10 bit immediate bit field operations */
+static void
+obit(int inst, char *opcode, long iadr)
+{
+ int rs = (inst >> 16) & 037;
+ int rd = (inst >> 21) & 037;
+ int width = (inst >> 5) & 037;
+ int offset = (inst & 037);
+
+ db_printf("\t%s\t\tr%-3d,r%-3d,", opcode, rd, rs);
+ if (((inst >> 10) & 077) == 052) {
+ db_printf("<");
+ printval(offset);
+ db_printf(">");
+ } else {
+ printval(width);
+ db_printf("<");
+ printval(offset);
+ db_printf(">");
+ }
+}
+
+
+/* Handles triadic mode bit field instructions */
+static void
+bitman(int inst, char *opcode, long iadr)
+{
+
+ int rs1 = (inst >> 16) & 037;
+ int rd = (inst >> 21) & 037;
+ int rs2 = inst & 037;
+
+ db_printf("\t%s\t\tr%-3d,r%-3d,r%-3d", opcode, rd, rs1, rs2);
+}
+
+
+/* Handles immediate load/store/exchange instructions */
+static void
+immem(int inst, char *opcode, long iadr)
+{
+ register int immed = inst & 0xFFFF;
+ register int rd = (inst >> 21) & 037;
+ register int rs = (inst >> 16) & 037;
+ register int st_lda = (inst >> 28) & 03;
+ register int aryno = (inst >> 26) & 03;
+ char c = ' ';
+
+ if (!st_lda) {
+ if ((aryno == 0) || (aryno == 01))
+ opcode = "xmem";
+ else
+ opcode = "ld";
+ if (aryno == 0)
+ aryno = 03;
+ if (!(aryno == 01))
+ c = 'u';
+ } else
+ if (st_lda == 01)
+ opcode = "ld";
+
+ db_printf("\t%s%s%c\t\tr%-3d,r%-3d,", opcode, instwidth[aryno],
+ c, rd, rs);
+ printval(immed);
+}
+
+
+/* Handles triadic mode load/store/exchange instructions */
+static void
+nimmem(int inst, char *opcode, long iadr)
+{
+ register int scaled = (inst >> 9) & 01;
+ register int rd = (inst >> 21) & 037;
+ register int rs1 = (inst >> 16) & 037;
+ register int rs2 = inst & 037;
+ register int st_lda = (inst >> 12) & 03;
+ register int aryno = (inst >> 10) & 03;
+ register int user_bit = 0;
+ int signed_fg = 1;
+ char *user = " ";
+ char c = ' ';
+
+ if (!st_lda) {
+ if ((aryno == 0) || (aryno == 01))
+ opcode = "xmem";
+ else
+ opcode = "ld";
+ if (aryno == 0)
+ aryno = 03;
+ if (!(aryno == 01)) {
+ c = 'u';
+ signed_fg = 0;
+ }
+ } else
+ if (st_lda == 01)
+ opcode = "ld";
+
+ if (!(st_lda == 03)) {
+ user_bit = (inst >> 8) & 01;
+ if (user_bit)
+ user = ".usr";
+ }
+ if (user_bit && signed_fg && (aryno == 01)) {
+ if (st_lda)
+ db_printf("\t%s%s\tr%-3d,r%-3d", opcode,
+ user, rd, rs1);
+ else
+ db_printf("\t%s%s\tr%-3d,r%-3d", opcode,
+ user, rd, rs1);
+ } else
+ if (user_bit && signed_fg)
+ db_printf("\t%s%s%s\tr%-3d,r%-3d", opcode,
+ instwidth[aryno], user, rd, rs1);
+ else
+ db_printf("\t%s%s%c%s\tr%-3d,r%-3d", opcode,
+ instwidth[aryno], c, user, rd, rs1);
+
+ if (scaled)
+ db_printf("[r%-3d]", rs2);
+ else
+ db_printf(",r%-3d", rs2);
+}
+
+
+/* Handles triadic mode logical instructions */
+static void
+lognim(int inst, char *opcode, long iadr)
+{
+ register int rd = (inst >> 21) & 037;
+ register int rs1 = (inst >> 16) & 037;
+ register int rs2 = inst & 037;
+ register int complemt = (inst >> 10) & 01;
+ char *c = " ";
+
+ if (complemt)
+ c = ".c";
+
+ db_printf("\t%s%s\t\tr%-3d,r%-3d,r%-3d", opcode, c, rd, rs1, rs2);
+}
+
+
+/* Handles triadic mode arithmetic instructions */
+static void
+onimmed(int inst, char *opcode, long iadr)
+{
+ register int rd = (inst >> 21) & 037;
+ register int rs1 = (inst >> 16) & 037;
+ register int rs2 = inst & 037;
+ register int carry = (inst >> 8) & 03;
+ register int nochar = (inst >> 10) & 07;
+ register int nodecode = (inst >> 11) & 01;
+ char *tab, *c;
+
+ if (nochar > 02)
+ tab = "\t\t";
+ else
+ tab = "\t";
+
+ if (!nodecode) {
+ if (carry == 01)
+ c = ".co ";
+ else
+ if (carry == 02)
+ c = ".ci ";
+ else
+ if (carry == 03)
+ c = ".cio";
+ else
+ c = " ";
+ } else
+ c = " ";
+
+ db_printf("\t%s%s%sr%-3d,r%-3d,r%-3d", opcode, c,
+ tab, rd, rs1, rs2);
+}
+
+static struct opdesc {
+ unsigned mask, match;
+ void (*opfun) ();
+ char *farg;
+} opdecode[] = {
+	/* ORDER IS IMPORTANT BELOW */
+
+	{ 0xF0000000U, 0x00000000U, immem, 0 },
+	{ 0xF0000000U, 0x10000000U, immem, 0 },
+	{ 0xF0000000U, 0x20000000U, immem, "st" },
+	{ 0xF0000000U, 0x30000000U, immem, "lda" },
+
+	{ 0xF8000000U, 0x40000000U, oimmed, "and" },
+	{ 0xF8000000U, 0x48000000U, oimmed, "mask" },
+	{ 0xF8000000U, 0x50000000U, oimmed, "xor" },
+	{ 0xF8000000U, 0x58000000U, oimmed, "or" },
+	{ 0xFC000000U, 0x60000000U, oimmed, "addu" },
+	{ 0xFC000000U, 0x64000000U, oimmed, "subu" },
+	{ 0xFC000000U, 0x68000000U, oimmed, "divu" },
+	{ 0xFC000000U, 0x6C000000U, oimmed, "mul" },
+	{ 0xFC000000U, 0x70000000U, oimmed, "add" },
+	{ 0xFC000000U, 0x74000000U, oimmed, "sub" },
+	{ 0xFC000000U, 0x78000000U, oimmed, "div" },
+	{ 0xFC000000U, 0x7C000000U, oimmed, "cmp" },
+
+	{ 0xFC00F800U, 0x80004000U, ctrlregs, "ldcr" },
+	{ 0xFC00F800U, 0x80004800U, ctrlregs, "fldcr" },
+	{ 0xFC00F800U, 0x80008000U, ctrlregs, "stcr" },
+	{ 0xFC00F800U, 0x80008800U, ctrlregs, "fstcr" },
+	{ 0xFC00F800U, 0x8000C000U, ctrlregs, "xcr" },
+	{ 0xFC00F800U, 0x8000C800U, ctrlregs, "fxcr" },
+
+	{ 0xFC00F800U, 0x84000000U, sindou, "fmul" },
+	{ 0xFC1FFF80U, 0x84002000U, sindou, "flt" },
+	{ 0xFC00F800U, 0x84002800U, sindou, "fadd" },
+	{ 0xFC00F800U, 0x84003000U, sindou, "fsub" },
+	{ 0xFC00F860U, 0x84003800U, sindou, "fcmp" },
+	{ 0xFC1FFE60U, 0x84004800U, sindou, "int" },
+	{ 0xFC1FFE60U, 0x84005000U, sindou, "nint" },
+	{ 0xFC1FFE60U, 0x84005800U, sindou, "trnc" },
+	{ 0xFC00F800U, 0x84007000U, sindou, "fdiv" },
+
+	{ 0xF8000000U, 0xC0000000U, obranch, "br" },
+	{ 0xF8000000U, 0xC8000000U, obranch, "bsr" },
+
+	{ 0xF8000000U, 0xD0000000U, brcond, "bb0" },
+	{ 0xF8000000U, 0xD8000000U, brcond, "bb1" },
+	{ 0xF8000000U, 0xE8000000U, brcond, "bcnd" },
+
+	{ 0xFC00FC00U, 0xF0008000U, obit, "clr" },
+	{ 0xFC00FC00U, 0xF0008800U, obit, "set" },
+	{ 0xFC00FC00U, 0xF0009000U, obit, "ext" },
+	{ 0xFC00FC00U, 0xF0009800U, obit, "extu" },
+	{ 0xFC00FC00U, 0xF000A000U, obit, "mak" },
+	{ 0xFC00FC00U, 0xF000A800U, obit, "rot" },
+
+	{ 0xFC00FE00U, 0xF000D000U, otrap, "tb0" },
+	{ 0xFC00FE00U, 0xF000D800U, otrap, "tb1" },
+	{ 0xFC00FE00U, 0xF000E800U, otrap, "tcnd" },
+
+	{ 0xFC00F2E0U, 0xF4000000U, nimmem, 0 },
+	{ 0xFC00F2E0U, 0xF4000200U, nimmem, 0 },
+	{ 0xFC00F2E0U, 0xF4001000U, nimmem, 0 },
+	{ 0xFC00F2E0U, 0xF4001200U, nimmem, 0 },
+	{ 0xFC00F2E0U, 0xF4002000U, nimmem, "st" },
+	{ 0xFC00F2E0U, 0xF4002200U, nimmem, "st" },
+	{ 0xFC00F2E0U, 0xF4003000U, nimmem, "lda" },
+	{ 0xFC00F2E0U, 0xF4003200U, nimmem, "lda" },
+
+	{ 0xFC00FBE0U, 0xF4004000U, lognim, "and" },
+	{ 0xFC00FBE0U, 0xF4005000U, lognim, "xor" },
+	{ 0xFC00FBE0U, 0xF4005800U, lognim, "or" },
+
+	{ 0xFC00FCE0U, 0xF4006000U, onimmed, "addu" },
+	{ 0xFC00FCE0U, 0xF4006400U, onimmed, "subu" },
+	{ 0xFC00FCE0U, 0xF4006800U, onimmed, "divu" },
+	{ 0xFC00FCE0U, 0xF4006C00U, onimmed, "mul" },
+	{ 0xFC00FCE0U, 0xF4007000U, onimmed, "add" },
+	{ 0xFC00FCE0U, 0xF4007400U, onimmed, "sub" },
+	{ 0xFC00FCE0U, 0xF4007800U, onimmed, "div" },
+	{ 0xFC00FCE0U, 0xF4007C00U, onimmed, "cmp" },
+
+	{ 0xFC00FFE0U, 0xF4008000U, bitman, "clr" },
+	{ 0xFC00FFE0U, 0xF4008800U, bitman, "set" },
+	{ 0xFC00FFE0U, 0xF4009000U, bitman, "ext" },
+	{ 0xFC00FFE0U, 0xF4009800U, bitman, "extu" },
+	{ 0xFC00FFE0U, 0xF400A000U, bitman, "mak" },
+	{ 0xFC00FFE0U, 0xF400A800U, bitman, "rot" },
+
+	{ 0xFC00FBE0U, 0xF400C000U, jump, "jmp" },
+	{ 0xFC00FBE0U, 0xF400C800U, jump, "jsr" },
+
+	{ 0xFC00FFE0U, 0xF400E800U, instset, "ff1" },
+	{ 0xFC00FFE0U, 0xF400EC00U, instset, "ff0" },
+	{ 0xFC00FFE0U, 0xF400F800U, instset, "tbnd" },
+	{ 0xFC00FFE0U, 0xF400FC00U, instset, "rte" },
+	{ 0xFC000000U, 0xF8000000U, instset, "tbnd" },
+
+	{ 0, 0, 0, 0 }
+};
+
+static char *badop = "\t???";
+
+int
+m88k_print_instruction(unsigned iadr, long inst)
+{
+ register struct opdesc *p;
+
+ /* this messes up "orb" instructions ever so slightly, */
+ /* but keeps us in sync between routines... */
+ if (inst == 0) {
+ db_printf("\t.word 0");
+ } else {
+ for (p = opdecode; p->mask; p++)
+ if ((inst & p->mask) == p->match) {
+ (*p->opfun) (inst, p->farg, iadr);
+ break;
+ }
+ if (!p->mask)
+ db_printf(badop);
+ }
+
+ return iadr + 4;
+}
+db_addr_t
+db_disasm(db_addr_t loc, boolean_t altfmt)
+{
+ m88k_print_instruction(loc, db_get_value(loc, 4, FALSE));
+ db_printf("\n");
+ return loc + 4;
+}
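
Not part of the import above: the disassembler is table-driven, and the short program below is a minimal, self-contained sketch of the same mask/match dispatch that m88k_print_instruction() performs over opdecode[]. The names (decode_entry, decode_one) and the three-entry table are illustrative only.

/*
 * Hypothetical sketch of the opdecode[] mask/match dispatch.
 * Order matters: the first entry whose masked bits match wins.
 */
#include <stdio.h>

struct decode_entry {
	unsigned int mask;	/* which bits identify the instruction */
	unsigned int match;	/* required value of those bits */
	const char *name;	/* mnemonic to print */
};

static const struct decode_entry table[] = {
	{ 0xF8000000U, 0xC0000000U, "br"  },
	{ 0xF8000000U, 0xC8000000U, "bsr" },
	{ 0xFC00FBE0U, 0xF400C000U, "jmp" },
	{ 0, 0, NULL }			/* terminator */
};

static void
decode_one(unsigned int inst)
{
	const struct decode_entry *p;

	for (p = table; p->mask != 0; p++)
		if ((inst & p->mask) == p->match) {
			printf("0x%08x\t%s\n", inst, p->name);
			return;
		}
	printf("0x%08x\t???\n", inst);	/* unknown, like badop above */
}

int
main(void)
{
	decode_one(0xC0000004U);	/* br with a small displacement */
	decode_one(0xF400C002U);	/* jmp r2 */
	return 0;
}
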
diff --git a/sys/arch/mvme88k/ddb/db_interface.c b/sys/arch/mvme88k/ddb/db_interface.c
new file mode 100644
index 00000000000..6c18e1503d4
--- /dev/null
+++ b/sys/arch/mvme88k/ddb/db_interface.c
@@ -0,0 +1,834 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * m88k interface to ddb debugger
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/systm.h> /* just for boothowto --eichin */
+#include <setjmp.h>
+
+#include <vm/vm.h>
+
+#include <machine/m882xx.h> /* CMMU defs */
+#include <machine/trap.h> /* current_thread() */
+#include <machine/db_machdep.h> /* local ddb stuff */
+#include <machine/bug.h> /* bug routines */
+#include <machine/mmu.h>
+
+#include <ddb/db_command.h>
+#include <ddb/db_sym.h>
+
+extern jmp_buf *db_recover;
+extern unsigned int db_maxoff;
+
+int db_active = 0;
+int db_noisy = 0;
+int quiet_db_read_bytes = 0;
+
+/*
+ * Received keyboard interrupt sequence.
+ */
+kdb_kintr(regs)
+ register struct m88100_saved_state *regs;
+{
+ if (db_active == 0 && (boothowto & RB_KDB)) {
+ printf("\n\nkernel: keyboard interrupt\n");
+ m88k_db_trap(-1, regs);
+ }
+}
+/************************/
+/* PRINTING *************/
+/************************/
+
+static void
+m88k_db_str(char *str)
+{
+ db_printf(str);
+}
+
+static void
+m88k_db_str1(char *str, int arg1)
+{
+ db_printf(str, arg1);
+}
+
+static void
+m88k_db_str2(char *str, int arg1, int arg2)
+{
+ db_printf(str, arg1, arg2);
+}
+/************************/
+/* DB_REGISTERS ****/
+/************************/
+
+/*
+ *
+ * If you really feel like understanding the following procedure and
+ * macros, see pages 6-22 to 6-30 (Section 6.7.3) of
+ *
+ * MC88100 RISC Microprocessor User's Manual Second Edition
+ * (Motorola Order: MC88100UM/AD REV 1)
+ *
+ * and ERRATA-5 (6-23, 6-24, 6-24) of
+ *
+ * Errata to MC88100 User's Manual Second Edition MC88100UM/AD Rev 1
+ * (Oct 2, 1990)
+ * (Motorola Order: MC88100UMAD/AD)
+ *
+ */
+
+/* macros for decoding dmt registers */
+
+#define XMEM(x) ((x) & (1<<12))
+#define XMEM_MODE(x) ((((x)>>2 & 0xf) == 0xf) ? "" : ".bu")
+#define MODE(x) ((x)>>2 & 0xf)
+#define DOUB(x) ((x) & (1<<13))
+#define SIGN(x) ((x) & (1<<6))
+#define DAS(x) (((x) & (1<<14)) ? "" : ".usr")
+#define REG(x) (((x)>>7) & 0x1f)
+#define STORE(x) ((x) & 0x2)
+
+/*
+ * return 1 if the printing of the next stage should be suppressed
+ */
+static int
+m88k_dmx_print(unsigned t, unsigned d, unsigned a, unsigned no)
+{
+ static unsigned addr_mod[16] = {0, 3, 2, 2, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ static char *mode[16] = {"?", ".b", ".b", ".h", ".b", "?", "?", "?",
+ ".b", ".h", "?", "?", "?", "?", "?", ""};
+ static unsigned mask[16] = {0, 0xff, 0xff00, 0xffff,
+ 0xff0000, 0, 0, 0,
+				    0xff000000U, 0xffff0000U, 0, 0,
+				    0, 0, 0, 0xffffffffU};
+ static unsigned shift[16] = {0, 0, 8, 0, 16, 0, 0, 0,
+ 24, 16, 0, 0, 0, 0, 0, 0};
+ int reg = REG(t);
+
+ if (XMEM(t)) {
+ db_printf("xmem%s%s r%d(0x%x) <-> mem(0x%x),",
+ XMEM_MODE(t), DAS(t), reg,
+ (((t) >> 2 & 0xf) == 0xf) ? d : (d & 0xff), a);
+ return 1;
+ } else {
+ if (MODE(t) == 0xf) {
+ /* full or double word */
+ if (STORE(t))
+ if (DOUB(t) && no == 2)
+ db_printf("st.d%s -> mem(0x%x) (** restart sxip **)",
+ DAS(t), a);
+ else
+ db_printf("st%s (0x%x) -> mem(0x%x)", DAS(t), d, a);
+ else /* load */
+ if (DOUB(t) && no == 2)
+ db_printf("ld.d%s r%d <- mem(0x%x), r%d <- mem(0x%x)",
+ DAS(t), reg, a, reg + 1, a + 4);
+ else
+ db_printf("ld%s r%d <- mem(0x%x)", DAS(t), reg, a);
+ } else {
+ /* fractional word - check if load or store */
+ a += addr_mod[MODE(t)];
+ if (STORE(t))
+ db_printf("st%s%s (0x%x) -> mem(0x%x)", mode[MODE(t)], DAS(t),
+ (d & mask[MODE(t)]) >> shift[MODE(t)], a);
+ else
+ db_printf("ld%s%s%s r%d <- mem(0x%x)",
+ mode[MODE(t)], SIGN(t) ? "" : "u", DAS(t), reg, a);
+ }
+ }
+ return 0;
+}
+
+static void
+m88k_db_print_frame(
+ db_expr_t addr,
+ boolean_t have_addr,
+ int count,
+ char *modif)
+{
+ struct m88100_saved_state *s = (struct m88100_saved_state *) addr;
+ char *name;
+ db_expr_t offset;
+ int surpress1 = 0, surpress2 = 0;
+ int c, force = 0, help = 0;
+
+ if (!have_addr) {
+ db_printf("requires address of frame\n");
+ help = 1;
+ }
+ while (modif && *modif) {
+ switch (c = *modif++, c) {
+ case 'f':
+ force = 1;
+ break;
+ case 'h':
+ help = 1;
+ break;
+ default:
+ db_printf("unknown modifier [%c]\n", c);
+ help = 1;
+ break;
+ }
+ }
+
+ if (help) {
+ db_printf("usage: mach frame/[f] ADDRESS\n");
+ db_printf(" /f force printing of insane frames.\n");
+ return;
+ }
+ if (badwordaddr((vm_offset_t) s) ||
+ badwordaddr((vm_offset_t) (&((db_regs_t *) s)->mode))) {
+ db_printf("frame at 0x%08x is unreadable\n", s);
+ return;
+ }
+ if (!frame_is_sane(s)) {/* see db_trace.c */
+ db_printf("frame seems insane (");
+
+ if (force)
+ db_printf("forging ahead anyway...)\n");
+ else {
+ db_printf("use /f to force)\n");
+ return;
+ }
+ }
+#define R(i) s->r[i]
+#define IPMASK(x) ((x) & ~(3))
+ db_printf("R00-05: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(0), R(1), R(2), R(3), R(4), R(5));
+ db_printf("R06-11: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(6), R(7), R(8), R(9), R(10), R(11));
+ db_printf("R12-17: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(12), R(13), R(14), R(15), R(16), R(17));
+ db_printf("R18-23: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(18), R(19), R(20), R(21), R(22), R(23));
+ db_printf("R24-29: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(24), R(25), R(26), R(27), R(28), R(29));
+ db_printf("R30-31: 0x%08x 0x%08x\n", R(30), R(31));
+
+ db_printf("sxip: 0x%08x ", s->sxip);
+ db_find_xtrn_sym_and_offset((db_addr_t) IPMASK(s->sxip), &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n");
+ if (s->snip != s->sxip + 4) {
+ db_printf("snip: 0x%08x ", s->snip);
+ db_find_xtrn_sym_and_offset((db_addr_t) IPMASK(s->snip), &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n");
+ }
+ if (s->sfip != s->snip + 4) {
+ db_printf("sfip: 0x%08x ", s->sfip);
+ db_find_xtrn_sym_and_offset((db_addr_t) IPMASK(s->sfip), &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n");
+ }
+ db_printf("vector: 0x%02x interrupt mask: 0x%08x\n",
+ s->vector, s->mask >> 8);
+ db_printf("epsr: 0x%08x current process: 0x%x\n",
+ s->epsr, curproc);
+
+ /*
+ * If the vector indicates trap, instead of an exception or
+ * interrupt, skip the check of dmt and fp regs.
+ *
+ * Interrupt and exceptions are vectored at 0-10 and 114-127.
+ */
+
+ if (!(s->vector <= 10 || (114 <= s->vector && s->vector <= 127))) {
+ db_printf("\n\n");
+ return;
+ }
+ if (s->vector == /* data */ 3 || s->dmt0 & 1) {
+ db_printf("dmt,d,a0: 0x%08x 0x%08x 0x%08x ", s->dmt0, s->dmd0, s->dma0);
+ db_find_xtrn_sym_and_offset((db_addr_t) s->dma0, &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n ");
+ surpress1 = m88k_dmx_print(s->dmt0 | 0x01, s->dmd0, s->dma0, 0);
+ db_printf("\n");
+
+ if ((s->dmt1 & 1) && (!surpress1)) {
+ db_printf("dmt,d,a1: 0x%08x 0x%08x 0x%08x ", s->dmt1, s->dmd1, s->dma1);
+ db_find_xtrn_sym_and_offset((db_addr_t) s->dma1, &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n ");
+ surpress2 = m88k_dmx_print(s->dmt1, s->dmd1, s->dma1, 1);
+ db_printf("\n");
+
+ if ((s->dmt2 & 1) && (!surpress2)) {
+ db_printf("dmt,d,a2: 0x%08x 0x%08x 0x%08x ", s->dmt2, s->dmd2, s->dma2);
+ db_find_xtrn_sym_and_offset((db_addr_t) s->dma2, &name, &offset);
+ if (name != 0 && (unsigned) offset <= db_maxoff)
+ db_printf("%s+0x%08x", name, (unsigned) offset);
+ db_printf("\n ");
+ (void) m88k_dmx_print(s->dmt2, s->dmd2, s->dma2, 2);
+ db_printf("\n");
+ }
+ }
+ }
+	if (s->fpecr & 255) {	/* floating point error occurred */
+ db_printf("fpecr: 0x%08x fpsr: 0x%08x fpcr: 0x%08x\n",
+ s->fpecr, s->fpsr, s->fpcr);
+ db_printf("fcr1-4: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ s->fphs1, s->fpls1, s->fphs2, s->fpls2);
+ db_printf("fcr5-8: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ s->fppt, s->fprh, s->fprl, s->fpit);
+ }
+ db_printf("\n\n");
+}
+
+static void
+m88k_db_registers(
+ db_expr_t addr,
+ boolean_t have_addr,
+ int count,
+ char *modif)
+{
+ if (modif && *modif) {
+ db_printf("usage: mach regs\n");
+ return;
+ }
+ m88k_db_print_frame((db_expr_t) DDB_REGS, TRUE, 0, 0);
+ return;
+}
+/************************/
+/* PAUSE ****************/
+/************************/
+
+/*
+ * pause for 2*ticks many cycles
+ */
+static void
+m88k_db_pause(unsigned volatile ticks)
+{
+ while (ticks)
+ ticks -= 1;
+ return;
+}
+/*
+ * m88k_db_trap - field a TRACE or BPT trap
+ */
+
+m88k_db_trap(
+ int type,
+ register struct m88100_saved_state * regs)
+{
+
+ int i;
+
+#if 0
+ if ((i = db_spl()) != 7)
+ m88k_db_str1("WARNING: spl is not high in m88k_db_trap (spl=%x)\n", i);
+#endif /* 0 */
+
+ if (db_are_interrupts_disabled())
+ m88k_db_str("WARNING: entered debugger with interrupts disabled\n");
+
+ switch (type) {
+
+ case T_KDB_BREAK:
+ case T_KDB_TRACE:
+ case T_KDB_ENTRY:
+ break;
+ case -1:
+ break;
+ default:
+ kdbprinttrap(type, 0);
+ if (db_recover != 0) {
+ db_error("Caught exception in ddb.\n");
+ /* NOTREACHED */
+ }
+ }
+
+ ddb_regs = *regs;
+
+ db_active++;
+ cnpollc(TRUE);
+ db_trap(type, 0);
+ cnpollc(FALSE);
+ db_active--;
+
+ *regs = ddb_regs;
+
+#if 0
+ (void) spl7();
+#endif
+ return (1);
+}
+
+extern char *trap_type[];
+extern int trap_types;
+
+/*
+ * Print trap reason.
+ */
+kdbprinttrap(type, code)
+ int type, code;
+{
+ printf("kernel: ");
+ if (type >= trap_types || type < 0)
+ printf("type %d", type);
+ else
+ printf("%s", trap_type[type]);
+ printf(" trap\n");
+}
+
+int
+Debugger()
+{
+ asm(ENTRY_ASM); /* entry trap */
+ /* ends up at ddb_entry_trap below */
+}
+
+/* gimmeabreak - drop into the debugger via the ENTRY trap */
+void
+gimmeabreak(void)
+{
+ asm(ENTRY_ASM); /* entry trap */
+ /* ends up at ddb_entry_trap below */
+}
+
+/* fielded a non maskable interrupt */
+int
+ddb_nmi_trap(int level, db_regs_t * eframe)
+{
+ NOISY(m88k_db_str("kernel: nmi interrupt\n");)
+ m88k_db_trap(T_KDB_ENTRY, eframe);
+ return 0;
+}
+
+/*
+ * When the below routine is entered interrupts should be on
+ * but spl should be high
+ *
+ * The following routine is for breakpoint and watchpoint entry.
+ */
+
+/* breakpoint/watchpoint entry */
+int
+ddb_break_trap(type, eframe)
+ int type;
+ db_regs_t *eframe;
+{
+ m88k_db_trap(type, eframe);
+
+ if (type == T_KDB_BREAK) {
+ /* back up an instruction and retry the instruction at the
+ * breakpoint address */
+ eframe->sfip = eframe->snip;
+ eframe->snip = eframe->sxip;
+ }
+ return 0;
+}
+
+/* enter at splhigh */
+int
+ddb_entry_trap(level, eframe)
+ int level;
+ db_regs_t *eframe;
+{
+ m88k_db_trap(T_KDB_ENTRY, eframe);
+ return 0;
+}
+
+/*
+ * When the below routine is entered interrupts should be on
+ * but spl should be high
+ */
+/* error trap - unreturnable */
+void
+ddb_error_trap(error, eframe)
+ char *error;
+ db_regs_t *eframe;
+{
+ m88k_db_str1("KERNEL: terminal error [%s]\n", (int) error);
+ m88k_db_str("KERNEL: Exiting debugger will cause abort to rom\n");
+ m88k_db_str1("at 0x%x ", eframe->sxip & ~3);
+ m88k_db_str2("dmt0 0x%x dma0 0x%x", eframe->dmt0, eframe->dma0);
+ m88k_db_pause(1000000);
+ m88k_db_trap(T_KDB_BREAK, eframe);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+void
+db_read_bytes(addr, size, data)
+ vm_offset_t addr;
+ register int size;
+ register char *data;
+{
+ register char *src;
+
+ src = (char *) addr;
+
+ while (--size >= 0)
+ *data++ = *src++;
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ * This should make a text page writable to be able
+ * to plant a break point (right now text is mapped with
+ * write access in pmap_bootstrap()). XXX nivas
+ */
+void
+db_write_bytes(addr, size, data)
+ char *addr;
+ int size;
+ char *data;
+{
+
+ register char *dst;
+ int i = size;
+ vm_offset_t physaddr;
+ pte_template_t *pte;
+
+ dst = (char *) addr;
+
+ while (--size >= 0) {
+#if 0
+ db_printf("byte %x\n", *data);
+#endif /* 0 */
+ *dst++ = *data++;
+ }
+ physaddr = pmap_extract(kernel_pmap, (vm_offset_t) addr);
+ cmmu_flush_cache(physaddr, i);
+}
+
+/* to print a character to the console */
+void
+db_putc(int c)
+{
+ bugoutchr(c & 0xff);
+}
+
+/* to peek at the console; returns -1 if no character is there */
+int
+db_getc(void)
+{
+ if (buginstat())
+ return (buginchr());
+ else
+ return -1;
+}
+
+/* display where all the cpus are stopped at */
+static void
+m88k_db_where(void)
+{
+ struct m88100_saved_state *s;
+ char *name;
+ int *offset;
+ int i;
+ int l;
+
+ s = DDB_REGS;
+
+ l = m88k_pc(s); /* clear low bits */
+
+ db_find_xtrn_sym_and_offset((db_addr_t) l, &name, (db_expr_t *) & offset);
+ if (name && (unsigned) offset <= db_maxoff)
+ db_printf("stopped at 0x%x (%s+0x%x)\n",
+ l, name, offset);
+ else
+ db_printf("stopped at 0x%x\n", l);
+}
+
+/*
+ * Walk back a stack, looking for exception frames.
+ * These frames are recognized by the routine frame_is_sane. Frames
+ * only start with zero, so we only call frame_is_sane if the
+ * current address contains zero.
+ *
+ * If addr is given, it is assumed to an address on the stack to be
+ * searched. Otherwise, r31 of the current cpu is used.
+ */
+static void
+m88k_db_frame_search(db_expr_t addr, boolean_t have_addr)
+{
+#if 1
+ db_printf("sorry, frame search currently disabled.\n");
+#else
+ if (have_addr)
+ addr &= ~3; /* round to word */
+ else
+ addr = (DDB_REGS->r[31]);
+
+	/* walk back up stack until 8k boundary, looking for 0 */
+ while (addr & ((8 * 1024) - 1)) {
+ int i;
+ db_read_bytes(addr, 4, &i);
+ if (i == 0 && frame_is_sane(i))
+ db_printf("frame found at 0x%x\n", i);
+ addr += 4;
+ }
+
+ db_printf("(Walked back until 0x%x)\n", addr);
+#endif
+}
+/* flush icache */
+static void
+m88k_db_iflush(db_expr_t addr, boolean_t have_addr)
+{
+ addr = 0;
+ cmmu_remote_set(addr, CMMU_SCR, 0, CMMU_FLUSH_CACHE_CBI_ALL);
+}
+/* flush dcache */
+
+static void
+m88k_db_dflush(db_expr_t addr, boolean_t have_addr)
+{
+ addr = 0;
+
+ cmmu_remote_set(addr, CMMU_SCR, 1, CMMU_FLUSH_CACHE_CBI_ALL);
+}
+/* probe my cache */
+static void
+m88k_db_peek(
+ db_expr_t addr,
+ boolean_t have_addr,
+ int count,
+ char *modif)
+{
+ int pa12;
+ int valmask;
+
+ pa12 = addr & ~((1 << 12) - 1);
+
+ /* probe dcache */
+ cmmu_remote_set(0, CMMU_SAR, 1, addr);
+
+ valmask = cmmu_remote_get(0, CMMU_CSSP, 1);
+ db_printf("dcache valmask 0x%x\n", (unsigned) valmask);
+ db_printf("dcache tag ports 0x%x 0x%x 0x%x 0x%x\n",
+ (unsigned) cmmu_remote_get(0, CMMU_CTP0, 1),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP1, 1),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP2, 1),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP3, 1));
+
+ /* probe icache */
+ cmmu_remote_set(0, CMMU_SAR, 0, addr);
+
+ valmask = cmmu_remote_get(0, CMMU_CSSP, 0);
+ db_printf("icache valmask 0x%x\n", (unsigned) valmask);
+ db_printf("icache tag ports 0x%x 0x%x 0x%x 0x%x\n",
+ (unsigned) cmmu_remote_get(0, CMMU_CTP0, 0),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP1, 0),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP2, 0),
+ (unsigned) cmmu_remote_get(0, CMMU_CTP3, 0));
+
+}
+
+
+/*
+ * control how much info the debugger prints about itself
+ */
+static void
+m88k_db_noise(db_expr_t addr, boolean_t have_addr)
+{
+ if (!have_addr) {
+ /* if off make noisy; if noisy or very noisy turn off */
+ if (db_noisy) {
+ db_printf("changing debugger status from %s to quiet\n",
+ db_noisy == 1 ? "noisy" :
+ db_noisy == 2 ? "very noisy" : "violent");
+ db_noisy = 0;
+ } else {
+ db_printf("changing debugger status from quiet to noisy\n");
+ db_noisy = 1;
+ }
+ } else
+ if (addr < 0 || addr > 3)
+ db_printf("invalid noise level to m88k_db_noisy; should be 0, 1, 2, or 3\n");
+ else {
+ db_noisy = addr;
+ db_printf("debugger noise level set to %s\n",
+ db_noisy == 0 ? "quiet" :
+ (db_noisy == 1 ? "noisy" :
+ db_noisy == 2 ? "very noisy" : "violent"));
+ }
+}
+/*
+ * See how a virtual address translates.
+ * Must have an address.
+ */
+static void
+m88k_db_translate(
+ db_expr_t addr,
+ boolean_t have_addr,
+ unsigned count,
+ char *modif)
+{
+#if 0
+ char c;
+ int verbose_flag = 0;
+ int supervisor_flag = 1;
+ int wanthelp = 0;
+
+ if (!have_addr)
+ wanthelp = 1;
+ else {
+ while (c = *modif++, c != 0) {
+ switch (c) {
+ default:
+ db_printf("bad modifier [%c]\n", c);
+ wanthelp = 1;
+ break;
+ case 'h':
+ wanthelp = 1;
+ break;
+ case 'v':
+ verbose_flag++;
+ break;
+ case 's':
+ supervisor_flag = 1;
+ break;
+ case 'u':
+ supervisor_flag = 0;
+ break;
+ }
+ }
+ }
+ if (wanthelp) {
+ db_printf("usage: translate[/vvsu] address\n");
+ db_printf("flags: v - be verbose (vv - be very verbose)\n");
+ db_printf(" s - use cmmu's supervisor area pointer (default)\n");
+ db_printf(" u - use cmmu's user area pointer\n");
+ return;
+ }
+ cmmu_show_translation(addr, supervisor_flag, verbose_flag);
+#endif /* 0 */
+}
+
+void
+cpu_interrupt_to_db(int cpu_no)
+{
+}
+
+
+/************************/
+/* COMMAND TABLE / INIT */
+/************************/
+
+static struct db_command m88k_cache_cmds[] =
+{
+ {"iflush", m88k_db_iflush, 0, 0},
+ {"dflush", m88k_db_dflush, 0, 0},
+ {"peek", m88k_db_peek, 0, 0},
+ {(char *) 0,}
+};
+
+struct db_command db_machine_cmds[] =
+{
+ {"cache", 0, 0, m88k_cache_cmds},
+ {"frame", m88k_db_print_frame, 0, 0},
+ {"noise", m88k_db_noise, 0, 0},
+ {"regs", m88k_db_registers, 0, 0},
+ {"searchframe", m88k_db_frame_search, 0, 0},
+ {"translate", m88k_db_translate, 0, 0},
+ {"where", m88k_db_where, 0, 0},
+ {(char *) 0,}
+};
+/*
+ * Called from "m88k/m1x7_init.c"
+ */
+void
+kdb_init(void)
+{
+#ifdef DB_MACHINE_COMMANDS
+ db_machine_commands_install(db_machine_cmds);
+#endif
+ ddb_init();
+
+ db_printf("ddb enabled\n");
+}
+/*
+ * Attempt to figure out the UX name of the task.
+ * This is kludgy at best... we can't even be sure the task is a UX task...
+ */
+#define TOP_OF_USER_STACK USRSTACK
+#define MAX_DISTANCE_TO_LOOK (1024 * 10)
+
+#define DB_TASK_NAME_LEN 50
+
+char *
+db_task_name()
+{
+ static unsigned buffer[(DB_TASK_NAME_LEN + 5) / sizeof(unsigned)];
+ unsigned ptr = (vm_offset_t) (TOP_OF_USER_STACK - 4);
+ unsigned limit = ptr - MAX_DISTANCE_TO_LOOK;
+ unsigned word;
+ int i;
+
+ /* skip zeros at the end */
+ while (ptr > limit &&
+ (i = db_trace_get_val((vm_offset_t) ptr, &word))
+ && (word == 0)) {
+ ptr -= 4; /* continue looking for a non-null word */
+ }
+
+ if (ptr <= limit) {
+ db_printf("bad name at line %d\n", __LINE__);
+ return "<couldn't find 1>";
+ } else
+ if (i != 1) {
+ return "<nostack>";
+ }
+ /* skip looking for null before all the text */
+ while (ptr > limit
+ && (i = db_trace_get_val(ptr, &word))
+ && (word != 0)) {
+ ptr -= 4; /* continue looking for a null word */
+ }
+
+ if (ptr <= limit) {
+ db_printf("bad name at line %d\n", __LINE__);
+ return "<couldn't find 2>";
+ } else
+ if (i != 1) {
+ db_printf("bad name read of %x "
+ "at line %d\n", ptr, __LINE__);
+ return "<bad read 2>";
+ }
+ ptr += 4; /* go back to the non-null word after this one */
+
+	for (i = 0; i < sizeof(buffer) / sizeof(buffer[0]); i++, ptr += 4) {
+ buffer[i] = 0; /* just in case it's not read */
+ db_trace_get_val((vm_offset_t) ptr, &buffer[i]);
+ }
+ return (char *) buffer;
+}
diff --git a/sys/arch/mvme88k/ddb/db_sstep.c b/sys/arch/mvme88k/ddb/db_sstep.c
new file mode 100644
index 00000000000..373ff40c109
--- /dev/null
+++ b/sys/arch/mvme88k/ddb/db_sstep.c
@@ -0,0 +1,256 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/db_machdep.h>
+#include <ddb/db_access.h> /* db_get_value() */
+
+/*
+ * Support routines for software single step.
+ *
+ * Author: Daniel Stodolsky (danner@cs.cmu.edu)
+ *
+ */
+
+/* is the instruction a branch or jump instruction (br, bb0, bb1, bcnd, jmp)
+ but not a function call (bsr or jsr) */
+
+boolean_t
+inst_branch(unsigned ins)
+{
+ /* check high five bits */
+
+ switch (ins >> (32 - 5)) {
+ case 0x18: /* br */
+ case 0x1a: /* bb0 */
+ case 0x1b: /* bb1 */
+ case 0x1d: /* bcnd */
+ return TRUE;
+ break;
+ case 0x1e: /* could be jmp */
+		if ((ins & 0xfffffbe0U) == 0xf400c000U)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+/* inst_load(ins) - returns the number of words the instruction loads. byte,
+ half and word count as 1; double word as 2 */
+
+unsigned
+inst_load(unsigned ins)
+{
+ /* look at the top six bits, for starters */
+
+ switch (ins >> (32 - 6)) {
+ case 0x0: /* xmem byte imm */
+ case 0x1: /* xmem word imm */
+
+ case 0x2: /* unsigned half-word load imm */
+ case 0x3: /* unsigned byte load imm */
+ case 0x5: /* signed word load imm */
+ case 0x6: /* signed half-word load imm */
+ case 0x7: /* signed byte load imm */
+ return 1;
+
+ case 0x4: /* signed double word load imm */
+ return 2;
+
+ case 0x3d: /* load/store/xmem scaled/unscaled instruction */
+		if ((ins & 0xf400c0e0U) == 0xf4000000U)	/* is load/xmem */
+ switch ((ins & 0x0000fce0) >> 5) { /* look at bits 15-5,
+ * but mask bits 8-9 */
+ case 0x0: /* xmem byte */
+ case 0x1: /* xmem word */
+ case 0x2: /* unsigned half word */
+ case 0x3: /* unsigned byte load */
+ case 0x5: /* signed word load */
+ case 0x6: /* signed half-word load */
+ case 0x7: /* signed byte load */
+ return 1;
+
+ case 0x4: /* signed double word load */
+ return 2;
+ } /* end switch load/xmem */
+ break;
+ } /* end switch 32-6 */
+
+ return 0;
+}
+/* inst_store - like inst_load, except for store instructions. */
+
+unsigned
+inst_store(unsigned ins)
+{
+ /* decode top 6 bits again */
+ switch (ins >> (32 - 6)) {
+ case 0x0: /* xmem byte imm */
+ case 0x1: /* xmem word imm */
+ case 0x9: /* store word imm */
+ case 0xa: /* store half-word imm */
+ case 0xb: /* store byte imm */
+ return 1;
+
+ case 0x8: /* store double word */
+ return 2;
+ case 0x3d: /* load/store/xmem scaled/unscaled instruction */
+ /* check bits 15,14,12,7,6,5 are all 0 */
+		if ((ins & 0x0000d0e0U) == 0)
+			switch ((ins & 0x00003c00U) >> 10) {	/* decode bits 10-13 */
+ case 0x0: /* xmem byte imm */
+ case 0x1: /* xmem word imm */
+ case 0x9: /* store word */
+ case 0xa: /* store half-word */
+ case 0xb: /* store byte */
+ return 1;
+
+ case 0x8: /* store double word */
+ return 2;
+ } /* end switch store/xmem */
+ break;
+ } /* end switch 32-6 */
+
+ return 0;
+}
+/* inst_delayed - this instruction is followed by a delay slot. Could be
+ br.n, bsr.n bb0.n, bb1.n, bcnd.n or jmp.n or jsr.n */
+
+boolean_t
+inst_delayed(unsigned ins)
+{
+ /* check the br, bsr, bb0, bb1, bcnd cases */
+	switch ((ins & 0xfc000000U) >> (32 - 6)) {
+ case 0x31: /* br */
+ case 0x33: /* bsr */
+ case 0x35: /* bb0 */
+ case 0x37: /* bb1 */
+ case 0x3b: /* bcnd */
+ return TRUE;
+ }
+
+ /* check the jmp, jsr cases */
+ /* mask out bits 0-4, bit 11 */
+	return ((ins & 0xfffff7e0U) == 0xf400c400U) ? TRUE : FALSE;
+}
+
+
+/*
+ * next_instr_address(pc,delay_slot,task) has the following semantics.
+ * Let inst be the instruction at pc.
+ * If delay_slot = 1, next_instr_address should return
+ * the address of the instruction in the delay slot; if this instruction
+ * does not have a delay slot, it should return pc.
+ * If delay_slot = 0, next_instr_address should return the
+ * address of next sequential instruction, or pc if the instruction is
+ * followed by a delay slot.
+ *
+ * 91-11-28 jfriedl: I think the above is wrong. I think it should be:
+ * if delay_slot true, return address of the delay slot if there is one,
+ * return pc otherwise.
+ * if delay_slot false, return (pc + 4) regardless.
+ *
+ */
+db_addr_t
+next_instr_address(db_addr_t pc, unsigned delay_slot)
+{
+ if (delay_slot == 0)
+ return pc + 4;
+ else {
+ if (inst_delayed(db_get_value(pc, sizeof(int), FALSE)))
+ return pc + 4;
+ else
+ return pc;
+ }
+}
+
+
+/*
+ * branch_taken(instruction, program counter, func, func_data)
+ *
+ * instruction will be a control flow instruction location at address pc.
+ * Branch taken is supposed to return the address to which the instruction
+ * would jump if the branch is taken. Func can be used to get the current
+ * register values when invoked with a register number and func_data as
+ * arguments.
+ *
+ * If the instruction is not a control flow instruction, panic.
+ */
+unsigned
+branch_taken(
+ unsigned inst,
+ unsigned pc,
+ db_expr_t(*func) (unsigned int, db_regs_t *),
+ db_regs_t * func_data)
+{ /* 'opaque' */
+
+ /* check if br/bsr */
+	if ((inst & 0xf0000000U) == 0xc0000000U) {
+		/* signed 26 bit pc relative displacement, shift left two bits */
+		inst = (inst & 0x03ffffffU) << 2;
+		/* check if sign extension is needed */
+		if (inst & 0x08000000U)
+			inst |= 0xf0000000U;
+ return pc + inst;
+ }
+ /* check if bb0/bb1/bcnd case */
+	switch ((inst & 0xf8000000U)) {
+	case 0xd0000000U:	/* bb0 */
+	case 0xd8000000U:	/* bb1 */
+	case 0xe8000000U:	/* bcnd */
+		/* signed 16 bit pc relative displacement, shift left two bits */
+		inst = (inst & 0x0000ffffU) << 2;
+		/* check if sign extension is needed */
+		if (inst & 0x00020000U)
+			inst |= 0xfffc0000U;
+ return pc + inst;
+ }
+
+ /* check jmp/jsr case */
+ /* check bits 5-31, skipping 10 & 11 */
+	if ((inst & 0xfffff3e0U) == 0xf400c000U)
+ return (*func) (inst & 0x1f, func_data); /* the register value */
+
+ panic("branch_taken");
+ return 0; /* keeps compiler happy */
+}
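+/*
+ * Worked example (illustrative only, using the masks above): for a br
+ * instruction word of 0xc3ffffff the 26-bit displacement field is
+ * 0x03ffffff; shifted left two bits it becomes 0x0ffffffc, bit 27 is set,
+ * so the sign extension above turns it into 0xfffffffc (-4) and the
+ * branch target is pc - 4.
+ */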
+/*
+ * getreg_val - handed a register number and an exception frame.
+ * Returns the value of the register in the specified
+ * frame. Only makes sense for general registers.
+ */
+db_expr_t
+getreg_val(unsigned regno, db_regs_t * frame)
+{
+ if (regno == 0)
+ return 0;
+ else
+ if (regno < 31)
+ return frame->r[regno];
+ else {
+ panic("bad register number to getreg_val.");
+ return 0; /* to make compiler happy */
+ }
+}
diff --git a/sys/arch/mvme88k/ddb/db_trace.c b/sys/arch/mvme88k/ddb/db_trace.c
new file mode 100644
index 00000000000..70336eeedab
--- /dev/null
+++ b/sys/arch/mvme88k/ddb/db_trace.c
@@ -0,0 +1,1221 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+union instruction {
+ unsigned rawbits;
+
+ struct {
+ unsigned int:5;
+ unsigned int n:1;
+ signed int d26:26;
+ } br;
+
+ struct {
+ unsigned int:4;
+ unsigned int isbb1:1; /* isbb1==0 means bb0, isbb1==1 means
+ * bb1 */
+ unsigned int n:1;
+ unsigned int b5:5;
+ unsigned int s1:5;
+ signed int d16:16;
+ } bb; /* bcnd too, except "isbb1" makes no sense for
+ * bcnd */
+
+ struct {
+ unsigned int:6;
+ unsigned int b5:5;
+ unsigned int s1:5;
+ unsigned int:7;
+ unsigned int vec9:9;
+ } tb; /* tcnd too */
+
+ struct {
+ unsigned int:21;
+ unsigned int n:1;
+ unsigned int:5;
+ unsigned int s2:5;
+ } jump; /* jmp, jsr */
+
+ struct {
+ unsigned int:6;
+ unsigned int d:5;
+ unsigned int s1:5;
+ unsigned int i16:16;
+ } diatic; /* general reg/reg/i16 instructions */
+
+ struct {
+ unsigned int:6;
+ unsigned int d:5;
+ unsigned int s1:5;
+ unsigned int:11;
+ unsigned int s2:5;
+ } triatic; /* general reg/reg/reg instructions */
+
+};
+
+static inline unsigned
+br_dest(unsigned addr, union instruction inst)
+{
+ return addr + inst.br.d26 * 4;
+}
+
+
+#define TRACE_DEBUG /* undefine to disable debugging */
+
+#include <machine/db_machdep.h> /* lots of stuff */
+#include <setjmp.h> /* jmp_buf, etc. */
+#include <ddb/db_variables.h> /* db_variable, DB_VAR_GET, etc. */
+#include <ddb/db_output.h> /* db_printf */
+#include <ddb/db_sym.h> /* DB_STGY_PROC, etc. */
+#include <ddb/db_command.h> /* db_recover */
+
+/*
+ * Some macros to tell if the given text is the instruction.
+ */
+#define JMPN_R1(I) ( (I) == 0xf400c401U) /* jmp.n r1 */
+#define JMP_R1(I) ( (I) == 0xf400c001U) /* jmp r1 */
+
+/* gets the IMM16 value from an instruction */
+#define IMM16VAL(I) (((union instruction)(I)).diatic.i16)
+
+/* subu r31, r31, IMM */
+#define SUBU_R31_R31_IMM(I) (((I) & 0xffff0000U) == 0x67ff0000U)
+
+/* st r1, r31, IMM */
+#define ST_R1_R31_IMM(I) (((I) & 0xffff0000U) == 0x243f0000U)
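+
+/*
+ * Worked example (illustrative only; consistent with the macros above):
+ * the word 0x67ff0030 matches SUBU_R31_R31_IMM, i.e. "subu r31, r31, 0x30",
+ * and IMM16VAL() extracts the 0x30 frame size from its low 16 bits.
+ */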
+
+static int trace_flags = 0;
+#define TRACE_DEBUG_FLAG 0x01
+#define TRACE_SHOWCALLPRESERVED_FLAG 0x02
+#define TRACE_SHOWADDRESS_FLAG 0x04
+#define TRACE_SHOWFRAME_FLAG 0x08
+#define TRACE_USER_FLAG 0x10
+
+#ifdef TRACE_DEBUG
+#define DEBUGGING_ON (trace_flags & TRACE_DEBUG_FLAG)
+#endif
+
+#ifndef TRACE_DEBUG
+#define SHOW_INSTRUCTION(Addr, Inst, Note) { /*nothing*/ }
+#else
+#define SHOW_INSTRUCTION(Addr, Inst, Note) if (DEBUGGING_ON) { \
+ db_printf("%s0x%x: (0x%08x) ", Note, (unsigned)(Addr), (Inst)); \
+ m88k_print_instruction((unsigned)(Addr), (Inst)); \
+ db_printf("\n"); \
+ }
+#endif
+
+extern jmp_buf *db_recover;
+extern int quiet_db_read_bytes;
+/*
+ * m88k trace/register state interface for ddb.
+ */
+
+/* lifted from mips */
+static int
+db_setf_regs(
+ struct db_variable * vp,
+ db_expr_t * valuep,
+ int op)
+{ /* read/write */
+ register int *regp = (int *) ((char *) DDB_REGS + (int) (vp->valuep));
+
+ if (op == DB_VAR_GET)
+ *valuep = *regp;
+ else
+ if (op == DB_VAR_SET)
+ *regp = *valuep;
+	return 0;
+}
+#define N(s, x) {s, (int *)&(((db_regs_t *) 0)->x), db_setf_regs}
+
+struct db_variable db_regs[] = {
+ N("r1", r[1]), N("r2", r[2]), N("r3", r[3]), N("r4", r[4]),
+ N("r5", r[5]), N("r6", r[6]), N("r7", r[7]), N("r8", r[8]),
+ N("r9", r[9]), N("r10", r[10]), N("r11", r[11]), N("r12", r[12]),
+ N("r13", r[13]), N("r14", r[14]), N("r15", r[15]), N("r16", r[16]),
+ N("r17", r[17]), N("r18", r[18]), N("r19", r[19]), N("r20", r[20]),
+ N("r21", r[21]), N("r22", r[22]), N("r23", r[23]), N("r24", r[24]),
+ N("r25", r[25]), N("r26", r[26]), N("r27", r[27]), N("r28", r[28]),
+ N("r29", r[29]), N("r30", r[30]), N("r31", r[31]), N("epsr", epsr),
+ N("sxip", sxip), N("snip", snip), N("sfip", sfip), N("ssbr", ssbr),
+ N("dmt0", dmt0), N("dmd0", dmd0), N("dma0", dma0), N("dmt1", dmt1),
+ N("dmd1", dmd1), N("dma1", dma1), N("dmt2", dmt2), N("dmd2", dmd2),
+ N("dma2", dma2), N("fpecr", fpecr), N("fphs1", fphs1), N("fpls1", fpls1),
+ N("fphs2", fphs2), N("fpls2", fpls2), N("fppt", fppt), N("fprh", fprh),
+ N("fprl", fprl), N("fpit", fpit), N("fpsr", fpsr), N("fpcr", fpcr),
+ N("mask", mask), /* interrupt mask */
+ N("mode", mode), /* interrupt mode */
+ N("exvc", vector), /* exception vector */
+};
+#undef N
+
+struct db_variable *db_eregs = db_regs + sizeof(db_regs) / sizeof(db_regs[0]);
+
+
+#define TRASHES 0x001 /* clobbers instruction field D */
+#define STORE 0x002 /* does a store to S1+IMM16 */
+#define LOAD 0x004 /* does a load from S1+IMM16 */
+#define DOUBLE 0x008 /* double-register */
+#define FLOW_CTRL 0x010 /* flow-control instruction */
+#define DELAYED 0x020 /* delayed flow control */
+#define JSR 0x040 /* flow-control is a jsr[.n] */
+#define BSR 0x080 /* flow-control is a bsr[.n] */
+
+/*
+ * Given a word of instruction text, return some flags about that
+ * instruction (flags defined above).
+ */
+static unsigned
+m88k_instruction_info(unsigned instruction)
+{
+	static struct {
+		unsigned mask, value, flags;
+	} *ptr, control[] = {
+		/* runs in the same order as 2nd Ed 88100 manual Table 3-14 */
+		{0xf0000000U, 0x00000000U, /* xmem */	TRASHES | STORE | LOAD},
+		{0xec000000U, 0x00000000U, /* ld.d */	TRASHES | LOAD | DOUBLE},
+		{0xe0000000U, 0x00000000U, /* load */	TRASHES | LOAD},
+		{0xfc000000U, 0x20000000U, /* st.d */	STORE | DOUBLE},
+		{0xf0000000U, 0x20000000U, /* store */	STORE},
+		{0xc0000000U, 0x40000000U, /* arith */	TRASHES},
+		{0xfc004000U, 0x80004000U, /* ld cr */	TRASHES},
+		{0xfc004000U, 0x80000000U, /* st cr */	0},
+		{0xfc008060U, 0x84000000U, /* f */	TRASHES},
+		{0xfc008060U, 0x84000020U, /* f.d */	TRASHES | DOUBLE},
+		{0xfc000000U, 0xcc000000U, /* bsr.n */	FLOW_CTRL | DELAYED | BSR},
+		{0xfc000000U, 0xc8000000U, /* bsr */	FLOW_CTRL | BSR},
+		{0xe4000000U, 0xc4000000U, /* br/bb.n */ FLOW_CTRL | DELAYED},
+		{0xe4000000U, 0xc0000000U, /* br/bb */	FLOW_CTRL},
+		{0xfc000000U, 0xec000000U, /* bcnd.n */	FLOW_CTRL | DELAYED},
+		{0xfc000000U, 0xe8000000U, /* bcnd */	FLOW_CTRL},
+		{0xfc00c000U, 0xf0008000U, /* bits */	TRASHES},
+		{0xfc00c000U, 0xf000c000U, /* trap */	0},
+		{0xfc00f0e0U, 0xf4002000U, /* st */	0},
+		{0xfc00cce0U, 0xf4000000U, /* ld.d */	TRASHES | DOUBLE},
+		{0xfc00c0e0U, 0xf4000000U, /* ld */	TRASHES},
+		{0xfc00c0e0U, 0xf4004000U, /* arith */	TRASHES},
+		{0xfc00c3e0U, 0xf4008000U, /* bits */	TRASHES},
+		{0xfc00ffe0U, 0xf400cc00U, /* jsr.n */	FLOW_CTRL | DELAYED | JSR},
+		{0xfc00ffe0U, 0xf400c800U, /* jsr */	FLOW_CTRL | JSR},
+		{0xfc00ffe0U, 0xf400c400U, /* jmp.n */	FLOW_CTRL | DELAYED},
+		{0xfc00ffe0U, 0xf400c000U, /* jmp */	FLOW_CTRL},
+		{0xfc00fbe0U, 0xf400e800U, /* ff */	TRASHES},
+		{0xfc00ffe0U, 0xf400f800U, /* tbnd */	0},
+		{0xfc00ffe0U, 0xf400fc00U, /* rte */	FLOW_CTRL},
+		{0xfc000000U, 0xf8000000U, /* tbnd */	0},
+	};
+#define ctrl_count (sizeof(control)/sizeof(control[0]))
+ for (ptr = &control[0]; ptr < &control[ctrl_count]; ptr++)
+ if ((instruction & ptr->mask) == ptr->value)
+ return ptr->flags;
+ SHOW_INSTRUCTION(0, instruction, "bad m88k_instruction_info");
+ return 0;
+}
+
+static int
+hex_value_needs_0x(unsigned value)
+{
+ int i;
+ unsigned last = 0;
+ unsigned char c;
+ unsigned have_a_hex_digit = 0;
+
+ if (value <= 9)
+ return 0;
+
+ for (i = 0; i < 8; i++) {
+ c = value & 0xf;
+ value >>= 4;
+ if (c)
+ last = c;
+ if (c > 9)
+ have_a_hex_digit = 1;
+ }
+ if (have_a_hex_digit == 0)
+ return 1;
+ if (last > 9)
+ return 1;
+ return 0;
+}
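+
+/*
+ * Illustrative cases for the heuristic above (not exhaustive): 0x12 has no
+ * hex digit, so "12" alone would read as decimal and needs the 0x prefix;
+ * 0x2a prints as "2a", which can only be hex, so no prefix is needed;
+ * 0xa5 would print with a leading letter, so it gets the prefix as well.
+ */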
+
+
+/*
+ * returns
+ * 1 if regs seems to be a reasonable kernel exception frame.
+ * 2 if regs seems to be a reasonable user exception frame
+ * (in the current task).
+ * 0 if this looks like neither.
+ */
+int
+frame_is_sane(db_regs_t * regs)
+{
+ /* no good if we can't read the whole frame */
+ if (badwordaddr((vm_offset_t) regs) || badwordaddr((vm_offset_t) & regs->mode))
+ return 0;
+
+#ifndef DIAGNOSTIC
+ /* disabled for now -- see fpu_enable in luna88k/eh.s */
+ /* r0 must be 0 (obviously) */
+ if (regs->r[0] != 0)
+ return 0;
+#endif
+
+ /* stack sanity ... r31 must be nonzero, but must be word aligned */
+ if (regs->r[31] == 0 || (regs->r[31] & 3) != 0)
+ return 0;
+
+ /* sxip is reasonable */
+#if 0
+ if ((regs->sxip & 1) == 1)
+ return 0;
+#endif
+ /* snip is reasonable */
+ if ((regs->snip & 3) != 2)
+ return 0;
+ /* sfip is reasonable */
+ if ((regs->sfip & 3) != 2)
+ return 0;
+
+ /* epsr sanity */
+	if ((regs->epsr & 0x8FFFFFF5U) == 0x800003f0U) {	/* kernel mode */
+ if (regs->epsr & 0x40000000)
+ db_printf("[WARNING: byte order in kernel frame at %x "
+ "is little-endian!]\n", regs);
+ return 1;
+ }
+	if ((regs->epsr & 0x8FFFFFFFU) == 0x000003f0U) {	/* user mode */
+ if (regs->epsr & 0x40000000)
+ db_printf("[WARNING: byte order in user frame at %x "
+ "is little-endian!]\n", regs);
+ return 2;
+ }
+ return 0;
+}
+
+char *
+m88k_exception_name(unsigned vector)
+{
+ switch (vector) {
+ default:
+	case 0:
+		return "Reset";
+ case 1:
+ return "Interrupt";
+ case 2:
+ return "Instruction Access Exception";
+ case 3:
+ return "Data Access Exception";
+ case 4:
+ return "Misaligned Access Exception";
+ case 5:
+ return "Unimplemented Opcode Exception";
+ case 6:
+ return "Privilege Violation";
+ case 7:
+ return "Bounds Check";
+ case 8:
+ return "Integer Divide Exception";
+ case 9:
+ return "Integer Overflow Exception";
+ case 10:
+ return "Error Exception";
+ case 114:
+ return "FPU precise";
+ case 115:
+ return "FPU imprecise";
+ case 130:
+ return "Ddb break";
+ case 131:
+ return "Ddb trace";
+ case 132:
+ return "Ddb trap";
+ case 451:
+ return "Syscall";
+ }
+}
+/*
+ * Read a word at address addr.
+ * Return 1 if was able to read, 0 otherwise.
+ */
+unsigned
+db_trace_get_val(vm_offset_t addr, unsigned *ptr)
+{
+ jmp_buf db_jmpbuf;
+ jmp_buf *prev = db_recover;
+ boolean_t old_quiet_db_read_bytes = quiet_db_read_bytes;
+
+ quiet_db_read_bytes = 1;
+
+ if (setjmp(*(db_recover = &db_jmpbuf)) != 0) {
+ db_recover = prev;
+ quiet_db_read_bytes = old_quiet_db_read_bytes;
+ return 0;
+ } else {
+ db_read_bytes((char *) addr, 4, (char *) ptr);
+ db_recover = prev;
+ quiet_db_read_bytes = old_quiet_db_read_bytes;
+ return 1;
+ }
+}
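+
+/*
+ * Usage sketch (illustrative): callers treat this as a guarded read, e.g.
+ *	unsigned word;
+ *	if (db_trace_get_val(addr, &word))
+ *		... use word ...
+ * The setjmp/db_recover dance above is what is relied on to turn a fault
+ * during the read into a 0 return instead of a recursive trap.
+ */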
+
+
+#define FIRST_CALLPRESERVED_REG 14
+#define LAST_CALLPRESERVED_REG 29
+#define FIRST_ARG_REG 2
+#define LAST_ARG_REG 9
+#define RETURN_VAL_REG 1
+
+static unsigned global_saved_list = 0x0; /* one bit per register */
+static unsigned local_saved_list = 0x0; /* one bit per register */
+static unsigned trashed_list = 0x0; /* one bit per register */
+static unsigned saved_reg[32]; /* one value per register */
+
+#define reg_bit(reg) (1<<((reg)%32))
+
+static void
+save_reg(int reg, unsigned value)
+{
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("save_reg(%d, %x)\n", reg, value);
+#endif
+ if (trashed_list & reg_bit(reg)) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("<trashed>\n");
+#endif
+ return; /* don't save trashed registers */
+ }
+ saved_reg[(reg % 32)] = value;
+ global_saved_list |= reg_bit(reg);
+ local_saved_list |= reg_bit(reg);
+}
+#define mark_reg_trashed(reg) (trashed_list |= reg_bit(reg))
+
+#define have_global_reg(reg) (global_saved_list & (1<<(reg)))
+#define have_local_reg(reg) (local_saved_list & (1<<(reg)))
+
+#define clear_local_saved_regs() { local_saved_list = trashed_list = 0; }
+#define clear_global_saved_regs() { local_saved_list = global_saved_list = 0; }
+
+#define saved_reg_value(reg) (saved_reg[(reg)])
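+
+/*
+ * Sketch of how the bookkeeping above is meant to be used (illustrative
+ * values): save_reg(1, 0x1234) records r1, so have_local_reg(1) becomes
+ * nonzero and saved_reg_value(1) yields 0x1234; a prior mark_reg_trashed(1)
+ * would make save_reg() ignore the store instead.
+ */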
+
+/*
+ * Show any arguments that we might have been able to determine.
+ */
+static void
+print_args(void)
+{
+ int reg, last_arg;
+
+ /* find the highest argument register saved */
+ for (last_arg = LAST_ARG_REG; last_arg >= FIRST_ARG_REG; last_arg--)
+ if (have_local_reg(last_arg))
+ break;
+ if (last_arg < FIRST_ARG_REG)
+ return; /* none were saved */
+
+ db_printf("(");
+
+ /* print each one, up to the highest */
+ for (reg = FIRST_ARG_REG; /* nothing */ ; reg++) {
+ if (!have_local_reg(reg))
+ db_printf("?");
+ else {
+ unsigned value = saved_reg_value(reg);
+ db_printf("%s%x", hex_value_needs_0x(value) ? "0x" : "", value);
+ }
+ if (reg == last_arg)
+ break;
+ else
+ db_printf(", ");
+ }
+ db_printf(")");
+}
+
+
+#define JUMP_SOURCE_IS_BAD 0
+#define JUMP_SOURCE_IS_OK 1
+#define JUMP_SOURCE_IS_UNLIKELY 2
+
+/*
+ * Given an address to which we return and an address to which we'd jumped,
+ * decide if it all makes sense.
+ *
+ * Gcc sometimes optimizes something like
+ * if (condition)
+ * func1();
+ * else
+ * OtherStuff...
+ * to
+ * bcnd !condition mark
+ * bsr.n func1
+ * or r1, r0, mark2
+ * mark:
+ * OtherStuff...
+ * mark2:
+ *
+ * So RETURN_TO will be MARK2, even though we really did branch via
+ * 'bsr.n func1', which makes it difficult to be certain about being
+ * wrong.
+ */
+static int
+is_jump_source_ok(unsigned return_to, unsigned jump_to)
+{
+ unsigned flags;
+ union instruction instruction;
+
+ /*
+ * Delayed branches are most common... look two instructions before
+ * where we were going to return to to see if it's a delayed branch.
+ */
+ if (!db_trace_get_val(return_to - 8, &instruction.rawbits))
+ return JUMP_SOURCE_IS_BAD;
+ flags = m88k_instruction_info(instruction.rawbits);
+
+ if ((flags & FLOW_CTRL) && (flags & DELAYED) && (flags & (JSR | BSR))) {
+ if (flags & JSR)
+ return JUMP_SOURCE_IS_OK; /* have to assume it's
+ * correct */
+ /* calculate the offset */
+ if (br_dest(return_to - 8, instruction) == jump_to)
+ return JUMP_SOURCE_IS_OK; /* exactamundo! */
+ else
+ return JUMP_SOURCE_IS_UNLIKELY; /* seems wrong */
+ }
+ /*
+ * Try again, looking for a non-delayed jump one back.
+ */
+ if (!db_trace_get_val(return_to - 4, &instruction.rawbits))
+ return JUMP_SOURCE_IS_BAD;
+ flags = m88k_instruction_info(instruction.rawbits);
+
+ if ((flags & FLOW_CTRL) && !(flags & DELAYED) && (flags & (JSR | BSR))) {
+ if (flags & JSR)
+ return JUMP_SOURCE_IS_OK; /* have to assume it's
+ * correct */
+ /* calculate the offset */
+ if (br_dest(return_to - 4, instruction) == jump_to)
+ return JUMP_SOURCE_IS_OK; /* exactamundo! */
+ else
+ return JUMP_SOURCE_IS_UNLIKELY; /* seems wrong */
+ }
+ return JUMP_SOURCE_IS_UNLIKELY;
+}
+
+static char *note = 0;
+static int next_address_likely_wrong = 0;
+
+/* How much slop we expect in the stack trace */
+#define FRAME_PLAY 8
+
+/*
+ * Stack decode -
+ * unsigned addr; program counter
+ * unsigned *stack; IN/OUT stack pointer
+ *
+ * given an address within a function and a stack pointer,
+ * try to find the function from which this one was called
+ * and the stack pointer for that function.
+ *
+ * The return value is zero if we get confused or if we determine
+ * that the return address has not yet been saved (early in the
+ * function prologue).  Otherwise the return value is the address
+ * from which this function was called.
+ *
+ * Note that even if zero is returned (the second case) the
+ * stack pointer can still be adjusted.
+ *
+ */
+static int
+stack_decode(unsigned addr, unsigned *stack)
+{
+ db_sym_t proc;
+ unsigned offset_from_proc;
+ unsigned instructions_to_search;
+ unsigned check_addr;
+ unsigned function_addr; /* start of function */
+ unsigned r31 = *stack; /* the r31 of the function */
+ unsigned inst; /* text of an instruction */
+ unsigned ret_addr; /* address to which we return */
+ unsigned tried_to_save_r1 = 0;
+
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("\n>>>stack_decode(addr=%x, stack=%x)\n",
+ addr, *stack);
+#endif
+
+ /* get what we hope will be the db_sym_t for the function name */
+ proc = db_search_symbol(addr, DB_STGY_PROC, &offset_from_proc);
+ if (offset_from_proc == addr) /* i.e. no symbol found */
+ proc = DB_SYM_NULL;
+
+ /*
+ * Somehow, find the start of this function.
+ * If we found a symbol above, it'll have the address.
+ * Otherwise, we've got to search for it....
+ */
+ if (proc != DB_SYM_NULL) {
+ char *names;
+ db_symbol_values(proc, &names, &function_addr);
+ if (names == 0)
+ return 0;
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("name %s address 0x%x\n",
+ names, function_addr);
+#endif
+ } else {
+ int instructions_to_check = 400;
+ /*
+ * hmm - unable to find symbol. Search back
+ * looking for a function prolog.
+ */
+ for (check_addr = addr; instructions_to_check-- > 0; check_addr -= 4) {
+ if (!db_trace_get_val(check_addr, &inst))
+ break;
+
+ if (SUBU_R31_R31_IMM(inst)) {
+#if 0
+ /*
+ * If the next instruction is "st r1, r31, ####"
+ * then we can feel safe we have the start of
+ * a function.
+ */
+ if (!db_trace_get_val(check_addr + 4, &inst))
+ continue;
+				if (ST_R1_R31_IMM(inst))
+					break;	/* success */
+#else
+ /*
+ * Latest GCC optimizer is just too good... the store
+ * of r1 might come much later... so we'll have to
+				 * settle for just the "subu r31, r31, ###" to mark
+ * the start....
+ */
+ break;
+#endif
+ }
+ /*
+ * if we come across a [jmp r1] or [jmp.n r1] assume we have hit
+			 * the previous function's epilogue and stop our search.
+			 * Since we know we would have hit the "subu r31, r31" if it was
+ * right in front of us, we know this doesn't have one so
+ * we just return failure....
+ */
+ if (JMP_R1(inst) || JMPN_R1(inst)) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("ran into a [jmp r1] at %x (addr=%x)\n",
+ check_addr, addr);
+#endif
+ return 0;
+ }
+ }
+ if (instructions_to_check < 0) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("couldn't find func start (addr=%x)\n", addr);
+#endif
+ return 0; /* bummer, couldn't find it */
+ }
+ function_addr = check_addr;
+ }
+
+ /*
+ * We now know the start of the function (function_addr).
+ * If we're stopped right there, or if it's not a
+ * subu r31, r31, ####
+ * then we're done.
+ */
+ if (addr == function_addr) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("at start of func\n");
+#endif
+ return 0;
+ }
+ if (!db_trace_get_val(function_addr, &inst)) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("couldn't read %x at line %d\n",
+ function_addr, __LINE__);
+#endif
+ return 0;
+ }
+ SHOW_INSTRUCTION(function_addr, inst, "start of function: ");
+ if (!SUBU_R31_R31_IMM(inst)) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("<not subu,r31,r31,imm>\n");
+#endif
+ return 0;
+ }
+ /* add the size of this frame to the stack (for the next frame) */
+ *stack += IMM16VAL(inst);
+
+ /*
+ * Search from the beginning of the function (funstart) to where we are
+ * in the function (addr) looking to see what kind of registers have
+ * been saved on the stack.
+ *
+ * We'll stop looking before we get to ADDR if we hit a branch.
+ */
+ clear_local_saved_regs();
+ check_addr = function_addr + 4; /* we know the first inst isn't a
+ * store */
+
+ for (instructions_to_search = (addr - check_addr) / sizeof(long);
+ instructions_to_search-- > 0;
+ check_addr += 4) {
+ union instruction instruction;
+ unsigned flags;
+
+ /* read the instruction */
+ if (!db_trace_get_val(check_addr, &instruction.rawbits)) {
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("couldn't read %x at line %d\n",
+ check_addr, __LINE__);
+#endif
+ break;
+ }
+ SHOW_INSTRUCTION(check_addr, instruction.rawbits, "prolog: ");
+
+ /* find out the particulars about this instruction */
+ flags = m88k_instruction_info(instruction.rawbits);
+
+ /* if a store to something off the stack pointer, note the
+ * value */
+ if ((flags & STORE) && instruction.diatic.s1 == /* stack pointer */ 31) {
+ unsigned value;
+ if (!have_local_reg(instruction.diatic.d)) {
+ if (instruction.diatic.d == 1)
+ tried_to_save_r1 = r31 + instruction.diatic.i16;
+ if (db_trace_get_val(r31 + instruction.diatic.i16, &value))
+ save_reg(instruction.diatic.d, value);
+ }
+ if ((flags & DOUBLE) && !have_local_reg(instruction.diatic.d + 1)) {
+ if (instruction.diatic.d == 0)
+ tried_to_save_r1 = r31 + instruction.diatic.i16 + 4;
+ if (db_trace_get_val(r31 + instruction.diatic.i16 + 4, &value))
+ save_reg(instruction.diatic.d + 1, value);
+ }
+ }
+ /* if an inst that kills D (and maybe D+1), note that */
+ if (flags & TRASHES) {
+ mark_reg_trashed(instruction.diatic.d);
+ if (flags & DOUBLE)
+ mark_reg_trashed(instruction.diatic.d + 1);
+ }
+ /* if a flow control instruction, stop now (or next if
+ * delayed) */
+ if ((flags & FLOW_CTRL) && instructions_to_search != 0)
+ instructions_to_search = (flags & DELAYED) ? 1 : 0;
+ }
+
+ /*
+ * If we didn't save r1 at some point, we're hosed.
+ */
+ if (!have_local_reg(1)) {
+ if (tried_to_save_r1) {
+ db_printf(" <return value of next fcn unreadable in %08x>\n",
+ tried_to_save_r1);
+ }
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("didn't save r1\n");
+#endif
+ return 0;
+ }
+ ret_addr = saved_reg_value(1);
+
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("Return value is = %x, function_addr is %x.\n",
+ ret_addr, function_addr);
+#endif
+
+ /*
+ * In support of this, continuation.s puts the low bit on the
+ * return address for continuations (the return address will never
+ * be used, so it's ok to do anything you want to it).
+ */
+ if (ret_addr & 1) {
+ note = "<<can not trace past a continuation>>";
+ ret_addr = 0;
+ } else
+ if (ret_addr != 0x00) {
+ switch (is_jump_source_ok(ret_addr, function_addr)) {
+ case JUMP_SOURCE_IS_OK:
+ break; /* excellent */
+
+ case JUMP_SOURCE_IS_BAD:
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("jump is bad\n");
+#endif
+ return 0; /* bummer */
+
+ case JUMP_SOURCE_IS_UNLIKELY:
+			next_address_likely_wrong = 1;
+ break;
+ }
+ }
+ return ret_addr;
+}
+
+static void
+db_stack_trace_cmd2(db_regs_t * regs)
+{
+ unsigned stack;
+ unsigned depth = 1;
+ unsigned where;
+ unsigned ft;
+ unsigned pair[2];
+ int i;
+
+ /*
+ * Frame_is_sane returns:
+ * 1 if regs seems to be a reasonable kernel exception frame.
+ * 2 if regs seems to be a reasonable user exception frame
+ * (in the current task).
+ * 0 if this looks like neither.
+ */
+ if (ft = frame_is_sane(regs), ft == 0) {
+		db_printf("Register frame 0x%x is suspicious; skipping trace\n", regs);
+ return;
+ }
+ /* if user space and no user space trace specified, puke */
+ if (ft == 2 && !(trace_flags & TRACE_USER_FLAG))
+ return;
+
+ /* fetch address */
+ /* use sxip if valid, otherwise try snip or sfip */
+ where = ((regs->sxip & 2) ? regs->sxip :
+ ((regs->snip & 2) ? regs->snip :
+ regs->sfip)) & ~3;
+ stack = regs->r[31];
+ db_printf("stack base = 0x%x\n", stack);
+ db_printf("(0) "); /* depth of trace */
+ if (trace_flags & TRACE_SHOWADDRESS_FLAG)
+ db_printf("%08x ", where);
+ db_printsym(where, DB_STGY_PROC);
+ clear_global_saved_regs();
+
+ /* see if this routine had a stack frame */
+ if ((where = stack_decode(where, &stack)) == 0) {
+ where = regs->r[1];
+ db_printf("(stackless)");
+ } else {
+ print_args();
+ if (trace_flags & TRACE_SHOWFRAME_FLAG)
+ db_printf(" [frame 0x%x]", stack);
+ }
+ db_printf("\n");
+ if (note) {
+ db_printf(" %s\n", note);
+ note = 0;
+ }
+ do {
+ /*
+ * If requested, show preserved registers at the time
+ * the next-shown call was made. Only registers known to have
+ * changed from the last exception frame are shown, as others
+ * can be gotten at by looking at the exception frame.
+ */
+ if (trace_flags & TRACE_SHOWCALLPRESERVED_FLAG) {
+ int r, title_printed = 0;
+
+ for (r = FIRST_CALLPRESERVED_REG; r <= LAST_CALLPRESERVED_REG; r++) {
+ if (have_global_reg(r)) {
+ unsigned value = saved_reg_value(r);
+ if (title_printed == 0) {
+ title_printed = 1;
+ db_printf("[in next func:");
+ }
+ if (value == 0)
+ db_printf(" r%d", r);
+ else
+ if (value <= 9)
+ db_printf(" r%d=%x", r, value);
+ else
+ db_printf(" r%d=x%x", r, value);
+ }
+ }
+ if (title_printed)
+ db_printf("]\n");
+ }
+ db_printf("(%d)%c", depth++, next_address_likely_wrong ? '?' : ' ');
+ next_address_likely_wrong = 0;
+
+ if (trace_flags & TRACE_SHOWADDRESS_FLAG)
+ db_printf("%08x ", where);
+ db_printsym(where, DB_STGY_PROC);
+ where = stack_decode(where, &stack);
+ print_args();
+ if (trace_flags & TRACE_SHOWFRAME_FLAG)
+ db_printf(" [frame 0x%x]", stack);
+ db_printf("\n");
+ if (note) {
+ db_printf(" %s\n", note);
+ note = 0;
+ }
+ } while (where);
+
+ /* try to trace back over trap/exception */
+
+ stack &= ~7; /* double word aligned */
+ /* take last top of stack, and try to find an exception frame near it */
+
+ i = FRAME_PLAY;
+
+#ifdef TRACE_DEBUG
+ if (DEBUGGING_ON)
+ db_printf("(searching for exception frame at 0x%x)\n", stack);
+#endif
+
+ while (i) {
+ /*
+ * On the stack, a pointer to the exception frame is written
+ * in two adjacent words. In the case of a fault from the kernel,
+ * this should point to the frame right above them:
+ *
+ * Exception Frame Top
+ * ..
+ * Exception Frame Bottom <-- frame addr
+ * frame addr
+ * frame addr <-- stack pointer
+ *
+ * In the case of a fault from user mode, the top of stack
+ * will just have the address of the frame
+ * replicated twice.
+ *
+ * frame addr <-- top of stack
+ * frame addr
+ *
+ * Here we are just looking for kernel exception frames.
+ */
+
+ if (badwordaddr((vm_offset_t) stack) ||
+ badwordaddr((vm_offset_t) (stack + 4)))
+ break;
+
+ db_read_bytes((char *) stack, 2 * sizeof(int), (char *) pair);
+
+ /* the pairs should match and equal stack+8 */
+ if (pair[0] == pair[1]) {
+ if (pair[0] != stack + 8) {
+ /*
+ if (!badwordaddr((vm_offset_t)pair[0]) && (pair[0]!=0))
+ db_printf("stack_trace:found pair 0x%x but != to stack+8\n",
+ pair[0]);
+ */
+ } else
+ if (frame_is_sane((db_regs_t *) pair[0])) {
+ db_regs_t *frame = (db_regs_t *) pair[0];
+ char *cause = m88k_exception_name(frame->vector);
+
+ db_printf("-------------- %s [EF: 0x%x] -------------\n",
+ cause, frame);
+ db_stack_trace_cmd2(frame);
+ return;
+ }
+#ifdef TRACE_DEBUG
+ else
+ if (DEBUGGING_ON)
+ db_printf("pair matched, but frame at 0x%x looks insane\n",
+ stack + 8);
+#endif
+ }
+ stack += 8;
+ i--;
+ }
+
+ /*
+	 * If we get here, crawling back on the stack failed to find us
+ * a previous exception frame. Look for a user frame pointer
+ * pointed to by a word 8 bytes off of the top of the stack
+ * if the "u" option was specified.
+ */
+ if (trace_flags & TRACE_USER_FLAG) {
+ db_regs_t *user;
+
+ /* Make sure we are back on the right page */
+ stack -= 4 * FRAME_PLAY;
+ stack = stack & ~(KERNEL_STACK_SIZE - 1); /* point to the bottom */
+ stack += KERNEL_STACK_SIZE - 8;
+
+		if (badwordaddr((vm_offset_t) stack) ||
+		    badwordaddr((vm_offset_t) (stack + 4)))
+ return;
+
+ db_read_bytes((char *) stack, 2 * sizeof(int), (char *) pair);
+ if (pair[0] != pair[1])
+ return;
+
+ /* have a hit */
+ user = *((db_regs_t **) stack);
+
+ if (frame_is_sane(user) == 2) {
+ db_printf("---------------- %s [EF : 0x%x] -------------\n",
+ m88k_exception_name(user->vector), user);
+ db_stack_trace_cmd2(user);
+ }
+ }
+}
+/*
+ * stack trace - needs a pointer to a m88k saved state.
+ *
+ * If the F modifier is given, the stack pointer of each call frame is
+ * printed.
+ */
+void
+db_stack_trace_cmd(
+ db_regs_t * addr,
+ int have_addr,
+ db_expr_t count,
+ char *modif)
+{
+ enum {
+ Default, Stack, Proc, Frame
+ } style = Default;
+ db_regs_t frame; /* a m88100_saved_state */
+ db_regs_t *regs;
+ union {
+ db_regs_t *frame;
+ struct proc *proc;
+ unsigned num;
+ } arg;
+ arg.frame = addr;
+
+ trace_flags = 0; /* flags will be set via modifers */
+
+ while (modif && *modif) {
+ switch (*modif++) {
+ case 'd':
+#ifdef TRACE_DEBUG
+ trace_flags |= TRACE_DEBUG_FLAG;
+#else
+			db_printf("<debug trace not compiled in, ignoring>\n");
+#endif
+ break;
+
+ case 's':
+ style = Stack;
+ break;
+ case 'f':
+ style = Frame;
+ break;
+ case 'p':
+ trace_flags |= TRACE_SHOWCALLPRESERVED_FLAG;
+ break;
+ case 'a':
+ trace_flags |= TRACE_SHOWADDRESS_FLAG;
+ break;
+ case 'F':
+ trace_flags |= TRACE_SHOWFRAME_FLAG;
+ break;
+ case 'u':
+ trace_flags |= TRACE_USER_FLAG;
+ break;
+ default:
+ db_printf("unknown trace modifier [%c]\n", modif[-1]);
+ /* FALLTHROUGH */
+ case 'h':
+ db_printf("usage: trace/[MODIFIER] [ARG]\n");
+ db_printf(" u = include user trace\n");
+ db_printf(" F = print stack frames\n");
+ db_printf(" a = show return addresses\n");
+ db_printf(" p = show call-preserved registers\n");
+ db_printf(" s = ARG is a stack pointer\n");
+ db_printf(" f = ARG is a frame pointer\n");
+#ifdef TRACE_DEBUG
+ db_printf(" d = trace-debugging output\n");
+#endif
+ return;
+ }
+ }
+
+ if (!have_addr && style != Default) {
+ db_printf("expecting argument with /s or /f\n");
+ return;
+ }
+ if (have_addr && style == Default)
+ style = Proc;
+
+ switch (style) {
+ case Default:
+ regs = DDB_REGS;
+ break;
+
+ case Frame:
+ regs = arg.frame;
+ break;
+
+ case Stack:
+ {
+ unsigned val1, val2, sxip;
+ unsigned ptr;
+ bzero((void *) &frame, sizeof(frame));
+#define REASONABLE_FRAME_DISTANCE 2048
+
+ /*
+ * We've got to find the top of a stack frame so we can get both
+			 * a PC and a real SP.
+ */
+ for (ptr = arg.num; /**/ ; ptr += 4) {
+ /* Read a word from the named stack */
+ if (db_trace_get_val(ptr, &val1) == 0) {
+ db_printf("can't read from %x, aborting.\n", ptr);
+ return;
+ }
+ /*
+ * See if it's a frame pointer.... if so it will be larger than
+ * the address it was taken from (i.e. point back up the stack)
+ * and we'll be able to read where it points.
+ */
+ if (val1 <= ptr ||
+ (val1 & 3) ||
+ val1 > (ptr + REASONABLE_FRAME_DISTANCE))
+ continue;
+
+ /* peek at the next word to see if it could be
+ * a return address */
+			if (db_trace_get_val(ptr + 4, &sxip) == 0) {
+ db_printf("can't read from %x, aborting.\n", ptr);
+ return;
+ }
+ if (sxip == 0 || !db_trace_get_val(sxip, &val2))
+ continue;
+
+ if (db_trace_get_val(val1, &val2) == 0) {
+ db_printf("can't read from %x, aborting.\n", val1);
+ continue;
+ }
+ /*
+ * The value we've just read will be either another frame pointer,
+ * or the start of another exception frame.
+ */
+ if (
+#ifdef JEFF_DEBUG
+ val2 == 0
+#else
+ val2 == 0x12345678
+#endif
+ && db_trace_get_val(val1 - 4, &val2) && val2 == val1
+ && db_trace_get_val(val1 - 8, &val2) && val2 == val1) {
+ /* we've found a frame, so the stack
+ * must have been good */
+ db_printf("%x looks like a frame, accepting %x\n", val1, ptr);
+ break;
+ }
+ if (val2 > val1 && (val2 & 3) == 0) {
+ /* well, looks close enough to be
+ * another frame pointer */
+ db_printf("*%x = %x looks like a stack frame pointer, accepting %x\n", val1, val2, ptr);
+ break;
+ }
+ }
+
+ frame.r[31] = ptr;
+			frame.epsr = 0x800003f0U;
+ frame.sxip = sxip | 2;
+ frame.snip = frame.sxip + 4;
+ frame.sfip = frame.snip + 4;
+ db_printf("[r31=%x, sxip=%x]\n", frame.r[31], frame.sxip);
+ regs = &frame;
+ }
+ }
+
+ db_stack_trace_cmd2(regs);
+}
diff --git a/sys/arch/mvme88k/dev/bugtty.c b/sys/arch/mvme88k/dev/bugtty.c
new file mode 100644
index 00000000000..48f668a3102
--- /dev/null
+++ b/sys/arch/mvme88k/dev/bugtty.c
@@ -0,0 +1,490 @@
+/* $NetBSD$ */
+
+/*
+ * Copyright (c) 1995 Dale Rahn.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Dale Rahn.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ioctl.h>
+#include <sys/device.h>
+#include <sys/tty.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/queue.h>
+#include <dev/cons.h>
+
+#include <machine/autoconf.h>
+#include <machine/cpu.h>
+
+#include "bugtty.h"
+
+int bugttymatch __P((struct device *parent, void *self, void *aux));
+void bugttyattach __P((struct device *parent, struct device *self, void *aux));
+
+struct cfdriver bugttycd = {
+ NULL, "bugtty", bugttymatch, bugttyattach,
+ DV_TTY, sizeof(struct device)
+};
+
+/* prototypes */
+int bugttycnprobe __P((struct consdev *cp));
+int bugttycninit __P((struct consdev *cp));
+int bugttycngetc __P((dev_t dev));
+int bugttycnputc __P((dev_t dev, char c));
+
+int bugttyopen __P((dev_t dev, int flag, int mode, struct proc *p));
+int bugttyclose __P((dev_t dev, int flag, int mode, struct proc *p));
+int bugttyread __P((dev_t dev, struct uio *uio, int flag));
+int bugttywrite __P((dev_t dev, struct uio *uio, int flag));
+int bugttyioctl __P((dev_t dev, int cmd, caddr_t data, int flag, struct proc *p));
+int bugttystop __P((struct tty *tp, int flag));
+
+#define DIALOUT(x) ((x) & 0x80)
+#define SWFLAGS(dev) (bugttyswflags | (DIALOUT(dev) ? TIOCFLAG_SOFTCAR : 0))
+
+#define BUGBUF 80
+char bugtty_ibuffer[BUGBUF+1];
+volatile char *pinchar = bugtty_ibuffer;
+char bug_obuffer[BUGBUF+1];
+
+#define bugtty_tty bugttytty
+struct tty *bugtty_tty[NBUGTTY];
+int needprom = 1;
+
+int
+bugttymatch(parent, self, aux)
+ struct device *parent;
+ void *self;
+ void *aux;
+{
+ extern int needprom;
+
+ if (needprom == 0)
+ return (0);
+ return (1);
+}
+
+void
+bugttyattach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ printf("\n");
+}
+
+#define BUGTTYUNIT(x) ((x) & (0x7f))
+void bugttyoutput __P((struct tty *tp));
+
+int bugttydefaultrate = TTYDEF_SPEED;
+int bugttyswflags;
+
+int
+bugttymctl(dev, bits, how)
+ dev_t dev;
+ int bits, how;
+{
+ static int settings = TIOCM_DTR | TIOCM_RTS |
+ TIOCM_CTS | TIOCM_CD | TIOCM_DSR;
+ int s;
+
+ /*printf("mctl: dev %x, bits %x, how %x,",dev, bits, how);*/
+
+ /* settings are currently ignored */
+ s = spltty();
+ switch (how) {
+ case DMSET:
+ break;
+ case DMBIC:
+ break;
+ case DMBIS:
+ break;
+ case DMGET:
+ break;
+ }
+ (void)splx(s);
+
+ bits = 0;
+ /* proper defaults? */
+ bits |= TIOCM_DTR;
+ bits |= TIOCM_RTS;
+ bits |= TIOCM_CTS;
+ bits |= TIOCM_CD;
+ /* bits |= TIOCM_RI; */
+ bits |= TIOCM_DSR;
+
+ /* printf("retbits %x\n", bits); */
+ return (bits);
+}
+
+int
+bugttyopen(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+ int s, unit = BUGTTYUNIT(dev);
+ struct tty *tp;
+ extern int needprom;
+
+ if (needprom == 0)
+ return (ENODEV);
+
+ s = spltty();
+ if (bugtty_tty[unit]) {
+ tp = bugtty_tty[unit];
+ } else {
+ tp = bugtty_tty[unit] = ttymalloc();
+ }
+ tp->t_oproc = bugttyoutput;
+ tp->t_param = NULL;
+ tp->t_dev = dev;
+
+ if ((tp->t_state & TS_ISOPEN) == 0) {
+ tp->t_state |= TS_WOPEN;
+ ttychars(tp);
+ if (tp->t_ispeed == 0) {
+ /*
+ * only when cleared do we reset to defaults.
+ */
+ tp->t_iflag = TTYDEF_IFLAG;
+ tp->t_oflag = TTYDEF_OFLAG;
+ tp->t_cflag = TTYDEF_CFLAG;
+ tp->t_lflag = TTYDEF_LFLAG;
+ tp->t_ispeed = tp->t_ospeed = bugttydefaultrate;
+ }
+ /* bugtty does not have carrier */
+ tp->t_cflag |= CLOCAL;
+ /*
+ * do these all the time
+ */
+ if (bugttyswflags & TIOCFLAG_CLOCAL)
+ tp->t_cflag |= CLOCAL;
+ if (bugttyswflags & TIOCFLAG_CRTSCTS)
+ tp->t_cflag |= CRTSCTS;
+ if (bugttyswflags & TIOCFLAG_MDMBUF)
+ tp->t_cflag |= MDMBUF;
+ bugttyparam(tp, &tp->t_termios);
+ ttsetwater(tp);
+
+ (void)bugttymctl(dev, TIOCM_DTR | TIOCM_RTS, DMSET);
+ /*
+ if ((SWFLAGS(dev) & TIOCFLAG_SOFTCAR) ||
+ (bugttymctl(dev, 0, DMGET) & TIOCM_CD))
+ tp->t_state |= TS_CARR_ON;
+ else
+ tp->t_state &= ~TS_CARR_ON;
+ */
+ tp->t_state |= TS_CARR_ON;
+ } else if (tp->t_state & TS_XCLUDE && p->p_ucred->cr_uid != 0) {
+ splx(s);
+ return (EBUSY);
+ }
+
+ /*
+ * if NONBLOCK requested, ignore carrier
+ */
+/*
+ if (flag & O_NONBLOCK)
+ goto done;
+*/
+
+ splx(s);
+ /*
+ * Reset the tty pointer, as there could have been a dialout
+ * use of the tty with a dialin open waiting.
+ */
+ tp->t_dev = dev;
+ return ((*linesw[tp->t_line].l_open)(dev, tp));
+}
+
+int
+bugttyparam()
+{
+ return (0);
+}
+
+void
+bugttyoutput(tp)
+ struct tty *tp;
+{
+	int cc, s, unit, cnt;
+
+ /* only supports one unit */
+
+ if ((tp->t_state & TS_ISOPEN) == 0)
+ return;
+
+ s = spltty();
+ cc = tp->t_outq.c_cc;
+ while (cc > 0) {
+ cnt = min(BUGBUF, cc);
+ cnt = q_to_b(&tp->t_outq, bug_obuffer, cnt);
+ bugoutstr(bug_obuffer, &bug_obuffer[cnt]);
+ cc -= cnt;
+ }
+ splx(s);
+}
+
+int
+bugttyclose(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+ int unit = BUGTTYUNIT(dev);
+ struct tty *tp = bugtty_tty[unit];
+
+ (*linesw[tp->t_line].l_close)(tp, flag);
+
+ ttyclose(tp);
+#if 0
+ bugtty_tty[unit] = NULL;
+#endif
+ return (0);
+}
+
+int
+bugttyread(dev, uio, flag)
+ dev_t dev;
+ struct uio *uio;
+ int flag;
+{
+ struct tty *tp;
+
+ if ((tp = bugtty_tty[BUGTTYUNIT(dev)]) == NULL)
+ return (ENXIO);
+ return ((*linesw[tp->t_line].l_read)(tp, uio, flag));
+}
+
+#if 1
+/* only to be called at splclk() */
+bugtty_chkinput()
+{
+ struct tty *tp;
+
+ tp = bugtty_tty[0]; /* Kinda ugly hack */
+ if (tp == NULL )
+ return;
+
+ if (buginstat()) {
+ while (buginstat()) {
+ u_char c = buginchr() & 0xff;
+ (*linesw[tp->t_line].l_rint)(c, tp);
+ }
+ /*
+ wakeup(tp);
+ */
+ }
+}
+#endif
+
+int
+bugttywrite(dev, uio, flag)
+ dev_t dev;
+ struct uio *uio;
+ int flag;
+{
+#if 0
+ /* bypass tty output routines. */
+ int i, cnt, s;
+ int oldoff;
+
+ s = spltty();
+ oldoff = uio->uio_offset;
+ do {
+ uiomove(bug_obuffer, BUGBUF, uio);
+ bugoutstr(bug_obuffer, &bug_obuffer[uio->uio_offset - oldoff]);
+ oldoff = uio->uio_offset;
+ } while (uio->uio_resid != 0);
+ splx(s);
+
+ return (0);
+#else
+ struct tty *tp;
+ if((tp = bugtty_tty[BUGTTYUNIT(dev)]) == NULL)
+ return (ENXIO);
+ return ((*linesw[tp->t_line].l_write)(tp, uio, flag));
+#endif
+}
+
+int
+bugttyioctl(dev, cmd, data, flag, p)
+ dev_t dev;
+ int cmd;
+ caddr_t data;
+ int flag;
+ struct proc *p;
+{
+ int unit = BUGTTYUNIT(dev);
+ struct tty *tp = bugtty_tty[unit];
+ int error;
+
+ if (!tp)
+ return (ENXIO);
+
+ error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
+ if (error >= 0)
+ return (error);
+
+ error = ttioctl(tp, cmd, data, flag, p);
+ if (error >= 0)
+ return (error);
+
+ switch (cmd) {
+ case TIOCSBRK:
+ /* */
+ break;
+
+ case TIOCCBRK:
+ /* */
+ break;
+
+ case TIOCSDTR:
+ (void) bugttymctl(dev, TIOCM_DTR | TIOCM_RTS, DMBIS);
+ break;
+
+ case TIOCCDTR:
+ (void) bugttymctl(dev, TIOCM_DTR | TIOCM_RTS, DMBIC);
+ break;
+
+ case TIOCMSET:
+ (void) bugttymctl(dev, *(int *) data, DMSET);
+ break;
+
+ case TIOCMBIS:
+ (void) bugttymctl(dev, *(int *) data, DMBIS);
+ break;
+
+ case TIOCMBIC:
+ (void) bugttymctl(dev, *(int *) data, DMBIC);
+ break;
+
+ case TIOCMGET:
+ *(int *)data = bugttymctl(dev, 0, DMGET);
+ break;
+ case TIOCGFLAGS:
+ *(int *)data = SWFLAGS(dev);
+ break;
+ case TIOCSFLAGS:
+ error = suser(p->p_ucred, &p->p_acflag);
+ if (error != 0)
+ return (EPERM);
+
+ bugttyswflags = *(int *)data;
+ bugttyswflags &= /* only allow valid flags */
+ (TIOCFLAG_SOFTCAR | TIOCFLAG_CLOCAL | TIOCFLAG_CRTSCTS);
+ break;
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
+
+int
+bugttystop(tp, flag)
+ struct tty *tp;
+ int flag;
+{
+ int s;
+
+ s = spltty();
+ if (tp->t_state & TS_BUSY) {
+ if ((tp->t_state & TS_TTSTOP) == 0)
+ tp->t_state |= TS_FLUSH;
+ }
+ splx(s);
+ return (0);
+}
+
+/*
+ * bugtty is the last possible choice for a console device.
+ */
+int
+bugttycnprobe(cp)
+ struct consdev *cp;
+{
+ int maj;
+ extern int needprom;
+
+ if (needprom == 0) {
+ cp->cn_pri = CN_DEAD;
+ return (0);
+ }
+
+#if 0
+ switch (cputyp) {
+ case CPU_147:
+ case CPU_162:
+ cp->cn_pri = CN_NORMAL;
+ return (0);
+ default:
+ break;
+ }
+#else
+ cp->cn_pri = CN_NORMAL;
+ return (0);
+#endif /* 0 */
+
+ /* locate the major number */
+ for (maj = 0; maj < nchrdev; maj++)
+ if (cdevsw[maj].d_open == bugttyopen)
+ break;
+
+ cp->cn_dev = makedev(maj, 0);
+ cp->cn_pri = CN_NORMAL;
+
+ return (1);
+}
+
+int
+bugttycninit(cp)
+ struct consdev *cp;
+{
+}
+
+int
+bugttycngetc(dev)
+ dev_t dev;
+{
+ return (buginchr());
+}
+
+int
+bugttycnputc(dev, c)
+ dev_t dev;
+ char c;
+{
+ if (c == '\n')
+ bugoutchr('\r');
+ bugoutchr(c);
+}
diff --git a/sys/arch/mvme88k/dev/clock.c b/sys/arch/mvme88k/dev/clock.c
new file mode 100644
index 00000000000..77b458f2526
--- /dev/null
+++ b/sys/arch/mvme88k/dev/clock.c
@@ -0,0 +1,109 @@
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <machine/board.h>
+#include <machine/bug.h>
+#include <machine/pcctworeg.h>
+
+extern u_int *pcc_io_base;
+extern const u_int timer_reload;
+void setstatclockrate (int hzrate)
+{
+}
+
+resettodr()
+{
+}
+
+int
+hexdectodec(unsigned char n)
+{
+
+ return(((n>>4)&0x0F)*10 + (n&0x0F));
+}
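+
+/*
+ * Example (illustrative): for a BCD-style value such as 0x59 this
+ * yields (5 * 10) + 9 == 59.
+ */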
+
+#define STARTOFTIME 1970
+#define FEBRUARY 2
+#define leapyear(year)		((((year) % 4 == 0) && ((year) % 100 != 0)) || ((year) % 400 == 0))
+#define days_in_year(year) (leapyear((year)) ? 366 : 365)
+#define days_in_month(a) (month_days[(a) - 1])
+
+static int month_days[12] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+};
+
+
+inittodr(time_t base)
+{
+ struct bugrtc rtc;
+ u_long sec, min, hour, day, month, year;
+ u_long i, tmp, timebuf;
+
+ /* ignore suggested time, use realtime clock via bug */
+ bugrtcrd(&rtc);
+ sec = hexdectodec(rtc.s);
+ min = hexdectodec(rtc.m);
+ hour = hexdectodec(rtc.H);
+ day = hexdectodec(rtc.D);
+ month = hexdectodec(rtc.M);
+ year = hexdectodec(rtc.Y) + 1900;
+
+ tmp = 0;
+ for (i = STARTOFTIME; i < year; i++) {
+ tmp += days_in_year(i);
+ }
+ for (i = 1; i < month; i++) {
+ tmp += days_in_month(i);
+ }
+ if (leapyear(year) && month > FEBRUARY) {
+ tmp++;
+ }
+ printf("date yy mm dd hh mm.ss:%02d %02d %02d %02d %02d.%02d:",
+ year,month,day,hour,min, sec);
+ tmp += (day -1);
+ timebuf = (((tmp * 24 + hour) * 60 + min) * 60 + sec);
+ printf(" epochsec %d\n",timebuf);
+ time.tv_sec = timebuf;
+ time.tv_usec = 0;
+}
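+
+/*
+ * Sanity check of the conversion above (illustrative, worked by hand):
+ * for 1995-10-18 00:00:00 the year loop adds 25 * 365 + 6 leap days
+ * = 9131 days, the month loop adds 273, and (day - 1) adds 17, giving
+ * 9421 days, i.e. 9421 * 86400 = 813974400 seconds since 1970.
+ */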
+
+clkread()
+{
+}
+
+cpu_initclocks()
+{
+#if 0
+ u_int *io_base;
+ io_base = 0xfffe1000; /* should really be return of virtmem alloc */
+ /*
+ io_base = pcc_io_base;
+ */
+ /* timer 2 setup */
+ PCC_TIMER2_PRE(io_base) = timer_reload;
+ PCC_TIMER2_CTR(io_base) = 0x7;
+ PCC_TIMER2_ICR(io_base) = 0x8e;
+#endif
+}
+
+/*
+ * Clock interrupts.
+ */
+int
+clockintr(cap)
+ void *cap;
+{
+#if 0
+ volatile register unsigned char icr;
+ /* clear clock interrupt */
+ asm ("ld.b %0,%1" : "=r" (icr) : "" (TIMER2ICR));
+ icr |= ICLR;
+ asm ("st.b %0,%1" : "=r" (icr) : "" (TIMER2ICR));
+
+ /* read the limit register to clear the interrupt */
+#endif /* 0 */
+ hardclock((struct clockframe *)cap);
+
+ return (1);
+}
diff --git a/sys/arch/mvme88k/dev/m88k/bugio.c b/sys/arch/mvme88k/dev/m88k/bugio.c
new file mode 100644
index 00000000000..932614a29d5
--- /dev/null
+++ b/sys/arch/mvme88k/dev/m88k/bugio.c
@@ -0,0 +1,108 @@
+#include <machine/bugio.h>
+
+#define INCHR "0x0000"
+#define INSTAT "0x0001"
+#define INLN "0x0002"
+#define READSTR "0x0003"
+#define READLN "0x0004"
+#define DSKRD "0x0010"
+#define DSKWR "0x0011"
+#define DSKCFIG "0x0012"
+#define OUTCHR "0x0020"
+#define OUTSTR "0x0021"
+#define PCRLF "0x0026"
+#define TMDISP "0x0042"
+#define DELAY "0x0043"
+#define RTC_DSP "0x0052"
+#define RTC_RD "0x0053"
+#define RETURN "0x0063"
+#define BRD_ID "0x0070"
+#define BUGTRAP "0x01F0"
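+
+/*
+ * Calling convention used throughout this file: the routine code above is
+ * loaded into r9 and "tb0 0,r0,0x1F0" traps into the BUG; any result comes
+ * back in r2 (read here either via a register variable or an asm copy).
+ */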
+
+char
+buginchr(void)
+{
+ register int cc asm("r2");
+ asm("or r9,r0," INCHR);
+ asm("tb0 0,r0,0x1F0");
+ /*asm("or %0,r0,r2" : "=r" (cc) : );*/
+ return ((char)cc & 0xFF);
+}
+
+/* return 1 if not empty else 0 */
+
+buginstat(void)
+{
+ int ret;
+ asm("or r9,r0," INSTAT);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return (ret & 0x40 ? 1 : 0);
+}
+
+bugoutchr(unsigned char c)
+{
+ unsigned char cc;
+
+ if ((cc = c) == '\n') {
+ bugpcrlf();
+ return;
+ }
+ asm("or r2,r0,%0" : : "r" (cc));
+ asm("or r9,r0," OUTCHR);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugoutstr(char *s, char *se)
+{
+ asm("or r9,r0," OUTSTR);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugpcrlf(void)
+{
+ asm("or r9,r0," PCRLF);
+ asm("tb0 0,r0,0x1F0");
+}
+/* return 0 on success */
+
+bugdskrd(struct bugdisk_io *arg)
+{
+ int ret;
+ asm("or r9,r0, " DSKRD);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return ((ret&0x4) == 0x4 ? 1 : 0);
+}
+
+/* return 0 on success */
+
+bugdskwr(struct bugdisk_io *arg)
+{
+ int ret;
+ asm("or r9,r0, " DSKWR);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return ((ret&0x4) == 0x4 ? 1 : 0);
+}
+
+bugrtcrd(struct bugrtc *rtc)
+{
+ asm("or r9,r0, " RTC_RD);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugreturn(void)
+{
+ asm("or r9,r0, " RETURN);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugbrdid(struct bugbrdid *id)
+{
+ struct bugbrdid *ptr;
+ asm("or r9,r0, " BRD_ID);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ptr) : );
+ bcopy(ptr, id, sizeof(struct bugbrdid));
+}
diff --git a/sys/arch/mvme88k/dev/mb.c b/sys/arch/mvme88k/dev/mb.c
new file mode 100644
index 00000000000..2cf934de61f
--- /dev/null
+++ b/sys/arch/mvme88k/dev/mb.c
@@ -0,0 +1,87 @@
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <machine/cpu.h>
+
+void mbattach __P((struct device *, struct device *, void *));
+int mbprint __P((void *, char *));
+int mbmatch __P((struct device *, struct cfdata *, void *));
+int submatch( struct device *parent, struct cfdata *self, void *aux);
+
+/*
+ * mainbus driver
+ */
+struct cfdriver mainbuscd = {
+ NULL, "mainbus", mbmatch, mbattach,
+ DV_DULL, sizeof(struct device), NULL, 0
+};
+
+int
+mbmatch(pdp, cfp, auxp)
+ struct device *pdp;
+ struct cfdata *cfp;
+ void *auxp;
+{
+ if (cfp->cf_unit > 0)
+ return(0);
+ /*
+ * We are always here
+ */
+ return(1);
+}
+/*
+ * "find" all the things that should be there.
+ */
+void
+mbattach(pdp, dp, auxp)
+ struct device *pdp, *dp;
+ void *auxp;
+{
+ struct cfdata *cf;
+ extern int machineid;
+
+ /* nothing to do for this bus */
+ printf (" machine type %x\n", machineid);
+
+ if ((cf = config_search(submatch, dp, auxp)) != NULL) {
+ return;
+ }
+
+}
+
+mbprint(auxp, pnp)
+ void *auxp;
+ char *pnp;
+{
+ if (pnp)
+ printf("%s at %s", (char *)auxp, pnp);
+ return(UNCONF);
+}
+
+int
+submatch(parent, self, aux)
+ struct device *parent;
+ struct cfdata *self;
+ void *aux;
+{
+ if (!(*self->cf_driver->cd_match)(parent, self, NULL)) {
+ /*
+ * STOLEN - BE CAREFUL
+ * If we don't do this, isa_configure() will repeatedly try to
+ * probe devices that weren't found. But we need to be careful
+ * to do it only for the ISA bus, or we would cause things like
+ * `com0 at ast? slave ?' to not probe on the second ast.
+ */
+ if (!parent)
+ self->cf_fstate = FSTATE_FOUND;
+
+ return 0;
+ }
+
+ config_attach(parent, self, NULL, mbprint);
+
+ return 1;
+}
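The mainbus code above finds its children by letting config_search() call
submatch() for every potential child in the config table.  Purely as an
illustration (the "foo" names are hypothetical, not part of this import), a
child driver plugs in with a cfdriver entry and a match/attach pair of
roughly this shape:

int  foomatch __P((struct device *, struct cfdata *, void *));
void fooattach __P((struct device *, struct device *, void *));

struct cfdriver foocd = {
	NULL, "foo", foomatch, fooattach,
	DV_DULL, sizeof(struct device), NULL, 0
};

int
foomatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	/* probe the hardware here; nonzero means "found" */
	return (1);
}

void
fooattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	printf("\n");
}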
diff --git a/sys/arch/mvme88k/dev/pcc2.c b/sys/arch/mvme88k/dev/pcc2.c
new file mode 100644
index 00000000000..c78cbb24a81
--- /dev/null
+++ b/sys/arch/mvme88k/dev/pcc2.c
@@ -0,0 +1,253 @@
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <machine/cpu.h>
+#include <machine/pcc2.h>
+
+int m1x7pccprobe(struct device *parent, struct cfdata *self, void *aux);
+void m1x7pccattach(struct device *parent, struct device *self, void *aux);
+
+int abort_handler();
+extern void abort_intrv();
+extern void pcc_intrv();
+extern int intrh_debug;
+extern int machineid;
+extern void badtrap();
+extern int submatch( struct device *parent, struct cfdata *self, void *aux);
+/* static */ u_int *pcc_io_base;
+static u_int *pcc_vector_base;
+
+static void abort_setup();
+void timer2_intr();
+
+struct pcctwosoftc {
+ struct device sc_dev;
+ caddr_t sc_vaddr;
+ caddr_t sc_paddr;
+ struct pcctworeg *sc_pcc2;
+};
+
+void pcctwoattach __P((struct device *, struct device *, void *));
+int pcctwomatch __P((struct device *, struct cfdata *, void *));
+int pcctwoabort __P((struct frame *));
+
+struct cfdriver pcctwocd = {
+ NULL, "pcctwo", pcctwomatch, pcctwoattach,
+ DV_DULL, sizeof(struct pcctwosoftc), 0
+};
+
+struct pcctworeg *sys_pcc2 = NULL;
+
+int
+pcctwomatch(struct device *parent, struct cfdata *self, void *aux)
+{
+#if defined(__m88k__)
+ if (machineid == 0x187) {
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+void
+pcctwoattach(struct device *parent, struct device *self, void *aux)
+{
+ struct cfdata *cf;
+ volatile char *ibvr; /* Interrupt Base Vector Register */
+ u_int ibv; /* Interrupt Base Vector, offset from vbr */
+	u_int *iv;	/* interrupt vector */
+ int i;
+ u_int vector_base;
+
+ /* attach memory mapped io space */
+ /* map 0xfffe1000 - 0xfffe102f, 0xfffe2800 */
+	/* pcc_io_base = mmio(0xfffe1000, 1800, PG_RW|PG_CI); */
+	pcc_io_base = (u_int *)0xfffe1000; /* should really be return of virtmem alloc */
+ /* set PCC vector base */
+ ibv = PCC_IBVR(pcc_io_base) & 0xf0;
+ ibvr = &PCC_IBVR(pcc_io_base);
+ printf("pcc:ibvr %x *ibvr %x ibv %x\n",ibvr,*ibvr, ibv);
+ pcc_vector_base = (u_int *)ibv;
+ asm volatile ("movec vbr,%0": "=d" (vector_base));
+ printf("pcc:vector_base %x\n",vector_base);
+	/* register "standard" interrupt handlers */
+
+ abort_setup();
+ iv = (u_int *)(vector_base + (ibv * 4));
+	printf("iv %x\n", iv);
+ for (i = 0; i <= SOFT2_VECTOR; i++) {
+ iv[i] = (u_int)pcc_intrv;
+ }
+ /*
+ timer2_setup();
+ */
+ iv = (u_int *)(vector_base + (ibv + TICK2_VECTOR) * 4);
+ *iv = (u_int)&pcc_intrv;
+
+#ifdef DEBUG
+ if (intrh_debug)
+ pr_intrh();
+#endif
+ if ((cf = config_search(submatch, self, aux)) != NULL) {
+ return;
+ }
+ return ;
+}
+asm (" .text");
+asm (" .global _pcc_intrv");
+asm ("_pcc_intrv:");
+asm (" link a6,#0");
+asm (" movml a0/a1/d0/d1,sp@-");
+asm (" movel a6,a0");
+asm (" addql #4,a0");
+asm (" movel a0,sp@-");
+asm (" jbsr _pcc_handler");
+asm (" addql #4,sp");
+asm (" movml sp@+,a0/a1/d0/d1");
+asm (" unlk a6");
+asm (" jra rei");
+
+asm (" .global _abort_intrv");
+asm ("_abort_intrv:");
+asm (" movml a0/a1/d0/d1,sp@-");
+asm (" jbsr _abort_handler");
+asm (" movml sp@+,a0/a1/d0/d1");
+asm (" jra rei");
+/* asm (" .previous"); */
+
+void *m147le_arg;
+void
+pcc_handler(struct exception_frame *except)
+{
+ u_int vector;
+ int handled = 0;
+
+#if 0
+ printf("except %x\n",except);
+ printf("sr %x\n",except->sr);
+ printf("pc %x\n",except->pc);
+ printf("type %x\n",except->type);
+#endif
+ vector = except->vo;
+/* printf("vector %x\n",vector); */
+ vector = (vector/4 - (u_int)pcc_vector_base);
+/* printf("vector %x\n",vector); */
+
+ switch (vector) {
+ case AC_FAIL_VECTOR:
+ printf("ac_fail vector\n");
+ break;
+ case BERR_VECTOR:
+ printf("berr vector\n");
+ printf("pcc_handler:invalid vector %x\n",vector);
+ break;
+ case ABORT_VECTOR:
+ printf("abort vector\n");
+ abort_handler();
+ handled = 1;
+ break;
+ case SERIAL_VECTOR:
+ printf("serial vector\n");
+ PCC_SERIAL_ICR(0xfffe1000) = 0;
+ break;
+ case LANCE_VECTOR:
+ leintr(m147le_arg);
+ handled = 1;
+ break;
+ case SCSIPORT_VECTOR:
+ printf("scsiport vector\n");
+ m147sc_scintr();
+ break;
+ case SCSIDMA_VECTOR:
+ printf("scsidma vector\n");
+ m147sc_dmaintr();
+ break;
+ case PRINTER_VECTOR:
+ printf("printer vector\n");
+ break;
+ case TICK1_VECTOR:
+ printf("tick1 vector\n");
+ printf("pcc_handler:invalid vector %x\n",vector);
+ break;
+ case TICK2_VECTOR:
+ timer2_intr(except);
+ handled = 1;
+ break;
+ case SOFT1_VECTOR:
+ printf("soft1 vector\n");
+ break;
+ case SOFT2_VECTOR:
+ printf("soft2 vector\n");
+ break;
+ default:
+ printf("pcc_handler:invalid vector %x\n",vector);
+ }
+
+ if (handled == 0) {
+ printf("except %x\n",except);
+ printf("sr %x\n",except->sr);
+ printf("pc %x\n",except->pc);
+ printf("type %x\n",except->type);
+ }
+}
+
+
+int
+abort_handler()
+{
+ printf("aicr = 0x%x\n",PCC_ABRT_ICR(pcc_io_base));
+ PCC_ABRT_ICR(pcc_io_base) = 0x88;
+ printf("aicr = 0x%x\n",PCC_ABRT_ICR(pcc_io_base));
+ Debugger();
+ return 0;
+}
+static void abort_setup()
+{
+ printf("PCC_ABRT_ICR %x\n",&PCC_ABRT_ICR(pcc_io_base));
+ printf("aicr = 0x%x\n",PCC_ABRT_ICR(pcc_io_base));
+ PCC_ABRT_ICR(pcc_io_base) = 0x88;
+ printf("aicr = 0x%x\n",PCC_ABRT_ICR(pcc_io_base));
+}
+
+/* timer2 (clock) driver */
+
+/* const u_int timer_reload = 0; .4096 sec ? */
+/* const u_int timer_reload = 62870; 1/60 sec ? */
+const u_int timer_reload = 63936; /* 1/100 sec ? */
+
+#if 0
+void
+timer2_setup()
+{
+ u_int *io_base;
+ pcc_io_base = 0xfffe1000; /* should really be return of virtmem alloc */
+ io_base = pcc_io_base;
+ printf("pcc_io_base %x io_base %x\n",pcc_io_base, io_base);
+ printf("PCC_TIMER2_PRE %x\n",&PCC_TIMER2_PRE(io_base));
+ printf("PCC_TIMER2_CTR %x\n",&PCC_TIMER2_CTR(io_base));
+ printf("PCC_TIMER2_ICR %x\n",&PCC_TIMER2_ICR(io_base));
+ PCC_TIMER2_PRE(io_base) = timer_reload;
+ PCC_TIMER2_CTR(io_base) = 0x7;
+ PCC_TIMER2_ICR(io_base) = 0x8e;
+}
+#endif
+void
+timer2_intr(struct exception_frame *except)
+{
+ u_int *io_base;
+ pcc_io_base = 0xfffe1000; /* should really be return of virtmem alloc */
+ io_base = pcc_io_base;
+
+	if (PCC_TIMER2_ICR(io_base) & 0x80) {
+ PCC_TIMER2_ICR(io_base) = 0x8e;
+ /* hardclock(); */
+ hardclock(except);
+
+ } else {
+ printf("timer2_intr: vector called without interrupt\n");
+ }
+ /* REALLY UGLY HACK */
+ bugtty_chkinput();
+
+ return;
+}
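The timer_reload values above are consistent with the PCC tick timer counting
up from the preload value to 65536 at 6.25 microseconds per count; that
assumption reproduces both commented figures (62870 for 1/60 s, 63936 for
1/100 s).  A sketch of the relation (an assumption, not taken from this
import):

/* assumed: 6.25us per count, 16-bit counter overflowing at 65536 */
unsigned int
pcc_timer_reload(unsigned int hz)
{
	unsigned int counts = (1000000 / hz) * 4 / 25;	/* usec / 6.25 */

	return (65536 - counts);
}

/* pcc_timer_reload(100) == 63936, pcc_timer_reload(60) == 62870 */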
diff --git a/sys/arch/mvme88k/dev/pcctwo.c b/sys/arch/mvme88k/dev/pcctwo.c
new file mode 100644
index 00000000000..be4fc02be6a
--- /dev/null
+++ b/sys/arch/mvme88k/dev/pcctwo.c
@@ -0,0 +1,252 @@
+/* $NetBSD$ */
+
+/*
+ * Copyright (c) 1995 Theo de Raadt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Theo de Raadt
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * VME16x PCC2 chip
+ */
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/ioctl.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/tty.h>
+#include <sys/uio.h>
+#include <sys/callout.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/syslog.h>
+#include <sys/fcntl.h>
+#include <sys/device.h>
+#include <machine/cpu.h>
+#include <machine/autoconf.h>
+#include <dev/cons.h>
+#include <mvme68k/mvme68k/isr.h>
+
+#include <mvme68k/dev/pcctworeg.h>
+
+struct pcctwosoftc {
+ struct device sc_dev;
+ caddr_t sc_vaddr;
+ caddr_t sc_paddr;
+ struct pcctworeg *sc_pcc2;
+ struct intrhand sc_nmiih;
+};
+
+void pcctwoattach __P((struct device *, struct device *, void *));
+int pcctwomatch __P((struct device *, void *, void *));
+int pcctwoabort __P((struct frame *));
+
+struct cfdriver pcctwocd = {
+ NULL, "pcctwo", pcctwomatch, pcctwoattach,
+ DV_DULL, sizeof(struct pcctwosoftc), 0
+};
+
+struct pcctworeg *sys_pcc2 = NULL;
+
+struct intrhand *pcctwointrs[PCC2_NVEC];
+
+int
+pcctwomatch(parent, vcf, args)
+ struct device *parent;
+ void *vcf, *args;
+{
+ struct cfdata *cf = vcf;
+ struct confargs *ca = args;
+ struct pcctworeg *pcc2;
+
+ /* the PCC2 only exists on MVME16x's except the 162, right? */
+ if (cputyp == CPU_162 || cputyp == CPU_147)
+ return (0);
+ pcc2 = (struct pcctworeg *)(IIOV(ca->ca_paddr) + PCC2_PCC2CHIP_OFF);
+ if (badbaddr(pcc2))
+ return (0);
+ if (pcc2->pcc2_chipid != PCC2_CHIPID)
+ return (0);
+ return (1);
+}
+
+int
+pcctwo_print(args, bus)
+ void *args;
+ char *bus;
+{
+ struct confargs *ca = args;
+
+ printf(" offset 0x%x", ca->ca_offset);
+ if (ca->ca_ipl > 0)
+ printf(" ipl %d", ca->ca_ipl);
+ return (UNCONF);
+}
+
+int
+pcctwo_scan(parent, child, args)
+ struct device *parent;
+ void *child, *args;
+{
+ struct cfdata *cf = child;
+ struct pcctwosoftc *sc = (struct pcctwosoftc *)parent;
+ struct confargs *ca = args;
+ struct confargs oca;
+
+ if (parent->dv_cfdata->cf_driver->cd_indirect) {
+ printf(" indirect devices not supported\n");
+ return 0;
+ }
+
+ bzero(&oca, sizeof oca);
+ oca.ca_paddr = sc->sc_paddr + cf->cf_loc[0];
+ if (ISIIOVA(sc->sc_vaddr + cf->cf_loc[0]))
+ oca.ca_vaddr = sc->sc_vaddr + cf->cf_loc[0];
+ else
+ oca.ca_vaddr = (caddr_t)-1;
+ oca.ca_offset = cf->cf_loc[0];
+ oca.ca_ipl = cf->cf_loc[1];
+ oca.ca_bustype = BUS_PCCTWO;
+ oca.ca_master = (void *)sc->sc_pcc2;
+ oca.ca_name = cf->cf_driver->cd_name;
+ if ((*cf->cf_driver->cd_match)(parent, cf, &oca) == 0)
+ return (0);
+ config_attach(parent, cf, &oca, pcctwo_print);
+ return (1);
+}
+
+void
+pcctwoattach(parent, self, args)
+ struct device *parent, *self;
+ void *args;
+{
+ struct confargs *ca = args;
+ struct pcctwosoftc *sc = (struct pcctwosoftc *)self;
+ extern u_long vectab[], pcctwotrap;
+ int i;
+
+ if (sys_pcc2)
+ panic("pcc2 already attached!");
+
+	/*
+	 * since we know we are mapped in the internal i/o (intiobase)
+	 * space, we must adjust our addresses accordingly
+	 */
+ sc->sc_paddr = ca->ca_paddr;
+ sc->sc_vaddr = (caddr_t)IIOV(sc->sc_paddr);
+ sc->sc_pcc2 = (struct pcctworeg *)(sc->sc_vaddr + PCC2_PCC2CHIP_OFF);
+ sys_pcc2 = sc->sc_pcc2;
+
+ printf(": rev %d\n", sc->sc_pcc2->pcc2_chiprev);
+
+ /*
+ * make the PCCTWO interrupt range point to the pcc2 trap routine.
+ */
+ for (i = 0; i < PCC2_NVEC; i++) {
+ vectab[PCC2_VECBASE+i] = (u_long)&pcctwotrap;
+ }
+
+ sc->sc_pcc2->pcc2_genctl |= PCC2_GENCTL_IEN; /* global irq enable */
+
+ sys_pcc2->pcc2_gpioirq = PCC2_GPIO_PLTY | PCC2_IRQ_IEN | 0x7;/*lvl7*/
+ sys_pcc2->pcc2_gpio = 0; /* do not turn on CR_O or CR_OE */
+ sc->sc_nmiih.ih_fn = pcctwoabort;
+ sc->sc_nmiih.ih_arg = 0;
+ sc->sc_nmiih.ih_lvl = 7;
+ sc->sc_nmiih.ih_wantframe = 1;
+ /*sc->sc_mc->mc_abort .... enable at ipl 7 */
+ pcctwointr_establish(PCC2V_GPIO, &sc->sc_nmiih);
+
+ sc->sc_pcc2->pcc2_vecbase = PCC2_VECBASE;
+ config_search(pcctwo_scan, self, args);
+}
+
+#ifndef PCCTWOINTR_ASM
+/*
+ * pcctwointr: called from locore with the PC and evec from the trap frame.
+ */
+int
+pcctwointr(pc, evec, frame)
+ int pc;
+ int evec;
+ void *frame;
+{
+ int vec = (evec & 0xfff) >> 2; /* XXX should be m68k macro? */
+ extern u_long intrcnt[]; /* XXX from locore */
+ struct intrhand *ih;
+ int r;
+
+ vec = vec & 0xf;
+ if (vec >= PCC2_NVEC)
+ goto bail;
+
+ cnt.v_intr++;
+ for (ih = pcctwointrs[vec]; ih; ih = ih->ih_next) {
+ if (ih->ih_wantframe)
+ r = (*ih->ih_fn)(frame);
+ else
+ r = (*ih->ih_fn)(ih->ih_arg);
+ if (r > 0)
+			return (r);
+ }
+bail:
+ return (straytrap(pc, evec));
+}
+#endif /* !PCCTWOINTR_ASM */
+
+/*
+ * pcctwointr_establish: establish pcctwo interrupt
+ */
+int
+pcctwointr_establish(vec, ih)
+ int vec;
+ struct intrhand *ih;
+{
+ if (vec >= PCC2_NVEC) {
+ printf("pcctwo: illegal vector: 0x%x\n", vec);
+ panic("pcctwointr_establish");
+ }
+
+ /* XXX should attach at tail */
+ ih->ih_next = pcctwointrs[vec];
+ pcctwointrs[vec] = ih;
+}
+int
+pcctwoabort(frame)
+ struct frame *frame;
+{
+#ifdef REALLY_CARE_ABOUT_DEBOUNCE
+ /* wait for it to debounce */
+ while (sys_pcc2->pcc2_abortirq & PCC2_ABORT_ABS)
+ ;
+#endif
+
+ sys_pcc2->pcc2_gpioirq = sys_pcc2->pcc2_gpioirq | PCC2_IRQ_ICLR;
+
+ nmihand(frame);
+ return (1);
+}
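For reference, this is how a child driver would hook into the dispatch chain
above, following the pattern pcctwoattach() uses for its abort handler; the
"foo" names are hypothetical and the exact struct intrhand layout comes from
isr.h, which is not shown here.

static struct intrhand foo_ih;

void
foo_establish(fn, arg, lvl, vec)
	int (*fn)();
	void *arg;
	int lvl, vec;
{
	foo_ih.ih_fn = fn;
	foo_ih.ih_arg = arg;
	foo_ih.ih_lvl = lvl;
	foo_ih.ih_wantframe = 0;	/* handler gets ih_arg, not the frame */
	pcctwointr_establish(vec, &foo_ih);
}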
diff --git a/sys/arch/mvme88k/include/ansi.h b/sys/arch/mvme88k/include/ansi.h
new file mode 100644
index 00000000000..d73b85a8f99
--- /dev/null
+++ b/sys/arch/mvme88k/include/ansi.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ * $Id: ansi.h,v 1.1 1995/10/18 10:54:21 deraadt Exp $
+ */
+
+#ifndef _ANSI_H_
+#define _ANSI_H_
+
+/*
+ * Types which are fundamental to the implementation and may appear in
+ * more than one standard header are defined here. Standard headers
+ * then use:
+ * #ifdef _BSD_SIZE_T_
+ * typedef _BSD_SIZE_T_ size_t;
+ * #undef _BSD_SIZE_T_
+ * #endif
+ */
+#define _BSD_CLOCK_T_ unsigned long /* clock() */
+#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned int /* sizeof() */
+#define _BSD_SSIZE_T_ int /* byte count or error */
+#define _BSD_TIME_T_ long /* time() */
+#define _BSD_VA_LIST_ char * /* va_list */
+
+/*
+ * Runes (wchar_t) is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you
+ * lose a bit of ANSI conformance, but your programs will still work.
+ *
+ * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t
+ * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains
+ * defined for ctype.h.
+ */
+#define _BSD_WCHAR_T_ int /* wchar_t */
+#define _BSD_RUNE_T_ int /* rune_t */
+
+#endif /* _ANSI_H_ */
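From the consuming header's side, the guard dance described in the ansi.h
comment above looks like this (the pattern is the one quoted in that comment;
the surrounding lines are only illustrative):

#include <machine/ansi.h>

#ifdef	_BSD_SIZE_T_
typedef	_BSD_SIZE_T_	size_t;	/* defined once, then the guard goes away */
#undef	_BSD_SIZE_T_
#endif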
diff --git a/sys/arch/mvme88k/include/asm.h b/sys/arch/mvme88k/include/asm.h
new file mode 100644
index 00000000000..84714410860
--- /dev/null
+++ b/sys/arch/mvme88k/include/asm.h
@@ -0,0 +1,141 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: asm.h,v $
+ * Revision 1.1 1995/10/18 10:54:22 deraadt
+ * Initial revision
+ *
+ * Revision 2.3 93/01/26 18:05:05 danner
+ * Added #ifndef file wrapper.
+ * [93/01/24 jfriedl]
+ *
+ * Revision 2.2 92/08/03 17:46:50 jfriedl
+ * Brought to m88k directory.
+ * [92/07/24 jfriedl]
+ *
+ * Revision 2.1.1.1 92/05/27 15:24:16 danner
+ * Move FLUSH_PIPELINE, REG_OFF definitions here.
+ * [92/05/17 danner]
+ *
+ * Revision 2.3 92/02/18 18:00:24 elf
+ * Typo correction (from Torbjorn Granlund <tege@sics.se>).
+ * [92/02/06 danner]
+ *
+ * moved RTE definition here
+ * [92/02/02 danner]
+ *
+ * Revision 2.2 91/07/09 23:16:20 danner
+ * Initial 3.0 Checkin
+ * [91/06/26 11:57:57 danner]
+ *
+ * Revision 2.2 91/04/05 13:55:26 mbj
+ * Initial code from the Omron 1.10 kernel release corresponding to X130.
+ * The Copyright has been adjusted to correspond to the understanding
+ * between CMU and the Omron Corporation.
+ * [91/04/04 rvb]
+ *
+ * Corrected ENTRY Macro to use NEWLINE instead of \\ Hack
+ * [91/03/07 danner]
+ *
+ */
+
+/*
+ * File: m88k/asm.h
+ *
+ * This header file is intended to hold definitions useful for M88K
+ * assembly routines.
+ *
+ */
+#ifndef __M88K_ASM_H__
+#define __M88K_ASM_H__
+
+#ifndef prepend_underbar
+# ifdef __STDC__
+# define prepend_underbar(NAME) _##NAME
+# else
+# define prepend_underbar(NAME) _/**/NAME
+# endif
+#endif
+
+#define ENTRY(NAME) \
+ align 4 NEWLINE prepend_underbar(NAME): NEWLINE global prepend_underbar(NAME)
+
+#define RTE NOP NEWLINE rte
+
+#define PID cr0
+#define PSR cr1
+#define EPSR cr2
+#define SSBR cr3
+#define SXIP cr4
+#define SNIP cr5
+#define SFIP cr6
+#define VBR cr7
+#define DMT0 cr8
+#define DMD0 cr9
+#define DMA0 cr10
+#define DMT1 cr11
+#define DMD1 cr12
+#define DMA1 cr13
+#define DMT2 cr14
+#define DMD2 cr15
+#define DMA2 cr16
+#define SR0 cr17
+#define SR1 cr18
+#define SR2 cr19
+#define SR3 cr20
+#define FPECR fcr0
+#define FPHS1 fcr1
+#define FPLS1 fcr2
+#define FPHS2 fcr3
+#define FPLS2 fcr4
+#define FPPT fcr5
+#define FPRH fcr6
+#define FPRL fcr7
+#define FPIT fcr8
+#define FPSR fcr62
+#define FPCR fcr63
+
+/*
+ * At various times, there is the need to clear the pipeline (i.e.
+ * synchronize). A "tcnd ne0, r0, foo" will do that (because a trap
+ * instruction always synchronizes, and this particular instruction
+ * will never actually take the trap).
+ */
+#define FLUSH_PIPELINE tcnd ne0, r0, 0
+#define NOP or r0, r0, r0
+
+/* REGister OFFset into the E.F. (exception frame) */
+#define REG_OFF(reg_num) ((reg_num) * 4) /* (num * sizeof(register int)) */
+#define GENREG_OFF(num) (REG_OFF(EF_R0 + (num))) /* GENeral REGister OFFset */
+
+#if !defined(LABEL)
+#define LABEL(name) name: global name NEWLINE
+#define _LABEL(name) name: NEWLINE
+#endif /* LABEL */
+
+#endif /* __M88K_ASM_H__ */
diff --git a/sys/arch/mvme88k/include/asm_macro.h b/sys/arch/mvme88k/include/asm_macro.h
new file mode 100644
index 00000000000..ac51ebefee6
--- /dev/null
+++ b/sys/arch/mvme88k/include/asm_macro.h
@@ -0,0 +1,116 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: asm_macro.h,v $
+ * Revision 1.1 1995/10/18 10:54:22 deraadt
+ * Initial revision
+ *
+ * Revision 2.2 93/01/26 18:07:26 danner
+ * Created.
+ * [93/01/24 jfriedl]
+ *
+ */
+
+#ifndef __M88K_ASM_MACRO_H__
+#define __M88K_ASM_MACRO_H__
+
+/*
+ ** Various compiler macros used for speed and efficiency.
+ ** Anyone can include.
+ */
+
+/*
+ * m88k_psr_type is the type of the Processor Status Register (PSR).
+ */
+typedef unsigned long m88k_psr_type;
+
+/*
+ * disable_interrupts_return_psr()
+ *
+ * The INTERRUPT_DISABLE bit is set in the PSR and the *PREVIOUS*
+ * PSR is returned. Intended to be used with set_psr() [below] as in:
+ *
+ * {
+ * m88k_psr_type psr;
+ * .
+ * .
+ * psr = disable_interrupts_return_psr();
+ * .
+ * SHORT [time-wise] CRITICAL SECTION HERE
+ * .
+ * set_psr(psr);
+ * .
+ * .
+ */
+static inline m88k_psr_type disable_interrupts_return_psr(void)
+{
+ m88k_psr_type temp, oldpsr;
+ asm volatile (
+ "ldcr %0, cr1 \n"
+ "set %1, %0, 1<1> \n"
+ "stcr %1, cr1 \n"
+ "tcnd ne0, r0, 0 " : "=r" (oldpsr), "=r" (temp));
+ return oldpsr;
+}
+#define disable_interrupt() (void)disable_interrupts_return_psr()
+
+/*
+ * Sets the PSR. See comments above.
+ */
+static inline void set_psr(m88k_psr_type psr)
+{
+ asm volatile ("stcr %0, cr1" :: "r" (psr));
+}
+
+/*
+ * Enables interrupts.
+ */
+static inline m88k_psr_type enable_interrupts_return_psr(void)
+{
+ m88k_psr_type temp, oldpsr; /* need a temporary register */
+ asm volatile (
+ "ldcr %0, cr1 \n"
+ "clr %1, %0, 1<1> \n"
+ "stcr %1, cr1 " : "=r" (oldpsr), "=r" (temp));
+ return oldpsr;
+}
+#define enable_interrupt() (void)enable_interrupts_return_psr()
+
+#define db_enable_interrupt enable_interrupt
+#define db_disable_interrupt disable_interrupt
+
+/*
+ * flushes the data pipeline.
+ */
+static inline void flush_pipeline()
+{
+ asm volatile ("tcnd ne0, r0, 0");
+}
+#define db_flush_pipeline flush_pipeline
+
+#endif /* __M88K_ASM_MACRO_H__ */
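A minimal sketch of the usage pattern the disable_interrupts_return_psr() /
set_psr() comment above describes: save the PSR, run a short critical
section, restore it.  The counter here is only an example.

static inline void
bump_counter(volatile int *counter)
{
	m88k_psr_type psr;

	psr = disable_interrupts_return_psr();	/* interrupts now masked */
	*counter += 1;				/* short critical section */
	set_psr(psr);				/* restore previous state */
}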
diff --git a/sys/arch/mvme88k/include/assert.h b/sys/arch/mvme88k/include/assert.h
new file mode 100644
index 00000000000..c9e72557e77
--- /dev/null
+++ b/sys/arch/mvme88k/include/assert.h
@@ -0,0 +1,8 @@
+#define assert(x) \
+({\
+ if (!(x)) {\
+ printf("assertion failure \"%s\" line %d file %s\n", \
+ #x, __LINE__, __FILE__); \
+ panic("assertion"); \
+ } \
+})
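Example use of the kernel assert() above (illustrative only): a failed check
prints the expression, line and file, then panics.

void
set_slot(int *table, int idx, int val)
{
	assert(idx >= 0 && idx < 16);
	table[idx] = val;
}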
diff --git a/sys/arch/mvme88k/include/assym.s b/sys/arch/mvme88k/include/assym.s
new file mode 100644
index 00000000000..1c61062f668
--- /dev/null
+++ b/sys/arch/mvme88k/include/assym.s
@@ -0,0 +1,60 @@
+#ifndef __GENASSYM_INCLUDED
+#define __GENASSYM_INCLUDED 1
+
+#ifdef ASSEMBLER
+#define NEWLINE \\
+#endif
+#define P_FORW 0
+#define P_BACK 4
+#define P_VMSPACE 32
+#define P_ADDR 240
+#define P_PRIORITY 208
+#define P_STAT 44
+#define P_WCHAN 96
+#define SRUN 2
+#define VM_PMAP 132
+#define V_INTR 12
+#define UPAGES 3
+#define PGSHIFT 12
+#define U_PROF 824
+#define U_PROFSCALE 836
+#define PCB_ONFAULT 328
+#define SIZEOF_PCB 332
+#define SYS_exit 1
+#define SYS_execve 59
+#define SYS_sigreturn 103
+#define EF_R0 0
+#define EF_R31 31
+#define EF_FPSR 32
+#define EF_FPCR 33
+#define EF_EPSR 34
+#define EF_SXIP 35
+#define EF_SFIP 37
+#define EF_SNIP 36
+#define EF_SSBR 38
+#define EF_DMT0 39
+#define EF_DMD0 40
+#define EF_DMA0 41
+#define EF_DMT1 42
+#define EF_DMD1 43
+#define EF_DMA1 44
+#define EF_DMT2 45
+#define EF_DMD2 46
+#define EF_DMA2 47
+#define EF_FPECR 48
+#define EF_FPHS1 49
+#define EF_FPLS1 50
+#define EF_FPHS2 51
+#define EF_FPLS2 52
+#define EF_FPPT 53
+#define EF_FPRH 54
+#define EF_FPRL 55
+#define EF_FPIT 56
+#define EF_VECTOR 57
+#define EF_MASK 58
+#define EF_MODE 59
+#define EF_RET 60
+#define EF_NREGS 62
+#define SIZEOF_EF 248
+
+#endif /* __GENASSYM_INCLUDED */
diff --git a/sys/arch/mvme88k/include/autoconf.h b/sys/arch/mvme88k/include/autoconf.h
new file mode 100644
index 00000000000..fc16b991c1a
--- /dev/null
+++ b/sys/arch/mvme88k/include/autoconf.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1993 Adam Glass
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Adam Glass.
+ * 4. The name of the Author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Adam Glass ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Header: /cvs/OpenBSD/src/sys/arch/mvme88k/include/autoconf.h,v 1.1 1995/10/18 10:54:22 deraadt Exp $
+ */
+
+int always_match __P((struct device *, struct cfdata *, void *));
+
+#define DEVICE_UNIT(device) (device->dv_unit)
+#define CFDATA_LOC(cfdata) (cfdata->cf_loc)
diff --git a/sys/arch/mvme88k/include/board.h b/sys/arch/mvme88k/include/board.h
new file mode 100644
index 00000000000..f53367b7cdf
--- /dev/null
+++ b/sys/arch/mvme88k/include/board.h
@@ -0,0 +1,78 @@
+#ifndef _MACHINE_BOARD_H
+#define _MACHINE_BOARD_H
+/*
+ * VME187 CPU board constants - derived from Luna88k
+ */
+
+/*
+ * Something to append a 'U' to a long constant when compiling C, so
+ * that it is unsigned under both ANSI and traditional compilers.
+ */
+#if defined(ASSEMBLER)
+# define U(num) num
+#else
+# if defined(__STDC__)
+# define U(num) num ## U
+# else
+# define U(num) num/**/U
+# endif
+#endif
+
+#define MAX_CPUS 1 /* no. of CPUs */
+#define MAX_CMMUS 2 /* 2 CMMUs - 1 data and 1 code */
+
+#define SYSV_BASE U(0x00000000) /* system virtual base */
+
+#define MAXU_ADDR U(0x40000000) /* size of user virtual space */
+#define MAXPHYSMEM U(0x10000000) /* max physical memory */
+
+#define IO_SPACE_START U(0xFFF00000) /* start of local IO */
+#define IO_SPACE_END U(0xFFFFFFFF) /* end of io space */
+
+#define ILLADDRESS U(0x0F000000) /* any faulty address */
+#define PROM_ADDR U(0xFF800000) /* PROM */
+
+#define INT_PRI_LEVEL U(0xFFF4203E) /* interrupt priority level */
+#define INT_MASK_LEVEL U(0xFFF4203F) /* interrupt mask level */
+
+#define LOCAL_IO_DEVS U(0xFFF00000) /* local IO devices */
+#define VMEA16 U(0xFFFF0000) /* VMEbus A16 */
+
+#define PCC_ADDR U(0xFFF42000) /* PCCchip2 Regs */
+#define MEM_CTLR U(0xFFF43000) /* MEMC040 mem controller */
+#define SCC_ADDR U(0xFFF45000) /* Cirrus Chip */
+#define LANCE_ADDR U(0xFFF46000) /* 82596CA */
+#define SCSI_ADDR U(0xFFF47000) /* NCR 710 address */
+#define MK48T08_ADDR U(0xFFFC0000) /* BBRAM, TOD */
+
+#define TOD_CAL_CTL U(0xFFFC1FF8) /* calendar control register */
+#define TOD_CAL_SEC U(0xFFFC1FF9) /* seconds */
+#define TOD_CAL_MIN U(0xFFFC1FFA) /* minutes */
+#define TOD_CAL_HOUR U(0xFFFC1FFB) /* hours */
+#define TOD_CAL_DOW U(0xFFFC1FFC) /* Day Of the Week */
+#define TOD_CAL_DAY U(0xFFFC1FFD) /* days */
+#define TOD_CAL_MON U(0xFFFC1FFE) /* months */
+#define TOD_CAL_YEAR U(0xFFFC1FFF) /* years */
+
+#define CMMU_I U(0xFFF77000) /* CMMU instruction */
+#define CMMU_D U(0xFFF7F000) /* CMMU data */
+
+/* interrupt vectors */
+
+#define PPBSY 0x50 /* printer port busy */
+#define PPPE 0x51 /* printer port PE */
+#define PPSEL 0x52 /* printer port select */
+#define PPFLT 0x53 /* printer port fault */
+#define PPACK 0x54 /* printer port ack */
+#define SCSIIRQ 0x55 /* SCSI IRQ */
+#define LANCERR 0x56 /* LANC ERR */
+#define LANCIRQ 0x57 /* LANC IRQ */
+#define TIMER2IRQ 0x58 /* Tick Timer 2 vec */
+#define TIMER1IRQ 0x59 /* Tick Timer 1 vec */
+#define GPIOIRQ 0x5A /* GPIO IRQ */
+#define SRXEXIRQ 0x5C /* Serial RX Exception IRQ */
+#define SRMIRQ 0x5D /* Serial Modem IRQ */
+#define STXIRQ 0x5E /* Serial TX IRQ */
+#define SRXIRQ 0x5F /* Serial RX IRQ */
+
+#endif /* _MACHINE_BOARD_H */
diff --git a/sys/arch/mvme88k/include/bug.h b/sys/arch/mvme88k/include/bug.h
new file mode 100644
index 00000000000..b1c3686f655
--- /dev/null
+++ b/sys/arch/mvme88k/include/bug.h
@@ -0,0 +1,12 @@
+#include <machine/bugio.h>
+
+struct bugenv {
+ int clun;
+ int dlun;
+ int ipl;
+ int ctlr;
+ int (*entry)();
+ int cfgblk;
+ char *argstart;
+ char *argend;
+};
diff --git a/sys/arch/mvme88k/include/bugio.h b/sys/arch/mvme88k/include/bugio.h
new file mode 100644
index 00000000000..74bb77bd336
--- /dev/null
+++ b/sys/arch/mvme88k/include/bugio.h
@@ -0,0 +1,62 @@
+#include <sys/cdefs.h>
+
+struct bugdisk_io {
+ char clun;
+ char dlun;
+ short status;
+ void *addr;
+ int blkno;
+#define fileno blkno
+ short nblks;
+ char flag;
+#define FILEMARKFLAG 0x80
+#define IGNOREFILENO 0x02
+#define ENDOFFILE 0x01
+ char am;
+};
+
+/* values are in BCD {upper nibble+lower nibble} */
+
+struct bugrtc {
+ unsigned char Y;
+ unsigned char M;
+ unsigned char D;
+ unsigned char d;
+ unsigned char H;
+ unsigned char m;
+ unsigned char s;
+ unsigned char c;
+};
+
+/* Board ID - lots of info */
+
+struct bugbrdid {
+ unsigned char eye[4];
+ char rev;
+ char month;
+ char day;
+ char year;
+ short packetsize;
+ short dummy;
+ short brdno;
+ unsigned char brdsuf[2];
+ char options[3];
+ char family:4;
+ char cpu:4;
+ short clun;
+ short dlun;
+ short type;
+ short dev;
+ int option;
+};
+
+char buginchr __P((void));
+int buginstat __P((void));
+int bugoutchr __P((unsigned char));
+int bugoutstr __P((char *, char *));
+int bugpcrlf __P((void));
+int bugdskrd __P((struct bugdisk_io *));
+int bugdskwr __P((struct bugdisk_io *));
+int bugrtcrd __P((struct bugrtc *));
+int bugreturn __P((void));
+int bugbrdid __P((struct bugbrdid *));
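A sketch (not from this import) of calling the board-ID service declared
above; the fields are those of struct bugbrdid, and the printf format is only
illustrative.

void
print_brdid(void)
{
	struct bugbrdid id;

	bugbrdid(&id);
	printf("board %d%c%c rev %d\n",
	    id.brdno, id.brdsuf[0], id.brdsuf[1], id.rev);
}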
diff --git a/sys/arch/mvme88k/include/cdefs.h b/sys/arch/mvme88k/include/cdefs.h
new file mode 100644
index 00000000000..36f4990a9cc
--- /dev/null
+++ b/sys/arch/mvme88k/include/cdefs.h
@@ -0,0 +1,35 @@
+/* $NetBSD: cdefs.h,v 1.2 1995/03/23 20:10:48 jtc Exp $ */
+
+/*
+ * Written by J.T. Conklin <jtc@wimsey.com> 01/17/95.
+ * Public domain.
+ */
+
+#ifndef _MACHINE_CDEFS_H_
+#define _MACHINE_CDEFS_H_
+
+#ifdef __STDC__
+#define _C_LABEL(x) _STRING(_ ## x)
+#else
+#define _C_LABEL(x) _STRING(_/**/x)
+#endif
+
+#ifdef __GNUC__
+#ifdef __STDC__
+#define __indr_reference(sym,alias) \
+ __asm__(".stabs \"_" #alias "\",11,0,0,0"); \
+ __asm__(".stabs \"_" #sym "\",1,0,0,0")
+#define __warn_references(sym,msg) \
+ __asm__(".stabs \"" msg "\",30,0,0,0"); \
+ __asm__(".stabs \"_" #sym "\",1,0,0,0")
+#else
+#define __indr_reference(sym,alias) \
+ __asm__(".stabs \"_/**/alias\",11,0,0,0"); \
+ __asm__(".stabs \"_/**/sym\",1,0,0,0")
+#define __warn_references(sym,msg) \
+ __asm__(".stabs msg,30,0,0,0"); \
+ __asm__(".stabs \"_/**/sym\",1,0,0,0")
+#endif
+#endif
+
+#endif /* !_MACHINE_CDEFS_H_ */
diff --git a/sys/arch/mvme88k/include/cpu.h b/sys/arch/mvme88k/include/cpu.h
new file mode 100644
index 00000000000..39890d72b66
--- /dev/null
+++ b/sys/arch/mvme88k/include/cpu.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _CPU_H_
+#define _CPU_H_
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_MAXID 1 /* no valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+}
+
+#ifdef KERNEL
+
+#include <machine/psl.h>
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+#define COPY_SIGCODE /* copy sigcode above user stack in exec */
+
+#define cpu_exec(p) /* nothing */
+#define cpu_wait(p) /* nothing */
+#define cpu_swapout(p) /* nothing */
+
+/*
+ * See syscall() for an explanation of the following. Note that the
+ * locore bootstrap code follows the syscall stack protocol. The
+ * framep argument is unused.
+ */
+#define cpu_set_init_frame(p, fp) \
+ (p)->p_md.md_tf = (struct trapframe *) \
+ ((caddr_t)(p)->p_addr)
+
+/*
+ * Arguments to hardclock and gatherstats encapsulate the previous
+ * machine state in an opaque clockframe.
+ */
+struct clockframe {
+ int pc; /* program counter at time of interrupt */
+ int sr; /* status register at time of interrupt */
+ int ipl; /* mask level at the time of interrupt */
+};
+
+#define CLKF_USERMODE(framep) (((framep)->sr & 0x80000000) == 0)
+#define CLKF_BASEPRI(framep) ((framep)->ipl == 0)
+#define CLKF_PC(framep) ((framep)->pc & ~3)
+#define CLKF_INTR(framep) (0)
+
+#define SIR_NET 1
+#define SIR_CLOCK 2
+
+#define setsoftnet() (ssir |= SIR_NET, want_ast = 1)
+#define setsoftclock() (ssir |= SIR_CLOCK, want_ast = 1)
+
+#define siroff(x) (ssir &= ~x)
+
+int ssir;
+int want_ast;
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+int want_resched; /* resched() was called */
+#define need_resched() (want_resched = 1, want_ast = 1)
+
+/*
+ * Give a profiling tick to the current process when the user profiling
+ * buffer pages are invalid.  Request an ast to send us
+ * through trap(), marking the proc as needing a profiling tick.
+ */
+#define need_proftick(p) ((p)->p_flag |= P_OWEUPC, want_ast = 1)
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define signotify(p) (want_ast = 1)
+
+#endif /* KERNEL */
+#endif /* _CPU_H_ */
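A sketch of how clock code is expected to use the clockframe macros defined
above (the function name is made up):

void
clock_sample(struct clockframe *frame)
{
	if (CLKF_USERMODE(frame))
		printf("tick from user mode, pc %x\n", CLKF_PC(frame));
	else if (CLKF_BASEPRI(frame))
		printf("tick from kernel at base ipl\n");
}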
diff --git a/sys/arch/mvme88k/include/cpus.h b/sys/arch/mvme88k/include/cpus.h
new file mode 100644
index 00000000000..14501d90f26
--- /dev/null
+++ b/sys/arch/mvme88k/include/cpus.h
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * HISTORY
+ */
+/*
+ Version idents for the 88k family chips
+ */
+
+#ifndef _M88K_CPUS_
+#define _M88K_CPUS_
+
+/*
+ * cpu Processor Identification Register (PID).
+ */
+#ifndef ASSEMBLER
+union cpupid {
+ unsigned cpupid;
+ struct {
+ unsigned
+ /*empty*/:16,
+ arc:8,
+ version:7,
+ master:1;
+ } m88100;
+ struct {
+ unsigned
+ id:8,
+ type:3,
+ version:5,
+ /*empty*/:16;
+ } m88200;
+};
+#endif /* ASSEMBLER */
+
+#define M88100 0
+#define M88200 5
+#define M88204 6
+
+#endif /* _M88K_CPUS_ */
diff --git a/sys/arch/mvme88k/include/db_machdep.h b/sys/arch/mvme88k/include/db_machdep.h
new file mode 100644
index 00000000000..c27653ab45f
--- /dev/null
+++ b/sys/arch/mvme88k/include/db_machdep.h
@@ -0,0 +1,169 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ */
+
+/*
+ * Machine-dependent definitions for the new kernel debugger
+ */
+
+#ifndef _M88K_DB_MACHDEP_H_
+#define _M88K_DB_MACHDEP_H_ 1
+
+#include <sys/types.h>
+#include <vm/vm_prot.h>
+#include <vm/vm_param.h>
+#include <vm/vm_inherit.h>
+#include <vm/lock.h>
+#include <machine/pcb.h> /* m88100_saved_state */
+#include <machine/psl.h>
+#include <machine/trap.h>
+
+#define BKPT_SIZE (4) /* number of bytes in bkpt inst. */
+#define BKPT_INST (0xF000D082U) /* tb0, 0,r0, vector 132 */
+#define BKPT_SET(inst) (BKPT_INST)
+
+/* Entry trap for the debugger - used for inline assembly breaks */
+#define ENTRY_ASM "tb0 0, r0, 132"
+#define DDB_ENTRY_TRAP_NO 132
+
+typedef vm_offset_t db_addr_t;
+typedef int db_expr_t;
+typedef struct m88100_saved_state db_regs_t;
+db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+/*
+ * the low two bits of sxip, snip, sfip hold valid bits
+ * that need to be masked off to get the correct addresses
+ */
+
+#define m88k_pc(regs) \
+({ \
+ int ret; \
+ \
+ if (regs->sxip & 2) /* is valid */ \
+ ret = regs->sxip & ~3; \
+ else if (regs->snip & 2) \
+ ret = regs->snip & ~3; \
+ else if (regs->sfip & 2) \
+ ret = regs->sfip & ~3; \
+ /* we are in trouble - none of the program counters is valid */ \
+ ret; \
+})
+
+/*
+ * This is an actual function due to the fact that the sxip
+ * or snip could be nooped out due to a jmp or rte
+ */
+#define PC_REGS(regs) (regs->sxip & 2) ? regs->sxip & ~3 : (regs->snip & 2 ? \
+ regs->snip & ~3 : regs->sfip & ~3)
+
+#define pC_REGS(regs) (regs->sxip & 2) ? regs->sxip : (regs->snip & 2 ? \
+ regs->snip : regs->sfip)
+extern int db_noisy;
+#define NOISY(x) if (db_noisy) x
+#define NOISY2(x) if (db_noisy >= 2) x
+#define NOISY3(x) if (db_noisy >= 3) x
+
+extern int quiet_db_read_bytes;
+
+/* These versions are not constantly doing SPL */
+#define cnmaygetc db_getc
+#define cngetc db_getc
+#define cnputc db_putc
+
+/* breakpoint/watchpoint foo */
+#define IS_BREAKPOINT_TRAP(type,code) ((type)==T_KDB_BREAK)
+#if defined(T_WATCHPOINT)
+#define IS_WATCHPOINT_TRAP(type,code) ((type)==T_KDB_WATCH)
+#else
+#define IS_WATCHPOINT_TRAP(type,code) 0
+#endif /* T_WATCHPOINT */
+
+/* we don't want coff support */
+#define DB_NO_COFF 1
+
+/* need software single step */
+#define SOFTWARE_SSTEP 1
+
+/*
+ * Debugger can get to any address space
+ */
+
+#define DB_ACCESS_LEVEL DB_ACCESS_ANY
+
+#define DB_VALID_KERN_ADDR(addr) (!badaddr((void*)(addr), 1))
+#define DB_VALID_ADDRESS(addr,user) \
+ (user ? db_check_user_addr(addr) : DB_VALID_KERN_ADDR(addr))
+
+/* instruction type checking - others are implemented in db_sstep.c */
+
+#define inst_trap_return(ins) ((ins) == 0xf400fc00U)
+
+/* don't need to load symbols */
+#define DB_SYMBOLS_PRELOADED 1
+
+/* machine specific commands have been added to ddb */
+#define DB_MACHINE_COMMANDS 1
+/* inst_return(ins) - is the instruction a function call return.
+ * Not mutually exclusive with inst_branch. Should be a jmp r1. */
+#define inst_return(I) (((I)&0xfffffbffU) == 0xf400c001U ? TRUE : FALSE)
+
+#ifdef __GNUC__
+/*
+ * inst_call - function call predicate: is the instruction a function call.
+ * Could be either bsr or jsr
+ */
+#define inst_call(I) ({ unsigned i = (I); \
+ ((((i) & 0xf8000000U) == 0xc8000000U || /*bsr*/ \
+ ((i) & 0xfffffbe0U) == 0xf400c800U) /*jsr*/ \
+ ? TRUE : FALSE) \
+;})
+
+/*
+ * This routine should return true for instructions that result in unconditional
+ * transfers of the flow of control. (Unconditional Jumps, subroutine calls,
+ * subroutine returns, etc).
+ *
+ * Trap and return from trap should not be listed here.
+ */
+#define inst_unconditional_flow_transfer(I) ({ unsigned i = (I); \
+ ((((i) & 0xf0000000U) == 0xc0000000U || /* br, bsr */ \
+ ((i) & 0xfffff3e0U) == 0xf400c000U) /* jmp, jsr */ \
+ ? TRUE: FALSE) \
+;})
+
+/* Return true if the instruction has a delay slot. */
+#define db_branch_is_delayed(I) inst_delayed(I)
+
+#endif /* __GNUC__ */
+
+#define db_printf_enter db_printing
+
+#endif /* _M88K_DB_MACHDEP_H_ */
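Illustrative only: extracting the current PC from a saved register state with
the PC_REGS macro above, which masks the low valid bits of sxip/snip/sfip.

db_addr_t
ddb_current_pc(db_regs_t *regs)
{
	db_addr_t pc;

	pc = PC_REGS(regs);	/* masks the low valid bits */
	return (pc);
}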
diff --git a/sys/arch/mvme88k/include/disklabel.h b/sys/arch/mvme88k/include/disklabel.h
new file mode 100644
index 00000000000..c7419657305
--- /dev/null
+++ b/sys/arch/mvme88k/include/disklabel.h
@@ -0,0 +1,94 @@
+#ifndef _MACHINE_DISKLABEL_H_
+#define _MACHINE_DISKLABEL_H_
+
+#define MAXPARTITIONS 16
+
+/* number of boot pieces, i.e. xxboot and bootxx */
+#define NUMBOOT 2
+
+#define RAW_PART 2 /* Xd0c is raw part. */
+
+/*
+ * used to encode disk minor numbers
+ * this should probably be moved to sys/disklabel.h
+ */
+#define DISKUNIT(dev) (minor(dev) / MAXPARTITIONS)
+#define DISKPART(dev) (minor(dev) % MAXPARTITIONS)
+#define MAKEDISKDEV(maj, unit, part) \
+ (makedev((maj), ((unit) * MAXPARTITIONS) + (part)))
+
+struct cpu_disklabel {
+ /* VID */
+ unsigned char vid_id[4];
+ unsigned char vid_0[16];
+ unsigned int vid_oss;
+ unsigned short vid_osl;
+ unsigned char vid_1[4];
+ unsigned short vid_osa_u;
+ unsigned short vid_osa_l;
+ unsigned char vid_2[2];
+ unsigned short partitions;
+ unsigned char vid_vd[16];
+ unsigned long bbsize;
+ unsigned long magic1; /* 4 */
+ unsigned short type; /* 2 */
+ unsigned short subtype; /* 2 */
+ unsigned char packname[16]; /* 16 */
+ unsigned long flags; /* 4 */
+ unsigned long drivedata[5]; /* 4 */
+ unsigned long spare[5]; /* 4 */
+ unsigned short checksum; /* 2 */
+
+ unsigned long secpercyl; /* 4 */
+ unsigned long secperunit; /* 4 */
+ unsigned long headswitch; /* 4 */
+
+ unsigned char vid_3[4];
+ unsigned int vid_cas;
+ unsigned char vid_cal;
+ unsigned char vid_4_0[3];
+ unsigned char vid_4[64];
+ unsigned char vid_4_1[28];
+ unsigned long sbsize;
+ unsigned char vid_mot[8];
+ /* CFG */
+ unsigned char cfg_0[4];
+ unsigned short cfg_atm;
+ unsigned short cfg_prm;
+ unsigned short cfg_atw;
+ unsigned short cfg_rec;
+
+ unsigned short sparespertrack;
+ unsigned short sparespercyl;
+ unsigned long acylinders;
+ unsigned short rpm;
+ unsigned short cylskew;
+
+ unsigned char cfg_spt;
+ unsigned char cfg_hds;
+ unsigned short cfg_trk;
+ unsigned char cfg_ilv;
+ unsigned char cfg_sof;
+ unsigned short cfg_psm;
+ unsigned short cfg_shd;
+ unsigned char cfg_2[2];
+ unsigned short cfg_pcom;
+ unsigned char cfg_3;
+ unsigned char cfg_ssr;
+ unsigned short cfg_rwcc;
+ unsigned short cfg_ecc;
+ unsigned short cfg_eatm;
+ unsigned short cfg_eprm;
+ unsigned short cfg_eatw;
+ unsigned char cfg_gpb1;
+ unsigned char cfg_gpb2;
+ unsigned char cfg_gpb3;
+ unsigned char cfg_gpb4;
+ unsigned char cfg_ssc;
+ unsigned char cfg_runit;
+ unsigned short cfg_rsvc1;
+ unsigned short cfg_rsvc2;
+ unsigned long magic2;
+ unsigned char cfg_4[192];
+};
+#endif /* _MACHINE_DISKLABEL_H_ */
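A worked example of the minor-number encoding above: with MAXPARTITIONS at 16
the minor packs as unit * 16 + partition, so the round trip below holds for
any partition below MAXPARTITIONS (the function itself is illustrative).

int
minor_roundtrip_ok(int maj, int unit, int part)
{
	dev_t dev = MAKEDISKDEV(maj, unit, part);

	return (DISKUNIT(dev) == unit && DISKPART(dev) == part);
}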
diff --git a/sys/arch/mvme88k/include/endian.h b/sys/arch/mvme88k/include/endian.h
new file mode 100644
index 00000000000..9e078b1e5ee
--- /dev/null
+++ b/sys/arch/mvme88k/include/endian.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 1987, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)endian.h 8.1 (Berkeley) 6/11/93
+ * $Id: endian.h,v 1.1 1995/10/18 10:54:21 deraadt Exp $
+ */
+
+#ifndef _ENDIAN_H_
+#define _ENDIAN_H_
+
+/*
+ * Define the order of 32-bit words in 64-bit words.
+ */
+#define _QUAD_HIGHWORD 0
+#define _QUAD_LOWWORD 1
+
+#ifndef _POSIX_SOURCE
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+
+#define LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define BIG_ENDIAN 4321 /* MSB first: 68000, 88000 ibm, net */
+#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#define BYTE_ORDER BIG_ENDIAN
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+unsigned long htonl __P((unsigned long));
+unsigned short htons __P((unsigned short));
+unsigned long ntohl __P((unsigned long));
+unsigned short ntohs __P((unsigned short));
+__END_DECLS
+
+/*
+ * Macros for network/external number representation conversion.
+ */
+#if BYTE_ORDER == BIG_ENDIAN && !defined(lint)
+#define ntohl(x) (x)
+#define ntohs(x) (x)
+#define htonl(x) (x)
+#define htons(x) (x)
+
+#define NTOHL(x) (x)
+#define NTOHS(x) (x)
+#define HTONL(x) (x)
+#define HTONS(x) (x)
+
+#else
+
+#define NTOHL(x) (x) = ntohl((u_long)x)
+#define NTOHS(x) (x) = ntohs((u_short)x)
+#define HTONL(x) (x) = htonl((u_long)x)
+#define HTONS(x) (x) = htons((u_short)x)
+#endif
+#endif /* ! _POSIX_SOURCE */
+#endif /* !_ENDIAN_H_ */
diff --git a/sys/arch/mvme88k/include/exception_vectors.h b/sys/arch/mvme88k/include/exception_vectors.h
new file mode 100644
index 00000000000..3df57e99088
--- /dev/null
+++ b/sys/arch/mvme88k/include/exception_vectors.h
@@ -0,0 +1,167 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991, 1992 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef UNDEFINED
+# define UNDEFINED unknown_handler
+#endif
+/* vector 0x00 (#0) */ word error_handler
+/* vector 0x01 (#1) */ word interrupt_handler
+/* vector 0x02 (#2) */ word instruction_access_handler
+/* vector 0x03 (#3) */ word data_exception_handler
+/* vector 0x04 (#4) */ word misaligned_handler
+/* vector 0x05 (#5) */ word unimplemented_handler
+/* vector 0x06 (#6) */ word privilege_handler
+/* vector 0x07 (#7) */ word bounds_handler
+/* vector 0x08 (#8) */ word divide_handler
+/* vector 0x09 (#9) */ word overflow_handler
+/* vector 0x0a (#10) */ word error_handler
+/* vector 0x0b (#11) */ word UNDEFINED
+/* vector 0x0c (#12) */ word UNDEFINED
+/* vector 0x0d (#13) */ word UNDEFINED
+/* vector 0x0e (#14) */ word UNDEFINED
+/* vector 0x0f (#15) */ word UNDEFINED
+/* vector 0x10 (#16) */ word UNDEFINED
+/* vector 0x11 (#17) */ word UNDEFINED
+/* vector 0x12 (#18) */ word UNDEFINED
+/* vector 0x13 (#19) */ word UNDEFINED
+/* vector 0x14 (#20) */ word UNDEFINED
+/* vector 0x15 (#21) */ word UNDEFINED
+/* vector 0x16 (#22) */ word UNDEFINED
+/* vector 0x17 (#23) */ word UNDEFINED
+/* vector 0x18 (#24) */ word UNDEFINED
+/* vector 0x19 (#25) */ word UNDEFINED
+/* vector 0x1a (#26) */ word UNDEFINED
+/* vector 0x1b (#27) */ word UNDEFINED
+/* vector 0x1c (#28) */ word UNDEFINED
+/* vector 0x1d (#29) */ word UNDEFINED
+/* vector 0x1e (#30) */ word UNDEFINED
+/* vector 0x1f (#31) */ word UNDEFINED
+/* vector 0x20 (#32) */ word UNDEFINED
+/* vector 0x21 (#33) */ word UNDEFINED
+/* vector 0x22 (#34) */ word UNDEFINED
+/* vector 0x23 (#35) */ word UNDEFINED
+/* vector 0x24 (#36) */ word UNDEFINED
+/* vector 0x25 (#37) */ word UNDEFINED
+/* vector 0x26 (#38) */ word UNDEFINED
+/* vector 0x27 (#39) */ word UNDEFINED
+/* vector 0x28 (#40) */ word UNDEFINED
+/* vector 0x29 (#41) */ word UNDEFINED
+/* vector 0x2a (#42) */ word UNDEFINED
+/* vector 0x2b (#43) */ word UNDEFINED
+/* vector 0x2c (#44) */ word UNDEFINED
+/* vector 0x2d (#45) */ word UNDEFINED
+/* vector 0x2e (#46) */ word UNDEFINED
+/* vector 0x2f (#47) */ word UNDEFINED
+/* vector 0x30 (#48) */ word UNDEFINED
+/* vector 0x31 (#49) */ word UNDEFINED
+/* vector 0x32 (#50) */ word UNDEFINED
+/* vector 0x33 (#51) */ word UNDEFINED
+/* vector 0x34 (#52) */ word UNDEFINED
+/* vector 0x35 (#53) */ word UNDEFINED
+/* vector 0x36 (#54) */ word UNDEFINED
+/* vector 0x37 (#55) */ word UNDEFINED
+/* vector 0x38 (#56) */ word UNDEFINED
+/* vector 0x39 (#57) */ word UNDEFINED
+/* vector 0x3a (#58) */ word UNDEFINED
+/* vector 0x3b (#59) */ word UNDEFINED
+/* vector 0x3c (#60) */ word UNDEFINED
+/* vector 0x3d (#61) */ word UNDEFINED
+/* vector 0x3e (#62) */ word UNDEFINED
+/* vector 0x3f (#63) */ word UNDEFINED
+/* vector 0x40 (#64) */ word UNDEFINED
+/* vector 0x41 (#65) */ word UNDEFINED
+/* vector 0x42 (#66) */ word UNDEFINED
+/* vector 0x43 (#67) */ word UNDEFINED
+/* vector 0x44 (#68) */ word UNDEFINED
+/* vector 0x45 (#69) */ word UNDEFINED
+/* vector 0x46 (#70) */ word UNDEFINED
+/* vector 0x47 (#71) */ word UNDEFINED
+/* vector 0x48 (#72) */ word UNDEFINED
+/* vector 0x49 (#73) */ word UNDEFINED
+/* vector 0x4a (#74) */ word UNDEFINED
+/* vector 0x4b (#75) */ word UNDEFINED
+/* vector 0x4c (#76) */ word UNDEFINED
+/* vector 0x4d (#77) */ word UNDEFINED
+/* vector 0x4e (#78) */ word UNDEFINED
+/* vector 0x4f (#79) */ word UNDEFINED
+/* vector 0x50 (#80) */ word UNDEFINED
+/* vector 0x51 (#81) */ word UNDEFINED
+/* vector 0x52 (#82) */ word UNDEFINED
+/* vector 0x53 (#83) */ word UNDEFINED
+/* vector 0x54 (#84) */ word UNDEFINED
+/* vector 0x55 (#85) */ word UNDEFINED
+/* vector 0x56 (#86) */ word UNDEFINED
+/* vector 0x57 (#87) */ word UNDEFINED
+/* vector 0x58 (#88) */ word UNDEFINED
+/* vector 0x59 (#89) */ word UNDEFINED
+/* vector 0x5a (#90) */ word UNDEFINED
+/* vector 0x5b (#91) */ word UNDEFINED
+/* vector 0x5c (#92) */ word UNDEFINED
+/* vector 0x5d (#93) */ word UNDEFINED
+/* vector 0x5e (#94) */ word UNDEFINED
+/* vector 0x5f (#95) */ word UNDEFINED
+/* vector 0x60 (#96) */ word UNDEFINED
+/* vector 0x61 (#97) */ word UNDEFINED
+/* vector 0x62 (#98) */ word UNDEFINED
+/* vector 0x63 (#99) */ word UNDEFINED
+/* vector 0x64 (#100) */ word UNDEFINED
+/* vector 0x65 (#101) */ word UNDEFINED
+/* vector 0x66 (#102) */ word UNDEFINED
+/* vector 0x67 (#103) */ word UNDEFINED
+/* vector 0x68 (#104) */ word UNDEFINED
+/* vector 0x69 (#105) */ word UNDEFINED
+/* vector 0x6a (#106) */ word UNDEFINED
+/* vector 0x6b (#107) */ word UNDEFINED
+/* vector 0x6c (#108) */ word UNDEFINED
+/* vector 0x6d (#109) */ word UNDEFINED
+/* vector 0x6e (#110) */ word UNDEFINED
+/* vector 0x6f (#111) */ word UNDEFINED
+/* vector 0x70 (#112) */ word UNDEFINED
+/* vector 0x71 (#113) */ word UNDEFINED
+/* vector 0x72 (#114) */ word fp_precise_handler
+/* vector 0x73 (#115) */ word fp_imprecise_handler
+/* vector 0x74 (#116) */ word unimplemented_handler
+/* vector 0x75 (#117) */ word UNDEFINED
+/* vector 0x76 (#118) */ word unimplemented_handler
+/* vector 0x77 (#119) */ word UNDEFINED
+/* vector 0x78 (#120) */ word unimplemented_handler
+/* vector 0x79 (#121) */ word UNDEFINED
+/* vector 0x7a (#122) */ word unimplemented_handler
+/* vector 0x7b (#123) */ word UNDEFINED
+/* vector 0x7c (#124) */ word unimplemented_handler
+/* vector 0x7d (#125) */ word UNDEFINED
+/* vector 0x7e (#126) */ word unimplemented_handler
+/* vector 0x7f (#127) */ word UNDEFINED
+/* vector 0x80 (#128) */ word syscall_handler
+/* vector 0x81 (#129) */ word syscall_handler
+/* vector 0x82 (#130) */ word break
+/* vector 0x83 (#131) */ word trace
+/* vector 0x84 (#132) */ word entry
+#if defined(RAW_PRINTF) && RAW_PRINTF
+/* vector 0x85 (#133) */ word user_raw_putstr /* for USER raw_printf() */
+/* vector 0x86 (#134) */ word user_raw_xpr /* for USER raw_xpr() */
+#endif
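Each `word' entry above names the handler for the exception vector with the same index, so the list can be indexed directly by vector number (vector 0x03, for instance, is the data exception). A minimal sketch of looking a handler up from such a word table; the array and typedef names are hypothetical and used only for illustration:

	/* Hypothetical lookup into a table built from the word list above. */
	typedef void (*vec_entry_t)(void);
	extern vec_entry_t exception_handlers[];	/* one entry per vector */

	static vec_entry_t
	handler_for(unsigned vec)
	{
		return exception_handlers[vec];	/* 0x03 -> data_exception_handler */
	}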
diff --git a/sys/arch/mvme88k/include/exec.h b/sys/arch/mvme88k/include/exec.h
new file mode 100644
index 00000000000..23e5ea9b6c0
--- /dev/null
+++ b/sys/arch/mvme88k/include/exec.h
@@ -0,0 +1,327 @@
+#ifndef __A_OUT_GNU_H__
+#define __A_OUT_GNU_H__
+
+#include <machine/endian.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define __GNU_EXEC_MACROS__
+
+#ifndef __STRUCT_EXEC_OVERRIDE__
+
+struct exec
+{
+ unsigned a_midmag; /* Use macros N_MAGIC, etc for access */
+#define a_info a_midmag
+ unsigned a_text; /* size of text, in bytes */
+ unsigned a_data; /* size of data, in bytes */
+ unsigned a_bss; /* size of uninitialized data area, in bytes */
+ unsigned a_syms; /* length of symbol table data, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* size of reloc info for text, in bytes */
+ unsigned a_drsize; /* size of reloc info for data, in bytes */
+};
+
+#endif /* __STRUCT_EXEC_OVERRIDE__ */
+
+#define MID_ZERO 0 /* unknown - implementation dependent */
+#define MID_SUN010 1 /* sun 68010/68020 binary */
+#define MID_SUN020 2 /* sun 68020-only binary */
+#define MID_PC386 100 /* 386 PC binary. (so quoth BFD) */
+#define MID_HP200 200 /* hp200 (68010) BSD binary */
+#define MID_I386 134 /* i386 BSD binary */
+#define MID_M68K 135 /* m68k BSD binary with 8K page sizes */
+#define MID_M68K4K 136 /* m68k BSD binary with 4K page sizes */
+#define MID_NS32532 137 /* ns32532 */
+#define MID_SPARC 138 /* sparc */
+#define MID_PMAX 139 /* pmax */
+#define MID_VAX 140 /* vax */
+#define MID_ALPHA 141 /* Alpha BSD binary */
+#define MID_M88K 151 /* m88k BSD binary */
+#define MID_HP300 300 /* hp300 (68020+68881) BSD binary */
+#define MID_HPUX 0x20C /* hp200/300 HP-UX binary */
+
+/* these go in the N_MACHTYPE field */
+enum machine_type {
+#if defined (M_OLDSUN2)
+ M__OLDSUN2 = M_OLDSUN2,
+#else
+ M_OLDSUN2 = 0,
+#endif
+#if defined (M_68010)
+ M__68010 = M_68010,
+#else
+ M_68010 = 1,
+#endif
+#if defined (M_68020)
+ M__68020 = M_68020,
+#else
+ M_68020 = 2,
+#endif
+#if defined (M_SPARC)
+ M__SPARC = M_SPARC,
+#else
+ M_SPARC = 3,
+#endif
+ /* skip a bunch so we don't run into any of sun's numbers */
+ M_386 = 100,
+ M_88K = 151
+};
+
+#define N_GETMAGIC(ex) \
+ ((ex).a_midmag & 0xffff)
+
+#define N_GETMID(ex) \
+ (((ex).a_midmag >> 16)&0xff)
+#if !defined (N_MAGIC)
+#define N_MAGIC(exec) ((exec).a_info & 0xffff)
+#endif
+#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff))
+#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff)
+#define N_SET_INFO(exec, magic, type, flags) \
+ ((exec).a_info = ((magic) & 0xffff) \
+ | (((int)(type) & 0xff) << 16) \
+ | (((flags) & 0xff) << 24))
+#define N_SET_MAGIC(exec, magic) \
+ ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff)))
+
+#define N_SET_MACHTYPE(exec, machtype) \
+ ((exec).a_info = \
+ ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16))
+
+#define N_SET_FLAGS(exec, flags) \
+ ((exec).a_info = \
+ ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24))
+
+#ifndef OMAGIC
+/* Code indicating object file or impure executable. */
+#define OMAGIC 0407
+/* Code indicating pure executable. */
+#define NMAGIC 0410
+/* Code indicating demand-paged executable. */
+#define ZMAGIC 0413
+#define QMAGIC 0314 /* "compact" demand load format; deprecated */
+#endif /* not OMAGIC */
+
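The a_midmag word packs three fields: the magic number in the low 16 bits, the machine ID in bits 16-23, and the flags in bits 24-31; the N_SET_* and N_GET* macros above simply mask and shift. An illustrative sketch (not part of the header) of building and inspecting a header for a demand-paged m88k binary:

	/* Illustration only: pack and unpack a_midmag with the macros above. */
	static void
	example_midmag(void)
	{
		struct exec ex;

		N_SET_INFO(ex, ZMAGIC, MID_M88K, 0);
		/* now N_GETMAGIC(ex) == ZMAGIC (0413),
		 *     N_GETMID(ex)   == MID_M88K (151),
		 *     N_FLAGS(ex)    == 0 */
	}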
+#if !defined (N_BADMAG)
+#define N_BADMAG(x) \
+ (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC \
+ && N_MAGIC(x) != ZMAGIC)
+#endif
+
+#define _N_BADMAG(x) \
+ (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC \
+ && N_MAGIC(x) != ZMAGIC)
+
+#if !defined(sparc) && !defined(m88k)
+#define _N_HDROFF(x) (SEGMENT_SIZE - sizeof (struct exec))
+#else
+#define _N_HDROFF(x) (- sizeof (struct exec))
+#endif
+
+#if !defined (N_TXTOFF)
+#define N_TXTOFF(x) \
+ (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : sizeof (struct exec))
+#endif
+
+#if !defined (N_DATOFF)
+#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text)
+#endif
+
+#if !defined (N_TRELOFF)
+#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data)
+#endif
+
+#if !defined (N_DRELOFF)
+#define N_DRELOFF(x) (N_TRELOFF(x) + (x).a_trsize)
+#endif
+
+#if !defined (N_SYMOFF)
+#define N_SYMOFF(x) (N_DRELOFF(x) + (x).a_drsize)
+#endif
+
+#if !defined (N_STROFF)
+#define N_STROFF(x) (N_SYMOFF(x) + (x).a_syms)
+#endif
+
+/* Address of text segment in memory after it is loaded. */
+#if !defined (N_TXTADDR)
+#define N_TXTADDR(x) 0
+#endif
+
+/* Address of data segment in memory after it is loaded.
+ Note that it is up to you to define SEGMENT_SIZE
+ on machines not listed here. */
+#if defined (hp300) || defined (mips) || defined(m88k)
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif /* PAGE_SIZE */
+#endif
+#if defined (sparc) || defined (NeXT)
+#define PAGE_SIZE 0x2000
+#endif
+#if defined (sony) || (defined (sun) && defined (mc68000))
+#define SEGMENT_SIZE 0x2000
+#endif /* Sony or 68k Sun. */
+#ifdef is68k
+#define SEGMENT_SIZE 0x20000
+#endif
+#if defined(m68k) && defined(PORTAR)
+#define PAGE_SIZE 0x400
+#endif
+#ifndef SEGMENT_SIZE
+/* This used to be first in this paragraph and under:
+ if (defined(vax) || defined(hp300) || defined(pyr) || defined(sparc) \
+ || (defined(m68k) && defined(PORTAR)) \
+ || defined (NeXT) || defined (mips)) */
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+#ifndef PAGE_SIZE
+/* This value is for i386-minix, but that has no predefine.
+ Making it default will only cause confusion on machines
+ which have no proper value defined. */
+#define PAGE_SIZE 16
+#endif
+
+#define PAGSIZ PAGE_SIZE
+#define SEGSIZ SEGMENT_SIZE
+#if !defined(__LDPGSZ)
+#define __LDPGSZ PAGE_SIZE
+#endif /* __LDPGSZ */
+
+#define _N_SEGMENT_ROUND(x) (((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1))
+
+#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text)
+
+#ifndef N_DATADDR
+#define N_DATADDR(x) \
+ (N_MAGIC(x)==OMAGIC? (_N_TXTENDADDR(x)) \
+ : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x))))
+#endif
+
+/* Address of bss segment in memory after it is loaded. */
+#if !defined (N_BSSADDR)
+#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data)
+#endif
+
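Because m88k is defined, _N_HDROFF is -sizeof(struct exec), so for a ZMAGIC image the text begins at file offset 0 (the 32-byte header is counted as part of the text), while OMAGIC/NMAGIC text follows the header; the in-core data segment then starts at the text end rounded up to a segment boundary. A sketch, for illustration only, of computing the file offsets and load addresses with the macros above:

	/* Illustration only: file layout and load addresses of an image. */
	static void
	example_layout(const struct exec *ep)
	{
		unsigned long txt_off  = N_TXTOFF(*ep);	/* 0 for ZMAGIC on m88k */
		unsigned long dat_off  = N_DATOFF(*ep);	/* txt_off + a_text */
		unsigned long sym_off  = N_SYMOFF(*ep);	/* after the reloc info */
		unsigned long dat_addr = N_DATADDR(*ep);	/* segment-rounded text end */
		unsigned long bss_addr = N_BSSADDR(*ep);	/* dat_addr + a_data */

		(void)txt_off; (void)dat_off; (void)sym_off;
		(void)dat_addr; (void)bss_addr;
	}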
+#if 0
+#if !defined (N_NLIST_DECLARED)
+struct nlist {
+ union {
+ char *n_name;
+ struct nlist *n_next;
+ long n_strx;
+ } n_un;
+ unsigned char n_type;
+ char n_other;
+ short n_desc;
+ unsigned long n_value;
+};
+#endif /* no N_NLIST_DECLARED. */
+#endif /* 0 */
+
+#if !defined (N_UNDF)
+#define N_UNDF 0
+#endif
+#if !defined (N_ABS)
+#define N_ABS 2
+#endif
+#if !defined (N_TEXT)
+#define N_TEXT 4
+#endif
+#if !defined (N_DATA)
+#define N_DATA 6
+#endif
+#if !defined (N_BSS)
+#define N_BSS 8
+#endif
+#if !defined (N_COMM)
+#define N_COMM 18
+#endif
+#if !defined (N_FN)
+#define N_FN 15
+#endif
+
+#if !defined (N_EXT)
+#define N_EXT 1
+#endif
+#if !defined (N_TYPE)
+#define N_TYPE 036
+#endif
+#if !defined (N_STAB)
+#define N_STAB 0340
+#endif
+
+/* The following type indicates the definition of a symbol as being
+ an indirect reference to another symbol. The other symbol
+ appears as an undefined reference, immediately following this symbol.
+
+ Indirection is asymmetrical. The other symbol's value will be used
+ to satisfy requests for the indirect symbol, but not vice versa.
+ If the other symbol does not have a definition, libraries will
+ be searched to find a definition. */
+#define N_INDR 0xa
+
+/* The following symbols refer to set elements.
+ All the N_SET[ATDB] symbols with the same name form one set.
+ Space is allocated for the set in the text section, and each set
+ element's value is stored into one word of the space.
+ The first word of the space is the length of the set (number of elements).
+
+ The address of the set is made into an N_SETV symbol
+ whose name is the same as the name of the set.
+ This symbol acts like a N_DATA global symbol
+ in that it can satisfy undefined external references. */
+
+/* These appear as input to LD, in a .o file. */
+#define N_SETA 0x14 /* Absolute set element symbol */
+#define N_SETT 0x16 /* Text set element symbol */
+#define N_SETD 0x18 /* Data set element symbol */
+#define N_SETB 0x1A /* Bss set element symbol */
+
+/* This is output from LD. */
+#define N_SETV 0x1C /* Pointer to set vector in data area. */
+
+#if !defined (N_RELOCATION_INFO_DECLARED)
+/* This structure describes a single relocation to be performed.
+ The text-relocation section of the file is a vector of these structures,
+ all of which apply to the text section.
+ Likewise, the data-relocation section applies to the data section. */
+
+struct relocation_info
+{
+ /* Address (within segment) to be relocated. */
+ int r_address;
+ /* The meaning of r_symbolnum depends on r_extern. */
+ unsigned int r_symbolnum:24;
+ /* Nonzero means value is a pc-relative offset
+ and it should be relocated for changes in its own address
+ as well as for changes in the symbol or section specified. */
+ unsigned int r_pcrel:1;
+ /* Length (as exponent of 2) of the field to be relocated.
+ Thus, a value of 2 indicates 1<<2 bytes. */
+ unsigned int r_length:2;
+ /* 1 => relocate with value of symbol.
+ r_symbolnum is the index of the symbol
+ in the file's symbol table.
+ 0 => relocate with the address of a segment.
+ r_symbolnum is N_TEXT, N_DATA, N_BSS or N_ABS
+ (the N_EXT bit may be set also, but signifies nothing). */
+ unsigned int r_extern:1;
+ /* Four bits that aren't used, but when writing an object file
+ it is desirable to clear them. */
+ unsigned int r_pad:4;
+};
+#endif /* no N_RELOCATION_INFO_DECLARED. */
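Decoding a record follows directly from the field comments above: r_length is a power-of-two exponent (so the patched field is 1 << r_length bytes wide), and r_extern selects whether r_symbolnum is a symbol-table index or one of the segment codes. A minimal sketch, for illustration only:

	/* Illustration only: interpret one relocation_info entry. */
	static void
	example_reloc(const struct relocation_info *r)
	{
		unsigned width = 1u << r->r_length;	/* 1, 2 or 4 bytes */

		if (r->r_extern) {
			/* r_symbolnum indexes the symbol table */
		} else {
			/* r_symbolnum is N_TEXT, N_DATA, N_BSS or N_ABS */
		}
		(void)width;
	}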
+#define M_32_SWAP(x) (x)
+#define M_16_SWAP(x) (x)
+#define M_8_SWAP(x) (x)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __A_OUT_GNU_H__ */
diff --git a/sys/arch/mvme88k/include/foo b/sys/arch/mvme88k/include/foo
new file mode 100644
index 00000000000..6d19c6950ab
--- /dev/null
+++ b/sys/arch/mvme88k/include/foo
@@ -0,0 +1,124 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: foo,v $
+ * Revision 1.1 1995/10/18 10:54:24 deraadt
+ * Initial revision
+ *
+ * Revision 2.6 93/01/26 18:01:15 danner
+ * Added #ifndef file wrapper.
+ * [93/01/25 jfriedl]
+ *
+ * Revision 2.5 93/01/14 17:53:26 danner
+ * u_int -> unsigned
+ * [92/12/02 jfriedl]
+ *
+ * Revision 2.4 92/08/03 17:52:34 jfriedl
+ * changed ifndef to depend on ASSEMBLER. [danner]
+ *
+ * Revision 2.3 92/05/21 17:23:01 jfriedl
+ * Appended 'U' to constants that would otherwise be signed.
+ * [92/05/16 jfriedl]
+ *
+ * Revision 2.2 92/02/18 18:03:52 elf
+ * Moved from luna88k
+ * [92/01/20 danner]
+ *
+ */
+
+#ifndef __MOTOROLA_M88K_M88100_PSL_H__
+#define __MOTOROLA_M88K_M88100_PSL_H__
+/*
+ * 88100 control registers
+ */
+
+/*
+ * processor identification register (PID)
+ */
+#define PID_ARN 0x0000FF00U /* architectural revision number */
+#define PID_VN 0x000000FEU /* version number */
+#define PID_MC 0x00000001U /* master/checker */
+
+/*
+ * processor status register
+ */
+#define PSR_MODE 0x80000000U /* supervisor/user mode */
+#define PSR_BO 0x40000000U /* byte-ordering 0:big 1:little */
+#define PSR_SER 0x20000000U /* serial mode */
+#define PSR_C 0x10000000U /* carry */
+#define PSR_SFD 0x000003F0U /* SFU disable */
+#define PSR_SFD1 0x00000008U /* SFU1 (FPU) disable */
+#define PSR_MXM 0x00000004U /* misaligned access enable */
+#define PSR_IND 0x00000002U /* interrupt disable */
+#define PSR_SFRZ 0x00000001U /* shadow freeze */
+
+/*
+ * This is used in ext_int() and hard_clock().
+ */
+#define PSR_IPL 0x00001000 /* for basepri */
+#define PSR_IPL_LOG 12 /* = log2(PSR_IPL) */
+
+#define PSR_MODE_LOG 31 /* = log2(PSR_MODE) */
+#define PSR_BO_LOG 30 /* = log2(PSR_BO) */
+#define PSR_SER_LOG 29 /* = log2(PSR_SER) */
+#define PSR_SFD1_LOG 3 /* = log2(PSR_SFD1) */
+#define PSR_MXM_LOG 2 /* = log2(PSR_MXM) */
+#define PSR_IND_LOG 1 /* = log2(PSR_IND) */
+#define PSR_SFRZ_LOG 0 /* = log2(PSR_SFRZ) */
+
+#define PSR_SUPERVISOR (PSR_MODE | PSR_SFD)
+#define PSR_USER (PSR_SFD)
+#define PSR_SET_BY_USER (PSR_BO | PSR_SER | PSR_C | PSR_MXM)
+
+#ifndef ASSEMBLER
+struct psr {
+ unsigned
+ psr_mode: 1,
+ psr_bo : 1,
+ psr_ser : 1,
+ psr_c : 1,
+ :18,
+ psr_sfd : 6,
+ psr_sfd1: 1,
+ psr_mxm : 1,
+ psr_ind : 1,
+ psr_sfrz: 1;
+};
+#endif
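The PSR_* masks and the struct psr bit-fields describe the same register; the supervisor/user bit is the top bit and the interrupt-disable bit is bit 1. A minimal sketch of testing a saved PSR image with the masks (illustration only):

	/* Illustration only: classify a saved PSR image. */
	static int
	psr_is_supervisor(unsigned psr)
	{
		return (psr & PSR_MODE) != 0;	/* bit 31: 1 = supervisor */
	}

	static int
	psr_interrupts_disabled(unsigned psr)
	{
		return (psr & PSR_IND) != 0;	/* bit 1: 1 = disabled */
	}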
+
+#define FIP_V 0x00000002U /* valid */
+#define FIP_E 0x00000001U /* exception */
+#define FIP_ADDR 0xFFFFFFFCU /* address mask */
+#define NIP_V 0x00000002U /* valid */
+#define NIP_E 0x00000001U /* exception */
+#define NIP_ADDR 0xFFFFFFFCU /* address mask */
+#define XIP_V 0x00000002U /* valid */
+#define XIP_E 0x00000001U /* exception */
+#define XIP_ADDR 0xFFFFFFFCU /* address mask */
+
+#endif /* __MOTOROLA_M88K_M88100_PSL_H__ */
diff --git a/sys/arch/mvme88k/include/limits.h b/sys/arch/mvme88k/include/limits.h
new file mode 100644
index 00000000000..abd96bd03e3
--- /dev/null
+++ b/sys/arch/mvme88k/include/limits.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * $Id: limits.h,v 1.1 1995/10/18 10:54:21 deraadt Exp $
+ */
+
+#define CHAR_BIT 8 /* number of bits in a char */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
+
+
+#define CLK_TCK 60 /* ticks per second */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 127 /* max value for a signed char */
+#define SCHAR_MIN (-128) /* min value for a signed char */
+
+#define UCHAR_MAX 255 /* max value for an unsigned char */
+#define CHAR_MAX 127 /* max value for a char */
+#define CHAR_MIN (-128) /* min value for a char */
+
+#define USHRT_MAX 65535 /* max value for an unsigned short */
+#define SHRT_MAX 32767 /* max value for a short */
+#define SHRT_MIN (-32768) /* min value for a short */
+
+#define UINT_MAX 0xffffffff /* max value for an unsigned int */
+#define INT_MAX 2147483647 /* max value for an int */
+#define INT_MIN (-2147483647-1) /* min value for an int */
+
+#define ULONG_MAX 0xffffffff /* max value for an unsigned long */
+#define LONG_MAX 2147483647 /* max value for a long */
+#define LONG_MIN (-2147483647-1) /* min value for a long */
+
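The comment above the limits explains why INT_MIN and LONG_MIN are written as (-2147483647-1): the literal 2147483648 does not fit in a 32-bit int (or long) and would take an unsigned type, so negating it would not produce a signed minimum usable in #if directives. A small host-side illustration, assuming a 32-bit two's complement int:

	/* Illustration only (assumes 32-bit two's complement int). */
	#include <stdio.h>

	int
	main(void)
	{
		int min = (-2147483647 - 1);	/* an int expression, value -2^31 */

		printf("%d\n", min);		/* prints -2147483648 */
		return 0;
	}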
+#if !defined(_ANSI_SOURCE)
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE */
+#endif /* !_ANSI_SOURCE */
diff --git a/sys/arch/mvme88k/include/locore.h b/sys/arch/mvme88k/include/locore.h
new file mode 100644
index 00000000000..cce0671b2f8
--- /dev/null
+++ b/sys/arch/mvme88k/include/locore.h
@@ -0,0 +1,301 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/* "locore.h" Omron Corporation
+ **********************************************************************
+ * This file created by Omron Corporation, 1990.
+ *
+ * HISTORY
+ *
+ *
+ **********************************************************************
+ *
+ * This file contains defines and such used by (most of) the assembly
+ * routines for Omron's Luna88 Mach system. It also may be included by
+ * some C language files.
+ *
+ **********************************************************************
+ * NOTE: Any assembly file that includes this one must define ASSEMBLER first
+ *
+ */
+
+#ifndef __MACHINE_LOCORE_H__
+#define __MACHINE_LOCORE_H__
+
+
+/*
+ **********************************************************************
+ SYNTACTICAL AND SEMANTIC DOO-DADS
+ **********************************************************************
+ */
+/*
+ * NEWLINE is defined in 'assym.s' in the object area to be a double
+ * backslash, which 'as' interprets the same as '\n'
+ */
+
+/*
+ * If this has been included in an assembly file, make sure
+ * LOCORE is defined. Always make sure KERNEL is defined.
+ */
+#if defined(ASSEMBLER) && !defined(LOCORE)
+# define LOCORE
+#endif
+#if !defined(KERNEL)
+# define KERNEL
+#endif
+
+/* Define EH_DEBUG to be non-zero to compile-in various debugging things */
+#ifndef EH_DEBUG
+#define EH_DEBUG 0
+#endif /* EH_DEBUG */
+
+/* this gives the offsets into various structures of various elements, etc */
+#include "assym.s"
+
+/*
+ * LABEL(name)
+ * Defines the name to be a label visible to the world.
+ *
+ * _LABEL(name)
+ * Defines one visible only to the file, unless debugging
+ * is enabled, in which case it's visible to the world (and
+ * hence to debuggers, and such).
+ */
+#define LABEL(name) name: global name NEWLINE
+#if EH_DEBUG
+# define _LABEL(name) name: global name NEWLINE
+#else
+# define _LABEL(name) name: NEWLINE
+#endif
+
+
+/*
+ * Useful in some situations.
+ * NOTE: If ARG1 or ARG2 are r2 or r3, strange things may happen. Watch out!
+ */
+#define CALL(NAME, ARG1, ARG2) \
+ subu r31, r31, 32 NEWLINE \
+ or r2, r0, ARG1 NEWLINE \
+ bsr.n NAME NEWLINE \
+ or r3, r0, ARG2 NEWLINE \
+ addu r31, r31, 32
+
+/*
+ **********************************************************************
+ SYMBOLIC CONSTANTS AND VALUES and other important things
+ **********************************************************************
+ */
+
+/*
+ * SR1 - CPU FLAGS REGISTER
+ *
+ * SR1 contains flags about the current CPU status.
+ *
+ * The lowest FLAG_CPU_FIELD_WIDTH bits hold the cpu number (currently 0-3).
+ *
+ *
+ * The bit FLAG_IGNORE_DATA_EXCEPTION indicates that any data exceptions
+ * should be ignored (well, at least treated in a special way).
+ * The bit FLAG_INTERRUPT_EXCEPTION indicates that the current exception
+ * is the interrupt exception. Such information can be gotten
+ * in other ways, but having it in the flags makes it easy for the
+ * exception handler to check quickly.
+ * The bit FLAG_ENABLING_FPU indicates that the exception handler is
+ * in the process of enabling the FPU (so that an exception can
+ * be serviced). This is needed because enabling the FPU can
+ * cause other exceptions to happen, and the whole system is
+ * in a rather precarious state and so special cautions must
+ * be taken.
+ */
+#define FLAG_CPU_FIELD_WIDTH 4 /* must be <= 12 */
+
+#define FLAG_IGNORE_DATA_EXCEPTION 5 /* bit number 5 */
+#define FLAG_INTERRUPT_EXCEPTION 6 /* bit number 6 */
+#define FLAG_ENABLING_FPU 7 /* bit number 7 */
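Given that layout, decoding SR1 is a pair of masks: the low FLAG_CPU_FIELD_WIDTH bits are the cpu number and each remaining flag is a single bit position. Hypothetical helper macros (not part of this header), shown only as a sketch:

	/* Hypothetical helpers: extract fields from an SR1 image. */
	#define SR1_CPU(sr1)		((sr1) & ((1 << FLAG_CPU_FIELD_WIDTH) - 1))
	#define SR1_FLAG(sr1, bit)	(((sr1) >> (bit)) & 1)

	/* e.g. SR1_FLAG(sr1, FLAG_ENABLING_FPU) is non-zero while the
	 * exception handler is in the middle of re-enabling the FPU. */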
+
+
+/* REGister OFFset into the E.F. (exception frame) */
+#define REG_OFF(reg_num) ((reg_num) * 4) /* (num * sizeof(register int)) */
+#define GENREG_OFF(num) (REG_OFF(EF_R0 + (num))) /* GENeral REGister OFFset */
+
+
+#define GENERAL_BREATHING_ROOM /* arbitrarily */ 200
+#define KERNEL_STACK_BREATHING_ROOM \
+ (GENERAL_BREATHING_ROOM + SIZEOF_STRUCT_PCB + SIZEOF_STRUCT_UTHREAD)
+
+/*
+ * Some registers used during the setting up of the new exception frame.
+ * Don't choose r1, r30, or r31 for any of them.
+ *
+ * Also, if any are 'r2' or 'r3', be careful using with CALL above!
+ */
+#define FLAGS r2
+#define TMP r3
+#define TMP2 r10
+#define TMP3 r11
+#define SAVE_TMP2 st r10, r31, GENREG_OFF(10)
+#define SAVE_TMP3 st r11, r31, GENREG_OFF(11)
+#define RESTORE_TMP2 ld r10, r31, GENREG_OFF(10)
+#define RESTORE_TMP3 ld r11, r31, GENREG_OFF(11)
+
+
+/* alternate CPU control register names */
+#define PID cr0
+#define PSR cr1
+#define EPSR cr2
+#define SSBR cr3
+#define SXIP cr4
+#define SNIP cr5
+#define SFIP cr6
+#define VBR cr7
+#define DMT0 cr8
+#define DMD0 cr9
+#define DMA0 cr10
+#define DMT1 cr11
+#define DMD1 cr12
+#define DMA1 cr13
+#define DMT2 cr14
+#define DMD2 cr15
+#define DMA2 cr16
+#define SR0 cr17
+#define SR1 cr18
+#define SR2 cr19
+#define SR3 cr20
+#define FPECR fcr0
+#define FPHS1 fcr1
+#define FPLS1 fcr2
+#define FPHS2 fcr3
+#define FPLS2 fcr4
+#define FPPT fcr5
+#define FPRH fcr6
+#define FPRL fcr7
+#define FPIT fcr8
+#define FPSR fcr62
+#define FPCR fcr63
+
+/*
+ * Info about the PSR
+ */
+#define PSR_SHADOW_FREEZE_BIT 0
+#define PSR_INTERRUPT_DISABLE_BIT 1
+#define PSR_FPU_DISABLE_BIT 3
+#define PSR_BIG_ENDIAN_MODE 30
+#define PSR_SUPERVISOR_MODE_BIT 31
+
+/*
+ * Status bits for an SXIP/SNIP/SFIP address.
+ */
+#define RTE_VALID_BIT 1
+#define RTE_ERROR_BIT 0
+
+/*
+ * Info about DMT0/DMT1/DMT2
+ */
+#define DMT_VALID_BIT 0
+#define DMT_WRITE_BIT 1
+#define DMT_LOCK_BIT 12
+#define DMT_DOUBLE_BIT 13
+#define DMT_DAS_BIT 14
+#define DMT_DREG_OFFSET 7
+#define DMT_DREG_WIDTH 5
+
+/*
+ * Bits for eh_debug.
+ */
+#define DEBUG_INTERRUPT_BIT 0
+#define DEBUG_DATA_BIT 1
+#define DEBUG_INSTRUCTION_BIT 2
+#define DEBUG_MISALIGN_BIT 3
+#define DEBUG_UNIMP_BIT 4
+#define DEBUG_DIVIDE_BIT 5
+#define DEBUG_OF_BIT 6
+#define DEBUG_FPp_BIT 7
+#define DEBUG_FPi_BIT 8
+#define DEBUG_SYSCALL_BIT 9
+#define DEBUG_MACHSYSCALL_BIT 10
+#define DEBUG_UNIMPLEMENTED_BIT 11
+#define DEBUG_PRIVILEGE_BIT 12
+#define DEBUG_BOUNDS_BIT 13
+#define DEBUG_OVERFLOW_BIT 14
+#define DEBUG_ERROR_BIT 15
+#define DEBUG_SIGSYS_BIT 16
+#define DEBUG_SIGTRAP_BIT 17
+#define DEBUG_BREAK_BIT 18
+#define DEBUG_TRACE_BIT 19
+#define DEBUG_KDB_BIT 20
+#define DEBUG_JKDB_BIT 21
+#define DEBUG_BUGCALL_BIT 22
+
+#define DEBUG_UNKNOWN_BIT 31
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+#define YES 1
+#define NO 0
+
+#define SCSI_INTS 0x10
+#define SCSI_SSTS 0x18
+#define SCSI_DREG 0x28
+
+/* change software timer for 8mm device support -- 90/08/21 CEC OKUI */
+#define SCSI_WAIT 0x5000000
+
+/*
+ * At various times, there is the need to clear the pipeline (i.e.
+ * synchronize). A "tcnd ne0, r0, foo" will do that (because a trap
+ * instruction always synchronizes, and this particular instruction
+ * will never actually take the trap).
+ */
+#define FLUSH_PIPELINE tcnd ne0, r0, 0
+
+/*
+ * NOP -- NO-Operation.
+ *
+ * A do-nothing one clock doesn't-touch-the-scoreboard type of instruction,
+ * in case one's needed (sometimes useful for debugging).
+ */
+#define NOP or r0, r0, r0
+
+/*
+ * These things for vector_init.c and locore.c
+ */
+#if defined(ASSEMBLER)
+# define PREDEFINED_BY_ROM 0xffffffff
+# define END_OF_VECTOR_LIST 0xfffffffe
+#else
+# define PREDEFINED_BY_ROM 0xffffffffU
+# define END_OF_VECTOR_LIST 0xfffffffeU
+#endif
+
+/*
+ * Define ERRATA__XXX_USR if the xxx.usr bug (mask C82N) is present.
+ * This implements the workaround.
+ */
+#define ERRATA__XXX_USR 1
+
+#define USERMODE(x) (!(x & (1 << PSR_SUPERVISOR_MODE_BIT)))
+
+#endif /* __MACHINE_LOCORE_H__ */
diff --git a/sys/arch/mvme88k/include/m88100.h b/sys/arch/mvme88k/include/m88100.h
new file mode 100644
index 00000000000..eb047bb08e0
--- /dev/null
+++ b/sys/arch/mvme88k/include/m88100.h
@@ -0,0 +1,69 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ */
+/*
+ * M88100 flags
+ */
+
+#ifndef _M88100_H_
+#define _M88100_H_
+
+
+/*
+ * 88100 RISC definitions
+ */
+
+/* DMT0, DMT1, DMT2 */
+#define DMT_BO 0x00008000 /* Byte-Ordering */
+#define DMT_DAS 0x00004000 /* Data Access Space */
+#define DMT_DOUB1 0x00002000 /* Double Word */
+#define DMT_LOCKBAR 0x00001000 /* Bus Lock */
+#define DMT_DREG 0x00000F80 /* Destination Registers 5bits */
+#define DMT_SIGNED 0x00000040 /* Sign-Extended Bit */
+#define DMT_EN 0x0000003C /* Byte Enable Bit */
+#define DMT_WRITE 0x00000002 /* Read/Write Transaction Bit */
+#define DMT_VALID 0x00000001 /* Valid Transaction Bit */
+
+#ifndef ASSEMBLER
+#include "sys/types.h"
+
+struct dmt_reg {
+ unsigned int :16,
+ dmt_bo:1,
+ dmt_das:1,
+ dmt_doub1:1,
+ dmt_lockbar:1,
+ dmt_dreg:5,
+ dmt_signed:1,
+ dmt_en:4,
+ dmt_write:1,
+ dmt_valid:1;
+};
+#endif
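The DMT_* masks and struct dmt_reg describe the same data-unit transaction word; DMT_DREG (0x00000F80), for example, is the five-bit destination register number starting at bit 7. A minimal sketch of decoding a saved DMT value with the masks (illustration only):

	/* Illustration only: decode a data-unit transaction register. */
	static void
	example_dmt(unsigned dmt)
	{
		if (dmt & DMT_VALID) {
			unsigned dreg = (dmt & DMT_DREG) >> 7;	/* destination reg */
			int is_write = (dmt & DMT_WRITE) != 0;

			(void)dreg; (void)is_write;
		}
	}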
+
+#endif /* _M88100_H_ */
diff --git a/sys/arch/mvme88k/include/m882xx.h b/sys/arch/mvme88k/include/m882xx.h
new file mode 100644
index 00000000000..4c9759db1bc
--- /dev/null
+++ b/sys/arch/mvme88k/include/m882xx.h
@@ -0,0 +1,259 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ *
+ */
+
+
+#ifndef __MACHINE_M882XX_H__
+#define __MACHINE_M882XX_H__
+
+#ifndef ASSEMBLER
+# include <machine/mmu.h> /* batc_template_t */
+#endif
+
+#include <machine/board.h>
+
+/*
+ * 88200 CMMU definitions
+ */
+#define CMMU_IDR 0x000 /* CMMU id register */
+#define CMMU_SCR 0x004 /* system command register */
+#define CMMU_SSR 0x008 /* system status register */
+#define CMMU_SAR 0x00C /* system address register */
+#define CMMU_SCTR 0x104 /* system control register */
+#define CMMU_PFSR 0x108 /* P bus fault status register */
+#define CMMU_PFAR 0x10C /* P bus fault address register */
+#define CMMU_SAPR 0x200 /* supervisor area pointer register */
+#define CMMU_UAPR 0x204 /* user area pointer register */
+#define CMMU_BWP0 0x400 /* block ATC writer port 0 */
+#define CMMU_BWP1 0x404 /* block ATC writer port 1 */
+#define CMMU_BWP2 0x408 /* block ATC writer port 2 */
+#define CMMU_BWP3 0x40C /* block ATC writer port 3 */
+#define CMMU_BWP4 0x410 /* block ATC writer port 4 */
+#define CMMU_BWP5 0x414 /* block ATC writer port 5 */
+#define CMMU_BWP6 0x418 /* block ATC writer port 6 */
+#define CMMU_BWP7 0x41C /* block ATC writer port 7 */
+#define CMMU_CDP0 0x800 /* cache data port 0 */
+#define CMMU_CDP1 0x804 /* cache data port 1 */
+#define CMMU_CDP2 0x808 /* cache data port 2 */
+#define CMMU_CDP3 0x80C /* cache data port 3 */
+#define CMMU_CTP0 0x840 /* cache tag port 0 */
+#define CMMU_CTP1 0x844 /* cache tag port 1 */
+#define CMMU_CTP2 0x848 /* cache tag port 2 */
+#define CMMU_CTP3 0x84C /* cache tag port 3 */
+#define CMMU_CSSP 0x880 /* cache set status register */
+
+/* 88204 CMMU definitions */
+#define CMMU_CSSP0 0x880 /* cache set status register */
+#define CMMU_CSSP1 0x890 /* cache set status register */
+#define CMMU_CSSP2 0x8A0 /* cache set status register */
+#define CMMU_CSSP3 0x8B0 /* cache set status register */
+
+/* CMMU system commands */
+#define CMMU_FLUSH_USER_LINE 0x30 /* flush PATC */
+#define CMMU_FLUSH_USER_PAGE 0x31
+#define CMMU_FLUSH_USER_SEGMENT 0x32
+#define CMMU_FLUSH_USER_ALL 0x33
+#define CMMU_FLUSH_SUPER_LINE 0x34
+#define CMMU_FLUSH_SUPER_PAGE 0x35
+#define CMMU_FLUSH_SUPER_SEGMENT 0x36
+#define CMMU_FLUSH_SUPER_ALL 0x37
+#define CMMU_PROBE_USER 0x20 /* probe user address */
+#define CMMU_PROBE_SUPER 0x24 /* probe supervisor address */
+#define CMMU_FLUSH_CACHE_INV_LINE 0x14 /* data cache invalidate */
+#define CMMU_FLUSH_CACHE_INV_PAGE 0x15
+#define CMMU_FLUSH_CACHE_INV_SEGMENT 0x16
+#define CMMU_FLUSH_CACHE_INV_ALL 0x17
+#define CMMU_FLUSH_CACHE_CB_LINE 0x18 /* data cache copyback */
+#define CMMU_FLUSH_CACHE_CB_PAGE 0x19
+#define CMMU_FLUSH_CACHE_CB_SEGMENT 0x1A
+#define CMMU_FLUSH_CACHE_CB_ALL 0x1B
+#define CMMU_FLUSH_CACHE_CBI_LINE 0x1C /* copyback and invalidate */
+#define CMMU_FLUSH_CACHE_CBI_PAGE 0x1D
+#define CMMU_FLUSH_CACHE_CBI_SEGMENT 0x1E
+#define CMMU_FLUSH_CACHE_CBI_ALL 0x1F
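A flush is typically issued by writing the target address to the system address register and then the command code to the system command register; the offsets are the CMMU_SAR/CMMU_SCR values above. The sketch below is illustration only and assumes a memory-mapped CMMU whose base address is supplied by the caller (cmmu_base is hypothetical):

	/* Hedged sketch: flush one supervisor page on one CMMU. */
	static void
	example_cmmu_flush_page(volatile unsigned char *cmmu_base, unsigned vaddr)
	{
		*(volatile unsigned *)(cmmu_base + CMMU_SAR) = vaddr;
		*(volatile unsigned *)(cmmu_base + CMMU_SCR) = CMMU_FLUSH_SUPER_PAGE;
	}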
+
+/* CMMU system control command */
+#define CMMU_SCTR_PE 0x00008000 /* parity enable */
+#define CMMU_SCTR_SE 0x00004000 /* snoop enable */
+#define CMMU_SCTR_PR 0x00002000 /* priority arbitration */
+
+/* CMMU P bus fault status */
+#define CMMU_PFSR_SUCCESS 0 /* no fault */
+#define CMMU_PFSR_BERROR 3 /* bus error */
+#define CMMU_PFSR_SFAULT 4 /* segment fault */
+#define CMMU_PFSR_PFAULT 5 /* page fault */
+#define CMMU_PFSR_SUPER 6 /* supervisor violation */
+#define CMMU_PFSR_WRITE 7 /* write violation */
+
+/* Area Description */
+#define AREA_D_WT 0x00000200 /* write through */
+#define AREA_D_G 0x00000080 /* global */
+#define AREA_D_CI 0x00000040 /* cache inhibit */
+#define AREA_D_TE 0x00000001 /* translation enable */
+
+/* Segment Description */
+#define SEG_D_WT 0x00000200 /* write through */
+#define SEG_D_SP 0x00000100 /* supervisor protection */
+#define SEG_D_G 0x00000080 /* global */
+#define SEG_D_CI 0x00000040 /* cache inhibit */
+#define SEG_D_WP 0x00000004 /* write protect */
+#define SEG_D_V 0x00000001 /* valid */
+
+/*
+ * Flags for cmmu_flush_tlb
+ */
+#define FLUSH_KERNEL 1
+#define FLUSH_USER 0
+#define FLUSH_ALL ((vm_offset_t)~0)
+
+
+#ifndef ASSEMBLER
+/*
+ * This file defines the data structures for the mmu.
+ * One major data structure, the page descriptor, is not defined here
+ * but rather in pte.h as struct pte.
+ */
+
+struct area_d { /* area descriptor */
+ unsigned
+ ad_addr:20, /* segment table base address */
+ : 2,
+ ad_wt : 1, /* write through */
+ : 1,
+ ad_g : 1, /* global */
+ ad_ci : 1, /* cache inhibit */
+ : 5,
+ ad_te : 1; /* translation enable */
+};
+
+struct segment_d { /* segment descriptor */
+ unsigned
+ sd_addr:20, /* page table base address */
+ : 2,
+ sd_wt : 1, /* write through */
+ sd_sp : 1, /* supervisor protection */
+ sd_g : 1, /* global */
+ sd_ci : 1, /* cache inhibit */
+ : 3,
+ sd_wp : 1, /* write protect */
+ : 1,
+ sd_v : 1; /* valid */
+};
+
+typedef struct segment_d segment_d_t;
+
+struct pfsr { /* P bus fault status register */
+ unsigned
+ :13,
+ pfsr_fc: 3, /* fault code */
+ :16;
+};
+
+struct batc { /* block address translation register */
+ unsigned
+ batc_lba:13, /* logical block address */
+ batc_pba:13, /* physical block address */
+ batc_s : 1, /* supervisor */
+ batc_wt : 4, /* write through */
+ batc_g : 1, /* global */
+ batc_ci : 1, /* cache inhibit */
+ batc_wp : 1, /* write protect */
+ batc_v : 1; /* valid */
+};
+
+/*
+ * Prototypes and stuff for cmmu.c.
+ */
+extern unsigned cpu_sets[MAX_CPUS];
+extern unsigned ncpus;
+extern unsigned cache_policy;
+
+#ifdef CMMU_DEBUG
+ void show_apr(unsigned value);
+ void show_sctr(unsigned value);
+#endif
+
+/*
+ * Prototypes from "motorola/m88k/m88100/cmmu.c"
+ */
+unsigned cmmu_cpu_number(void);
+#if !DDB
+static
+#endif /* !DDB */
+unsigned cmmu_remote_get(unsigned cpu, unsigned r, unsigned data);
+unsigned cmmu_get_idr(unsigned data);
+void cmmu_init(void);
+void cmmu_shutdown_now(void);
+void cmmu_parity_enable(void);
+#if !DDB
+static
+#endif /* !DDB */
+void cmmu_remote_set(unsigned cpu, unsigned r, unsigned data, unsigned x);
+void cmmu_set_sapr(unsigned ap);
+void cmmu_remote_set_sapr(unsigned cpu, unsigned ap);
+void cmmu_set_uapr(unsigned ap);
+void cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size);
+void cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size);
+void cmmu_flush_cache(vm_offset_t physaddr, int size);
+void cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size);
+void cmmu_flush_inst_cache(vm_offset_t physaddr, int size);
+void cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size);
+void cmmu_flush_data_cache(vm_offset_t physaddr, int size);
+
+void cmmu_pmap_activate(
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX]);
+
+void cmmu_flush_remote_tlb(
+ unsigned cpu,
+ unsigned kernel,
+ vm_offset_t vaddr,
+ int size);
+
+void cmmu_set_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned data, /* 1 = data, 0 = instruction */
+ unsigned value); /* the value to stuff into the batc */
+
+void cmmu_set_pair_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned value); /* the value to stuff into the batc */
+
+#endif /* ASSEMBLER */
+
+#define INST_CMMU 0
+#define DATA_CMMU 1
+
+#define NBSG (4*1024*1024) /* segment size */
+
+#endif /* __MACHINE_M882XX_H__ */
diff --git a/sys/arch/mvme88k/include/mmu.h b/sys/arch/mvme88k/include/mmu.h
new file mode 100644
index 00000000000..fda1117f667
--- /dev/null
+++ b/sys/arch/mvme88k/include/mmu.h
@@ -0,0 +1,306 @@
+/*
+ * Ashura Project
+ */
+/*
+ * HISTORY
+ *
+ * Original SCCS ID in ISEDL
+ * @(#)mmu.h 1.22 90/09/20 19:13:34
+ */
+
+#ifndef _MACHINE_MMU_
+#define _MACHINE_MMU_
+
+/* for M88K_PGBYTES, M88K_PGSHIFT */
+#include <machine/vmparam.h>
+
+
+/*
+ * Parameters which determine the 'geometry' of the M88K page tables in memory.
+ */
+#define SDT_BITS 10 /* M88K segment table size bits */
+#define PDT_BITS 10 /* M88K page table size bits */
+#define PG_BITS M88K_PGSHIFT /* M88K hardware page size bits */
+
+/*
+ * Shifts and masks for M88K (hardware) page
+ */
+/* M88K_PGBYTES, PG_SHIFT in vm_param.h */
+#define M88K_PGOFSET (M88K_PGBYTES-1) /* offset into M88K page */
+#define M88K_PGMASK (~M88K_PGOFSET) /* page mask */
+
+/*
+ * Convert byte address to page frame number
+ */
+#define M88K_BTOP(x) (((unsigned) (x)) >> M88K_PGSHIFT)
+#define M88K_PTOB(x) (((unsigned) (x)) << M88K_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work for
+ * either addresses or counts (i.e. 1 byte rounds up to one page).
+ */
+#define M88K_TRUNC_PAGE(x) (((unsigned) (x) & M88K_PGMASK))
+#define M88K_ROUND_PAGE(x) M88K_TRUNC_PAGE((x) + M88K_PGOFSET)
+
+/*
+ * M88K area descriptors
+ */
+typedef struct cmmu_apr {
+ unsigned long
+ st_base:20, /* segment table base address */
+ rsvA:2, /* reserved */
+ wt:1, /* writethrough (cache control) */
+ rsvB:1, /* reserved */
+ g:1, /* global (cache control) */
+ ci:1, /* cache inhibit */
+ rsvC:5, /* reserved */
+ te:1; /* translation enable */
+} cmmu_apr_t;
+
+typedef union apr_template {
+ cmmu_apr_t field;
+ unsigned long bits;
+} apr_template_t;
+
+/*
+ * M88K segment descriptors
+ */
+typedef struct sdt_entry {
+ unsigned long
+ table_addr:20, /* page table base address */
+ rsvA:2, /* reserved */
+ wt:1, /* writethrough (cache control) */
+ sup:1, /* supervisor protection */
+ g:1, /* global (cache control) */
+ no_cache:1, /* cache inhibit */
+ rsvB:3, /* reserved */
+ prot:1, /* write protect */
+ rsvC:1, /* reserved */
+ dtype:1; /* valid */
+} sdt_entry_t;
+
+typedef union sdt_entry_template {
+ sdt_entry_t sdt_desc;
+ unsigned long bits;
+} sdt_entry_template_t;
+
+#define SDT_ENTRY_NULL ((sdt_entry_t *) 0)
+
+/*
+ * M88K page descriptors
+ */
+typedef struct pt_entry {
+ unsigned long
+ pfn:20, /* page frame address */
+ rsvA:1, /* reserved */
+ wired:1, /* wired bit <<software>> */
+ wt:1, /* writethrough (cache control) */
+ sup:1, /* supervisor protection */
+ g:1, /* global (cache control) */
+ ci:1, /* cache inhibit */
+ rsvB:1, /* reserved */
+ modified:1, /* modified */
+ pg_used:1, /* used (referenced) */
+ prot:1, /* write protect */
+ rsvC:1, /* reserved */
+ dtype:1; /* valid */
+} pt_entry_t;
+
+typedef union pte_template {
+ pt_entry_t pte;
+ unsigned long bits;
+} pte_template_t;
+
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+/*
+ * 88200 PATC (TLB)
+ */
+
+#define PATC_ENTRIES 56
+
+/*
+ * M88K BATC entries
+ */
+typedef struct {
+ unsigned long
+ lba:13, /* logical block address */
+ pba:13, /* physical block address */
+ sup:1, /* supervisor mode bit */
+ wt:1, /* writethrough (cache control) */
+ g:1, /* global (cache control) */
+ ci:1, /* cache inhibit */
+ wp:1, /* write protect */
+ v:1; /* valid */
+} batc_entry_t;
+
+typedef union batc_template {
+ batc_entry_t field;
+ unsigned long bits;
+} batc_template_t;
+
+/*
+ * Parameters and macros for BATC
+ */
+#define BATC_BLKBYTES (512*1024) /* 'block' size of a BATC entry mapping */
+#define BATC_BLKSHIFT 19 /* number of bits to BATC shift (log2(BATC_BLKBYTES)) */
+#define BATC_BLKMASK (BATC_BLKBYTES-1) /* BATC block mask */
+
+#define BATC_MAX 8 /* number of BATC entries */
+
+#define BATC_BLK_ALIGNED(x) ((x & BATC_BLKMASK) == 0)
+
+#define M88K_BTOBLK(x) (x >> BATC_BLKSHIFT)
+
+/*
+ * protection codes (prot field)
+ */
+#define M88K_RO 1 /* read only */
+#define M88K_RW 0 /* read/write */
+
+/*
+ * protection codes (sup field)
+ */
+#define M88K_SUPV 1 /* translation can only be done in supervisor mode */
+#define M88K_USER 0 /* translation can be done supv. or user mode */
+
+/*
+ * descriptor types
+ */
+#define DT_INVALID 0
+#define DT_VALID 1
+
+/*
+ * Number of entries in a page table.
+ */
+#define SDT_ENTRIES (1<<(SDT_BITS))
+#define PDT_ENTRIES (1<<(PDT_BITS))
+
+/*
+ * Size in bytes of a single page table.
+ */
+#define SDT_SIZE (sizeof(sdt_entry_t) * SDT_ENTRIES)
+#define PDT_SIZE (sizeof(pt_entry_t) * PDT_ENTRIES)
+
+/*
+ * Shifts and masks
+ */
+#define SDT_SHIFT (PDT_BITS + PG_BITS)
+#define PDT_SHIFT (PG_BITS)
+
+#define SDT_MASK (((1<<SDT_BITS)-1) << SDT_SHIFT)
+#define PDT_MASK (((1<<PDT_BITS)-1) << PDT_SHIFT)
+
+#define SDT_NEXT(va) ((va + (1<<SDT_SHIFT)) & SDT_MASK)
+#define PDT_NEXT(va) ((va + (1<<PDT_SHIFT)) & (SDT_MASK|PDT_MASK))
+
+#define SDTIDX(va) ((va & SDT_MASK) >> SDT_SHIFT)
+#define PDTIDX(va) ((va & PDT_MASK) >> PDT_SHIFT)
+
+#define SDTENT(map, va) ((sdt_entry_t *)(map->sdt_vaddr + SDTIDX(va)))
+
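SDTIDX() and PDTIDX() split a virtual address into a 10-bit segment index, a 10-bit page index and a page offset, which is the entire two-level walk. A sketch of a software walk using the descriptor structures above (illustration only; it assumes the page table is directly addressable at the physical address held in table_addr, as the kvtopte() macro below also assumes):

	/* Illustration only: software two-level page-table walk. */
	static pt_entry_t *
	example_walk(sdt_entry_t *sdt_base, unsigned va)
	{
		sdt_entry_t *sd = sdt_base + SDTIDX(va);
		pt_entry_t *pt;

		if (sd->dtype != DT_VALID)
			return PT_ENTRY_NULL;
		pt = (pt_entry_t *)(sd->table_addr << PDT_SHIFT);
		return pt + PDTIDX(va);
	}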
+/*
+ * Size of a PDT table group.
+ */
+#define LOG2_PDT_SIZE (PDT_BITS + 2)
+#define LOG2_PDT_TABLE_GROUP_SIZE (PAGE_SHIFT - LOG2_PDT_SIZE)
+#define PDT_TABLE_GROUP_SIZE (1 << LOG2_PDT_TABLE_GROUP_SIZE)
+#define PT_FREE(tbl) kmem_free(kernel_map, tbl, PAGE_SIZE)
+
+/*
+ * VA space mapped by a page table and by a PDT table group.
+ */
+#define PDT_VA_SPACE (PDT_ENTRIES * M88K_PGBYTES)
+#define PDT_TABLE_GROUP_VA_SPACE (PDT_VA_SPACE * PDT_TABLE_GROUP_SIZE)
+
+/*
+ * Number of sdt entries used to map user and kernel space.
+ */
+#define USER_SDT_ENTRIES SDTIDX(VM_MIN_KERNEL_ADDRESS)
+#define KERNEL_SDT_ENTRIES (SDT_ENTRIES - USER_SDT_ENTRIES)
+
+/*
+ * Macros to check if the descriptor is valid.
+ */
+#define SDT_VALID(sd_ptr) ((sd_ptr)->dtype == DT_VALID)
+#define PDT_VALID(pd_ptr) ((pd_ptr)->dtype == DT_VALID)
+
+/*
+ * Alignment checks for pages (must lie on page boundaries).
+ */
+#define PAGE_ALIGNED(ad) (((vm_offset_t)(ad) & ~M88K_PGMASK) == 0)
+#define CHECK_PAGE_ALIGN(ad,who) \
+ if (!PAGE_ALIGNED(ad)) \
+ printf("%s: addr %x not page aligned.\n", who, ad)
+
+/*
+ * Validate PTE's for all hardware pages in a VM page.
+ * "ptes_per_vm_page" should be set in pmap_bootstrap.
+ *
+ * PARAMETERS:
+ * pt_entry_t *start;
+ * unsigned long template;
+ */
+#define DO_PTES(start, template) \
+{ \
+ int i_; \
+ pt_entry_t *p_ = start; \
+ \
+ for (i_ = ptes_per_vm_page; i_>0; i_--) { \
+ *(int *)p_++ = (unsigned long)(template); \
+ template += M88K_PGBYTES; \
+ /* (unsigned long)(template) for m88k C compiler\
+ '90.7.24 Fuzzy */ \
+ } \
+}
+
+/*
+ * Flags for cmmu_store() <cmmu.s>
+ */
+#define STORE_CMD 0
+#define STORE_UAPR 4
+#define STORE_SAPR 8
+#define STORE_BATCWP 0x400
+
+#define C_CMMU 0
+#define D_CMMU 0x1000
+
+/*
+ * Parameters for ATC (TLB) flush
+ */
+
+#define CMMU_SCR 0x004
+
+#define FLUSH_SUP_ALL 0x37
+#define FLUSH_USR_ALL 0x33
+#define FLUSH_SUP_SEG 0x36
+#define FLUSH_USR_SEG 0x32
+#define FLUSH_SUP_PG 0x35
+#define FLUSH_USR_PG 0x31
+
+/*
+ * Cache control bits for pte
+ */
+#define CACHE_DFL 0
+#define CACHE_INH 0x40
+#define CACHE_GLOBAL 0x80
+#define CACHE_WT 0x200
+
+#define CACHE_MASK (~(unsigned)(CACHE_INH | CACHE_GLOBAL | CACHE_WT))
+
+/*
+ * Prototype for invalidate_pte found in "motorola/m88k/m88100/misc.s"
+ */
+unsigned invalidate_pte(pt_entry_t *pointer);
+
+extern vm_offset_t kmapva;
+
+#define kvtopte(va) \
+({ \
+ sdt_entry_t *sdt; \
+ sdt = (sdt_entry_t *)kmapva + SDTIDX(va) + SDT_ENTRIES; \
+ (pte_template_t *)(sdt->table_addr << PDT_SHIFT) + PDTIDX(va); \
+})
+
+#endif
+/* endif _MACHINE_MMU_ */
diff --git a/sys/arch/mvme88k/include/param.h b/sys/arch/mvme88k/include/param.h
new file mode 100644
index 00000000000..37f380ffe3c
--- /dev/null
+++ b/sys/arch/mvme88k/include/param.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: machparam.h 1.11 89/08/14$
+ *
+ * @(#)param.h 7.8 (Berkeley) 6/28/91
+ * $Id: param.h,v 1.1 1995/10/18 10:54:21 deraadt Exp $
+ */
+#ifndef _MACHINE_PARAM_H_
+#define _MACHINE_PARAM_H_
+
+/*
+ * Machine dependent constants for m88k
+ */
+#define MACHINE "m88k"
+#define MACHINE_ARCH "m88k"
+#define MID_MACHINE MID_M88K
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value
+ * for all data types (int, long, ...). The result is u_int and
+ * must be cast to any desired pointer type.
+ */
+#define ALIGNBYTES (sizeof(int) - 1)
+#define ALIGN(p) (((u_int)(p) + (sizeof(int) - 1)) &~ (sizeof(int) - 1))
+
+#ifndef NBPG
+#define NBPG 4096 /* bytes/page */
+#endif /* NBPG */
+#define PGOFSET (NBPG-1) /* byte offset into page */
+#define PGSHIFT 12 /* LOG2(NBPG) */
+#define NPTEPG (NBPG/(sizeof(u_int)))
+
+#define NBSEG (1<<22) /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+
+/*
+ * 187 Bug uses the bottom 64k. We allocate ptes to map this into the
+ * kernel. But when we link the kernel, we tell it to start linking
+ * past this 64k. How does this change KERNBASE? XXX
+ */
+
+#define KERNBASE 0x0 /* start of kernel virtual */
+#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+
+#define DEV_BSIZE 512
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define BLKDEV_IOSIZE 2048 /* Should this be changed? XXX */
+#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */
+
+#define CLSIZE 1
+#define CLSIZELOG2 0
+
+/* NOTE: SSIZE, SINCR and UPAGES must be multiples of CLSIZE */
+#define SSIZE 1 /* initial stack size/NBPG */
+#define SINCR 1 /* increment of stack/NBPG */
+#define USPACE ctob(UPAGES)
+
+#define UPAGES 3 /* pages of u-area */
+#define UADDR 0xFFEE0000 /* address of u */
+#define UVPN (UADDR>>PGSHIFT)/* virtual page number of u */
+#define KERNELSTACK (UADDR+UPAGES*NBPG) /* top of kernel stack */
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than CLBYTES (the software page size), and,
+ * on machines that exchange pages of input or output buffers with mbuf
+ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
+ * of the hardware page size.
+ */
+#define MSIZE 128 /* size of an mbuf */
+#define MCLBYTES 1024
+#define MCLSHIFT 10
+#define MCLOFSET (MCLBYTES - 1)
+#ifndef NMBCLUSTERS
+#ifdef GATEWAY
+#define NMBCLUSTERS 512 /* map size, max cluster allocation */
+#else
+#define NMBCLUSTERS 256 /* map size, max cluster allocation */
+#endif
+#endif
+
+/*
+ * Size of kernel malloc arena in CLBYTES-sized logical pages
+ */
+#ifndef NKMEMCLUSTERS
+#define NKMEMCLUSTERS (3072*1024/CLBYTES)
+#endif
+
+#define MAXPARTITIONS 16
+
+/* pages ("clicks") to disk blocks */
+#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT))
+#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT))
+#define dtob(x) ((x)<<DEV_BSHIFT)
+
+/* pages to bytes */
+#define ctob(x) ((x)<<PGSHIFT)
+
+/* bytes to pages */
+#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT)
+
+#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
+ ((unsigned)(bytes) >> DEV_BSHIFT)
+#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
+ ((unsigned)(db) << DEV_BSHIFT)
+
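All of these conversions are shifts by the page and device-block exponents; with NBPG 4096 and DEV_BSIZE 512 one page is eight disk blocks. A small illustration of the macros above (illustration only):

	/* Illustration only: page/byte/disk-block conversions. */
	static void
	example_units(void)
	{
		unsigned pages = 3;
		unsigned bytes = ctob(pages);	/* 3 * 4096 = 12288 */
		unsigned dblks = ctod(pages);	/* 3 * 8 = 24 */
		unsigned back  = btoc(bytes);	/* rounds up: 3 */

		(void)dblks; (void)back;
	}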
+/*
+ * Map a ``block device block'' to a file system block.
+ * This should be device dependent, and should use the bsize
+ * field from the disk label.
+ * For now though just use DEV_BSIZE.
+ */
+#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE))
+#include <machine/psl.h>
+
+#ifdef JUNK
+/*
+ * Mach derived conversion macros
+ */
+#define m88k_round_seg(x) ((((unsigned)(x)) + NBSEG - 1) & ~(NBSEG-1))
+#define m88k_trunc_seg(x) ((unsigned)(x) & ~(NBSEG-1))
+#define m88k_round_page(x) ((((unsigned)(x)) + NBPG - 1) & ~(NBPG-1))
+#define m88k_trunc_page(x) ((unsigned)(x) & ~(NBPG-1))
+#define m88k_btos(x) ((unsigned)(x) >> SEGSHIFT)
+#define m88k_stob(x) ((unsigned)(x) << SEGSHIFT)
+#define m88k_btop(x) ((unsigned)(x) >> PGSHIFT)
+#define m88k_ptob(x) ((unsigned)(x) << PGSHIFT)
+
+/*
+ * spl functions; all but spl0 are done in-line
+ */
+#include <machine/psl.h>
+
+#define _debug_spl(s) \
+({ \
+ register int _spl_r; \
+\
+ asm __volatile ("clrl %0; movew sr,%0; movew %1,sr" : \
+ "&=d" (_spl_r) : "di" (s)); \
+ if ((_spl_r&PSL_IPL) > (s&PSL_IPL)) \
+ printf ("%s:%d:spl(%d) ==> spl(%d)!!\n",__FILE__,__LINE__, \
+ ((PSL_IPL&_spl_r)>>8), ((PSL_IPL&s)>>8)); \
+ _spl_r; \
+})
+
+#define _spl_no_check(s) \
+({ \
+ register int _spl_r; \
+\
+ asm __volatile ("clrl %0; movew sr,%0; movew %1,sr" : \
+ "&=d" (_spl_r) : "di" (s)); \
+ _spl_r; \
+})
+#if defined (DEBUG)
+#define _spl _debug_spl
+#else
+#define _spl _spl_no_check
+#endif
+
+/* spl0 requires checking for software interrupts */
+#define spl1() _spl(PSL_S|PSL_IPL1)
+#define spl2() _spl(PSL_S|PSL_IPL2)
+#define spl3() _spl(PSL_S|PSL_IPL3)
+#define spl4() _spl(PSL_S|PSL_IPL4)
+#define spl5() _spl(PSL_S|PSL_IPL5)
+#define spl6() _spl(PSL_S|PSL_IPL6)
+#define spl7() _spl(PSL_S|PSL_IPL7)
+
+
+#define splnone() spl0()
+#define splsoftclock() spl1()
+#define splnet() spl1()
+#define splbio() spl3()
+#define splimp() spl3()
+#define spltty() spl4()
+#define splclock() spl6()
+#define splstatclock() spl6()
+#define splvm() spl6()
+#define splhigh() spl7()
+#define splsched() spl7()
+#endif /* JUNK */
+
+#ifdef _KERNEL
+#define DELAY(x) delay(x)
+#endif
+
+#endif /* !_MACHINE_PARAM_H_ */
diff --git a/sys/arch/mvme88k/include/pcb.h b/sys/arch/mvme88k/include/pcb.h
new file mode 100644
index 00000000000..96628d11b34
--- /dev/null
+++ b/sys/arch/mvme88k/include/pcb.h
@@ -0,0 +1,141 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Motorola 88100 pcb definitions
+ *
+ */
+/*
+ */
+#ifndef _PCB_H_
+#define _PCB_H_
+
+/*
+ * Our PCB is the regular PCB+Save area for kernel frame.
+ * Upon entering kernel mode from user land, save the user context
+ * in the saved_state area - this is passed as the exception frame.
+ * On a context switch, only registers that need to be saved by the
+ * C calling convention and few other regs (pc, psr etc) are saved
+ * in the kernel_state part of the PCB.
+ */
+
+/* This must always be an even number of words long */
+
+struct m88100_pcb {
+ unsigned pcb_pc; /* address to return */
+ unsigned pcb_r14;
+ unsigned pcb_r15;
+ unsigned pcb_r16;
+ unsigned pcb_r17;
+ unsigned pcb_r18;
+ unsigned pcb_r19;
+ unsigned pcb_r20;
+ unsigned pcb_r21;
+ unsigned pcb_r22;
+ unsigned pcb_r23;
+ unsigned pcb_r24;
+ unsigned pcb_r25;
+ unsigned pcb_r26;
+ unsigned pcb_r27;
+ unsigned pcb_r28;
+ unsigned pcb_r29;
+ unsigned pcb_r30;
+ unsigned pcb_sp; /* kernel stack pointer */
+ unsigned pcb_mask;
+};
+
+
+/*
+ * m88100_saved_state this structure corresponds to the state
+ * of the user registers as saved on the
+ * stack upon kernel entry. This structure
+ * is used internally only. Since this
+ * structure may change from version to
+ * version, it is hidden from the user.
+ */
+
+/* This must always be an even number of words long */
+
+struct m88100_saved_state {
+ unsigned r[32];
+ unsigned fpsr;
+ unsigned fpcr;
+ unsigned epsr;
+ unsigned sxip;
+ unsigned snip;
+ unsigned sfip;
+ unsigned ssbr;
+ unsigned dmt0;
+ unsigned dmd0;
+ unsigned dma0;
+ unsigned dmt1;
+ unsigned dmd1;
+ unsigned dma1;
+ unsigned dmt2;
+ unsigned dmd2;
+ unsigned dma2;
+ unsigned fpecr;
+ unsigned fphs1;
+ unsigned fpls1;
+ unsigned fphs2;
+ unsigned fpls2;
+ unsigned fppt;
+ unsigned fprh;
+ unsigned fprl;
+ unsigned fpit;
+ unsigned vector; /* exception vector number */
+ unsigned mask; /* interrupt mask level */
+ unsigned mode; /* interrupt mode */
+ unsigned scratch1; /* used by locore trap handling code */
+ unsigned pad; /* to make an even length */
+} ;
+
+#define trapframe m88100_saved_state
+
+struct pcb
+{
+ struct m88100_saved_state user_state;
+ struct m88100_pcb kernel_state;
+ int pcb_onfault; /* for copyin/copyout faults */
+};
+
+typedef struct pcb *pcb_t; /* exported */
+
+/*
+ * Location of saved user registers for the proc.
+ */
+#define USER_REGS(p) \
+ (((struct m88100_saved_state *) (&((p)->p_addr->u_pcb.user_state))))
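+
+/*
+ * Editor's sketch (illustration only, not part of the original import):
+ * machine-dependent code reaches the saved user registers of a process
+ * p through USER_REGS(), e.g.
+ *
+ *	struct m88100_saved_state *ss = USER_REGS(p);
+ *	... ss->r[31] is the user stack pointer, ss->sxip the saved
+ *	exception-time instruction pointer ...
+ */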
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. Note that the trapframe here is a copy of the one
+ * from the top of the kernel stack (included here so that the kernel
+ * stack itself need not be dumped).
+ */
+struct md_coredump {
+ struct trapframe md_tf;
+};
+
+#endif /* _PCB_H_ */
diff --git a/sys/arch/mvme88k/include/pcctworeg.h b/sys/arch/mvme88k/include/pcctworeg.h
new file mode 100644
index 00000000000..eecc7d47479
--- /dev/null
+++ b/sys/arch/mvme88k/include/pcctworeg.h
@@ -0,0 +1,146 @@
+/* $NetBSD$ */
+
+/*
+ * Copyright (c) 1995 Theo de Raadt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Theo de Raadt
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MVME1x7/16x PCC2 chip: sort of a confused mish-mash of the MC in the 162
+ * and the PCC in the 147
+ */
+struct pcctworeg {
+ volatile u_char pcc2_chipid;
+ volatile u_char pcc2_chiprev;
+ volatile u_char pcc2_genctl;
+ volatile u_char pcc2_vecbase; /* irq vector base */
+ volatile u_long pcc2_t1cmp; /* timer1 compare */
+ volatile u_long pcc2_t1count; /* timer1 count */
+ volatile u_long pcc2_t2cmp; /* timer2 compare */
+ volatile u_long pcc2_t2count; /* timer2 count */
+ volatile u_char pcc2_pscalecnt; /* timer prescaler counter */
+ volatile u_char pcc2_pscaleadj; /* timer prescaler adjust */
+ volatile u_char pcc2_t2ctl; /* timer2 ctrl reg */
+ volatile u_char pcc2_t1ctl; /* timer1 ctrl reg */
+ volatile u_char pcc2_gpioirq; /* gpio irq */
+ volatile u_char pcc2_gpio; /* gpio i/o */
+ volatile u_char pcc2_t2irq;
+ volatile u_char pcc2_t1irq;
+ volatile u_char pcc2_sccerr;
+ volatile u_char pcc2_sccirq;
+ volatile u_char pcc2_scctx;
+ volatile u_char pcc2_sccrx;
+ volatile u_char :8;
+ volatile u_char :8;
+ volatile u_char :8;
+ volatile u_char pcc2_sccmoiack;
+ volatile u_char :8;
+ volatile u_char pcc2_scctxiack;
+ volatile u_char :8;
+ volatile u_char pcc2_sccrxiack;
+ volatile u_char pcc2_ieerr;
+ volatile u_char :8;
+ volatile u_char pcc2_iectl;
+ volatile u_char pcc2_ieirq;
+ volatile u_char pcc2_ncrerr;
+ volatile u_char :8;
+ volatile u_char :8;
+ volatile u_char pcc2_ncrirq;
+ volatile u_char pcc2_prtairq;
+ volatile u_char pcc2_prtfirq;
+ volatile u_char pcc2_prtsirq;
+ volatile u_char pcc2_prtpirq;
+ volatile u_char pcc2_prtbirq;
+ volatile u_char :8;
+ volatile u_char pcc2_prtstat;
+ volatile u_char pcc2_prtctl;
+ volatile u_short pcc2_speed; /* DO NOT USE */
+ volatile u_short pcc2_prtdat;
+ volatile u_short :16;
+ volatile u_char pcc2_ipl;
+ volatile u_char pcc2_mask;
+};
+#define PCC2_PCC2CHIP_ADDR 0xFFF42000
+#define PCC2_PCC2CHIP_OFF 0x42000
+#define PCC2_CHIPID 0x20
+
+/*
+ * Points to the system's PCCTWO. This is not active until the pcctwo0
+ * device has been attached. After that, it gives the virtual address
+ * at which the PCCTWO can be accessed.
+ */
+extern struct pcctworeg *sys_pcc2;
+
+/*
+ * We lock off our interrupt vector at 0x50.
+ */
+#define PCC2_VECBASE 0x50
+#define PCC2_NVEC 12
+
+/*
+ * Vectors we use
+ */
+#define PCC2V_NCR 0x05
+#define PCC2V_IE_ERR 0x06
+#define PCC2V_IE 0x07
+#define PCC2V_TIMER2 0x08
+#define PCC2V_TIMER1 0x09
+#define PCC2V_GPIO 0x0A
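+
+/*
+ * Editor's note (assumption, not from the original source): the vector
+ * presented to the CPU for a given source is the base plus the offset
+ * above, e.g. timer 1 interrupts arrive at
+ * PCC2_VECBASE + PCC2V_TIMER1 == 0x50 + 0x09 == 0x59.
+ */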
+
+#define PCC2_TCTL_CEN 0x01
+#define PCC2_TCTL_COC 0x02
+#define PCC2_TCTL_COVF 0x04
+#define PCC2_TCTL_OVF 0xf0
+
+#define PCC2_GPIO_PLTY 0x80
+#define PCC2_GPIO_EL 0x40
+
+#define PCC2_GPIOCR_OE 0x2
+#define PCC2_GPIOCR_O 0x1
+
+#define PCC2_SCC_AVEC 0x08
+#define PCC2_SCCRX_INHIBIT (0 << 6)
+#define PCC2_SCCRX_SNOOP (1 << 6)
+#define PCC2_SCCRX_INVAL (2 << 6)
+#define PCC2_SCCRX_RESV (3 << 6)
+
+#define pcc2_timer_us2lim(us) (us) /* timer increments in "us" */
+
+#define PCC2_IRQ_IPL 0x07
+#define PCC2_IRQ_ICLR 0x08
+#define PCC2_IRQ_IEN 0x10
+#define PCC2_IRQ_INT 0x20
+
+#define PCC2_GENCTL_FAST 0x01
+#define PCC2_GENCTL_IEN 0x02
+#define PCC2_GENCTL_C040 0x03
+
+#define PCC2_SC_INHIBIT (0 << 6)
+#define PCC2_SC_SNOOP (1 << 6)
+#define PCC2_SC_INVAL (2 << 6)
+#define PCC2_SC_RESV (3 << 6)
diff --git a/sys/arch/mvme88k/include/pmap.h b/sys/arch/mvme88k/include/pmap.h
new file mode 100644
index 00000000000..fc1380b3297
--- /dev/null
+++ b/sys/arch/mvme88k/include/pmap.h
@@ -0,0 +1,219 @@
+/*
+ * HISTORY
+ */
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
+#define OMRON_PMAP
+
+/* use builtin memcpy in gcc 2.0 */
+#if (__GNUC__ > 1)
+#define bcopy(a,b,c) memcpy(b,a,c)
+#endif
+
+#include <machine/psl.h> /* get standard goodies */
+#include <vm/vm_param.h>
+#include <vm/vm_prot.h> /* vm_prot_t */
+#include <machine/mmu.h> /* batc_template_t, BATC_MAX, etc.*/
+#include <machine/pcb.h> /* pcb_t, etc.*/
+
+typedef struct sdt_entry *sdt_ptr_t;
+
+/*
+ * PMAP structure
+ */
+typedef struct pmap *pmap_t;
+
+struct pmap {
+ sdt_ptr_t sdt_paddr; /* physical pointer to sdt */
+ sdt_ptr_t sdt_vaddr; /* virtual pointer to sdt */
+ int ref_count; /* reference count */
+
+ struct pmap_statistics stats; /* pmap statistics */
+
+#ifdef DEBUG
+ pmap_t next;
+ pmap_t prev;
+#endif
+
+ /* for OMRON_PMAP */
+ batc_template_t i_batc[BATC_MAX]; /* instruction BATCs */
+ batc_template_t d_batc[BATC_MAX]; /* data BATCs */
+ /* end OMRON_PMAP */
+
+};
+
+#include <vm/vm.h>
+
+#define PMAP_NULL ((pmap_t) 0)
+
+extern pmap_t kernel_pmap;
+
+#define PMAP_ACTIVATE(pmap, th, my_cpu) _pmap_activate(pmap, th, my_cpu)
+#define PMAP_DEACTIVATE(pmap, th, my_cpu) _pmap_deactivate(pmap, th, my_cpu)
+
+#define PMAP_CONTEXT(pmap, thread)
+
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+
+/* Used in builtin/device_pager.c */
+#define pmap_phys_address(frame) ((vm_offset_t) (M88K_PTOB(frame)))
+
+/* Used in kern/mach_timedev.c */
+#define pmap_phys_to_frame(phys) ((int) (M88K_BTOP(phys)))
+
+/*
+ * Since our PCB has no information about the mapping,
+ * we have nothing to do in PMAP_PCB_INITIALIZE.
+ * XXX
+ */
+/* Used in machine/pcb.c */
+#define PMAP_PCB_INITIALIZE(x)
+
+/*
+ * Modes used when calling pmap_cache_flush().
+ */
+#define FLUSH_CACHE 0
+#define FLUSH_CODE_CACHE 1
+#define FLUSH_DATA_CACHE 2
+#define FLUSH_LOCAL_CACHE 3
+#define FLUSH_LOCAL_CODE_CACHE 4
+#define FLUSH_LOCAL_DATA_CACHE 5
+
+/**************************************************************************/
+/*** Prototypes for public functions defined in pmap.c ********************/
+/**************************************************************************/
+
+void _pmap_activate(pmap_t pmap, pcb_t, int my_cpu);
+void _pmap_deactivate(pmap_t pmap, pcb_t, int my_cpu);
+void pmap_activate(pmap_t my_pmap, pcb_t);
+void pmap_deactivate(pmap_t pmap, pcb_t);
+void pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot);
+int pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type);
+void pmap_page_protect(vm_offset_t phys, vm_prot_t prot);
+
+vm_offset_t pmap_map(
+ vm_offset_t virt,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot
+ #ifdef OMRON_PMAP
+ , unsigned cmode
+ #endif
+ );
+
+vm_offset_t pmap_map_batc(
+ vm_offset_t virt,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot,
+ unsigned cmode);
+
+void pmap_enter(
+ pmap_t pmap,
+ vm_offset_t va,
+ vm_offset_t pa,
+ vm_prot_t prot,
+ boolean_t wired);
+
+
+#ifdef JUNK
+int pmap_attribute(
+ pmap_t pmap,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value); /* IN/OUT */
+#endif /* JUNK */
+
+void pmap_bootstrap(
+ vm_offset_t load_start, /* IN */
+ vm_offset_t *phys_start, /* IN/OUT */
+ vm_offset_t *phys_end, /* IN */
+ vm_offset_t *virt_start, /* OUT */
+ vm_offset_t *virt_end); /* OUT */
+
+#ifdef MACH_KERNEL
+ void pmap_init();
+#else
+ void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end);
+#endif
+
+void pmap_copy(
+ pmap_t dst_pmap,
+ pmap_t src_pmap,
+ vm_offset_t dst_addr,
+ vm_size_t len,
+ vm_offset_t src_addr);
+
+void pmap_pageable(
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end,
+ boolean_t pageable);
+
+pt_entry_t *pmap_pte(pmap_t map, vm_offset_t virt);
+void pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode);
+void pmap_zero_page(vm_offset_t phys);
+pmap_t pmap_create(vm_size_t size);
+void pmap_pinit(pmap_t p);
+void pmap_release(pmap_t p);
+void pmap_destroy(pmap_t p);
+void pmap_reference(pmap_t p);
+void pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e);
+void pmap_remove_all(vm_offset_t phys);
+void pmap_change_wiring(pmap_t map, vm_offset_t v, boolean_t wired);
+vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va);
+vm_offset_t pmap_extract_unlocked(pmap_t pmap, vm_offset_t va);
+void pmap_update(void);
+void pmap_collect(pmap_t pmap);
+pmap_t pmap_kernel(void);
+void pmap_copy_page(vm_offset_t src, vm_offset_t dst);
+void copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount);
+void copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount);
+void pmap_redzone(pmap_t pmap, vm_offset_t va);
+void pmap_clear_modify(vm_offset_t phys);
+boolean_t pmap_is_modified(vm_offset_t phys);
+void pmap_clear_reference(vm_offset_t phys);
+boolean_t pmap_is_referenced(vm_offset_t phys);
+boolean_t pmap_verify_free(vm_offset_t phys);
+boolean_t pmap_valid_page(vm_offset_t p);
+void icache_flush(vm_offset_t pa);
+void pmap_dcache_flush(pmap_t pmap, vm_offset_t va);
+void pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode);
+void pmap_print (pmap_t pmap);
+void pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format);
+void pmap_virtual_space(vm_offset_t *startp, vm_offset_t *endp);
+unsigned pmap_free_pages(void);
+boolean_t pmap_next_page(vm_offset_t *addrp);
+
+#if 0
+#ifdef OMRON_PMAP
+ void pmap_set_batc(
+ pmap_t pmap,
+ boolean_t data,
+ int i,
+ vm_offset_t va,
+ vm_offset_t pa,
+ boolean_t super,
+ boolean_t wt,
+ boolean_t global,
+ boolean_t ci,
+ boolean_t wp,
+ boolean_t valid);
+
+ void use_batc(
+ task_t task,
+ boolean_t data, /* for data-cmmu ? */
+ int i, /* batc number */
+ vm_offset_t va, /* virtual address */
+ vm_offset_t pa, /* physical address */
+ boolean_t s, /* for super-mode ? */
+ boolean_t wt, /* is writethrough */
+ boolean_t g, /* is global ? */
+ boolean_t ci, /* is cache inhibited ? */
+ boolean_t wp, /* is write-protected ? */
+ boolean_t v); /* is valid ? */
+#endif
+#endif /* 0 */
+
+#endif /* _MACHINE_PMAP_H_ */
diff --git a/sys/arch/mvme88k/include/pmap_table.h b/sys/arch/mvme88k/include/pmap_table.h
new file mode 100644
index 00000000000..555789144ac
--- /dev/null
+++ b/sys/arch/mvme88k/include/pmap_table.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ */
+
+
+/* an entry is considered invalid if its size is 0 */
+/* end of list is indicated by a size of 0xffffffff */
+
+typedef struct {
+ vm_offset_t phys_start; /* in bytes */
+ vm_offset_t virt_start; /* in bytes */
+ unsigned int size; /* in bytes */
+ unsigned int prot; /* vm_prot_read, vm_prot_write */
+ unsigned int cacheability; /* none, writeback, normal */
+} pmap_table_entry;
+
+typedef pmap_table_entry *pmap_table_t;
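+
+/*
+ * Editor's sketch (hypothetical addresses, not part of the original
+ * import): such a table is a static array terminated by an entry whose
+ * size is 0xffffffff, e.g.
+ *
+ *	static pmap_table_entry board_map[] = {
+ *		{ 0xfff40000, 0xfff40000, 0x10000,
+ *		    VM_PROT_READ | VM_PROT_WRITE, 0 },
+ *		{ 0, 0, 0xffffffff, 0, 0 },		(end of list)
+ *	};
+ */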
+
diff --git a/sys/arch/mvme88k/include/proc.h b/sys/arch/mvme88k/include/proc.h
new file mode 100644
index 00000000000..95209084379
--- /dev/null
+++ b/sys/arch/mvme88k/include/proc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 8.1 (Berkeley) 6/11/93
+ *
+ * from: Header: proc.h,v 1.6 92/11/26 02:04:41 torek Exp (LBL)
+ * $Id: proc.h,v 1.1 1995/10/18 10:54:21 deraadt Exp $
+ */
+
+#include <machine/pcb.h>
+#include <machine/mmu.h>
+
+/*
+ * Machine-dependent part of the proc structure for VME1X7.
+ */
+struct mdproc {
+ struct trapframe *md_tf; /* trap/syscall registers */
+ struct fpstate *md_fpstate; /* fpu state, if any; always resident */
+ int md_upte[UPAGES]; /* ptes for mapping u page */
+};
diff --git a/sys/arch/mvme88k/include/profile.h b/sys/arch/mvme88k/include/profile.h
new file mode 100644
index 00000000000..429d42d3c6b
--- /dev/null
+++ b/sys/arch/mvme88k/include/profile.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)profile.h 8.1 (Berkeley) 6/11/93
+ * $Id: profile.h,v 1.1 1995/10/18 10:54:24 deraadt Exp $
+ */
+
+#define _MCOUNT_DECL static inline void _mcount
+
+#define MCOUNT \
+extern void mcount() asm("mcount"); \
+void \
+mcount() \
+{ \
+ register int selfret, callerret; \
+ /* \
+ * find the return address for mcount, \
+ * and the return address for mcount's caller. \
+ * \
+ * selfret = ret pushed by mcount call \
+ */ \
+ asm volatile("ld %0,r31,36" : "=r" (selfret)); \
+ /* \
+ * callerret = ret pushed by call into self. \
+ */ \
+ /* \
+ * This may not be right. It all depends on where the \
+ * caller stores the return address. XXX \
+ */ \
+ asm volatile("addu r10,r31,48"); \
+ asm volatile("ld %0,r10,36" : "=r" (callerret)); \
+ _mcount(callerret, selfret); \
+}
+
+#ifdef KERNEL
+/*
+ * Note that we assume splhigh() and splx() cannot call mcount()
+ * recursively.
+ */
+#define MCOUNT_ENTER s = splhigh()
+#define MCOUNT_EXIT splx(s)
+#endif /* KERNEL */
diff --git a/sys/arch/mvme88k/include/psl.h b/sys/arch/mvme88k/include/psl.h
new file mode 100644
index 00000000000..f7f35dcd1c6
--- /dev/null
+++ b/sys/arch/mvme88k/include/psl.h
@@ -0,0 +1,97 @@
+#ifndef __M88K_M88100_PSL_H__
+#define __M88K_M88100_PSL_H__
+
+/* needs major cleanup - XXX nivas */
+
+#define spl0() spln(0)
+#define spl1() spln(1)
+#define spl2() spln(2)
+#define spl3() spln(3)
+#define spl4() spln(4)
+#define spl5() spln(5)
+#define spl6() spln(6)
+#define spl7() spln(7)
+
+#define splnone() spln(0)
+#define splsoftclock() spln(1)
+#define splnet() spln(1)
+#define splbio() spln(3)
+#define splimp() spln(3)
+#define spltty() spln(4)
+#define splclock() spln(6)
+#define splstatclock() spln(6)
+#define splvm() spln(6)
+#define splhigh() spln(7)
+#define splsched() spln(7)
+
+#define splx(x) spln(x)
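+
+/*
+ * Editor's sketch (assumption, not from the original source): these are
+ * used in the conventional BSD way to protect a critical section,
+ * relying on spln() returning the previous level:
+ *
+ *	int s = splclock();
+ *	... touch data shared with the clock interrupt ...
+ *	splx(s);
+ */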
+
+/*
+ * 88100 control registers
+ */
+
+/*
+ * processor identification register (PID)
+ */
+#define PID_ARN 0x0000FF00U /* architectural revision number */
+#define PID_VN 0x000000FEU /* version number */
+#define PID_MC 0x00000001U /* master/checker */
+
+/*
+ * processor status register
+ */
+#define PSR_MODE 0x80000000U /* supervisor/user mode */
+#define PSR_BO 0x40000000U /* byte-ordering 0:big 1:little */
+#define PSR_SER 0x20000000U /* serial mode */
+#define PSR_C 0x10000000U /* carry */
+#define PSR_SFD 0x000003F0U /* SFU disable */
+#define PSR_SFD1 0x00000008U /* SFU1 (FPU) disable */
+#define PSR_MXM 0x00000004U /* misaligned access enable */
+#define PSR_IND 0x00000002U /* interrupt disable */
+#define PSR_SFRZ 0x00000001U /* shadow freeze */
+
+/*
+ * This is used in ext_int() and hard_clock().
+ */
+#define PSR_IPL 0x00001000 /* for basepri */
+#define PSR_IPL_LOG 12 /* = log2(PSR_IPL) */
+
+#define PSR_MODE_LOG 31 /* = log2(PSR_MODE) */
+#define PSR_BO_LOG 30 /* = log2(PSR_BO) */
+#define PSR_SER_LOG 29 /* = log2(PSR_SER) */
+#define PSR_SFD1_LOG 3 /* = log2(PSR_SFD1) */
+#define PSR_MXM_LOG 2 /* = log2(PSR_MXM) */
+#define PSR_IND_LOG 1 /* = log2(PSR_IND) */
+#define PSR_SFRZ_LOG 0 /* = log2(PSR_SFRZ) */
+
+#define PSR_SUPERVISOR (PSR_MODE | PSR_SFD)
+#define PSR_USER (PSR_SFD)
+#define PSR_SET_BY_USER (PSR_BO | PSR_SER | PSR_C | PSR_MXM)
+
+#ifndef ASSEMBLER
+struct psr {
+ unsigned
+ psr_mode: 1,
+ psr_bo : 1,
+ psr_ser : 1,
+ psr_c : 1,
+ :18,
+ psr_sfd : 6,
+ psr_sfd1: 1,
+ psr_mxm : 1,
+ psr_ind : 1,
+ psr_sfrz: 1;
+};
+#endif
+
+#define FIP_V 0x00000002U /* valid */
+#define FIP_E 0x00000001U /* exception */
+#define FIP_ADDR 0xFFFFFFFCU /* address mask */
+#define NIP_V 0x00000002U /* valid */
+#define NIP_E 0x00000001U /* exception */
+#define NIP_ADDR 0xFFFFFFFCU /* address mask */
+#define XIP_V 0x00000002U /* valid */
+#define XIP_E 0x00000001U /* exception */
+#define XIP_ADDR 0xFFFFFFFCU /* address mask */
+
+#endif /* __M88K_M88100_PSL_H__ */
diff --git a/sys/arch/mvme88k/include/ptrace.h b/sys/arch/mvme88k/include/ptrace.h
new file mode 100644
index 00000000000..2aecaab88eb
--- /dev/null
+++ b/sys/arch/mvme88k/include/ptrace.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ptrace.h 8.1 (Berkeley) 6/11/93
+ *
+ * from: Header: ptrace.h,v 1.6 92/11/26 02:04:43 torek Exp (LBL)
+ * $Id: ptrace.h,v 1.1 1995/10/18 10:54:23 deraadt Exp $
+ */
+
+/*
+ * m88k-dependent ptrace definitions.
+ */
+#define PT_GETREGS (PT_FIRSTMACH + 0)
+#define PT_SETREGS (PT_FIRSTMACH + 1)
+#define PT_GETFPREGS (PT_FIRSTMACH + 2)
+#define PT_SETFPREGS (PT_FIRSTMACH + 3)
diff --git a/sys/arch/mvme88k/include/reg.h b/sys/arch/mvme88k/include/reg.h
new file mode 100644
index 00000000000..6e7fc265cca
--- /dev/null
+++ b/sys/arch/mvme88k/include/reg.h
@@ -0,0 +1,47 @@
+#include <machine/pcb.h>
+
+struct reg {
+ unsigned r_r[32];
+ unsigned r_fpsr;
+ unsigned r_fpcr;
+ unsigned r_epsr;
+ unsigned r_sxip;
+ unsigned r_snip;
+ unsigned r_sfip;
+ unsigned r_ssbr;
+ unsigned r_dmt0;
+ unsigned r_dmd0;
+ unsigned r_dma0;
+ unsigned r_dmt1;
+ unsigned r_dmd1;
+ unsigned r_dma1;
+ unsigned r_dmt2;
+ unsigned r_dmd2;
+ unsigned r_dma2;
+ unsigned r_fpecr;
+ unsigned r_fphs1;
+ unsigned r_fpls1;
+ unsigned r_fphs2;
+ unsigned r_fpls2;
+ unsigned r_fppt;
+ unsigned r_fprh;
+ unsigned r_fprl;
+ unsigned r_fpit;
+ unsigned r_vector; /* exception vector number */
+ unsigned r_mask; /* interrupt mask level */
+ unsigned r_mode; /* interrupt mode */
+ unsigned r_scratch1; /* used by locore trap handling code */
+ unsigned r_pad; /* to make an even length */
+} ;
+
+struct fpreg {
+ unsigned fp_fpecr;
+ unsigned fp_fphs1;
+ unsigned fp_fpls1;
+ unsigned fp_fphs2;
+ unsigned fp_fpls2;
+ unsigned fp_fppt;
+ unsigned fp_fprh;
+ unsigned fp_fprl;
+ unsigned fp_fpit;
+};
diff --git a/sys/arch/mvme88k/include/setjmp.h b/sys/arch/mvme88k/include/setjmp.h
new file mode 100644
index 00000000000..aee3a668b34
--- /dev/null
+++ b/sys/arch/mvme88k/include/setjmp.h
@@ -0,0 +1,7 @@
+/* $NetBSD: setjmp.h,v 1.1 1994/12/20 10:37:10 cgd Exp $ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#define _JBLEN 19 /* size, in longs, of a jmp_buf */
diff --git a/sys/arch/mvme88k/include/signal.h b/sys/arch/mvme88k/include/signal.h
new file mode 100644
index 00000000000..fb5c3e1c960
--- /dev/null
+++ b/sys/arch/mvme88k/include/signal.h
@@ -0,0 +1,45 @@
+/* Stolen from SVR4 (/usr/include/sys/signal.h) */
+
+typedef int sig_atomic_t;
+
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ *
+ * All machines must have an sc_onstack and sc_mask.
+ */
+struct sigcontext {
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore */
+ /* begin machine dependent portion */
+ int sc_regs[32];
+#define sc_sp sc_regs[31]
+ int sc_xip;
+ int sc_nip;
+ int sc_fip;
+ int sc_ps;
+ int sc_fpsr;
+ int sc_fpcr;
+ int sc_ssbr;
+ int sc_dmt0;
+ int sc_dmd0;
+ int sc_dma0;
+ int sc_dmt1;
+ int sc_dmd1;
+ int sc_dma1;
+ int sc_dmt2;
+ int sc_dmd2;
+ int sc_dma2;
+ int sc_fpecr;
+ int sc_fphs1;
+ int sc_fpls1;
+ int sc_fphs2;
+ int sc_fpls2;
+ int sc_fppt;
+ int sc_fprh;
+ int sc_fprl;
+ int sc_fpit;
+};
diff --git a/sys/arch/mvme88k/include/stdarg.h b/sys/arch/mvme88k/include/stdarg.h
new file mode 100644
index 00000000000..9820ff6fb8c
--- /dev/null
+++ b/sys/arch/mvme88k/include/stdarg.h
@@ -0,0 +1,176 @@
+/* This file has local changes by MOTOROLA
+Thu Sep 9 09:06:29 CDT 1993 Dale Rahn (drahn@pacific)
+ * (gstdarg.h, gvarargs.h) C-Front requires all builtins to
+ be defined. This is to insert these definitions if
+ __cplusplus is defined but not using the G++ compiler.
+ */
+/* stdarg.h for GNU.
+ Note that the type used in va_arg is supposed to match the
+ actual type **after default promotions**.
+ Thus, va_arg (..., short) is not valid. */
+
+#ifndef _STDARG_H
+#ifndef _ANSI_STDARG_H_
+#ifndef __need___va_list
+#define _STDARG_H
+#define _ANSI_STDARG_H_
+#endif /* not __need___va_list */
+#undef __need___va_list
+
+#ifndef __GNUC__
+/* Use the system's macros with the system's compiler.
+ This is relevant only when building GCC with some other compiler. */
+#include <stdarg.h>
+#else
+#ifdef __clipper__
+#include <va-clipper.h>
+#else
+#ifdef __m88k__
+#include <va-m88k.h>
+#else
+#ifdef __i860__
+#include <va-i860.h>
+#else
+#ifdef __hppa__
+#include <va-pa.h>
+#else
+#ifdef __mips__
+#include <va-mips.h>
+#else
+#ifdef __sparc__
+#include <va-sparc.h>
+#else
+#ifdef __i960__
+#include <va-i960.h>
+#else
+#ifdef __alpha__
+#include <va-alpha.h>
+#else
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX)
+typedef char *__gnuc_va_list;
+#else
+typedef void *__gnuc_va_list;
+#endif
+#endif
+
+/* Define the standard macros for the user,
+ if this invocation was from the user program. */
+#ifdef _STDARG_H
+
+/* Amount of space required in an argument list for an arg of type TYPE.
+ TYPE may alternatively be an expression whose type is used. */
+
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
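+
+/*
+ * Editor's worked example (not in the original): with 4-byte ints,
+ * __va_rounded_size(char) and __va_rounded_size(short) are both 4,
+ * while __va_rounded_size(double) is 8.
+ */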
+
+#define va_start(AP, LASTARG) \
+ (AP = ((__gnuc_va_list) __builtin_next_arg ()))
+
+#undef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#define va_end(AP)
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+
+#if defined (__arm__) || defined (__i386__) || defined (__ns32000__) || defined (__vax__)
+/* This is for little-endian machines; small args are padded upward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#else /* big-endian */
+/* This is for big-endian machines; small args are padded downward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - ((sizeof (TYPE) < 4 \
+ ? sizeof (TYPE) \
+ : __va_rounded_size (TYPE))))))
+#endif /* big-endian */
+#endif /* _STDARG_H */
+
+#endif /* not alpha */
+#endif /* not i960 */
+#endif /* not sparc */
+#endif /* not mips */
+#endif /* not hppa */
+#endif /* not i860 */
+#endif /* not m88k */
+#endif /* not clipper */
+
+#ifdef _STDARG_H
+/* Define va_list, if desired, from __gnuc_va_list. */
+/* We deliberately do not define va_list when called from
+ stdio.h, because ANSI C says that stdio.h is not supposed to define
+ va_list. stdio.h needs to have access to that data type,
+ but must not use that name. It should use the name __gnuc_va_list,
+ which is safe because it is reserved for the implementation. */
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */
+#undef _VA_LIST
+#endif
+
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+
+#ifdef __svr4__
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#endif /* _VA_LIST_ */
+#else /* not __svr4__ */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____)
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#if !(defined (__BSD_NET2__) || defined (____386BSD____))
+#define _VA_LIST_
+#endif
+#define _VA_LIST
+typedef __gnuc_va_list va_list;
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_ */
+
+#endif /* not __svr4__ */
+
+#if defined(__cplusplus) && !defined(__GNUG__)
+
+/* This is added to work with AT&T C++. */
+extern "C" {
+ char *__builtin_next_arg(void);
+ __gnuc_va_list *__builtin_saveregs(void);
+ void *__builtin_saveregs2(int);
+ int *__builtin_argptr(void);
+ int __builtin_argsize(void);
+ int __builtin_classify_type(...);
+ int __alignof__(...);
+}
+#endif
+
+#endif /* _STDARG_H */
+
+#endif /* __GNUC__ */
+#endif /* not _ANSI_STDARG_H_ */
+#endif /* not _STDARG_H */
diff --git a/sys/arch/mvme88k/include/trap.h b/sys/arch/mvme88k/include/trap.h
new file mode 100644
index 00000000000..4f8f3d4aa3f
--- /dev/null
+++ b/sys/arch/mvme88k/include/trap.h
@@ -0,0 +1,86 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: trap.h,v $
+ * Revision 1.1 1995/10/18 10:54:21 deraadt
+ * Initial revision
+ *
+ * Revision 2.3 92/08/03 17:52:42 jfriedl
+ * watchpoint support
+ * [92/07/31 jfriedl]
+ *
+ * Revision 2.2 92/02/18 18:03:58 elf
+ * Liberated.
+ * [92/01/30 danner]
+ *
+ */
+/*
+ * Trap codes
+ */
+
+#ifndef _M88K_TRAP_H
+#define _M88K_TRAP_H 1
+
+/*
+ * Trap type values
+ */
+
+#define T_RESADFLT 0 /* reserved addressing fault */
+#define T_PRIVINFLT 1 /* privileged instruction fault */
+#define T_RESOPFLT 2 /* reserved operand fault */
+
+/* End of known constants */
+
+#define T_INSTFLT 3 /* instruction access exception */
+#define T_DATAFLT 4 /* data access exception */
+#define T_MISALGNFLT 5 /* misaligned access exception */
+#define T_ILLFLT 6 /* unimplemented opcode exception */
+#define T_BNDFLT 7 /* bounds check violation exception */
+#define T_ZERODIV 8 /* illegal divide exception */
+#define T_OVFFLT 9 /* integer overflow exception */
+#define T_ERRORFLT 10 /* error exception */
+#define T_FPEPFLT 11 /* floating point precise exception */
+#define T_FPEIFLT 12 /* floating point imprecise exception */
+#define T_ASTFLT 13 /* software trap */
+#if DDB
+#define T_KDB_ENTRY 14 /* force entry to kernel debugger */
+#define T_KDB_BREAK 15 /* break point hit */
+#define T_KDB_TRACE 16 /* trace */
+#endif /* DDB */
+#define T_UNKNOWNFLT 17 /* unknown exception */
+#define T_SIGTRAP 18 /* generate SIGTRAP */
+#define T_SIGSYS 19 /* generate SIGSYS */
+#define T_STEPBPT 20 /* special breakpoint for single step */
+#define T_USERBPT 21 /* user set breakpoint (for debugger) */
+#define T_SYSCALL 22 /* Syscall */
+#define T_USER 23 /* user mode fault */
+#if DDB
+#define T_KDB_WATCH 24 /* watchpoint hit */
+#endif /* DDB */
+
+#endif /* _M88K_TRAP_H */
+
diff --git a/sys/arch/mvme88k/include/types.h b/sys/arch/mvme88k/include/types.h
new file mode 100644
index 00000000000..58d1d8f47c1
--- /dev/null
+++ b/sys/arch/mvme88k/include/types.h
@@ -0,0 +1,83 @@
+/* $NetBSD: types.h,v 1.7 1995/07/05 17:46:11 pk Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.1 (Berkeley) 6/11/93
+ */
+
+#ifndef _MACHTYPES_H_
+#define _MACHTYPES_H_
+
+#include <sys/cdefs.h>
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
+typedef struct _physadr {
+ short r[1];
+} *physadr;
+
+typedef struct label_t {
+ int val[2];
+} label_t;
+#endif
+
+typedef unsigned long vm_offset_t;
+typedef unsigned long vm_size_t;
+
+/*
+ * Basic integral types. Omit the typedef if
+ * not possible for a machine/compiler combination.
+ */
+#define __BIT_TYPES_DEFINED__
+typedef __signed char int8_t;
+typedef unsigned char u_int8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+typedef long long int64_t;
+typedef unsigned long long u_int64_t;
+
+typedef int32_t register_t;
+
+#define __BDEVSW_DUMP_OLD_TYPE
+
+#endif /* _MACHTYPES_H_ */
diff --git a/sys/arch/mvme88k/include/va-m88k.h b/sys/arch/mvme88k/include/va-m88k.h
new file mode 100644
index 00000000000..caa77c3c12d
--- /dev/null
+++ b/sys/arch/mvme88k/include/va-m88k.h
@@ -0,0 +1,85 @@
+/* This file has local changes by MOTOROLA
+Thu Sep 9 09:06:29 CDT 1993 Dale Rahn (drahn@pacific)
+ * Due to C-Front's usage of __alignof__ builtin the
+ usage of it must be changed to have an object of that type
+ as the argument not just the type.
+ */
+/* GNU C varargs support for the Motorola 88100 */
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+
+typedef struct
+{
+ int __va_arg; /* argument number */
+ int *__va_stk; /* start of args passed on stack */
+ int *__va_reg; /* start of args passed in regs */
+} __gnuc_va_list;
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but
+ __gnuc_va_list. */
+#if defined (_STDARG_H) || defined (_VARARGS_H)
+
+#ifdef _STDARG_H /* stdarg.h support */
+
+#if __GNUC__ > 1 /* GCC 2.0 and beyond */
+#define va_start(AP,LASTARG) ((AP) = *(__gnuc_va_list *)__builtin_saveregs())
+#else
+#define va_start(AP,LASTARG) \
+ ( (AP).__va_reg = (int *) __builtin_saveregs2(0), \
+ (AP).__va_stk = (int *) __builtin_argptr(), \
+ (AP).__va_arg = (int) (__builtin_argsize() + 3) / 4 )
+#endif
+
+#else /* varargs.h support */
+
+#if __GNUC__ > 1 /* GCC 2.0 and beyond */
+#define va_start(AP) ((AP) = *(__gnuc_va_list *)__builtin_saveregs())
+#else
+#define va_start(AP) \
+ ( (AP).__va_reg = (int *) __builtin_saveregs2(1), \
+ (AP).__va_stk = (int *) __builtin_argptr(), \
+ (AP).__va_arg = (int) (__builtin_argsize() - 4 + 3) / 4 )
+#endif
+#define va_alist __va_1st_arg
+#define va_dcl register int va_alist;
+
+#endif /* _STDARG_H */
+
+/* Avoid trouble between this file and _int_varargs.h under DG/UX. This file
+ can be included by <stdio.h> and others and provides definitions of
+ __va_size and __va_reg_p and a va_list typedef. Avoid defining va_list
+ again with _VA_LIST. */
+#ifdef __INT_VARARGS_H
+#undef __va_size
+#undef __va_reg_p
+#define __gnuc_va_list va_list
+#define _VA_LIST
+#else
+/* Similarly, if this gets included first, do nothing in _int_varargs.h. */
+#define __INT_VARARGS_H
+#endif
+
+#define __va_reg_p(TYPE) \
+ (__builtin_classify_type(*(TYPE *)0) < 12 \
+ ? sizeof(TYPE) <= 8 : sizeof(TYPE) == 4 && __alignof__(*(TYPE *)0) == 4)
+
+#define __va_size(TYPE) ((sizeof(TYPE) + 3) >> 2)
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+#define va_arg(AP,TYPE) \
+ ( (AP).__va_arg = (((AP).__va_arg + (1 << (__alignof__(*(TYPE *)0) >> 3)) - 1) \
+ & ~((1 << (__alignof__(*(TYPE *)0) >> 3)) - 1)) \
+ + __va_size(TYPE), \
+ *((TYPE *) (void *) ((__va_reg_p(TYPE) \
+ && (AP).__va_arg < 8 + __va_size(TYPE) \
+ ? (AP).__va_reg : (AP).__va_stk) \
+ + ((AP).__va_arg - __va_size(TYPE)))))
+
+#define va_end(AP)
+
+#endif /* defined (_STDARG_H) || defined (_VARARGS_H) */
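+
+/*
+ * Editor's sketch (illustration only, not part of the original import):
+ * a minimal consumer of the macros above, via <stdarg.h>.
+ */
+#ifdef notdef
+#include <stdarg.h>
+
+static int
+sum(int count, ...)
+{
+	va_list ap;
+	int i, total = 0;
+
+	va_start(ap, count);
+	for (i = 0; i < count; i++)
+		total += va_arg(ap, int);
+	va_end(ap);
+	return (total);
+}
+#endif /* notdef */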
diff --git a/sys/arch/mvme88k/include/varargs.h b/sys/arch/mvme88k/include/varargs.h
new file mode 100644
index 00000000000..a04933a211c
--- /dev/null
+++ b/sys/arch/mvme88k/include/varargs.h
@@ -0,0 +1,189 @@
+/* This file has local changes by MOTOROLA
+Thu Sep 9 09:06:29 CDT 1993 Dale Rahn (drahn@pacific)
+ * (gstdarg.h, gvarargs.h) C-Front requires all builtins to
+ be defined. This is to insert these definitions if
+ __cplusplus is defined but not using the G++ compiler.
+ */
+#ifndef __GNUC__
+/* Use the system's macros with the system's compiler. */
+#include <varargs.h>
+#else
+/* Record that this is varargs.h; this turns off stdarg.h. */
+
+#ifndef _VARARGS_H
+#define _VARARGS_H
+
+#ifdef __sparc__
+#include <va-sparc.h>
+#else
+#ifdef __spur__
+#include <va-spur.h>
+#else
+#ifdef __mips__
+#include <va-mips.h>
+#else
+#ifdef __i860__
+#include <va-i860.h>
+#else
+#ifdef __pyr__
+#include <va-pyr.h>
+#else
+#ifdef __clipper__
+#include <va-clipper.h>
+#else
+#ifdef __m88k__
+#include <va-m88k.h>
+#else
+#if defined(__hppa__) || defined(hp800)
+#include <va-pa.h>
+#else
+#ifdef __i960__
+#include <va-i960.h>
+#else
+#ifdef __alpha__
+#include <va-alpha.h>
+#else
+
+#ifdef __NeXT__
+
+/* On Next, erase any vestiges of stdarg.h. */
+
+#ifdef _ANSI_STDARG_H_
+#define _VA_LIST_
+#endif
+#define _ANSI_STDARG_H_
+
+#undef va_alist
+#undef va_dcl
+#undef va_list
+#undef va_start
+#undef va_end
+#undef __va_rounded_size
+#undef va_arg
+#endif /* __NeXT__ */
+
+/* In GCC version 2, we want an ellipsis at the end of the declaration
+ of the argument list. GCC version 1 can't parse it. */
+
+#if __GNUC__ > 1
+#define __va_ellipsis ...
+#else
+#define __va_ellipsis
+#endif
+
+/* These macros implement traditional (non-ANSI) varargs
+ for GNU C. */
+
+#define va_alist __builtin_va_alist
+/* The ... causes current_function_varargs to be set in cc1. */
+#define va_dcl int __builtin_va_alist; __va_ellipsis
+
+/* Define __gnuc_va_list, just as in gstdarg.h. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX)
+typedef char *__gnuc_va_list;
+#else
+typedef void *__gnuc_va_list;
+#endif
+#endif
+
+#define va_start(AP) AP=(char *) &__builtin_va_alist
+
+#define va_end(AP)
+
+#define __va_rounded_size(TYPE) \
+ (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+
+#if defined (__arm__) || defined (__i386__) || defined (__ns32000__) || defined (__vax__)
+/* This is for little-endian machines; small args are padded upward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE))))
+#else /* big-endian */
+/* This is for big-endian machines; small args are padded downward. */
+#define va_arg(AP, TYPE) \
+ (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \
+ *((TYPE *) (void *) ((char *) (AP) - ((sizeof (TYPE) < 4 \
+ ? sizeof (TYPE) \
+ : __va_rounded_size (TYPE))))))
+#endif /* big-endian */
+
+#endif /* not alpha */
+#endif /* not i960 */
+#endif /* not hppa */
+#endif /* not m88k */
+#endif /* not clipper */
+#endif /* not pyr */
+#endif /* not i860 */
+#endif /* not mips */
+#endif /* not spur */
+#endif /* not sparc */
+#endif /* not _VARARGS_H */
+
+/* Define va_list from __gnuc_va_list. */
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */
+#undef _VA_LIST
+#endif
+
+#ifdef __svr4__
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#endif /* _VA_LIST_ */
+
+#else /* not __svr4__ */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____)
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#if !(defined (__BSD_NET2__) || defined (____386BSD____))
+#define _VA_LIST_
+#endif
+#define _VA_LIST
+typedef __gnuc_va_list va_list;
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_ */
+
+#endif /* not __svr4__ */
+
+/* The next BSD release (if there is one) wants this symbol to be
+ undefined instead of _VA_LIST_. */
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+#if defined(__cplusplus) && !defined(__GNUG__)
+
+/* This is added to work with AT&T C++. */
+extern "C" {
+ char *__builtin_next_arg(void);
+ __gnuc_va_list *__builtin_saveregs(void);
+ void *__builtin_saveregs2(int);
+ int *__builtin_argptr(void);
+ int __builtin_argsize(void);
+ int __builtin_classify_type(...);
+ int __alignof__(...);
+}
+#endif
+
+
+#endif /* __GNUC__ */
diff --git a/sys/arch/mvme88k/include/vid.h b/sys/arch/mvme88k/include/vid.h
new file mode 100644
index 00000000000..acaa280dace
--- /dev/null
+++ b/sys/arch/mvme88k/include/vid.h
@@ -0,0 +1,56 @@
+#define START_BLOCK 1
+#define LOADER_SIZE 2
+#define LOADER_ADDRESS 0x1F0000
+
+#ifndef __ASSEMBLER__
+struct vid {
+ unsigned char vid_id[4];
+ unsigned char vid_0[16];
+ unsigned int vid_oss;
+ unsigned short vid_osl;
+ unsigned char vid_1[4];
+ unsigned short vid_osa_u;
+ unsigned short vid_osa_l;
+ unsigned char vid_2[4];
+ unsigned char vid_vd[20];
+ unsigned char vid_3[86];
+ unsigned int vid_cas;
+ unsigned char vid_cal;
+ unsigned char vid_4[99];
+ unsigned char vid_mot[8];
+};
+struct cfg {
+
+ unsigned char cfg_0[4];
+ unsigned short cfg_atm;
+ unsigned short cfg_prm;
+ unsigned short cfg_atw;
+ unsigned short cfg_rec;
+ unsigned char cfg_1[12];
+ unsigned char cfg_spt;
+ unsigned char cfg_hds;
+ unsigned short cfg_trk;
+ unsigned char cfg_ilv;
+ unsigned char cfg_sof;
+ unsigned short cfg_psm;
+ unsigned short cfg_shd;
+ unsigned char cfg_2[2];
+ unsigned short cfg_pcom;
+ unsigned char cfg_3;
+ unsigned char cfg_ssr;
+ unsigned short cfg_rwcc;
+ unsigned short cfg_ecc;
+ unsigned short cfg_eatm;
+ unsigned short cfg_eprm;
+ unsigned short cfg_eatw;
+ unsigned char cfg_gpb1;
+ unsigned char cfg_gpb2;
+ unsigned char cfg_gpb3;
+ unsigned char cfg_gpb4;
+ unsigned char cfg_ssc;
+ unsigned char cfg_runit;
+ unsigned short cfg_rsvc1;
+ unsigned short cfg_rsvc2;
+ unsigned char cfg_4[196];
+};
+#endif
diff --git a/sys/arch/mvme88k/include/vmparam.h b/sys/arch/mvme88k/include/vmparam.h
new file mode 100644
index 00000000000..586fb83fe38
--- /dev/null
+++ b/sys/arch/mvme88k/include/vmparam.h
@@ -0,0 +1,219 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ */
+/*
+ * File: vm_param.h
+ *
+ * machine dependent virtual memory parameters.
+ * Most of the declarations are preceded by M88K_ (or m88k_)
+ * which is OK because only M88K specific code will be using
+ * them.
+ */
+
+
+#ifndef _MACHINE_VM_PARAM_
+#define _MACHINE_VM_PARAM_
+
+/*
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack.
+ */
+#define USRTEXT 0x1000 /* Start of user text */
+#define USRSTACK 0x80000000 /* Start of user stack */
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (8*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (16*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (64*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (512*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ MAXDSIZ /* max stack size */
+#endif
+
+/*
+ * Default sizes of swap allocation chunks (see dmap.h).
+ * The actual values may be changed in vminit() based on MAXDSIZ.
+ * With MAXDSIZ of 16Mb and NDMAP of 38, dmmax will be 1024.
+ * DMMIN should be at least ctod(1) so that vtod() works.
+ * vminit() insures this.
+ */
+#define DMMIN 32 /* smallest swap allocation */
+#define DMMAX 4096 /* largest potential swap allocation */
+#define DMTEXT 1024 /* swap allocation for text */
+
+/*
+ * Size of shared memory map
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 1024
+#endif
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * A swapped in process is given a small amount of core without being bothered
+ * by the page replacement algorithm. Basically this says that if you are
+ * swapped in you deserve some resources. We protect the last SAFERSS
+ * pages against paging and will just swap you out rather than paging you.
+ * Note that each process has at least UPAGES+CLSIZE pages which are not
+ * paged anyways (this is currently 8+2=10 pages or 5k bytes), so this
+ * number just means a swapped in process is given around 25k bytes.
+ * Just for fun: current memory prices are 4600$ a megabyte on VAX (4/22/81),
+ * so we loan each swapped in process memory worth 100$, or just admit
+ * that we don't consider it worthwhile and swap it out to disk which costs
+ * $30/mb or about $0.75.
+ */
+#define SAFERSS 4 /* nominal ``small'' resident set size
+ protected against replacement */
+
+#define VM_MINUSER_ADDRESS ((vm_offset_t) 0)
+#define VM_MAXUSER_ADDRESS ((vm_offset_t) 0xffc00000U)
+
+#define VM_MINKERNEL_ADDRESS ((vm_offset_t) 0)
+#define VM_MAXKERNEL_ADDRESS ((vm_offset_t) 0x1fffffff)
+
+/*
+ * Mach derived constants
+ */
+#define BYTE_SIZE 8 /* byte size in bits */
+
+#define M88K_PGBYTES (1<<12) /* bytes per m88k page */
+#define M88K_PGSHIFT 12 /* number of bits to shift for pages */
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define m88k_btop(x) (((unsigned)(x)) >> M88K_PGSHIFT)
+#define m88k_ptob(x) (((unsigned)(x)) << M88K_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts (i.e. 1 byte rounds up to one page).
+ */
+
+#define m88k_round_page(x) ((((unsigned)(x)) + M88K_PGBYTES - 1) & \
+ ~(M88K_PGBYTES-1))
+#define m88k_trunc_page(x) (((unsigned)(x)) & ~(M88K_PGBYTES-1))
+
+#define VM_MIN_ADDRESS ((vm_offset_t) 0)
+#define VM_MAX_ADDRESS ((vm_offset_t) 0xffc00000U)
+
+#define VM_MIN_USER_ADDRESS ((vm_offset_t) 0)
+#define VM_MAX_USER_ADDRESS ((vm_offset_t) 0xffc00000U)
+
+/* on vme188, max = 0xf0000000 */
+
+#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0)
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x1fffffff)
+
+#define KERNEL_STACK_SIZE (3*4096) /* kernel stack size */
+#define INTSTACK_SIZE (3*4096) /* interrupt stack size */
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
+#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
+
+/*
+ * Conversion between MACHINE pages and VM pages
+ */
+
+#define trunc_m88k_to_vm(p) (atop(trunc_page(m88k_ptob(p))))
+#define round_m88k_to_vm(p) (atop(round_page(m88k_ptob(p))))
+#define vm_to_m88k(p) (m88k_btop(ptoa(p)))
+
+#if 1 /*Do we really need all this stuff*/
+#if 1 /*Do we really need all this stuff*/
+#if 1 /*Do we really need all this stuff*/
+#define M88K_SGPAGES (1<<10) /* pages per m88k segment */
+#define M88K_SGPGSHIFT 10 /* number of bits to shift for segment-page */
+#define M88K_ALSEGMS (1<<10) /* segments per m88k all space */
+#define M88K_ALSGSHIFT 10 /* number of bits to shift for all-segment */
+
+#define M88K_SGBYTES (1<<22) /* bytes per m88k segments */
+#define M88K_SGSHIFT 22 /* number of bits to shift for segment */
+#define M88K_ALPAGES (1<<20) /* pages per m88k all space */
+#define M88K_ALPGSHIFT 20 /* number of bits to shift for all-page */
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define m88k_btopr(x) (((unsigned)(x) + (M88K_PGBYTES - 1)) >> M88K_PGSHIFT)
+#define m88k_btosr(x) (((unsigned)(x) + (M88K_SGBYTES - 1)) >> M88K_SGSHIFT)
+#define m88k_btos(x) (((unsigned)(x)) >> M88K_SGSHIFT)
+#define m88k_stob(x) (((unsigned)(x)) << M88K_SGSHIFT)
+#define m88k_ptosr(x) (((unsigned)(x) + (M88K_SGPAGES - 1)) >> M88K_SGPGSHIFT)
+#define m88k_ptos(x) (((unsigned)(x)) >> M88K_SGPGSHIFT)
+#define m88k_stop(x) (((unsigned)(x)) << M88K_SGPGSHIFT)
+
+/*
+ * Round off or truncate to the nearest segment.  These will work
+ * for either addresses or counts (i.e. 1 byte rounds to 1 segment).
+ */
+
+#define m88k_round_segm(x) ((((unsigned)(x)) + M88K_SGBYTES - 1) & \
+ ~(M88K_SGBYTES-1))
+#define m88k_next_segm(x) ((((unsigned)(x)) & ~(M88K_SGBYTES-1)) + \
+ M88K_SGBYTES)
+#define m88k_trunc_segm(x) (((unsigned)(x)) & ~(M88K_SGBYTES-1))
+
+#define m88k_round_seg(x) ((((unsigned)(x)) + M88K_SGBYTES - 1) & \
+ ~(M88K_SGBYTES-1))
+#define m88k_trunc_seg(x) (((unsigned)(x)) & ~(M88K_SGBYTES-1))
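+
+/*
+ * Illustrative examples, assuming the 4Mb (1<<22) segment size defined above:
+ *
+ *	m88k_btos(0x00400001)		== 1		(byte address -> segment)
+ *	m88k_stob(1)			== 0x00400000	(segment -> byte address)
+ *	m88k_round_seg(0x00400001)	== 0x00800000
+ *	m88k_trunc_seg(0x007fffff)	== 0x00400000
+ *	m88k_next_segm(0x00400000)	== 0x00800000
+ */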
+
+#define VEQR_ADDR 0x20000000 /* kernel virtual eq phy mapping */
+#endif /* Do we really need all this stuff */
+#endif /* Do we really need all this stuff */
+#endif /* Do we really need all this stuff */
+
+#endif /* _MACHINE_VM_PARAM_ */
diff --git a/sys/arch/mvme88k/m88k/TODO b/sys/arch/mvme88k/m88k/TODO
new file mode 100644
index 00000000000..a6151ebd11e
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/TODO
@@ -0,0 +1,4 @@
+1. It appears that trap() assumes instruction access or data access
+ faults can only be caused by page faults. Could do better by
+ checking PFSR in the CMMU and handling parity errors, page faults,
+ segmentation faults and protection faults appropriately.
diff --git a/sys/arch/mvme88k/m88k/autoconf.c b/sys/arch/mvme88k/m88k/autoconf.c
new file mode 100644
index 00000000000..cbac9fe7ad4
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/autoconf.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 1994 Christian E. Hopps
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christian E. Hopps.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: autoconf.c,v 1.1 1995/10/18 10:54:25 deraadt Exp $
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <machine/cpu.h>
+
+void configure __P((void));
+void setroot __P((void));
+void swapconf __P((void));
+
+int realconfig=0;
+int cold; /* 1 if still booting */
+#include <sys/kernel.h>
+/*
+ * called at boot time, configure all devices on system
+ */
+void
+configure()
+{
+ /*
+ * this is the real thing baby (i.e. not console init)
+ */
+ realconfig = 1;
+
+ if (config_rootfound("mainbus", "mainbus") == 0)
+ panic("no mainbus found");
+
+#ifdef GENERIC
+ if ((boothowto & RB_ASKNAME) == 0)
+ setroot();
+ setconf();
+#else
+ setroot();
+#endif
+ swapconf();
+ cold = 0;
+}
+
+/*ARGSUSED*/
+int
+simple_devprint(auxp, pnp)
+ void *auxp;
+ char *pnp;
+{
+ return(QUIET);
+}
+
+int
+matchname(fp, sp)
+ char *fp, *sp;
+{
+ int len;
+
+ len = strlen(fp);
+ if (strlen(sp) != len)
+ return(0);
+ if (bcmp(fp, sp, len) == 0)
+ return(1);
+ return(0);
+}
+/*
+ * this function needs to get enough configured to do a console
+ * basically this means start attaching the grfxx's that support
+ * the console. Kinda hacky but it works.
+ */
+int
+config_console()
+{
+ struct cfdata *cf;
+
+ /*
+ * we need mainbus' cfdata.
+ */
+ cf = config_rootsearch(NULL, "mainbus", "mainbus");
+ if (cf == NULL)
+ panic("no mainbus");
+	return (0);
+}
+
+void
+swapconf()
+{
+ struct swdevt *swp;
+ u_int maj;
+ int nb;
+
+ for (swp = swdevt; swp->sw_dev > 0; swp++) {
+ maj = major(swp->sw_dev);
+
+		if (maj >= nblkdev)
+ break;
+
+ if (bdevsw[maj].d_psize) {
+ nb = bdevsw[maj].d_psize(swp->sw_dev);
+ if (nb > 0 &&
+ (swp->sw_nblks == 0 || swp->sw_nblks > nb))
+ swp->sw_nblks = nb;
+ else
+ swp->sw_nblks = 0;
+ }
+ swp->sw_nblks = ctod(dtoc(swp->sw_nblks));
+ }
+ if (dumplo == 0 && dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize)
+ /*dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) - physmem;*/
+ dumplo = (*bdevsw[major(dumpdev)].d_psize)(dumpdev) -
+ ctob(physmem)/DEV_BSIZE;
+ if (dumplo < 0)
+ dumplo = 0;
+
+}
+
+#define DOSWAP /* change swdevt and dumpdev */
+u_long bootdev = 0; /* should be dev_t, but not until 32 bits */
+
+static char devname[][2] = {
+ 0,0,
+ 0,0,
+ 0,0,
+ 0,0,
+ 's','d', /* 4 = sd -- new SCSI system */
+};
+
+void
+setroot()
+{
+ int majdev, mindev, unit, part, adaptor;
+ dev_t temp, orootdev;
+ struct swdevt *swp;
+
+ printf("setroot boothowto %x bootdev %x\n", boothowto, bootdev);
+ if (boothowto & RB_DFLTROOT ||
+ (bootdev & B_MAGICMASK) != (u_long)B_DEVMAGIC)
+ return;
+ majdev = (bootdev >> B_TYPESHIFT) & B_TYPEMASK;
+	if (majdev >= sizeof(devname) / sizeof(devname[0]))
+ return;
+ adaptor = (bootdev >> B_ADAPTORSHIFT) & B_ADAPTORMASK;
+ part = (bootdev >> B_PARTITIONSHIFT) & B_PARTITIONMASK;
+ unit = (bootdev >> B_UNITSHIFT) & B_UNITMASK;
+ orootdev = rootdev;
+ rootdev = MAKEDISKDEV(majdev, unit, part);
+ /*
+ * If the original rootdev is the same as the one
+ * just calculated, don't need to adjust the swap configuration.
+ */
+ if (rootdev == orootdev)
+ return;
+ printf("changing root device to %c%c%d%c\n",
+ devname[majdev][0], devname[majdev][1],
+ unit, part + 'a');
+#ifdef DOSWAP
+ mindev = DISKUNIT(rootdev);
+ for (swp = swdevt; swp->sw_dev; swp++) {
+ printf("DOSWAP swap %x dev %x\n", swp, swp->sw_dev);
+ if (majdev == major(swp->sw_dev) &&
+ mindev == DISKUNIT(swp->sw_dev)) {
+ temp = swdevt[0].sw_dev;
+ swdevt[0].sw_dev = swp->sw_dev;
+ swp->sw_dev = temp;
+ break;
+ }
+ }
+ if (swp->sw_dev == 0)
+ return;
+ /*
+ * If dumpdev was the same as the old primary swap
+ * device, move it to the new primary swap device.
+ */
+ if (temp == dumpdev)
+ dumpdev = swdevt[0].sw_dev;
+#endif
+}
diff --git a/sys/arch/mvme88k/m88k/clock.c b/sys/arch/mvme88k/m88k/clock.c
new file mode 100644
index 00000000000..50703114c09
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/clock.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)clock.c 8.1 (Berkeley) 6/11/93
+ *
+ * from: Header: clock.c,v 1.17 92/11/26 03:04:47 torek Exp (LBL)
+ * $Id: clock.c,v 1.1 1995/10/18 10:54:27 deraadt Exp $
+ */
+
+/*
+ * Clock driver. This is the id prom (``eeprom'') driver as well
+ * and includes the timer register functions too.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#ifdef GPROF
+#include <sys/gmon.h>
+#endif
+
+#include <vm/vm.h>
+
+#include <machine/autoconf.h>
+
+#include <sparc/sparc/clockreg.h>
+#include <sparc/sparc/intreg.h>
+#include <sparc/sparc/timerreg.h>
+
+/*
+ * Statistics clock interval and variance, in usec. Variance must be a
+ * power of two. Since this gives us an even number, not an odd number,
+ * we discard one case and compensate. That is, a variance of 1024 would
+ * give us offsets in [0..1023]. Instead, we take offsets in [1..1023].
+ * This is symmetric about the point 512, or statvar/2, and thus averages
+ * to that value (assuming uniform random numbers).
+ */
+/* XXX fix comment to match value */
+int statvar = 8192;
+int statmin; /* statclock interval - 1/2*variance */
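+
+/*
+ * Worked example (illustrative only): with stathz == 100, cpu_initclocks()
+ * below computes statint = 1000000/100 = 10000us and minint = 5100us;
+ * statvar is halved from 8192 until it is <= minint, leaving 4096, and
+ * statmin = 10000 - 4096/2 = 7952us.  The randomized per-tick offset then
+ * centers the statclock period on statint as described above.
+ */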
+
+static int clockmatch __P((struct device *, struct cfdata *, void *));
+static void clockattach __P((struct device *, struct device *, void *));
+
+struct cfdriver clockcd =
+ { NULL, "clock", clockmatch, clockattach, DV_DULL, sizeof(struct device) };
+
+static int timermatch __P((struct device *, struct cfdata *, void *));
+static void timerattach __P((struct device *, struct device *, void *));
+struct cfdriver timercd =
+ { NULL, "timer", timermatch, timerattach, DV_DULL, sizeof(struct device) };
+
+/*
+ * The OPENPROM calls the clock the "eeprom", so we have to have our
+ * own special match function to call it the "clock".
+ */
+static int
+clockmatch(parent, cf, aux)
+ struct device *parent;
+ struct cfdata *cf;
+ void *aux;
+{
+
+ return (strcmp("eeprom", ((struct romaux *)aux)->ra_name) == 0);
+}
+
+/* ARGSUSED */
+static void
+clockattach(parent, self, aux)
+ struct device *parent, *self;
+ void *aux;
+{
+ register int h;
+ register struct clockreg *cl;
+ struct romaux *ra = aux;
+ char *prop;
+
+ prop = getpropstring(ra->ra_node, "model");
+ printf(": %s (eeprom)\n", prop);
+ /*
+ * We ignore any existing virtual address as we need to map
+ * this read-only and make it read-write only temporarily,
+ * whenever we read or write the clock chip. The clock also
+ * contains the ID ``PROM'', and I have already had the pleasure
+ * of reloading the cpu type, Ethernet address, etc, by hand from
+ * the console FORTH interpreter. I intend not to enjoy it again.
+ */
+ if (strcmp(prop, "mk48t08") == 0) {
+ /*
+ * the MK48T08 is 8K
+ */
+ cl = (struct clockreg *)mapiodev(ra->ra_paddr, 2 * NBPG);
+ pmap_changeprot(kernel_pmap, (vm_offset_t)cl, VM_PROT_READ, 1);
+ pmap_changeprot(kernel_pmap, (vm_offset_t)cl + NBPG, VM_PROT_READ, 1);
+ cl = (struct clockreg *)((int)cl + CLK_MK48T08_OFF);
+ } else {
+ /*
+ * the MK48T02 is 2K
+ */
+ cl = (struct clockreg *)mapiodev(ra->ra_paddr, sizeof *clockreg);
+ pmap_changeprot(kernel_pmap, (vm_offset_t)cl, VM_PROT_READ, 1);
+ }
+
+ h = cl->cl_idprom.id_machine << 24;
+ h |= cl->cl_idprom.id_hostid[0] << 16;
+ h |= cl->cl_idprom.id_hostid[1] << 8;
+ h |= cl->cl_idprom.id_hostid[2];
+ hostid = h;
+ clockreg = cl;
+}
+
+/*
+ * The OPENPROM calls the timer the "counter-timer".
+ */
+static int
+timermatch(parent, cf, aux)
+ struct device *parent;
+ struct cfdata *cf;
+ void *aux;
+{
+
+ return (strcmp("counter-timer", ((struct romaux *)aux)->ra_name) == 0);
+}
+
+/* ARGSUSED */
+static void
+timerattach(parent, self, aux)
+ struct device *parent, *self;
+ void *aux;
+{
+ register struct romaux *ra = aux;
+
+ printf("\n");
+ /*
+ * This time, we ignore any existing virtual address because
+ * we have a fixed virtual address for the timer, to make
+ * microtime() faster.
+ */
+ (void)mapdev(ra->ra_paddr, TIMERREG_VA, sizeof(struct timerreg));
+ /* should link interrupt handlers here, rather than compiled-in? */
+}
+
+/*
+ * Write en/dis-able clock registers. We coordinate so that several
+ * writers can run simultaneously.
+ */
+void
+clk_wenable(onoff)
+ int onoff;
+{
+ register int s;
+ register vm_prot_t prot;/* nonzero => change prot */
+ static int writers;
+
+ s = splhigh();
+ if (onoff)
+ prot = writers++ == 0 ? VM_PROT_READ|VM_PROT_WRITE : 0;
+ else
+ prot = --writers == 0 ? VM_PROT_READ : 0;
+ splx(s);
+ if (prot)
+ pmap_changeprot(kernel_pmap, (vm_offset_t)clockreg, prot, 1);
+}
+
+/*
+ * XXX this belongs elsewhere
+ */
+void
+myetheraddr(cp)
+ u_char *cp;
+{
+ register struct clockreg *cl = clockreg;
+
+ cp[0] = cl->cl_idprom.id_ether[0];
+ cp[1] = cl->cl_idprom.id_ether[1];
+ cp[2] = cl->cl_idprom.id_ether[2];
+ cp[3] = cl->cl_idprom.id_ether[3];
+ cp[4] = cl->cl_idprom.id_ether[4];
+ cp[5] = cl->cl_idprom.id_ether[5];
+}
+
+/*
+ * Delay: wait for `about' n microseconds to pass.
+ * This is easy to do on the SparcStation since we have
+ * freerunning microsecond timers -- no need to guess at
+ * cpu speed factors. We just wait for it to change n times
+ * (if we calculated a limit, we might overshoot, and precision
+ * is irrelevant here---we want less object code).
+ */
+delay(n)
+ register int n;
+{
+ register int c, t;
+
+ if (timercd.cd_ndevs == 0)
+ panic("delay");
+ c = TIMERREG->t_c10.t_counter;
+ while (--n >= 0) {
+ while ((t = TIMERREG->t_c10.t_counter) == c)
+ continue;
+ c = t;
+ }
+}
+
+/*
+ * Set up the real-time and statistics clocks. Leave stathz 0 only if
+ * no alternative timer is available.
+ *
+ * The frequencies of these clocks must be an even number of microseconds.
+ */
+cpu_initclocks()
+{
+ register int statint, minint;
+
+ if (1000000 % hz) {
+ printf("cannot get %d Hz clock; using 100 Hz\n", hz);
+ hz = 100;
+ tick = 1000000 / hz;
+ }
+ if (stathz == 0)
+ stathz = hz;
+ if (1000000 % stathz) {
+ printf("cannot get %d Hz statclock; using 100 Hz\n", stathz);
+ stathz = 100;
+ }
+ profhz = stathz; /* always */
+
+ statint = 1000000 / stathz;
+ minint = statint / 2 + 100;
+ while (statvar > minint)
+ statvar >>= 1;
+ TIMERREG->t_c10.t_limit = tmr_ustolim(tick);
+ TIMERREG->t_c14.t_limit = tmr_ustolim(statint);
+ statmin = statint - (statvar >> 1);
+ ienab_bis(IE_L14 | IE_L10);
+}
+
+/*
+ * Dummy setstatclockrate(), since we know profhz==hz.
+ */
+/* ARGSUSED */
+void
+setstatclockrate(newhz)
+ int newhz;
+{
+ /* nothing */
+}
+
+/*
+ * Clock interrupts.
+ */
+int
+clockintr(cap)
+ void *cap;
+{
+ volatile register unsigned char icr;
+ /* clear clock interrupt */
+ asm ("ld.b %0,%1" : "=r" (icr) : "" (TIMER2ICR));
+ icr |= ICLR;
+ asm ("st.b %0,%1" : "=r" (icr) : "" (TIMER2ICR));
+
+ /* read the limit register to clear the interrupt */
+ hardclock((struct clockframe *)cap);
+
+ return (1);
+}
+
+/*
+ * BCD to decimal and decimal to BCD.
+ */
+#define FROMBCD(x) (((x) >> 4) * 10 + ((x) & 0xf))
+#define TOBCD(x) (((x) / 10 * 16) + ((x) % 10))
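+
+/*
+ * For example, FROMBCD(0x59) == 5*10 + 9 == 59 and TOBCD(59) == 0x59;
+ * the clock chip stores each time-of-day field as two packed BCD digits.
+ */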
+
+#define SECDAY (24 * 60 * 60)
+#define SECYR (SECDAY * 365)
+#define LEAPYEAR(y) (((y) & 3) == 0)
+
+/*
+ * This code is defunct after 2068.
+ * Will Unix still be here then??
+ */
+const short dayyr[12] =
+ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
+
+chiptotime(sec, min, hour, day, mon, year)
+ register int sec, min, hour, day, mon, year;
+{
+ register int days, yr;
+
+ sec = FROMBCD(sec);
+ min = FROMBCD(min);
+ hour = FROMBCD(hour);
+ day = FROMBCD(day);
+ mon = FROMBCD(mon);
+ year = FROMBCD(year) + YEAR0;
+
+ /* simple sanity checks */
+ if (year < 70 || mon < 1 || mon > 12 || day < 1 || day > 31)
+ return (0);
+ days = 0;
+ for (yr = 70; yr < year; yr++)
+ days += LEAPYEAR(yr) ? 366 : 365;
+ days += dayyr[mon - 1] + day - 1;
+ if (LEAPYEAR(yr) && mon > 2)
+ days++;
+ /* now have days since Jan 1, 1970; the rest is easy... */
+ return (days * SECDAY + hour * 3600 + min * 60 + sec);
+}
+
+struct chiptime {
+ int sec;
+ int min;
+ int hour;
+ int wday;
+ int day;
+ int mon;
+ int year;
+};
+
+timetochip(c)
+ register struct chiptime *c;
+{
+ register int t, t2, t3, now = time.tv_sec;
+
+ /* compute the year */
+ t2 = now / SECDAY;
+ t3 = (t2 + 2) % 7; /* day of week */
+ c->wday = TOBCD(t3 + 1);
+
+ t = 69;
+ while (t2 >= 0) { /* whittle off years */
+ t3 = t2;
+ t++;
+ t2 -= LEAPYEAR(t) ? 366 : 365;
+ }
+ c->year = t;
+
+ /* t3 = month + day; separate */
+ t = LEAPYEAR(t);
+ for (t2 = 1; t2 < 12; t2++)
+ if (t3 < dayyr[t2] + (t && t2 > 1))
+ break;
+
+ /* t2 is month */
+ c->mon = t2;
+ c->day = t3 - dayyr[t2 - 1] + 1;
+ if (t && t2 > 2)
+ c->day--;
+
+ /* the rest is easy */
+ t = now % SECDAY;
+ c->hour = t / 3600;
+ t %= 3600;
+ c->min = t / 60;
+ c->sec = t % 60;
+
+ c->sec = TOBCD(c->sec);
+ c->min = TOBCD(c->min);
+ c->hour = TOBCD(c->hour);
+ c->day = TOBCD(c->day);
+ c->mon = TOBCD(c->mon);
+ c->year = TOBCD(c->year - YEAR0);
+}
+
+/*
+ * Set up the system's time, given a `reasonable' time value.
+ */
+inittodr(base)
+ time_t base;
+{
+ register struct clockreg *cl = clockreg;
+ int sec, min, hour, day, mon, year;
+ int badbase = 0, waszero = base == 0;
+
+ if (base < 5 * SECYR) {
+ /*
+ * If base is 0, assume filesystem time is just unknown
+		 * instead of preposterous.  Don't bark.
+ */
+ if (base != 0)
+ printf("WARNING: preposterous time in file system\n");
+ /* not going to use it anyway, if the chip is readable */
+ base = 21*SECYR + 186*SECDAY + SECDAY/2;
+ badbase = 1;
+ }
+ clk_wenable(1);
+ cl->cl_csr |= CLK_READ; /* enable read (stop time) */
+ sec = cl->cl_sec;
+ min = cl->cl_min;
+ hour = cl->cl_hour;
+ day = cl->cl_mday;
+ mon = cl->cl_month;
+ year = cl->cl_year;
+ cl->cl_csr &= ~CLK_READ; /* time wears on */
+ clk_wenable(0);
+ if ((time.tv_sec = chiptotime(sec, min, hour, day, mon, year)) == 0) {
+ printf("WARNING: bad date in battery clock");
+ /*
+ * Believe the time in the file system for lack of
+ * anything better, resetting the clock.
+ */
+ time.tv_sec = base;
+ if (!badbase)
+ resettodr();
+ } else {
+ int deltat = time.tv_sec - base;
+
+ if (deltat < 0)
+ deltat = -deltat;
+ if (waszero || deltat < 2 * SECDAY)
+ return;
+ printf("WARNING: clock %s %d days",
+ time.tv_sec < base ? "lost" : "gained", deltat / SECDAY);
+ }
+ printf(" -- CHECK AND RESET THE DATE!\n");
+}
+
+/*
+ * Reset the clock based on the current time.
+ * Used when the current clock is preposterous, when the time is changed,
+ * and when rebooting. Do nothing if the time is not yet known, e.g.,
+ * when crashing during autoconfig.
+ */
+resettodr()
+{
+ register struct clockreg *cl;
+ struct chiptime c;
+
+ if (!time.tv_sec || (cl = clockreg) == NULL)
+ return;
+ timetochip(&c);
+ clk_wenable(1);
+ cl->cl_csr |= CLK_WRITE; /* enable write */
+ cl->cl_sec = c.sec;
+ cl->cl_min = c.min;
+ cl->cl_hour = c.hour;
+ cl->cl_wday = c.wday;
+ cl->cl_mday = c.day;
+ cl->cl_month = c.mon;
+ cl->cl_year = c.year;
+ cl->cl_csr &= ~CLK_WRITE; /* load them up */
+ clk_wenable(0);
+}
diff --git a/sys/arch/mvme88k/m88k/cmmu.c b/sys/arch/mvme88k/m88k/cmmu.c
new file mode 100644
index 00000000000..2b76a62aeb2
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/cmmu.c
@@ -0,0 +1,1199 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ */
+
+
+#define SNOOP_ENABLE
+#define SHADOW_BATC 0
+
+#ifndef NBPG
+#define NBPG 4096
+#endif /* NBPG */
+
+struct cmmu_regs
+{
+ /* base + $000 */ volatile unsigned idr;
+ /* base + $004 */ volatile unsigned scr;
+ /* base + $008 */ volatile unsigned ssr;
+ /* base + $00C */ volatile unsigned sar;
+ /* */ unsigned padding1[0x3D];
+ /* base + $104 */ volatile unsigned sctr;
+ /* base + $108 */ volatile unsigned pfSTATUSr;
+ /* base + $10C */ volatile unsigned pfADDRr;
+ /* */ unsigned padding2[0x3C];
+ /* base + $200 */ volatile unsigned sapr;
+ /* base + $204 */ volatile unsigned uapr;
+ /* */ unsigned padding3[0x7E];
+ /* base + $400 */ volatile unsigned bwp[8];
+ /* */ unsigned padding4[0xF8];
+ /* base + $800 */ volatile unsigned cdp[4];
+ /* */ unsigned padding5[0x0C];
+ /* base + $840 */ volatile unsigned ctp[4];
+ /* */ unsigned padding6[0x0C];
+ /* base + $880 */ volatile unsigned cssp;
+
+ /* The rest for the 88204 */
+ #define cssp0 cssp
+ /* */ unsigned padding7[0x03];
+ /* base + $890 */ volatile unsigned cssp1;
+ /* */ unsigned padding8[0x03];
+ /* base + $8A0 */ volatile unsigned cssp2;
+ /* */ unsigned padding9[0x03];
+ /* base + $8B0 */ volatile unsigned cssp3;
+
+
+
+
+
+
+};
+
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <machine/board.h>
+#include <machine/cpus.h>
+#if 0
+#include <vm/pmap.h>
+#endif
+
+
+static struct cmmu {
+ struct cmmu_regs *cmmu_regs; /* CMMU "base" area */
+ unsigned char cmmu_cpu; /* cpu number it is attached to */
+	unsigned char  which;		/* either INST_CMMU or DATA_CMMU */
+ unsigned char cmmu_alive;
+ #define CMMU_DEAD 0 /* This cmmu not there */
+ #define CMMU_AVAILABLE 1 /* It's there, but which cpu's? */
+ #define CMMU_MARRIED 2 /* Know which cpu it belongs to. */
+ #if SHADOW_BATC
+ unsigned batc[8];
+ #endif
+ unsigned char pad;
+} cmmu[MAX_CMMUS] = {
+ {(void *)CMMU_I, 0, 0, 0, 0},
+ {(void *)CMMU_D, 0, 1, 0, 0},
+};
+
+#include <machine/m882xx.h>
+/*
+ * We rely upon and use INST_CMMU == 0 and DATA_CMMU == 1
+ */
+#if INST_CMMU != 0 || DATA_CMMU != 1
+ error("ack gag barf!");
+#endif
+struct cpu_cmmu {
+ struct cmmu *pair[2];
+} cpu_cmmu[1];
+
+/*
+ * CMMU(cpu,data) Is the cmmu struct for the named cpu's indicated cmmu.
+ * REGS(cpu,data) is the actual register structure.
+ */
+#define CMMU(cpu, data) cpu_cmmu[(cpu)].pair[(data)?DATA_CMMU:INST_CMMU]
+#define REGS(cpu, data) (*CMMU(cpu, data)->cmmu_regs)
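+
+/*
+ * For example, REGS(0, DATA_CMMU).sapr selects
+ * (*cpu_cmmu[0].pair[DATA_CMMU]->cmmu_regs).sapr, i.e. the supervisor
+ * area pointer register of cpu 0's data CMMU.
+ */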
+
+unsigned cache_policy = 0;
+
+#ifdef CMMU_DEBUG
+void show_apr(unsigned value)
+{
+ union apr_template apr_template;
+ apr_template.bits = value;
+    printf("table @ 0x%x000", apr_template.field.st_base);
+ if (apr_template.field.wt) printf(", writethrough");
+ if (apr_template.field.g) printf(", global");
+ if (apr_template.field.ci) printf(", cache inhibit");
+ if (apr_template.field.te) printf(", valid");
+ else printf(", not valid");
+ printf("]\n");
+}
+
+void show_sctr(unsigned value)
+{
+ union {
+ unsigned bits;
+ struct {
+ unsigned :16,
+ pe: 1,
+ se: 1,
+ pr: 1,
+ :13;
+ } fields;
+ } sctr;
+ sctr.bits = value;
+ printf("%spe, %sse %spr]\n",
+ sctr.fields.pe ? "" : "!",
+ sctr.fields.se ? "" : "!",
+ sctr.fields.pr ? "" : "!");
+}
+#endif
+
+/*
+ * CMMU initialization routine
+ */
+void cmmu_init(void)
+{
+ unsigned tmp, cmmu_num;
+ union cpupid id;
+ int cpu;
+
+ cpu_cmmu[0].pair[INST_CMMU] = cpu_cmmu[0].pair[DATA_CMMU] = 0;
+
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++) {
+ if (!wprobe((vm_offset_t)cmmu[cmmu_num].cmmu_regs, -1)) {
+ id.cpupid = cmmu[cmmu_num].cmmu_regs->idr;
+ if (id.m88200.type != M88200 && id.m88200.type != M88204)
+ continue;
+ cmmu[cmmu_num].cmmu_alive = CMMU_AVAILABLE;
+
+ cpu_cmmu[cmmu[cmmu_num].cmmu_cpu].pair[cmmu[cmmu_num].which] =
+ &cmmu[cmmu_num];
+
+ /*
+ * Reset cache data....
+ * as per M88200 Manual (2nd Ed.) section 3.11.
+ */
+ for (tmp = 0; tmp < 255; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp << 4;
+ cmmu[cmmu_num].cmmu_regs->cssp = 0x3f0ff000;
+ }
+
+ /* 88204 has additional cache to clear */
+ if(id.m88200.type == M88204)
+ {
+ for (tmp = 0; tmp < 255; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp1 = 0x3f0ff000;
+ }
+ for (tmp = 0; tmp < 255; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp2 = 0x3f0ff000;
+ }
+ for (tmp = 0; tmp < 255; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp3 = 0x3f0ff000;
+ }
+ }
+
+ /*
+ * Set the SCTR, SAPR, and UAPR to some known state
+ * (I don't trust the reset to do it).
+ */
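+			/*
+			 * Each `! MASK' term below evaluates to 0, so tmp
+			 * ends up 0: parity, snooping and priority
+			 * arbitration all disabled.
+			 */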
+ tmp =
+ ! CMMU_SCTR_PE | /* not parity enable */
+ ! CMMU_SCTR_SE | /* not snoop enable */
+ ! CMMU_SCTR_PR ; /* not priority arbitration */
+ cmmu[cmmu_num].cmmu_regs->sctr = tmp;
+
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+ AREA_D_WT | /* write through */
+ AREA_D_G | /* global */
+ AREA_D_CI | /* cache inhibit */
+ ! AREA_D_TE ; /* not translation enable */
+ cmmu[cmmu_num].cmmu_regs->sapr =
+ cmmu[cmmu_num].cmmu_regs->uapr = tmp;
+
+
+#if SHADOW_BATC
+ cmmu[cmmu_num].batc[0] =
+ cmmu[cmmu_num].batc[1] =
+ cmmu[cmmu_num].batc[2] =
+ cmmu[cmmu_num].batc[3] =
+ cmmu[cmmu_num].batc[4] =
+ cmmu[cmmu_num].batc[5] =
+ cmmu[cmmu_num].batc[6] =
+ cmmu[cmmu_num].batc[7] = 0;
+#endif
+ cmmu[cmmu_num].cmmu_regs->bwp[0] =
+ cmmu[cmmu_num].cmmu_regs->bwp[1] =
+ cmmu[cmmu_num].cmmu_regs->bwp[2] =
+ cmmu[cmmu_num].cmmu_regs->bwp[3] =
+ cmmu[cmmu_num].cmmu_regs->bwp[4] =
+ cmmu[cmmu_num].cmmu_regs->bwp[5] =
+ cmmu[cmmu_num].cmmu_regs->bwp[6] =
+ cmmu[cmmu_num].cmmu_regs->bwp[7] = 0;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_CACHE_INV_ALL;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_SUPER_ALL;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_USER_ALL;
+ }
+ }
+
+ /*
+ * Now that we know which CMMUs are there, let's report on which
+ * CPU/CMMU sets seem complete (hopefully all)
+ */
+ for (cpu = 0; cpu < MAX_CPUS; cpu++)
+ {
+ if (cpu_cmmu[cpu].pair[INST_CMMU] && cpu_cmmu[cpu].pair[DATA_CMMU])
+ {
+ if(id.m88200.type == M88204)
+ printf("CPU%d is attached with MC88204 CMMU\n", cpu);
+ else
+ printf("CPU%d is attached with MC88200 CMMU\n", cpu);
+
+ }
+ else if (cpu_cmmu[cpu].pair[INST_CMMU])
+ {
+ printf("CPU%d data CMMU is not working.\n", cpu);
+ panic("cmmu-data");
+ }
+ else if (cpu_cmmu[cpu].pair[DATA_CMMU])
+ {
+ printf("CPU%d instruction CMMU is not working.\n", cpu);
+ panic("cmmu");
+ }
+ else
+ {
+ }
+ }
+
+ /*
+ * Enable snooping...
+ */
+ for (cpu = 0; cpu < MAX_CPUS; cpu++)
+ {
+ /*
+ * Enable snooping.
+ * We enable it for instruction cmmus as well so that we can have
+ * breakpoints, etc, and modify code.
+ */
+ tmp =
+ ! CMMU_SCTR_PE | /* not parity enable */
+ CMMU_SCTR_SE | /* snoop enable */
+ ! CMMU_SCTR_PR ; /* not priority arbitration */
+ REGS(cpu, DATA_CMMU).sctr = tmp;
+ REGS(cpu, INST_CMMU).sctr = tmp;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ }
+
+ /*
+ * Turn on some cache.
+ */
+ for (cpu = 0; cpu < MAX_CPUS; cpu++)
+ {
+ /*
+ * Enable some caching for the instruction stream.
+ * Can't cache data yet 'cause device addresses can never
+ * be cached, and we don't have those no-caching zones
+ * set up yet....
+ */
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+ AREA_D_WT | /* write through */
+ AREA_D_G | /* global */
+ AREA_D_CI | /* cache inhibit */
+ ! AREA_D_TE ; /* not translation enable */
+ REGS(cpu, INST_CMMU).sapr = tmp;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ }
+}
+
+/*
+ * Just before poweroff or reset....
+ */
+void cmmu_shutdown_now(void)
+{
+#if 0 /* was trying to fix a reboot problem... doesn't seem to help */
+ unsigned tmp;
+ unsigned cmmu_num;
+
+ /*
+ * Now set some state as we like...
+ */
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
+ {
+ tmp =
+ ! CMMU_SCTR_PE | /* parity enable */
+ ! CMMU_SCTR_SE | /* snoop enable */
+ ! CMMU_SCTR_PR ; /* priority arbitration */
+ cmmu[cmmu_num].cmmu_regs->sctr = tmp;
+
+
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+ ! AREA_D_WT | /* write through */
+ ! AREA_D_G | /* global */
+ AREA_D_CI | /* cache inhibit */
+ ! AREA_D_TE ; /* translation enable */
+ cmmu[cmmu_num].cmmu_regs->sapr = tmp;
+ cmmu[cmmu_num].cmmu_regs->uapr = tmp;
+ }
+#endif
+}
+
+
+/*
+ * enable parity
+ */
+void cmmu_parity_enable(void)
+{
+#ifdef PARITY_ENABLE
+ register int cmmu_num;
+
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++) {
+ if (cmmu[cmmu_num].cmmu_alive != CMMU_DEAD) {
+ cmmu[cmmu_num].cmmu_regs->sctr |= CMMU_SCTR_PE;
+ }
+ }
+#endif /* PARITY_ENABLE */
+}
+
+/*
+ * Find out the CPU number from accessing CMMU
+ * Better be at splhigh, or even better, with interrupts
+ * disabled.
+ */
+unsigned cmmu_cpu_number(void)
+{
+ register unsigned cmmu_no;
+ int i;
+
+ for (i=0; i < 10; i++)
+ {
+ /* clear CMMU p-bus status registers */
+ for (cmmu_no = 0; cmmu_no < MAX_CMMUS; cmmu_no++)
+ {
+ if (cmmu[cmmu_no].cmmu_alive == CMMU_AVAILABLE &&
+ cmmu[cmmu_no].which == DATA_CMMU)
+ cmmu[cmmu_no].cmmu_regs->pfSTATUSr = 0;
+ }
+
+ /* access faulting address */
+ badwordaddr((void *)ILLADDRESS);
+
+ /* check which CMMU reporting the fault */
+ for (cmmu_no = 0; cmmu_no < MAX_CMMUS; cmmu_no++)
+ {
+ if (cmmu[cmmu_no].cmmu_alive == CMMU_AVAILABLE &&
+ cmmu[cmmu_no].which == DATA_CMMU &&
+ cmmu[cmmu_no].cmmu_regs->pfSTATUSr & 0x70000)
+ {
+ if (cmmu[cmmu_no].cmmu_regs->pfSTATUSr & 0x70000)
+ {
+ cmmu[cmmu_no].cmmu_regs->pfSTATUSr = 0; /* to be clean */
+ cmmu[cmmu_no].cmmu_alive = CMMU_MARRIED;
+ return cmmu[cmmu_no].cmmu_cpu;
+ }
+ }
+ }
+ }
+	printf("at cmmu.c line %d.\n", __LINE__);
+
+ panic("could not determine my cpu number");
+ return 0; /* to make compiler happy */
+}
+
+/**
+ ** Functions that actually modify CMMU registers.
+ **/
+
+#if !DDB
+static
+#endif
+void cmmu_remote_set(unsigned cpu, unsigned r, unsigned data, unsigned x)
+{
+ *(volatile unsigned *)(r + (char*)&REGS(cpu,data)) = x;
+}
+
+/*
+ * cmmu_cpu_lock should be held when calling this routine to read
+ * the CMMU_SCR or CMMU_SAR.
+ */
+#if !DDB
+static
+#endif
+unsigned cmmu_remote_get(unsigned cpu, unsigned r, unsigned data)
+{
+ return *(volatile unsigned *)(r + (char*)&REGS(cpu,data));
+}
+
+/* Needs no locking - read only registers */
+unsigned cmmu_get_idr(unsigned data)
+{
+ return REGS(0,data).idr;
+}
+
+void cmmu_set_sapr(unsigned ap)
+{
+ int cpu = 0;
+ if (cache_policy & CACHE_INH)
+ ap |= AREA_D_CI;
+
+ REGS(cpu, INST_CMMU).sapr = ap;
+ REGS(cpu, DATA_CMMU).sapr = ap;
+}
+
+void cmmu_remote_set_sapr(unsigned cpu, unsigned ap)
+{
+ if (cache_policy & CACHE_INH)
+ ap |= AREA_D_CI;
+ REGS(cpu, INST_CMMU).sapr = ap;
+ REGS(cpu, DATA_CMMU).sapr = ap;
+}
+
+void cmmu_set_uapr(unsigned ap)
+{
+ int cpu = 0;
+	/* this functionality also mimicked in cmmu_pmap_activate() */
+ REGS(cpu, INST_CMMU).uapr = ap;
+ REGS(cpu, DATA_CMMU).uapr = ap;
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data or instruction cache depending on data.
+ *
+ * Except for the cmmu_init, this function, cmmu_set_pair_batc_entry,
+ * and cmmu_pmap_activate are the only functions which may set the
+ * batc values.
+ */
+void cmmu_set_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned data, /* 1 = data, 0 = instruction */
+ unsigned value) /* the value to stuff into the batc */
+{
+
+ REGS(cpu,data).bwp[entry_no] = value;
+ #if SHADOW_BATC
+ CMMU(cpu,data)->batc[entry_no] = value;
+ #endif
+#if 0 /* was for debugging piece (peace?) of mind */
+ REGS(cpu,data).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,data).scr = CMMU_FLUSH_USER_ALL;
+#endif
+
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data and instruction cache for the named CPU.
+ */
+void cmmu_set_pair_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned value) /* the value to stuff into the batc */
+{
+
+ REGS(cpu,DATA_CMMU).bwp[entry_no] = value;
+ #if SHADOW_BATC
+ CMMU(cpu,DATA_CMMU)->batc[entry_no] = value;
+ #endif
+ REGS(cpu,INST_CMMU).bwp[entry_no] = value;
+ #if SHADOW_BATC
+ CMMU(cpu,INST_CMMU)->batc[entry_no] = value;
+ #endif
+
+#if 0 /* was for debugging piece (peace?) of mind */
+ REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
+#endif
+
+}
+
+/**
+ ** Functions that invalidate TLB entries.
+ **/
+
+/*
+ * flush any tlb
+ * Some functionality mimicked in cmmu_pmap_activate.
+ */
+void cmmu_flush_remote_tlb(
+ unsigned cpu,
+ unsigned kernel,
+ vm_offset_t vaddr,
+ int size)
+{
+ register s = splhigh();
+
+ if ((unsigned)size > M88K_PGBYTES)
+ {
+ REGS(cpu, INST_CMMU).scr =
+ REGS(cpu, DATA_CMMU).scr =
+ kernel ? CMMU_FLUSH_SUPER_ALL : CMMU_FLUSH_USER_ALL;
+ }
+ else /* a page or smaller */
+ {
+ REGS(cpu, INST_CMMU).sar = (unsigned)vaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)vaddr;
+
+ REGS(cpu, INST_CMMU).scr =
+ REGS(cpu, DATA_CMMU).scr =
+ kernel ? CMMU_FLUSH_SUPER_PAGE : CMMU_FLUSH_USER_PAGE;
+ }
+ splx(s);
+}
+
+/*
+ * flush my personal tlb
+ */
+void cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
+{
+ cmmu_flush_remote_tlb(0, kernel, vaddr, size);
+}
+
+
+/*
+ * New fast stuff for pmap_activate.
+ * Does what a few calls used to do.
+ * Only called from pmap.c's _pmap_activate().
+ */
+void cmmu_pmap_activate(
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX])
+{
+ int entry_no;
+
+ /* the following is from cmmu_set_uapr */
+ REGS(cpu, INST_CMMU).uapr = uapr;
+ REGS(cpu, DATA_CMMU).uapr = uapr;
+
+ for (entry_no = 0; entry_no < BATC_MAX; entry_no++) {
+ REGS(cpu,INST_CMMU).bwp[entry_no] = i_batc[entry_no].bits;
+ REGS(cpu,DATA_CMMU).bwp[entry_no] = d_batc[entry_no].bits;
+ #if SHADOW_BATC
+ CMMU(cpu,INST_CMMU)->batc[entry_no] = i_batc[entry_no].bits;
+ CMMU(cpu,DATA_CMMU)->batc[entry_no] = d_batc[entry_no].bits;
+ #endif
+ }
+
+ /*
+ * Flush the user TLB.
+ * IF THE KERNEL WILL EVER CARE ABOUT THE BATC ENTRIES,
+ * THE SUPERVISOR TLBs SHOULD BE FLUSHED AS WELL.
+ */
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
+}
+
+/**
+ ** Functions that invalidate caches.
+ **
+ ** Cache invalidates require physical addresses. Care must be exercised when
+ ** using segment invalidates. This implies that the starting physical address
+ ** plus the segment length should be invalidated. A typical mistake is to
+ ** extract the first physical page of a segment from a virtual address, and
+ ** then expecting to invalidate when the pages are not physically contiguous.
+ **
+ ** We don't push Instruction Caches prior to invalidate because they are not
+ ** snooped and never modified (I guess it doesn't matter, then, which
+ ** form of the command we use).
+ **/
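+
+/*
+ * The flush routines below pick the smallest command covering the request:
+ * a negative size or one larger than a segment flushes everything,
+ * size <= 16 flushes a line, size <= NBPG a page, and anything else up
+ * to NBSG a segment.
+ */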
+/*
+ * flush both Instruction and Data caches
+ */
+void cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+ if (size < 0 || size > NBSG ) {
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ }
+ else if (size <= 16) {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ }
+ else if (size <= NBPG) {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ }
+ else {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ }
+
+
+ splx(s);
+}
+
+/*
+ * flush both Instruction and Data caches
+ */
+void cmmu_flush_cache(vm_offset_t physaddr, int size)
+{
+ cmmu_flush_remote_cache(0, physaddr, size);
+}
+
+/*
+ * flush Instruction caches
+ */
+void cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+ if (size < 0 || size > NBSG ) {
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ }
+ else if (size <= 16) {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ }
+ else if (size <= NBPG) {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ }
+ else {
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ }
+
+
+ splx(s);
+}
+
+/*
+ * flush Instruction caches
+ */
+void cmmu_flush_inst_cache(vm_offset_t physaddr, int size)
+{
+ cmmu_flush_remote_inst_cache(0, physaddr, size);
+}
+
+void cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+ if (size < 0 || size > NBSG ) {
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ }
+ else if (size <= 16) {
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ }
+ else if (size <= NBPG) {
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ }
+ else {
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ }
+
+
+ splx(s);
+}
+
+/*
+ * flush data cache
+ */
+void cmmu_flush_data_cache(vm_offset_t physaddr, int size)
+{
+ cmmu_flush_remote_data_cache(0, physaddr, size);
+}
+
+
+#if 0
+#if DDB
+union ssr {
+ unsigned bits;
+ struct {
+ unsigned :16,
+ ce:1,
+ be:1,
+ :4,
+ wt:1,
+ sp:1,
+ g:1,
+ ci:1,
+ :1,
+ m:1,
+ u:1,
+ wp:1,
+ bh:1,
+ v:1;
+ } field;
+};
+
+union cssp {
+ unsigned bits;
+ struct {
+ unsigned : 2,
+ l: 6,
+ d3: 1,
+ d2: 1,
+ d1: 1,
+ d0: 1,
+ vv3: 2,
+ vv2: 2,
+ vv1: 2,
+ vv0: 2,
+ :12;
+ } field;
+};
+
+union batcu {
+ unsigned bits;
+ struct { /* block address translation register */
+ unsigned int
+ lba:13, /* logical block address */
+ pba:13, /* physical block address */
+ s:1, /* supervisor */
+ wt:4, /* write through */
+ g:1, /* global */
+ ci:1, /* cache inhibit */
+ wp:1, /* write protect */
+ v:1; /* valid */
+ } field;
+};
+
+#define VV_EX_UNMOD 0
+#define VV_EX_MOD 1
+#define VV_SHARED_UNMOD 2
+#define VV_INVALID 3
+
+#define D(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.d3 : \
+ ((LINE) == 2 ? (UNION).field.d2 : \
+ ((LINE) == 1 ? (UNION).field.d1 : \
+ ((LINE) == 0 ? (UNION).field.d0 : ~0))))
+#define VV(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.vv3 : \
+ ((LINE) == 2 ? (UNION).field.vv2 : \
+ ((LINE) == 1 ? (UNION).field.vv1 : \
+ ((LINE) == 0 ? (UNION).field.vv0 : ~0))))
+
+
+/*
+ * Show (for debugging) how the given CMMU translates the given ADDRESS.
+ * If cmmu == -1, the data cmmu for the current cpu is used.
+ */
+void cmmu_show_translation(
+ unsigned address,
+ unsigned supervisor_flag,
+ unsigned verbose_flag,
+ int cmmu_num)
+{
+ /*
+ * A virtual address is split into three fields. Two are used as
+ * indicies into tables (segment and page), and one is an offset into
+ * a page of memory.
+ */
+ union {
+ unsigned bits;
+ struct {
+ unsigned segment_table_index:10,
+ page_table_index:10,
+ page_offset:12;
+ } field;
+ } virtual_address;
+ unsigned value;
+
+ if (verbose_flag)
+ db_printf("-------------------------------------------\n");
+
+
+ /****** ACCESS PROPER CMMU or THREAD ***********/
+ if (thread != 0)
+ {
+ /* the following tidbit from _pmap_activate in m88k/pmap.c */
+ register apr_template_t apr_data;
+ supervisor_flag = 0; /* thread implies user */
+
+ if (thread->task == 0) {
+ db_printf("[thread %x has empty task pointer]\n", thread);
+ return;
+ } else if (thread->task->map == 0) {
+ db_printf("[thread/task %x/%x has empty map pointer]\n",
+ thread, thread->task);
+ return;
+ } else if (thread->task->map->pmap == 0) {
+ db_printf("[thread/task/map %x/%x/%x has empty pmap pointer]\n",
+ thread, thread->task, thread->task->map);
+ return;
+ }
+ if (thread->task->map->pmap->lock.lock_data) {
+ db_printf("[Warning: thread %x's task %x's map %x's "
+ "pmap %x is locked]\n", thread, thread->task,
+ thread->task->map, thread->task->map->pmap);
+ }
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(thread->task->map->pmap->sdt_paddr);
+ apr_data.field.wt = 0;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1;
+ value = apr_data.bits;
+ if (verbose_flag) {
+ db_printf("[thread %x task %x map %x pmap %x UAPR is %x]\n",
+ thread, thread->task, thread->task->map,
+ thread->task->map->pmap, value);
+ }
+ } else {
+ if (cmmu_num == -1)
+ {
+ if (cpu_cmmu[0].pair[DATA_CMMU] == 0)
+ {
+ db_printf("ack! can't figure my own data cmmu number.\n");
+ return;
+ }
+ cmmu_num = cpu_cmmu[0].pair[DATA_CMMU] - cmmu;
+ if (verbose_flag)
+ db_printf("The data cmmu for cpu#%d is cmmu#%d.\n",
+ 0, cmmu_num);
+ }
+ else if (cmmu_num < 0 || cmmu_num >= MAX_CMMUS)
+ {
+ db_printf("invalid cpu number [%d]... must be in range [0..%d]\n",
+ cmmu_num, MAX_CMMUS - 1);
+ return;
+ }
+
+ if (cmmu[cmmu_num].cmmu_alive == 0)
+ {
+ db_printf("warning: cmmu %d is not alive.\n", cmmu_num);
+ #if 0
+ return;
+ #endif
+ }
+
+ if (!verbose_flag)
+ {
+ if (!(cmmu[cmmu_num].cmmu_regs->sctr & CMMU_SCTR_SE))
+ db_printf("WARNING: snooping not enabled for CMMU#%d.\n",
+ cmmu_num);
+ }
+ else
+ {
+ int i;
+ for (i=0; i<MAX_CMMUS; i++)
+ if ((i == cmmu_num || cmmu[i].cmmu_alive) &&
+ (verbose_flag>1 || !(cmmu[i].cmmu_regs->sctr&CMMU_SCTR_SE)))
+ {
+ db_printf("CMMU#%d (cpu %d %s) snooping %s\n", i,
+ cmmu[i].cmmu_cpu, cmmu[i].which ? "data" : "inst",
+ (cmmu[i].cmmu_regs->sctr & CMMU_SCTR_SE) ? "on":"OFF");
+ }
+ }
+
+ if (supervisor_flag)
+ value = cmmu[cmmu_num].cmmu_regs->sapr;
+ else
+ value = cmmu[cmmu_num].cmmu_regs->uapr;
+
+ }
+
+ /******* LOOK AT THE BATC ** (if not a thread) **************/
+ #if SHADOW_BATC
+ if (thread == 0)
+ {
+ int i;
+ union batcu batc;
+ for (i = 0; i < 8; i++) {
+ batc.bits = cmmu[cmmu_num].batc[i];
+ if (batc.field.v == 0) {
+ if (verbose_flag>1)
+ db_printf("cmmu #%d batc[%d] invalid.\n", cmmu_num, i);
+ } else {
+ db_printf("cmmu#%d batc[%d] v%08x p%08x", cmmu_num, i,
+ batc.field.lba << 18, batc.field.pba);
+ if (batc.field.s) db_printf(", supervisor");
+ if (batc.field.wt) db_printf(", wt.th");
+ if (batc.field.g) db_printf(", global");
+ if (batc.field.ci) db_printf(", cache inhibit");
+ if (batc.field.wp) db_printf(", write protect");
+ }
+ }
+ }
+ #endif
+
+ /******* SEE WHAT A PROBE SAYS (if not a thread) ***********/
+ if (thread == 0)
+ {
+ union ssr ssr;
+ struct cmmu_regs *cmmu_regs = cmmu[cmmu_num].cmmu_regs;
+ cmmu_regs->sar = address;
+ cmmu_regs->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
+ ssr.bits = cmmu_regs->ssr;
+ if (verbose_flag > 1)
+ db_printf("probe of 0x%08x returns ssr=0x%08x\n",
+ address, ssr.bits);
+ if (ssr.field.v)
+ db_printf("PROBE of 0x%08x returns phys=0x%x",
+ address, cmmu_regs->sar);
+ else
+ db_printf("PROBE fault at 0x%x", cmmu_regs->pfADDRr);
+ if (ssr.field.ce) db_printf(", copyback err");
+ if (ssr.field.be) db_printf(", bus err");
+ if (ssr.field.wt) db_printf(", writethrough");
+ if (ssr.field.sp) db_printf(", sup prot");
+ if (ssr.field.g) db_printf(", global");
+ if (ssr.field.ci) db_printf(", cache inhibit");
+ if (ssr.field.m) db_printf(", modified");
+ if (ssr.field.u) db_printf(", used");
+ if (ssr.field.wp) db_printf(", write prot");
+ if (ssr.field.bh) db_printf(", BATC");
+ db_printf(".\n");
+ }
+
+ /******* INTERPRET AREA DESCRIPTOR *********/
+ {
+ union apr_template apr_template;
+ apr_template.bits = value;
+ if (verbose_flag > 1) {
+ if (thread == 0)
+ db_printf("CMMU#%d", cmmu_num);
+ else
+ db_printf("THREAD %x", thread);
+ db_printf(" %cAPR is 0x%08x\n",
+ supervisor_flag ? 'S' : 'U', apr_template.bits);
+ }
+ if (thread == 0)
+ db_printf("CMMU#%d", cmmu_num);
+ else
+ db_printf("THREAD %x", thread);
+ db_printf(" %cAPR: SegTbl: 0x%x000p",
+ supervisor_flag ? 'S' : 'U', apr_template.field.st_base);
+ if (apr_template.field.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (apr_template.field.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (apr_template.field.ci) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (apr_template.field.te) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (apr_template.field.te == 0) {
+ db_printf("<would report an error, valid bit not set>\n");
+ return;
+ }
+
+ value = apr_template.field.st_base << 12; /* now point to seg page */
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ virtual_address.bits = address;
+
+ /****** ACCESS SEGMENT TABLE AND INTERPRET SEGMENT DESCRIPTOR *******/
+ {
+ union sdt_entry_template std_template;
+ if (verbose_flag)
+ db_printf("will follow to entry %d of page at 0x%x...\n",
+ virtual_address.field.segment_table_index, value);
+ value |= virtual_address.field.segment_table_index *
+ sizeof(struct sdt_entry);
+
+ if (badwordaddr(value)) {
+ db_printf("ERROR: unable to access page at 0x%08x.\n", value);
+ return;
+ }
+
+ std_template.bits = *(unsigned *)value;
+ if (verbose_flag > 1)
+ db_printf("SEG DESC @0x%x is 0x%08x\n", value, std_template.bits);
+ db_printf("SEG DESC @0x%x: PgTbl: 0x%x000",
+ value, std_template.sdt_desc.table_addr);
+ if (std_template.sdt_desc.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (std_template.sdt_desc.sup) db_printf(", S-PROT");
+ else db_printf(", UserOk");
+ if (std_template.sdt_desc.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (std_template.sdt_desc.no_cache) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (std_template.sdt_desc.prot) db_printf(", W-PROT");
+ else db_printf(", WriteOk");
+ if (std_template.sdt_desc.dtype) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (std_template.sdt_desc.dtype == 0) {
+ db_printf("<would report an error, STD entry not valid>\n");
+ return;
+ }
+
+ value = std_template.sdt_desc.table_addr << 12;
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ /******* PAGE TABLE *********/
+ {
+ union pte_template pte_template;
+ if (verbose_flag)
+ db_printf("will follow to entry %d of page at 0x%x...\n",
+ virtual_address.field.page_table_index, value);
+ value |= virtual_address.field.page_table_index *
+ sizeof(struct pt_entry);
+
+ if (badwordaddr(value)) {
+ db_printf("error: unable to access page at 0x%08x.\n", value);
+ return;
+ }
+
+ pte_template.bits = *(unsigned *)value;
+ if (verbose_flag > 1)
+ db_printf("PAGE DESC @0x%x is 0x%08x.\n", value, pte_template.bits);
+ db_printf("PAGE DESC @0x%x: page @%x000",
+ value, pte_template.pte.pfn);
+ if (pte_template.pte.wired) db_printf(", WIRE");
+ else db_printf(", !wire");
+ if (pte_template.pte.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (pte_template.pte.sup) db_printf(", S-PROT");
+ else db_printf(", UserOk");
+ if (pte_template.pte.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (pte_template.pte.ci) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (pte_template.pte.modified) db_printf(", MOD");
+ else db_printf(", !mod");
+ if (pte_template.pte.pg_used) db_printf(", USED");
+ else db_printf(", !used");
+ if (pte_template.pte.prot) db_printf(", W-PROT");
+ else db_printf(", WriteOk");
+ if (pte_template.pte.dtype) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (pte_template.pte.dtype == 0) {
+ db_printf("<would report an error, PTE entry not valid>\n");
+ return;
+ }
+
+ value = pte_template.pte.pfn << 12;
+ if (verbose_flag)
+ db_printf("will follow to byte %d of page at 0x%x...\n",
+ virtual_address.field.page_offset, value);
+ value |= virtual_address.field.page_offset;
+
+ if (badwordaddr(value)) {
+ db_printf("error: unable to access page at 0x%08x.\n", value);
+ return;
+ }
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ db_printf("WORD at 0x%x is 0x%08x.\n", value, *(unsigned *)value);
+}
+
+
+void cmmu_cache_state(unsigned addr, unsigned supervisor_flag)
+{
+ static char *vv_name[4] =
+ {"exclu-unmod", "exclu-mod", "shared-unmod", "invalid"};
+ int cmmu_num;
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
+ {
+ union ssr ssr;
+ union cssp cssp;
+ struct cmmu_regs *R;
+ unsigned tag, line;
+ if (!cmmu[cmmu_num].cmmu_alive)
+ continue;
+ R = cmmu[cmmu_num].cmmu_regs;
+ db_printf("cmmu #%d %s cmmu for cpu %d.\n", cmmu_num,
+ cmmu[cmmu_num].which ? "data" : "inst",
+ cmmu[cmmu_num].cmmu_cpu);
+ R->sar = addr;
+ R->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
+
+ ssr.bits = R->ssr;
+ if (!ssr.field.v) {
+ db_printf("PROBE of 0x%08x faults.\n",addr);
+ continue;
+ }
+ db_printf("PROBE of 0x%08x returns phys=0x%x", addr, R->sar);
+
+ tag = R->sar & ~0xfff;
+ cssp.bits = R->cssp;
+
+ /* check to see if any of the tags for the set match the address */
+ for (line = 0; line < 4; line++)
+ {
+ if (VV(cssp, line) == VV_INVALID)
+ {
+ db_printf("line %d invalid.\n", line);
+ continue; /* line is invalid */
+ }
+ if (D(cssp, line))
+ {
+ db_printf("line %d disabled.\n", line);
+ continue; /* line is disabled */
+ }
+
+ if ((R->ctp[line] & ~0xfff) != tag)
+ {
+ db_printf("line %d address tag is %x.\n", line,
+ (R->ctp[line] & ~0xfff));
+ continue;
+ }
+ db_printf("found in line %d as %08x (%s).\n",
+ line, R->cdp[line], vv_name[VV(cssp, line)]);
+ }
+ }
+}
+
+void show_cmmu_info(unsigned addr)
+{
+ int cmmu_num;
+ cmmu_cache_state(addr, 1);
+
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
+ if (cmmu[cmmu_num].cmmu_alive) {
+ db_printf("cmmu #%d %s cmmu for cpu %d: ", cmmu_num,
+ cmmu[cmmu_num].which ? "data" : "inst",
+ cmmu[cmmu_num].cmmu_cpu);
+ cmmu_show_translation(addr, 1, 0, cmmu_num);
+ }
+}
+#endif /* end if DDB */
+#endif /* 0 */
diff --git a/sys/arch/mvme88k/m88k/conf.c b/sys/arch/mvme88k/m88k/conf.c
new file mode 100644
index 00000000000..ac48d0a62e6
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/conf.c
@@ -0,0 +1,348 @@
+/* $NetBSD: conf.c,v 1.28 1995/04/19 22:37:27 mycroft Exp $ */
+
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)conf.c 7.9 (Berkeley) 5/28/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/conf.h>
+#include <sys/vnode.h>
+
+int ttselect __P((dev_t, int, struct proc *));
+
+bdev_decl(sw);
+#include "st.h"
+bdev_decl(st);
+#include "sd.h"
+bdev_decl(sd);
+#include "cd.h"
+bdev_decl(cd);
+
+#if notyet
+#include "ch.h"
+bdev_decl(ch);
+#include "xd.h"
+bdev_decl(xd);
+#endif /* notyet */
+
+#include "vnd.h"
+bdev_decl(vnd);
+
+#ifdef LKM
+int lkmenodev();
+#else
+#define lkmenodev enodev
+#endif
+
+struct bdevsw bdevsw[] =
+{
+ bdev_notdef(), /* 0 */
+ bdev_notdef(), /* 1 */
+ bdev_notdef(), /* 2 */
+ bdev_swap_init(1,sw), /* 3: swap pseudo-device */
+ bdev_disk_init(NSD,sd), /* 4: SCSI disk */
+ bdev_tape_init(NST,st), /* 5: SCSI tape */
+ bdev_disk_init(NCD,cd), /* 6: SCSI CD-ROM */
+ bdev_notdef(), /* 7 */
+ bdev_disk_init(NVND,vnd), /* 8: vnode disk driver */
+ bdev_notdef(), /* 9 */
+#if notyet
+ bdev_disk_init(NXD,xd), /* 10: XD disk */
+#endif /* notyet */
+ bdev_notdef(), /* 11 */
+ bdev_notdef(), /* 12 */
+ bdev_lkm_dummy(), /* 13 */
+ bdev_lkm_dummy(), /* 14 */
+ bdev_lkm_dummy(), /* 15 */
+ bdev_lkm_dummy(), /* 16 */
+ bdev_lkm_dummy(), /* 17 */
+ bdev_lkm_dummy(), /* 18 */
+};
+int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]);
+
+cdev_decl(cn);
+cdev_decl(ctty);
+#define mmread mmrw
+#define mmwrite mmrw
+#if notyet
+cdev_decl(mm);
+#endif /* notyet */
+cdev_decl(sw);
+
+#if notyet
+#include "sram.h"
+cdev_decl(sram);
+
+#include "vmel.h"
+cdev_decl(vmel);
+
+#include "vmes.h"
+cdev_decl(vmes);
+
+#include "nvram.h"
+cdev_decl(nvram);
+
+#include "flash.h"
+cdev_decl(flash);
+#endif /* notyet */
+
+#include "pty.h"
+#define ptstty ptytty
+#define ptsioctl ptyioctl
+cdev_decl(pts);
+#define ptctty ptytty
+#define ptcioctl ptyioctl
+cdev_decl(ptc);
+cdev_decl(log);
+cdev_decl(fd);
+
+#if notyet
+#include "zs.h"
+cdev_decl(zs);
+#include "cl.h"
+cdev_decl(cl);
+#endif /* notyet */
+
+#include "bugtty.h"
+cdev_decl(bugtty);
+
+/* open, close, write, ioctl */
+#define cdev_lp_init(c,n) { \
+ dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \
+ dev_init(c,n,write), dev_init(c,n,ioctl), (dev_type_stop((*))) enodev, \
+ 0, seltrue, (dev_type_mmap((*))) enodev }
+
+/* open, close, ioctl, mmap, ioctl */
+#define cdev_mdev_init(c,n) { \
+ dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \
+ dev_init(c,n,write), dev_init(c,n,ioctl), \
+ (dev_type_stop((*))) enodev, 0, (dev_type_select((*))) enodev, \
+ dev_init(c,n,mmap) }
+
+#if notyet
+#include "lp.h"
+cdev_decl(lp);
+#include "lptwo.h"
+cdev_decl(lptwo);
+#endif /* notyet */
+
+cdev_decl(st);
+cdev_decl(sd);
+cdev_decl(cd);
+cdev_decl(xd);
+cdev_decl(vnd);
+
+#include "bpfilter.h"
+cdev_decl(bpf);
+
+#include "tun.h"
+cdev_decl(tun);
+
+#ifdef LKM
+#define NLKM 1
+#else
+#define NLKM 0
+#endif
+
+cdev_decl(lkm);
+
+struct cdevsw cdevsw[] =
+{
+ cdev_cn_init(1,cn), /* 0: virtual console */
+ cdev_ctty_init(1,ctty), /* 1: controlling terminal */
+#if notyet
+ cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
+#endif /* notyet */
+ cdev_swap_init(1,sw), /* 3: /dev/drum (swap pseudo-device) */
+ cdev_tty_init(NPTY,pts), /* 4: pseudo-tty slave */
+ cdev_ptc_init(NPTY,ptc), /* 5: pseudo-tty master */
+ cdev_log_init(1,log), /* 6: /dev/klog */
+#if notyet
+ cdev_mdev_init(NSRAM,sram), /* 7: /dev/sramX */
+#endif /* notyet */
+ cdev_disk_init(NSD,sd), /* 8: SCSI disk */
+ cdev_disk_init(NCD,cd), /* 9: SCSI CD-ROM */
+#if notyet
+ cdev_mdev_init(NNVRAM,nvram), /* 10: /dev/nvramX */
+ cdev_mdev_init(NFLASH,flash), /* 11: /dev/flashX */
+ cdev_tty_init(NZS,zs), /* 12: SCC serial (tty[a-d]) */
+ cdev_tty_init(NCL,cl), /* 13: CL-CD1400 serial (tty0[0-3]) */
+#endif /* notyet */
+ cdev_tty_init(NBUGTTY,bugtty), /* 14: BUGtty (ttyB) */
+ cdev_notdef(), /* 15 */
+ cdev_notdef(), /* 16 */
+ cdev_notdef(), /* 17: concatenated disk */
+ cdev_notdef(), /* 18 */
+ cdev_disk_init(NVND,vnd), /* 19: vnode disk */
+ cdev_tape_init(NST,st), /* 20: SCSI tape */
+ cdev_fd_init(1,fd), /* 21: file descriptor pseudo-dev */
+ cdev_bpftun_init(NBPFILTER,bpf),/* 22: berkeley packet filter */
+ cdev_bpftun_init(NTUN,tun), /* 23: network tunnel */
+ cdev_lkm_init(NLKM,lkm), /* 24: loadable module driver */
+ cdev_notdef(), /* 25 */
+#if notyet
+ cdev_disk_init(NXD,xd), /* 26: XD disk */
+#endif /* notyet */
+ cdev_notdef(), /* 27 */
+#if notyet
+ cdev_lp_init(NLP,lp), /* 28: lp */
+ cdev_lp_init(NLPTWO,lptwo), /* 29: lptwo */
+#endif /* notyet */
+ cdev_notdef(), /* 30 */
+#if notyet
+ cdev_mdev_init(NVMEL,vmel), /* 31: /dev/vmelX */
+ cdev_mdev_init(NVMES,vmes), /* 32: /dev/vmesX */
+#endif /* notyet */
+ cdev_lkm_dummy(), /* 33 */
+ cdev_lkm_dummy(), /* 34 */
+ cdev_lkm_dummy(), /* 35 */
+ cdev_lkm_dummy(), /* 36 */
+ cdev_lkm_dummy(), /* 37 */
+ cdev_lkm_dummy(), /* 38 */
+};
+int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]);
+
+int mem_no = 2; /* major device number of memory special file */
+
+/*
+ * Swapdev is a fake device implemented
+ * in sw.c used only internally to get to swstrategy.
+ * It cannot be provided to the users, because the
+ * swstrategy routine munches the b_dev and b_blkno entries
+ * before calling the appropriate driver. This would horribly
+ * confuse, e.g. the hashing routines. Instead, /dev/drum is
+ * provided as a character (raw) device.
+ */
+dev_t swapdev = makedev(3, 0);
+
+/*
+ * Returns true if dev is /dev/mem or /dev/kmem.
+ */
+iskmemdev(dev)
+ dev_t dev;
+{
+
+ return (major(dev) == mem_no && minor(dev) < 2);
+}
+
+/*
+ * Returns true if dev is /dev/zero.
+ */
+iszerodev(dev)
+ dev_t dev;
+{
+
+ return (major(dev) == mem_no && minor(dev) == 12);
+}
+
+static int chrtoblktbl[] = {
+ /* XXXX This needs to be dynamic for LKMs. */
+ /*VCHR*/ /*VBLK*/
+ /* 0 */ NODEV,
+ /* 1 */ NODEV,
+ /* 2 */ NODEV,
+ /* 3 */ NODEV,
+ /* 4 */ NODEV,
+ /* 5 */ NODEV,
+ /* 6 */ NODEV,
+ /* 7 */ NODEV,
+ /* 8 */ 4, /* SCSI disk */
+ /* 9 */ 6, /* SCSI CD-ROM */
+ /* 10 */ NODEV,
+ /* 11 */ NODEV,
+ /* 12 */ NODEV,
+ /* 13 */ NODEV,
+ /* 14 */ NODEV,
+ /* 15 */ NODEV,
+ /* 16 */ NODEV,
+ /* 17 */ NODEV,
+ /* 18 */ NODEV,
+ /* 19 */ 8, /* vnode disk */
+ /* 20 */ NODEV,
+ /* 21 */ NODEV,
+ /* 22 */ NODEV,
+ /* 23 */ NODEV,
+ /* 24 */ NODEV,
+ /* 25 */ NODEV,
+ /* 26 */ 10, /* XD disk */
+};
+
+/*
+ * Convert a character device number to a block device number.
+ */
+chrtoblk(dev)
+ dev_t dev;
+{
+ int blkmaj;
+
+ if (major(dev) >= nchrdev ||
+ major(dev) >= sizeof(chrtoblktbl)/sizeof(chrtoblktbl[0]))
+ return (NODEV);
+ blkmaj = chrtoblktbl[major(dev)];
+ if (blkmaj == NODEV)
+ return (NODEV);
+ return (makedev(blkmaj, minor(dev)));
+}
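+
+/*
+ * Usage sketch (illustrative only -- the numbers are simply the ones
+ * wired into the tables above: character major 8 is the raw SCSI disk,
+ * whose block twin is major 4):
+ *
+ *	dev_t chr = makedev(8, 2);
+ *	dev_t blk = chrtoblk(chr);
+ *	if (blk == NODEV)
+ *		there is no block equivalent; refuse the request
+ *	else
+ *		blk is makedev(4, 2), the same unit and partition
+ */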
+
+/*
+ * This entire table could be autoconfig()ed but that would mean that
+ * the kernel's idea of the console would be out of sync with that of
+ * the standalone boot. I think it best that they both use the same
+ * known algorithm unless we see a pressing need otherwise.
+ */
+#include <dev/cons.h>
+
+#define zscnpollc nullcnpollc
+cons_decl(zs);
+#define clcnpollc nullcnpollc
+cons_decl(cl);
+#define bugttycnpollc nullcnpollc
+cons_decl(bugtty);
+
+struct consdev constab[] = {
+#if NZS > 0
+ cons_init(zs),
+#endif
+#if NCL > 0
+ cons_init(cl),
+#endif
+#if NBUGTTY > 0
+ cons_init(bugtty),
+#endif
+ { 0 },
+};
diff --git a/sys/arch/mvme88k/m88k/continuation.s b/sys/arch/mvme88k/m88k/continuation.s
new file mode 100644
index 00000000000..6f12118be2f
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/continuation.s
@@ -0,0 +1,238 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Assembler continuation support routines.
+ */
+/*
+ * HISTORY
+ * $Log: continuation.s,v $
+ * Revision 1.1 1995/10/18 10:54:27 deraadt
+ * Initial revision
+ *
+ * Revision 2.7 93/01/26 18:00:29 danner
+ * changed ;comments to C-style for cpp.
+ * [93/01/25 jfriedl]
+ *
+ * Revision 2.6 93/01/14 17:53:09 danner
+ * Enhanced debugger support for continuations.
+ * [92/12/02 jfriedl]
+ *
+ * Revision 2.5 92/08/03 17:51:54 jfriedl
+ * Adjusted references from luna88k/locore --> luna88k
+ * [92/07/24 jfriedl]
+ *
+ * Revision 2.4.1.1 92/05/27 14:48:42 danner
+ * Updated includes.
+ * PSR_INTERRUPT_DISABLE_BIT -> PSR_IND_LOG
+ *
+ *
+ * Revision 2.4 92/05/04 11:27:58 danner
+ * Support for gcc 2.x's moptimize-arg-area switch. Simplify
+ * Switch_context.
+ * [92/05/03 danner]
+ * Performed instruction reordering in Switch_context suggested by
+ * jfriedl.
+ * [92/04/26 danner]
+ * [92/04/12 16:24:48 danner]
+ *
+ * Thread_syscall_return now stores r2 into the pcb. This cannot be
+ * avoided due to asts.
+ * [92/04/12 danner]
+ *
+ * Revision 2.3 92/03/03 15:38:44 rpd
+ * Save continuation argument as old_thread->swap_func in
+ * Switch_context.
+ * [92/03/02 danner]
+ *
+ * Added missing stcr in interrupt disabling code.
+ * [92/03/02 danner]
+ *
+ * Revision 2.2 92/02/18 18:03:27 elf
+ * Created.
+ * [92/02/01 danner]
+ *
+ */
+#ifndef ASSEMBLER /* predefined by ascpp, at least */
+#define ASSEMBLER /* this is required for some of the include files */
+#endif
+
+#include <assym.s> /* for PCB_KSP, etc */
+#include <machine/asm.h>
+#include <motorola/m88k/m88100/m88100.h>
+#include <motorola/m88k/m88100/psl.h>
+#include <mach/machine/vm_param.h>
+#include <mach_kdb.h>
+
+/*
+ * Jump out into user space for the first time.
+ * No ast check. Reload registers from continuation,
+ * then jump out.
+ */
+ENTRY(thread_bootstrap_return)
+/*
+ * Jump out to user space from an exception. Restore
+ * all registers.
+ *
+ */
+ENTRY(thread_exception_return)
+ ldcr r30, SR0 /* get current thread pointer */
+ ld r30, r30, THREAD_PCB /* get the pcb pointer */
+ br.n _return_from_exception
+ addu r30, r30, PCB_USER /* point to exception frame */
+
+/*
+ *
+ * Return to user space from a system call.
+ * The value in r2 is the return value, and should be
+ * preserved. The other argument registers (r3-r9), as well as
+ * the temporary registers (r10-r13) need not be restored.
+ * R2 is saved into the pcb in case we get blocked by an ast.
+ */
+ENTRY(thread_syscall_return)
+ ldcr r30, SR0 /* get current thread pointer */
+ ld r30, r30, THREAD_PCB /* get the pcb pointer */
+ addu r30, r30, PCB_USER /* point to exception frame */
+ br.n _return_from_syscall
+ st r2, r30, GENREG_OFF(2) /* save r2 */
+
+
+/*
+ * Call continuation - call the function specified (r2) with no
+ * arguments. Reset the stack pointer to the top of the stack first.
+ * On the 88k, we leave the top 2 words of the stack available
+ * to hold a pointer to the user exception frame.
+ */
+ENTRY(call_continuation)
+ /* reset the stack pointer to the top of stack. Since stacks
+ grow down, this can be accomplished by rounding up the sp
+ to the nearest KERNEL_STACK_SIZE quantum. We do this
+ carefully to make sure we have a valid stack pointer at
+ all times (in case we take an interrupt).
+ 32 bytes are also subtracted from the stack pointer to
+ allow compilation with gcc 2.x's -moptimize-arg-area
+ option
+ */
+ or r3, r0, KERNEL_STACK_SIZE-1
+ addu r30, r31, r3 /* nsp += KSS-1 */
+ and.c r30, r30, r3 /* nsp &= ~(KSS-1) */
+#if MACH_KDB
+ or r1, r1, 1 /* mark "continuation" return */
+#endif
+ jmp.n r2 /* call continuation */
+ subu r31, r30, (8+32) /* sp = nsp-(8+32) */
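+
+ /*
+ * The rounding above, restated in C for reference (a sketch only; it
+ * assumes, as the code does, that KERNEL_STACK_SIZE is a power of two):
+ *
+ *	unsigned nsp = (sp + KERNEL_STACK_SIZE - 1) & ~(KERNEL_STACK_SIZE - 1);
+ *	sp = nsp - (8 + 32);
+ *
+ * The 8 bytes keep the two words reserved at the top of the stack for
+ * the user exception frame pointer; the extra 32 bytes are the scratch
+ * area expected by gcc 2.x's -moptimize-arg-area.
+ */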
+
+/*
+ * Assembler support for switch context. The address space switch
+ * has already occurred.
+ *
+ * On entry
+ * r2 - old thread (current_thread)
+ * r3 - continuation for old thread
+ * r4 - new thread
+ * r5 - &(old->pcb->kernel_state)
+ * r6 - &(new->pcb->kernel_state)
+ *
+ */
+ENTRY(Switch_context)
+ /*
+ * if there is a non-null continuation, we can skip saving the
+ * current thread's state
+ */
+ bcnd ne0, r3, 1f /* non null continuation */
+ /* null continuation; need to save registers */
+ or r11, r0, r5
+ /* save the relevant registers; r1, r14-r31 */
+ st r1, r11,0
+ st r14,r11,4
+ st r15,r11,2*4
+ st r16,r11,3*4
+ st r17,r11,4*4
+ st r18,r11,5*4
+ st r19,r11,6*4
+ st r20,r11,7*4
+ st r21,r11,8*4
+ st r22,r11,9*4
+ st r23,r11,10*4
+ st r24,r11,11*4
+ st r25,r11,12*4
+ /* In principle, registers 26-29 are never manipulated in the
+ kernel. Maybe we can skip saving them? */
+ st r26,r11,13*4
+ st r27,r11,14*4
+ st r28,r11,15*4
+ st r29,r11,16*4
+ st r30,r11,17*4 /* save frame pointer */
+ st r31,r11,18*4 /* save stack pointer */
+ 1:
+ /*
+ The outgoing thread's registers have been saved, if necessary.
+ Now reload the new thread's registers.
+ */
+ /* get pointer to new pcb */
+ or r11, r0, r6
+ /* switch stacks */
+ ld r31,r11,18*4
+
+ /*
+ current_thread, active_threads and active_stacks have
+ all been updated in switch_context. We just switched
+ onto this thread's stack, so all state is now consistent
+ again. Hence it's safe to turn interrupts back on */
+
+ /* reenable interrupts */
+ ldcr r10, PSR
+ clr r10, r10, 1<PSR_IND_LOG>
+ stcr r10, PSR
+ FLUSH_PIPELINE
+
+ /* restore registers */
+ ld r1, r11,0
+ ld r14,r11,4
+ ld r15,r11,2*4
+ ld r16,r11,3*4
+ ld r17,r11,4*4
+ ld r18,r11,5*4
+ ld r19,r11,6*4
+ ld r20,r11,7*4
+ ld r21,r11,8*4
+ ld r22,r11,9*4
+ ld r23,r11,10*4
+ ld r24,r11,11*4
+ ld r25,r11,12*4
+ ld r26,r11,13*4
+ ld r27,r11,14*4
+ ld r28,r11,15*4
+ ld r29,r11,16*4
+ /* make the call - r2 is still the old thread, which
+ * makes it the return value/first argument.
+ * Sometimes this call will actually be a return
+ * up to switch_context, and sometimes it will be
+ * an actual call to a function. Stare at Figure 4
+ * of the Draves et al. SOSP paper for a few hours to really
+ * understand....
+ */
+ jmp.n r1
+ ld r30,r11,17*4
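+
+/*
+ * For reference, the save area addressed above laid out as a C struct
+ * (a sketch only; the actual structure is whatever the pcb headers
+ * define -- the field names here are illustrative):
+ *
+ *	struct kernel_state_sketch {
+ *		unsigned int ks_r1;		offset 0:    return address
+ *		unsigned int ks_r14_r29[16];	offsets 1*4 .. 16*4
+ *		unsigned int ks_r30;		offset 17*4: frame pointer
+ *		unsigned int ks_r31;		offset 18*4: stack pointer
+ *	};
+ */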
diff --git a/sys/arch/mvme88k/m88k/eh.S b/sys/arch/mvme88k/m88k/eh.S
new file mode 100644
index 00000000000..280267ccc52
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/eh.S
@@ -0,0 +1,1749 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * 1. Should get rid of SR0 reference for thread stuff.
+ * 2. Make up my mind what is _kstack. I think it
+ * should be p->p_addr+UPAGES. (p_addr
+ * is pointing to user struct and swapin is
+ * making sure it is updated)
+ * Whatever is _kstack, its usage in this file should be
+ * revisited.
+ */
+
+ /*
+ **************************************************************RCS******
+ *
+ * -------------------------------------------------------------------
+ * | In the following discussion, references are made to: |
+ * | MC88100 - RISC MICROPROCESSOR USER'S MANUAL |
+ * | (second edition). Reference in []s refer to section numbers. |
+ * | |
+ * | This discussion assumes that you are at least vaguely familiar |
+ * | with 88100 exception handling (chapter 6), the MACH kernel, and |
+ * | that you have a brain (and use it while reading this). |
+ * | |
+ * | I also assume (and hope) that you're not offended by |
+ * | frequent misspellings. |
+ * | |
+ * | Jeffrey Friedl |
+ * | jfriedl@rna.ncl.omron.co.jp |
+ * | December, 1989 |
+ * -------------------------------------------------------------------
+ *
+ * EXCEPTIONS, INTERRUPTS, and TRAPS
+ * ---------------------------------
+ * This is the machine exception handler.
+ * In the MC88100, various "conditions" cause an exception, where
+ * processing momentarily jumps here to "service" the exception,
+ * and then continues where it left off.
+ *
+ * There are a number of different types of exceptions.
+ * For example, exception #6 is the privilege violation exception which
+ * is raised when the user tries to execute a supervisor-only instruction.
+ *
+ * Exception #1 is the interrupt exception, and is raised when an
+ * outside device raises the INT line on the CPU. This happens,
+ * for example, when the clock signals that it is time for a context
+ * switch, or perhaps the disk drive signaling that some operation
+ * is complete.
+ *
+ * Traps are also exceptions. Traps are ways for user programs to request
+ * kernel operations. For example, "tcnd eq0, r0, 128" will raise
+ * exception 128, the system call exception.
+ *
+ *
+ * SERVICING AN EXCEPTION
+ * -----------------------
+ * When an exception occurs, each control register is saved in its
+ * respective shadow register and execution continues from the
+ * appropriate exception handler. The exception handler must
+ * - save the context from the time of the exception
+ * - service the exception
+ * - restore the context (registers, etc)
+ * - pick up from where the exception occurred.
+ *
+ * The context is saved on a stack. Actually, in the user_state area
+ * in the PCB if the exception happens in user mode.
+ *
+ * Servicing the exception is usually straightforward and in fact not dealt
+ * with very much here. Usually a C routine is called to handle it.
+ * For example, when a privilege exception is raised, the routine that sends
+ * an "illegal instruction" signal to the offending process is called.
+ *
+ * When the exception has been serviced, the context is restored from the
+ * stack and execution resumes from where it left off.
+ *
+ * In more detail:
+ *
+ * Saving the exception-time context.
+ * ---------------------------------
+ * In saving the exception-time context, we copy the shadow and general
+ * purpose registers to memory. Since one exception may occur while
+ * servicing another, the memory used to save the exception-time context may
+ * not be static (i.e. the same every time). Thus, memory on a stack is set
+ * aside for the exception frame (area where the exception-time context is
+ * saved). The same stack is also used when C routines are called (to
+ * service the exception).
+ *
+ * Each process has a stack in kernel space (called the "kernel stack",
+ * short for "process's kernel stack) as well as the user space stack. When
+ * entering the kernel from user space, the kernel stack is unused. On this
+ * stack we save the exception state and (most likely call a C routine to)
+ * service the exception.
+ *
+ * Before servicing an exception, several issues must be addressed.
+ *
+ * 1) When an interrupt is recognized by the hardware, the data pipeline is
+ * allowed to clear. However, if one of these data accesses faults (bad
+ * reference, or a reference to a page which needs to be swapped in), that
+ * reference, as well as any others in the pipeline at the time (at most
+ * three total) are left there, to be taken care of by the exception
+ * handler [6.4.1]. This involves swapping in the proper page and
+ * manually doing the appropriate load or store.
+ *
+ * The other (at most, two other) data accesses that might have been in
+ * the pipeline must also be manually completed (even though they may not
+ * be at fault [yes, that's a bad pun, thank you]).
+ *
+ * 2) If any of the (at most three) uncompleted data accesses in the pipeline
+ * are loads (from memory to a register), then the bit for the destination
+ * register is set in the SSBR. Since the hardware will never complete
+ * that load (since we do it manually), the hardware will never clear that
+ * SSBR bit. Thus, we must clear it manually. If this isn't done, the
+ * system will hang waiting for a bit that will never clear.
+ *
+ * 3) If the exception is the privilege violation exception, the bounds
+ * violation exception, or the misaligned access exception, the
+ * destination register bit in the SSBR may need to be cleared.
+ *
+ * 4) If the exception is one of the floating exceptions, then the
+ * destination register for that floating process won't be written,
+ * and the SSBR must be cleared explicitly.
+ *
+ * 5) The FPU must be enabled (as it is disabled by the exception processing
+ * hardware) and allowed to complete actions in progress. This is so
+ * that it may be used in the servicing of any instruction.
+ * When the FPU is being restarted, operations attempting to complete
+ * may themselves fault (raising another exception).
+ *
+ * More on Restarting the FPU
+ * --------------------------
+ * The manual [section 6.4.3.4] gives only minor mention to this
+ * rather complex task. Before the FPU is restarted all SSBR bits are
+ * cleared for actions that the exception handler completes (as mentioned
+ * above) so that the SSBR is clear unless there are FPU operations that
+ * have not actually been completed (and hence not written to the registers).
+ * Also, all control registers (at least all those that we care about) are
+ * saved to the stack exception frame before the FPU is restarted (this
+ * is important... the reason comes later).
+ *
+ * The FPU is restarted by doing an rte to a trap-not-taken (the rte
+ * actually enables the fpu because we ensure that the EPSR has the
+ * FPU-enable bit on; the trap-not-taken ensures anything in the FPU
+ * completes by waiting until the scoreboard register is clear).
+ *
+ * At the time the FPU is restarted (the rte to the trap-not-taken) the FPU
+ * can write to ANY of the general registers. Thus, we must make sure that
+ * all general registers (r1..r31) are in their pre-exception state so that
+ * when saved to the exception frame after the FPU is enabled, they properly
+ * reflect any changes made by the FPU in being restarted.
+ *
+ * Because we can't save the pointer to the exception frame in a general
+ * register during the FPU restart (it could get overwritten by the FPU!),
+ * we save it in a control register, SR3, during the restart.
+ *
+ *
+ * HOWEVER .....
+ *
+ * Because other uncompleted actions in the FPU may fault when the FPU is
+ * restarted, a new exception may be raised during the restart. This may
+ * happen recursively a number of times. Thus, during a restart, ANY register
+ * whatsoever may be modified, including control registers. Because of this
+ * we must make sure that the exception handler preserves SR3 throughout
+ * servicing an exception so that, if the exception had been raised during
+ * an FPU restart, it is returned unmolested when control returns to the FPU
+ * restart.
+ *
+ * Thus: if an exception is from kernel space, we MUST preserve SR3.
+ * (if it is from user space, no FPU-enable can be in progress and SR3 is
+ * unimportant).
+ *
+ * Now is a good time to recap SR0..SR3 usage:
+ * SR0 -
+ * SR1 - CPU flags (exception handler flags)
+ * SR2 - generally free
+ * SR3 - free only if the exception is from user mode
+ *
+ * Once the FPU has been restarted, the general registers are saved to the
+ * exception frame. If the exception is not the interrupt exception,
+ * interrupts are enabled and any faulted data accesses (see above) are
+ * serviced. In either case, the exception is then serviced (usually by
+ * calling a C routine). After servicing, any faulted data accesses are
+ * serviced (if it had been the interrupt exception). The context is then
+ * restored and control returns to where the exception occurred.
+ *
+ */
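+
+/*
+ * The flow described above, condensed into pseudo-C (a sketch only; the
+ * names are those of the assembler routines and macros defined below):
+ *
+ *	handler(vector):
+ *		setup_phase_one()	    pick a stack, save r1/r31/FLAGS
+ *					    and the shadow registers
+ *		clear stale SSBR bits	    (Clear_SSBR_Dest and friends)
+ *		setup_phase_two()	    restart the FPU, save r2..r30
+ *		trap()/ext_int()/syscall()  service the exception in C
+ *		return_from_exception_handler()
+ */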
+
+#ifndef ASSEMBLER /* predefined by ascpp, at least */
+#define ASSEMBLER /* this is required for some of the include files */
+#endif
+
+#include <assym.s> /* for PCB_KSP, etc */
+#include <machine/trap.h> /* for T_ defines */
+#include <machine/locore.h> /* lots of stuff */
+#include <machine/asm.h>
+
+#ifndef PCB_USER
+#define PCB_USER 0
+#endif
+#ifndef NBPG
+#define NBPG 4096
+#endif /* NBPG */
+#ifndef USIZE
+#define USIZE (UPAGES * NBPG)
+#endif /* USIZE */
+
+/*
+ * The exception frame as defined in "luna/m88k.h" (among other places) is
+ * a bit outdated and needs to be changed. Until then, we'll define some
+ * pseudo-fields there for our needs.
+ *
+ * EF_SR3 A place to save the exception-time SR3 from just after the
+ * time when an exception is raised until just after the FPU
+ * has been restarted. This does not necessarly conflict with
+ * the general registers (though it can if you're not careful)
+ * and so we can use a spot later used to save a general register.
+ *
+ * EF_FLAGS This is just the old EF_MODE. "EF_MODE" isn't a very good name.
+ */
+#define EF_SR3 (EF_R0 + 5)
+#define EF_FLAGS EF_MODE
+
+#define FLAG_FROM_KERNEL 8 /* this should be in locore.h */
+
+ text
+ align 8
+
+/***************************************************************************
+ ***************************************************************************
+ **
+ ** #define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_CHECK)
+ **
+ ** This is the "exception processing preparaton" common to all exception
+ ** processing. It is used in the following manor:
+ **
+ ** LABEL(foo_handler)
+ ** PREP("foo", 11, DEBUG_FOO_BIT, No_SSBR_Stuff, No_Precheck)
+ ** CALL(_trap, T_FOO_FAULT, r31)
+ ** DONE(DEBUG_FOO_BIT)
+ **
+ ** This defines the exception handler for the "foo" exception.
+ ** The arguments to PREP():
+ ** NAME - String for debugging (more info later)
+ ** NUM - The exception number [see the manual, Table 6-1]
+ ** BIT - Bit to check in eh_debug for debugging (more info later)
+ ** SSBR_STUFF -
+ ** If the exception might leave some bits in the SSBR set,
+ ** this should indicate how they are cleared.
+ ** FLAG_PRECHECK -
+ ** This is for the data access exception only. See it for
+ ** more info.
+ **
+ **
+ ** What's in between PREP() and DONE() (usually a CALL) is the actual
+ ** servicing of the interrupt. During this time, any register may
+ ** be used freely as they've all been saved in the exception frame
+ ** (which is pointed-to by r31).
+ **/
+#define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK) NEWLINE \
+ xcr FLAGS, FLAGS, SR1 NEWLINE \
+ FLAG_PRECHECK NEWLINE \
+ NEWLINE \
+ /* the bsr later clobbers r1, so save now */ NEWLINE \
+ stcr r1, SR2 /* r1 now free */ NEWLINE \
+ NEWLINE \
+ /* set or clear the FLAG_FROM_KERNEL bit */ NEWLINE \
+ ldcr r1, EPSR NEWLINE \
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f NEWLINE \
+ clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> NEWLINE \
+ set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> NEWLINE \
+ NEWLINE \
+ /* get a stack (exception frame) */ NEWLINE \
+ 1: bsr setup_phase_one NEWLINE \
+ NEWLINE \
+ /* TMP2 now free -- use to set EF_VECTOR */ NEWLINE \
+ or TMP2, r0, NUM NEWLINE \
+ st TMP2, r31, REG_OFF(EF_VECTOR) NEWLINE \
+ NEWLINE \
+ /* Clear any bits in the SSBR (held in TMP) */ NEWLINE \
+ /* SSBR_STUFF may be empty, though. */ NEWLINE \
+ SSBR_STUFF NEWLINE \
+ NEWLINE \
+ /* call setup_phase_two to restart the FPU */ NEWLINE \
+ /* and to save all general registers. */ NEWLINE \
+ bsr setup_phase_two NEWLINE \
+ NEWLINE \
+ /* All general regs free -- do any debugging */ NEWLINE \
+ PREP_DEBUG(BIT, NAME)
+
+#undef EH_DEBUG
+#define EH_DEBUG 1
+
+/* Some defines for use with PREP() */
+#define No_SSBR_Stuff /* empty */
+#define Clear_SSBR_Dest bsr clear_dest_ssbr_bit
+#define No_Precheck /* empty */
+#define Data_Precheck \
+ bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, ignore_data_exception
+
+#if EH_DEBUG
+ /*
+ * If we allow debugging, there is a variable "eh_debug"
+ * in which there is a bit for each exception. If the bit
+ * is set for an exception, debugging information is printed
+ * about that exception whenever it occurs.
+ *
+ * The bits are defined in "locore.h"
+ */
+/* LABEL(_eh_debug) word 0x00000000 */
+ LABEL(_eh_debug) word 0xFFFFFFFF
+
+ /*
+ * additional pre-servicing preparation to be done when
+ * debugging... check eh_debug and make the call if
+ * need be.
+ */
+ #define PREP_DEBUG(DebugNumber, Name) \
+ or.u r2, r0, hi16(_eh_debug) NEWLINE \
+ ld r3, r2, lo16(_eh_debug) NEWLINE \
+ bb0 DebugNumber, r3, 4f NEWLINE \
+ /* call MY_info(ef,thread,flags,kind)*/ NEWLINE \
+ or r2, r30, r0 NEWLINE \
+ ldcr r3, SR0 NEWLINE \
+ ldcr r4, SR1 NEWLINE \
+ or.u r5, r0, hi16(2f) NEWLINE \
+ or r5, r5, lo16(2f) NEWLINE \
+ bsr.n _MY_info NEWLINE \
+ subu r31, r31, 40 NEWLINE \
+ br.n 4f NEWLINE \
+ addu r31, r31, 40 NEWLINE \
+ data NEWLINE \
+ 2: string Name NEWLINE \
+ byte 0 NEWLINE \
+ align 4 NEWLINE \
+ text NEWLINE \
+ 4:
+
+
+ /*
+ * Post-servicing work to be done.
+ * When debugging, check "eh_debug" and call the
+ * debug routine if need be.
+ *
+ * Then, return from the interrupt handler.
+ */
+ #define DONE(DebugNumber) \
+ or.u r2, r0, hi16(_eh_debug) NEWLINE \
+ ld r3, r2, lo16(_eh_debug) NEWLINE \
+ bb0 DebugNumber, r3, 2f NEWLINE \
+ ldcr r4, SR1 NEWLINE \
+ CALL(_MY_info_done, r31, r4) NEWLINE \
+ 2: br return_from_exception_handler
+#else
+ /*
+ * If not debugging, then no debug-prep to do.
+ * Also, when you're done, you're done! (no debug check).
+ */
+ #define PREP_DEBUG(bit, name)
+ #define DONE(num) br return_from_exception_handler
+#endif
+
+
+/*#########################################################################*/
+/*#### THE ACTUAL EXCEPTION HANDLER ENTRY POINTS ##########################*/
+/*#########################################################################*/
+
+/* unknown exception handler */
+LABEL(unknown_handler)
+ PREP("unknown", 0, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_UNKNOWN_BIT)
+
+/* interrupt exception handler */
+LABEL(interrupt_handler)
+ PREP("interrupt", 1, DEBUG_INTERRUPT_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_ext_int, 1, r30)
+ DONE(DEBUG_INTERRUPT_BIT)
+
+/* instruction access exception handler */
+LABEL(instruction_access_handler)
+ PREP("inst", 2, DEBUG_INSTRUCTION_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_INSTFLT, r30)
+#if 0
+ /* done in trap now */
+ /*
+ * Now, to retry the instruction.
+ * Copy the SNIP to the SFIP, clearing the E bit.
+ * Copy the SXIP to the SNIP, clearing the E bit.
+ */
+ ld r1, r30, REG_OFF(EF_SNIP)
+ ld r2, r30, REG_OFF(EF_SXIP)
+ clr r1, r1, 1<RTE_ERROR_BIT>
+ clr r2, r2, 1<RTE_ERROR_BIT>
+ st r1, r30, REG_OFF(EF_SFIP)
+ st r2, r30, REG_OFF(EF_SNIP)
+#endif /* 0 */
+ DONE(DEBUG_INSTRUCTION_BIT)
+
+/*
+ * data access exception handler --
+ * See badaddr() below for info about Data_Precheck.
+ */
+LABEL(data_exception_handler)
+ PREP("data", 3, DEBUG_DATA_BIT, No_SSBR_Stuff, Data_Precheck)
+ DONE(DEBUG_DATA_BIT)
+
+/* misaligned access exception handler */
+LABEL(misaligned_handler)
+ PREP("misalign", 4, DEBUG_MISALIGN_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_MISALGNFLT, r30)
+ DONE(DEBUG_MISALIGN_BIT)
+
+/* unimplemented opcode exception handler */
+LABEL(unimplemented_handler)
+ PREP("unimp", 5, DEBUG_UNIMPLEMENTED_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_ILLFLT, r30)
+ DONE(DEBUG_UNIMPLEMENTED_BIT)
+
+/*
+ * Some versions of the chip have a bug whereby false privilege
+ * violation exceptions are raised. If the valid bit in the SXIP is clear,
+ * it is false. If so, just return. The code before PREP handles this....
+ */
+LABEL(privilege_handler)
+ stcr r1, SR2 /* hold r1 for a moment */
+ ldcr r1, SXIP /* look at the sxip... valid bit set? */
+ bb1.n RTE_VALID_BIT, r1, 1f /*skip over return if a valid exception*/
+ ldcr r1, SR2 /* restore r1 */
+ RTE
+ 1: PREP("privilege", 6, DEBUG_PRIVILEGE_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_PRIVINFLT, r30)
+ DONE(DEBUG_PRIVILEGE_BIT)
+
+/*
+ * I'm not sure what the trap(T_BNDFLT,...) does, but it doesn't send
+ * a signal to the process...
+ */
+LABEL(bounds_handler)
+ PREP("bounds", 7, DEBUG_BOUNDS_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_BNDFLT, r30)
+ DONE(DEBUG_BOUNDS_BIT)
+
+/* integer divide-by-zero exception handler */
+LABEL(divide_handler)
+ PREP("divide", 8, DEBUG_DIVIDE_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_ZERODIV, r30)
+ DONE(DEBUG_DIVIDE_BIT)
+
+/* integer overflow exception handler */
+LABEL(overflow_handler)
+ PREP("overflow", 9, DEBUG_OVERFLOW_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_OVFFLT, r30)
+ DONE(DEBUG_OVERFLOW_BIT)
+
+/* Floating-point precise handler */
+#define FPp_SSBR_STUFF bsr clear_FPp_ssbr_bit
+LABEL(fp_precise_handler)
+ PREP("FPU precise", 114, DEBUG_FPp_BIT, FPp_SSBR_STUFF, No_Precheck)
+ CALL(_Xfp_precise, r0, r30) /* call fp_precise(??, exception_frame)*/
+ DONE(DEBUG_FPp_BIT)
+
+/* Floating-point imprecise handler */
+#define FPi_SSBR_STUFF bsr clear_FPi_ssbr_bit
+LABEL(fp_imprecise_handler)
+ PREP("FPU imprecise", 115, DEBUG_FPi_BIT, FPi_SSBR_STUFF, No_Precheck)
+ CALL(_Xfp_imprecise, r0, r30) /*call fp_imprecise(??,exception_frame)*/
+ DONE(DEBUG_FPi_BIT)
+
+/* All standard system calls. */
+LABEL(syscall_handler)
+ PREP("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r13, r30, GENREG_OFF(13)
+ CALL(_syscall, r13, r30) /* system call no. is in r13 */
+ DONE(DEBUG_SYSCALL_BIT)
+
+/* trap 496 comes here */
+LABEL(_bugtrap)
+ PREP("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r9, r30, GENREG_OFF(9)
+ CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */
+ DONE(DEBUG_SYSCALL_BIT)
+
+LABEL(_sigsys)
+ PREP("sigsys", 0, DEBUG_SIGSYS_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_SIGSYS, r30)
+ DONE(DEBUG_SIGSYS_BIT)
+
+LABEL(_sigtrap)
+ PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_SIGTRAP, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+LABEL(_stepbpt)
+ PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_STEPBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+LABEL(_userbpt)
+ PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_USERBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+#if DDB
+ LABEL(break)
+ PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_BREAK, r30)
+ DONE(DEBUG_BREAK_BIT)
+ LABEL(trace)
+ PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_TRACE, r30)
+ DONE(DEBUG_TRACE_BIT)
+ LABEL(entry)
+ PREP("kdb", 132, DEBUG_KDB_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_ENTRY, r30)
+ DONE(DEBUG_KDB_BIT)
+#else /* else not DDB */
+ LABEL(break)
+ PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_BREAK_BIT)
+ LABEL(trace)
+ PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_TRACE_BIT)
+ LABEL(entry)
+ PREP("unknown", 132, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_KDB_BIT)
+#endif /* DDB */
+
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * The error exception handler.
+ * The error exception is raised when any other non-trap exception is raised
+ * while shadowing is off. This is Bad News.
+ *
+ * The shadow registers are not valid in this case (shadowing was off, ne).
+ * R1-R31 may be interesting though, so we'll save them.
+ *
+ * We'll not worry about trashing r26-29 here,
+ * since they aren't generally used.
+ */
+LABEL(error_handler)
+ /* pick up the slavestack */
+ or r26, r0, r31 /* save old stack */
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
+
+ /* zero the stack, so we'll know what we're lookin' at */
+ or.u r27, r0, hi16(_intstack)
+ or r27, r27, lo16(_intstack)
+ 1: cmp r28, r27, r31
+ bb1 ge, r28, 2f /* branch if at the end of the stack */
+ st r0, r0, r27
+ br.n 1b
+ addu r28, r27, 4 /* bump up */
+ 2: /* stack has been cleared */
+
+ /* ensure that stack is 8-byte aligned */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+
+ /* create exception frame on stack */
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+
+ /* save old R31 and other R registers */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st r30, r31, GENREG_OFF(30)
+ st r26, r31, GENREG_OFF(31)
+
+ /* save shadow registers (are OLD, though) */
+ ldcr r10, SXIP
+ st r10, r31, REG_OFF(EF_SXIP)
+ ldcr r10, SFIP
+ st r10, r31, REG_OFF(EF_SFIP)
+ ldcr r10, SNIP
+ st r10, r31, REG_OFF(EF_SNIP)
+ ldcr r10, SSBR
+ st r10, r31, REG_OFF(EF_SSBR)
+ ldcr r10, EPSR
+ st r10, r31, REG_OFF(EF_EPSR)
+
+ ldcr r10, DMT0
+ st r10, r31, REG_OFF(EF_DMT0)
+ ldcr r11, DMD0
+ st r11, r31, REG_OFF(EF_DMD0)
+ ldcr r12, DMA0
+ st r12, r31, REG_OFF(EF_DMA0)
+
+ ldcr r10, DMT1
+ st r10, r31, REG_OFF(EF_DMT1)
+ ldcr r11, DMD1
+ st r11, r31, REG_OFF(EF_DMD1)
+ ldcr r12, DMA1
+ st r12, r31, REG_OFF(EF_DMA1)
+
+ ldcr r10, DMT2
+ st r10, r31, REG_OFF(EF_DMT2)
+ ldcr r11, DMD2
+ st r11, r31, REG_OFF(EF_DMD2)
+ ldcr r12, DMA2
+ st r12, r31, REG_OFF(EF_DMA2)
+
+ ldcr r10, SR1
+ st r10, r31, REG_OFF(EF_MODE)
+
+ /* shove sr2 into EF_FPLS1 */
+ ldcr r10, SR2
+ st r10, r31, REG_OFF(EF_FPLS1)
+
+ /* shove sr3 into EF_FPHS2 */
+ ldcr r10, SR3
+ st r10, r31, REG_OFF(EF_FPHS2)
+
+ /* error vector is zippo numero el'zeroooo */
+ st r0, r31, REG_OFF(EF_VECTOR)
+
+ stcr r0, SSBR /* won't want shadow bits bothering us later */
+
+ /*
+ * Cheap way to enable FPU and start shadowing again.
+ */
+ ldcr r10, PSR
+ clr r10, r10, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr r10, r10, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+
+ stcr r10, PSR /* bang */
+ FLUSH_PIPELINE
+
+ /* put pointer to regs into r30... r31 will become a simple stack */
+ or r30, r31, r0
+
+ subu r31, r31, 0x10 /* make some breathing space */
+ st r30, r31, 0x0c /* store frame pointer on the stack */
+ st r30, r31, 0x08 /* store again for the debugger to recognize */
+ or.u r20, r0, hi16(0x87654321)
+ or r20, r20, lo16(0x87654321)
+ st r20, r31, 0x04
+ st r20, r31, 0x00
+
+ CALL(_error_fault, r30, r30)
+
+ /* TURN INTERRUPTS back on */
+ ldcr r1, PSR
+ clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r1, PSR
+ FLUSH_PIPELINE
+LABEL(_error_loop) bsr _error_loop
+ /* never returns*/
+
+/*
+ *----------------------------------------------------------------------------
+ *----------------------------------------------------------------------------
+ *----------------------------------------------------------------------------
+ */
+
+
+/*
+ * This is part of badaddr() (below).
+ */
+_LABEL(ignore_data_exception)
+ /******************************************************\
+ * SR0: pointer to the current thread structure *
+ * SR1: previous FLAGS reg *
+ * SR2: free *
+ * SR3: must preserve *
+ * FLAGS: CPU status flags *
+ \******************************************************/
+ xcr FLAGS, FLAGS, SR1 /* replace SR1, FLAGS */
+
+ /*
+ * For more info, see badaddr() below.
+ *
+ * We just want to jump to "badaddr__return_nonzero" below.
+ *
+ * We don't worry about trashing R2 here because we're
+ * jumping back to the function badaddr() where we're allowed
+ * to blast r2..r9 as we see fit.
+ */
+
+ /* the "+2" below is to set the VALID bit. */
+ or.u r2, r0, hi16(badaddr__return_nonzero + 2)
+ or r2, r2, lo16(badaddr__return_nonzero + 2)
+ stcr r2, SNIP /* Make it the next instruction to execute */
+ addu r2, r2, 4
+ stcr r2, SFIP /* and the next one after that, too. */
+ stcr r0, SSBR /* make the scoreboard happy. */
+
+ /* the following jumps to "badaddr__return_nonzero" in below */
+ RTE
+
+/*
+ * extern boolean_t badaddr(unsigned addr, unsigned len)
+ *
+ * Returns true (non-zero) if the given LEN bytes starting at ADDR are
+ * not all currently accessible by the kernel.
+ *
+ * If all LEN bytes starting at ADDR are accessible, zero is returned.
+ *
+ * Len may be 1, 2, or 4.
+ *
+ * This is implemented by setting a special flag in SR1 before trying to access
+ * the given address. If a data access exception is raised, the address
+ * is inaccessible. The exception handler will notice the special CPU flag
+ * and not try to swap the address in. Rather, it will return to
+ * "badaddr__return_nonzero" in this routine so that we may return non-zero
+ * to the calling routine.
+ *
+ * If no fault is raised, we continue to where we return zero to the calling
+ * routine (after removing the special CPU flag).
+ */
+
+LABEL(_badaddr)
+ /*
+ * Disable interrupts ... don't want a context switch while we're
+ * doing this! Also, save the old PSR in R8 to restore later.
+ */
+ ldcr r8, PSR
+ set r4, r8, 1<PSR_INTERRUPT_DISABLE_BIT>
+ FLUSH_PIPELINE
+ stcr r4, PSR
+
+ ldcr r5, SR1
+ set r5, r5, 1<FLAG_IGNORE_DATA_EXCEPTION>
+ /* resetting r5 to SR1 done in the delay slot below. */
+
+ /*
+ * If it's a word we're doing, do that here. Otherwise,
+ * see if it's a halfword.....
+ */
+ sub r6, r3, 4
+ bcnd.n ne0, r6, badaddr__maybe_halfword
+ stcr r5, SR1
+ FLUSH_PIPELINE
+
+ /*
+ * It's a bad address if it's misaligned.
+ */
+ bb1 0, r2, badaddr__return_nonzero
+ bb1 1, r2, badaddr__return_nonzero
+ /*
+ * The next line will either fault or not. If it faults, execution
+ * will go to: data_access_handler (see above)
+ * and then to: ignore_data_exception (see above)
+ * and then to: badaddr__return_nonzero (see below)
+ * which will return to the calling function.
+ *
+ * If there is no fault, execution just continues as normal.
+ */
+ ld r5, r2, 0
+ FLUSH_PIPELINE
+ br.n badaddr__return
+ or r2, r0, r0 /* indicate a zero (address not bad) return.*/
+
+ badaddr__maybe_halfword:
+ /* More or less like the code for checking a word above */
+ sub r6, r3, 2
+ bcnd ne0, r6, badaddr__maybe_byte
+
+ /* it's bad if it's misaligned */
+ bb1 0, r2, badaddr__return_nonzero
+
+ FLUSH_PIPELINE
+ ld.h r5, r2, 0
+ FLUSH_PIPELINE
+ br.n badaddr__return
+ or r2, r0, r0
+
+ badaddr__maybe_byte:
+ /* More or less like the code for checking a word above */
+ sub r6, r3, 1
+ bcnd ne0, r6, badaddr__unknown_size
+ FLUSH_PIPELINE
+ ld.b r5, r2, 0
+ FLUSH_PIPELINE
+ br.n badaddr__return
+ or r2, r0, r0
+ badaddr__unknown_size:
+#ifndef NDEBUG
+ data
+ 1: string "bad length (%d) to badaddr() from 0x%x\n\000"
+ text
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ or r4, r0, r1
+ bsr _printf
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ bsr _panic
+ /*NOTREACHED*/
+#endif
+
+_LABEL(badaddr__return_nonzero)
+ or r2, r0, 1
+ /* fall through to badaddr__return */
+
+_LABEL(badaddr__return)
+ ldcr r4, SR1
+ clr r4, r4, 1<FLAG_IGNORE_DATA_EXCEPTION>
+ stcr r4, SR1
+
+ /*
+ * Restore the PSR to what it was before.
+ * The only difference is that we might be enabling interrupts
+ * (which we turned off above). If interrupts were already off,
+ * we do not want to turn them on now, so we just restore from
+ * where we saved it.
+ */
+ FLUSH_PIPELINE
+ stcr r8, PSR
+ jmp r1
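+
+ /*
+ * Usage sketch (illustrative only; "addr" stands for whatever device
+ * register a probe routine wants to test for):
+ *
+ *	if (badaddr(addr, 4) == 0)
+ *		value = *(volatile unsigned *)addr;
+ *	else
+ *		the device is absent -- skip it during autoconfiguration
+ */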
+
+
+/*
+******************************************************************************
+******************************************************************************
+******************************************************************************
+*/
+
+
+LABEL(setup_phase_one)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: saved copy of exception-time r1 *
+ * SR3: must be preserved .. may be the exception-time stack *
+ * r1: return address to calling exception handler *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Decide where we're going to put the exception frame. *
+ * Might be at the end of R31, SR3, or the thread's *
+ * pcb. *
+ \***************************************************************/
+
+ /* Check if we are coming in from a FPU restart exception.
+ If so, the pcb will be in SR3 */
+ bb1.n FLAG_ENABLING_FPU, FLAGS, use_SR3_pcb
+ xcr r1, r1, SR2
+ /* are we coming in from user mode? If so, pick up thread pcb */
+ bb0 FLAG_FROM_KERNEL, FLAGS, pickup_stack
+
+ /* Interrupt in kernel mode, not FPU restart */
+ _LABEL(already_on_kernel_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; may be important for other exceptions *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
 + * We're already on the kernel stack and did not need *
 + * to use SR3. We can just make room on the *
+ * stack (r31) for our exception frame. *
+ \***************************************************************/
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+ st FLAGS, r31, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+
+ ldcr r1, SR3 /* save previous SR3 */
+ st r1, r31, REG_OFF(EF_SR3)
+
+ addu r1, r31, SIZEOF_EF /* save previous r31 */
+ br.n have_pcb
+ st r1, r31, GENREG_OFF(31)
+
+
+ _LABEL(use_SR3_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; exception-time stack pointer *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
 + * An exception occurred while enabling the FPU. Since r31 *
+ * is the user's r31 while enabling the FPU, we had put *
+ * our pcb pointer into SR3, so make room from *
+ * there for our stack pointer. *
+ * We need to check if SR3 is the old stack pointer or the *
 + * pointer off to the user pcb. If it is pointing to the user *
+ * pcb, we need to pick up the kernel stack. Otherwise *
+ * we need to allocate a frame upon it. *
+ * We look at the EPSR to see if it was from user mode *
+ * Unfortunately, we have no registers free at the moment *
+ * But we know register 0 in the pcb frame will always be *
+ * zero, so we can use it as scratch storage. *
+ * *
+ * *
+ \***************************************************************/
+ xcr r30, r30, SR3 /* r30 = old exception frame */
+ st r1, r30, GENREG_OFF(0) /* free up r1 */
+ ld r1, r30, REG_OFF(EF_EPSR) /* get back the epsr */
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f /* if user mode */
+ ld r1, r30, GENREG_OFF(0) /* restore r1 */
+ /* we were in kernel mode - dump frame upon the stack */
+ st r0, r30, GENREG_OFF(0) /* repair old frame */
+ subu r30, r30, SIZEOF_EF /* r30 now our E.F. */
+ st FLAGS, r30, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r30, GENREG_OFF(1) /* save prev. r1 (now r1 free) */
+
+ st r31, r30, GENREG_OFF(31) /* save previous r31 */
+ or r31, r0, r30 /* make r31 our pointer. */
+ addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */
+ st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */
+ br.n have_pcb
+ xcr r30, r30, SR3 /* restore r30 */
+ 1:
+ /* we took an exception while restarting the FPU from user space.
+ Consequently, we never picked up a stack. Do so now.
+ R1 is currently free (saved in the exception frame pointed at by
+ r30) */
+ or.u r1, r0, hi16(_kstack)
+ ld r1, r1, lo16(_kstack)
+ addu r1, r1, USIZE-SIZEOF_EF
+ st FLAGS, r1, REG_OFF(EF_FLAGS) /* store flags */
+ st r31, r1, GENREG_OFF(31) /* store r31 - now free */
+ st r30, r1, REG_OFF(EF_SR3) /* store old SR3 (pcb) */
+ or r31, r1, r0 /* make r31 our exception frame pointer */
+ ld r1, r30, GENREG_OFF(0) /* restore old r1 */
+ st r0, r30, GENREG_OFF(0) /* repair that frame */
+ st r1, r31, GENREG_OFF(1) /* store r1 in its proper place */
+ br.n have_pcb
+ xcr r30, r30, SR3 /* restore r30 */
+
+ _LABEL(pickup_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Since we're servicing an exception from user mode, we *
+ * know that SR3 is free. We use it to free up a temp. *
+ * register to be used in getting the thread's pcb *
+ \***************************************************************/
+ stcr r31, SR3 /* save previous r31 */
+
+ /* switch to the thread's kernel stack. */
+ or.u r31, r0, hi16(_curpcb)
+ ld r31, r31, lo16(_curpcb)
+ addu r31, r31, PCB_USER /* point to user save area */
+ st FLAGS, r31, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+ ldcr r1, SR3 /* save previous r31 */
+ st r1, r31, GENREG_OFF(31)
+ /*FALLTHROUGH */
+
+ _LABEL(have_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * r1: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * Save the shadow registers that need to be saved to *
+ * the exception frame. *
+ \***************************************************************/
+ stcr TMP, SR3 /* free up TMP, TMP2, TMP3 */
+ SAVE_TMP2
+ SAVE_TMP3
+
+ /* save some exception-time registers to the exception frame */
+ ldcr TMP, EPSR
+ ldcr TMP2, SFIP
+ ldcr TMP3, SNIP
+ st TMP, r31, REG_OFF(EF_EPSR)
+ st TMP2, r31, REG_OFF(EF_SFIP)
+ st TMP3, r31, REG_OFF(EF_SNIP)
+
+ ldcr TMP, SSBR
+ ldcr TMP2, SXIP
+ ldcr TMP3, DMT0
+ st TMP2, r31, REG_OFF(EF_SXIP)
+
+#if 0
+ /*
+ * The following is a kludge so that
+ * a core file will have a copy of
+ * DMT0 so that 'sim' can display it
+ * correctly.
+ * After a data fault has been noticed,
+ * the real EF_DMT0 is cleared, so I need
+ * to throw this somewhere.
+ * There's no special reason I chose this
+ * register (FPIT)... it's just one of many
+ * for which it causes no pain to do this.
+ */
+ st TMP3, r31, REG_OFF(EF_FPIT)
+#endif
+
+ /*
+ * The above shadow registers are obligatory for any and all
+ * exceptions. Now, if the data access pipeline is not clear,
+ * we must save the DMx shadow registers, as well as clear
+ * the appropriate SSBR bits for the destination registers of
+ * loads or xmems.
+ */
+ bb0.n DMT_VALID_BIT, TMP3, DMT_check_finished
+ st TMP3, r31, REG_OFF(EF_DMT0)
+
+ ldcr TMP2, DMT1
+ ldcr TMP3, DMT2
+ st TMP2, r31, REG_OFF(EF_DMT1)
+ st TMP3, r31, REG_OFF(EF_DMT2)
+
+ ldcr TMP2, DMA0
+ ldcr TMP3, DMA1
+ st TMP2, r31, REG_OFF(EF_DMA0)
+ st TMP3, r31, REG_OFF(EF_DMA1)
+
+ ldcr TMP2, DMA2
+ ldcr TMP3, DMD0
+ st TMP2, r31, REG_OFF(EF_DMA2)
+ st TMP3, r31, REG_OFF(EF_DMD0)
+
+ ldcr TMP2, DMD1
+ ldcr TMP3, DMD2
+ st TMP2, r31, REG_OFF(EF_DMD1)
+ st TMP3, r31, REG_OFF(EF_DMD2)
+
+ /*
+ *---------------------------------------------------------------
+ * need to clear "appropriate" bits in the SSBR before
+ * we restart the FPU
+ */
+
+
+ _LABEL(check_DMT0)
+ ldcr TMP2, DMT0
+ bb0.n DMT_VALID_BIT, TMP2, DMT_check_finished
+ stcr r0, DMT0 /* so an exception at fpu_enable doesn't see our DMT0*/
+ bb1 DMT_LOCK_BIT, TMP2, do_DMT0
+ bb1 DMT_WRITE_BIT, TMP2, check_DMT1
+ _LABEL(do_DMT0)
+ extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
+ set TMP2, TMP2, 1<5>
+ clr TMP, TMP, TMP2
+
+ _LABEL(check_DMT1)
+ ldcr TMP2, DMT1
+ bb0 DMT_VALID_BIT, TMP2, check_DMT2
+ bb1 DMT_LOCK_BIT, TMP2, do_DMT1
+ bb1 DMT_WRITE_BIT, TMP2, check_DMT2
+ _LABEL(do_DMT1)
+ extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
+ set TMP2, TMP2, 1<5>
+ clr TMP, TMP, TMP2
+
+ _LABEL(check_DMT2)
+ ldcr TMP2, DMT2
+ bb0 DMT_VALID_BIT, TMP2, DMT_check_finished
+ bb1 DMT_LOCK_BIT, TMP2, do_DMT2_single
+ bb1 DMT_WRITE_BIT, TMP2, DMT_check_finished
+ bb1 DMT_DOUBLE_BIT,TMP2, do_DMT2_double
+ _LABEL(do_DMT2_single)
+ extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
+ br.n 1f
+ set TMP2, TMP2, 1<5>
+ _LABEL(do_DMT2_double)
+ extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
+ set TMP2, TMP2, 1<6>
+1: clr TMP, TMP, TMP2
+
+ _LABEL(DMT_check_finished)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: saved TMP *
+ * r1: free *
+ * TMP: possibly revised SSBR *
+ * TMP2: free *
+ * TMP3: free *
+ * FLAGS: CPU status flags *
+ * r31: exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception-time TMP2, TMP3. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Dmt0. *
+ * Other data pipeline control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ \***************************************************************/
+ ldcr r1, SR2
+ jmp r1 /* return to allow the handler to clear more SSBR bits */
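+
+ /*
+ * What the DMT scan above does, restated in C (a sketch only; the bit
+ * names follow the DMT_* conventions used in this file):
+ *
+ *	for each of DMT0, DMT1, DMT2:
+ *		if ((dmt & DMT_VALID) &&
+ *		    ((dmt & DMT_LOCK) || !(dmt & DMT_WRITE))) {
+ *			reg = (dmt >> DMT_DREG_OFFSET) &
+ *			    ((1 << DMT_DREG_WIDTH) - 1);
+ *			clear the SSBR bit for reg (two bits when
+ *			DMT2 flags a double);
+ *		}
+ */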
+
+/************************************************************************/
+/************************************************************************/
+
+ _LABEL(clear_FPi_ssbr_bit)
+ /*
+ * Clear floating-point imprecise ssbr bits.
+ * Also, save appropriate FPU control registers to the E.F.
+ *
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+ fldcr TMP2, FPSR
+ fldcr TMP3, FPCR
+ st TMP2, r31, REG_OFF(EF_FPSR)
+ st TMP3, r31, REG_OFF(EF_FPCR)
+
+ fldcr TMP2, FPECR
+ fldcr TMP3, FPRH
+ st TMP2, r31, REG_OFF(EF_FPECR)
+ st TMP3, r31, REG_OFF(EF_FPRH)
+
+ fldcr TMP2, FPIT
+ fldcr TMP3, FPRL
+ st TMP2, r31, REG_OFF(EF_FPIT)
+ st TMP3, r31, REG_OFF(EF_FPRL)
+
+ /*
+ * We only need clear the bit in the SSBR for the
+ * 2nd reg of a double result [see section 6.8.5]
+ */
+ #define FPIT_SIZE_BIT 10
+ bb0 FPIT_SIZE_BIT, TMP2, not_double_fpi
+ extu TMP2, TMP2, 5<0> /* get the reg. */
+ set TMP2, TMP2, 1<6> /* set width (width=2 will clear two bits) */
+ clr TMP, TMP, TMP2
+
+ _LABEL(not_double_fpi)
+ jmp r1
+
+
+/************************************************************************/
+/************************************************************************/
+
+
+ _LABEL(clear_FPp_ssbr_bit)
+ /*
+ * Clear floating-point precise ssbr bits.
+ * Also, save appropriate FPU control registers to the E.F.
+ *
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+ fldcr TMP2, FPSR
+ fldcr TMP3, FPCR
+ st TMP2, r31, REG_OFF(EF_FPSR)
+ st TMP3, r31, REG_OFF(EF_FPCR)
+
+ fldcr TMP2, FPHS1
+ fldcr TMP3, FPHS2
+ st TMP2, r31, REG_OFF(EF_FPHS1)
+ st TMP3, r31, REG_OFF(EF_FPHS2)
+
+ fldcr TMP2, FPLS1
+ fldcr TMP3, FPLS2
+ st TMP2, r31, REG_OFF(EF_FPLS1)
+ st TMP3, r31, REG_OFF(EF_FPLS2)
+
+ fldcr TMP2, FPPT
+ fldcr TMP3, FPECR
+ st TMP2, r31, REG_OFF(EF_FPPT)
+ st TMP3, r31, REG_OFF(EF_FPECR)
+
+ #define FPPT_SIZE_BIT 5
+ bb1.n FPPT_SIZE_BIT, TMP2, 1f
+ extu TMP3, TMP2, 5<0> /* get FP operation dest reg */
+ br.n 2f
+ set TMP3, TMP3, 1<5> /* set size=1 -- clear one bit for "float" */
+ 1: set TMP3, TMP3, 1<6> /* set size=2 -- clear two bit for "double" */
+ 2:
+ clr TMP, TMP, TMP3 /* clear bit(s) in ssbr. */
+ jmp r1
+
+
+/************************************************************************/
+/************************************************************************/
+
+
+ _LABEL(clear_dest_ssbr_bit)
+ /*
+ * There are various cases where an exception can leave the
+ * destination register's bit in the SB set.
+ * Examples:
+ * misaligned or privilege exception on a LD or XMEM
+ * DIV or DIVU by zero.
+ *
+ * I think that if the instruction is LD.D, then two bits must
+ * be cleared.
+ *
+ * Even though there are a number of instructions/exception
+ * combinations that could fire this code up, it's only required
+ * to be run for the above cases. However, I don't think it'll
+ * ever be a problem to run this in other cases (ST instructions,
+ * for example), so I don't bother checking. If we had to check
+ * for every possible instruction, this code would be much larger.
+ *
+ * The only checking, then, is to see if it's a LD.D or not.
+ *
+ * At the moment....
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+ ldcr TMP3, EPSR /* going to check: user or system memory? */
+ ldcr TMP2, SXIP /* get the instruction's address */
+ bb1.n PSR_SUPERVISOR_MODE_BIT, TMP3, 2f
+ clr TMP2, TMP2, 2<0> /* get rid of valid and error bits. */
+
+ 1: /* user space load here */
+#if ERRATA__XXX_USR
+ NOP
+ ld.usr TMP2, TMP2, r0 /* get the instruction itself */
+ NOP
+ NOP
+ NOP
+ br 3f
+#else
+ br.n 3f
+ ld.usr TMP2, TMP2, r0 /* get the instruction itself */
+#endif
+
+ 2: /* system space load here */
+ ld TMP2, TMP2, r0 /* get the instruction itself */
+
+ 3: /* now have the instruction..... */
+ /*
+ * Now see if it's a double load
+ * There are three forms of double load [IMM16, scaled, unscaled],
+ * which can be checked by matching against two templates:
+ * -- 77776666555544443333222211110000 --
+ * if (((instruction & 11111100000000000000000000000000) ==
+ * 00010000000000000000000000000000) ||
+ * ((instruction & 11111100000000001111110011100000) ==
+ * 11110100000000000001000000000000))
+ * {
+ * It's a load double, so
+ * clear two SSBR bits.
+ * }
+ * else
+ * {
+ * It's not a load double.
+ * Must be a load single, xmem, or st
+ * Thus, clear one SSBR bit.
+ * }
+ */
+ /* check the first pattern for ld.d */
+ extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
+ mask TMP3, TMP3, 0xFC00 /* apply the mask */
+ cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
+ bb1 eq, TMP3, misaligned_double
+
+ /* still could be -- check the second pattern for ld.d */
+ /* look at the upper 16 bits first */
+ extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
+ mask TMP3, TMP3, 0xFC00 /* apply the mask */
+ cmp TMP3, TMP3, 0xF400 /* if equal, it might be a load double */
+ bb1 ne, TMP3, misaligned_single /* not equal, so must be single */
+
+ /* now look at the lower 16 bits */
+ extu TMP3, TMP2, 16<0> /* get the lower 16 bits */
+ mask TMP3, TMP3, 0xFCE0 /* apply the mask */
+ cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
+ bb1 eq, TMP3, misaligned_double
+
+ _LABEL(misaligned_single)
+ extu TMP2, TMP2, 5<21> /* get the destination register */
+ br.n 1f
+ set TMP2, TMP2, 1<5> /* set size=1 */
+
+ _LABEL(misaligned_double)
+ extu TMP2, TMP2, 5<21> /* get the destination register */
+	set	TMP2, TMP2, 1<6>	/* set size=2 -- clear two bits for "ld.d" */
+
+ 1: jmp.n r1
+ clr TMP, TMP, TMP2 /* clear bit(s) in ssbr. */
+
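+	/*
+	 * Illustration only -- a C sketch of the ld.d check above.  The
+	 * first test matches the IMM16 form (top six bits 000100); the
+	 * second matches the scaled/unscaled register forms.  The name
+	 * "is_load_double" is made up for this comment.
+	 *
+	 *	int is_load_double(unsigned int insn)
+	 *	{
+	 *		if ((insn & 0xfc000000) == 0x10000000)
+	 *			return 1;
+	 *		if ((insn & 0xfc00fce0) == 0xf4001000)
+	 *			return 1;
+	 *		return 0;
+	 *	}
+	 */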
+/************************************************************************/
+/************************************************************************/
+
+
+
+ LABEL(setup_phase_two)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: free *
+ * SR3: saved TMP *
+ * r1: return address to calling exception handler *
+ * TMP: possibly revised SSBR *
+ * TMP2: free *
+ * TMP3: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception-time TMP2, TMP3. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * restore the system to the exception-time state (except *
+	 * SR3 will be OUR stack pointer) so that we may restart the FPU. *
+ \***************************************************************/
+ stcr TMP, SSBR /* done with SSBR, TMP now free */
+ RESTORE_TMP2 /* done with extra temp regs */
+ RESTORE_TMP3 /* done with extra temp regs */
+
+ /* Get the current PSR and modify for the rte to enable the FPU */
+ ldcr TMP, PSR
+ clr TMP, TMP, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr TMP, TMP, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+ stcr TMP, EPSR
+
+ /* the "+2" below is to set the VALID_BIT */
+ or.u TMP, r0, hi16(fpu_enable + 2)
+ or TMP, TMP, lo16(fpu_enable + 2)
+	stcr	TMP, SNIP	/* jump to fpu_enable, below */
+ addu TMP, TMP, 4
+ stcr TMP, SFIP /* and then continue after that */
+
+ set FLAGS, FLAGS, 1<FLAG_ENABLING_FPU> /* note what we're doing.*/
+ xcr FLAGS, FLAGS, SR1
+ st r1, r31, REG_OFF(EF_RET) /* save the return address */
+ ld r1, r31, GENREG_OFF(1) /* get original r1 */
+
+ xcr TMP, r31, SR3 /* TMP now restored. R31 now saved in SR3 */
+ ld r31, r31, GENREG_OFF(31) /* get original r31 */
+
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: CPU flags *
+ * SR2: free *
+ * SR3: pointer to our exception frame (our stack pointer) *
+ * r1 through r31: original exception-time values *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time FLAGS. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+	 * Held temporarily in the exception frame:                     *
+	 *      Return address to the calling exception handler.        *
+ *************************************************** *
+ * immediate goal: *
+ * Do an RTE to restart the fpu and jump to "fpu_enable" *
+ * Another exception (or exceptions) may be raised in *
+ * this, which is why FLAG_ENABLING_FPU is set in SR1. *
+ \***************************************************************/
+ RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */
+
+ _LABEL(fpu_enable)
+ FLUSH_PIPELINE
+ xcr TMP, TMP, SR3 /* get E.F. pointer */
+ st.d r30, TMP, GENREG_OFF(30) /* save previous r30, r31 */
+ or r31, TMP, r0 /* transfer E.F. pointer to r31 */
+ ld TMP, r31, REG_OFF(EF_SR3)/* get previous SR3; maybe important*/
+
+ /* make sure that the FLAG_ENABLING_FPU bit is off */
+ xcr FLAGS, FLAGS, SR1
+ clr FLAGS, FLAGS, 1<FLAG_ENABLING_FPU>
+ xcr FLAGS, FLAGS, SR1
+
+ xcr TMP, TMP, SR3 /* replace TMP, SR3 */
+
+ /* now save all regs to the exception frame. */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st.d r26, r31, GENREG_OFF(26)
+ st.d r28, r31, GENREG_OFF(28)
+#ifdef JEFF_DEBUG
+ /* mark beginning of frame with notable value */
+ or.u r20, r0, hi16(0x12345678)
+ or r20, r20, lo16(0x12345678)
+ st r20, r31, GENREG_OFF(0)
+#endif
+
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: free *
+ * SR2: free *
+ * SR3: previous exception-time SR3 *
+ * r1: return address to the calling exception handler *
+ * r2 through r30: free *
+ * r31: our exception frame *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time r0 through r31. *
+ * Exception-time FLAGS. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+	 *      Pick up a stack if we came in from user mode.  Put a    *
+	 *      copy of the exception frame pointer into r30, bump      *
+	 *      the stack a doubleword, and write the exception         *
+	 *      frame pointer.                                          *
+ * if not an interrupt exception, *
+ * Turn on interrupts and service any outstanding *
+ * data access exceptions. *
+ * Return to calling exception handler to *
+ * service the exception. *
+ \***************************************************************/
+
+ /*
+ * If it's not the interrupt exception, enable interrupts and
+ * take care of any data access exceptions......
+ *
+ * If interrupt exception, switch to interrupt stack if not
+ * already there. Else, switch to kernel stack.
+ */
+ or r30, r0, r31 /* get a copy of the e.f. pointer */
+ ld r2, r31, REG_OFF(EF_EPSR)
+ bb1 PSR_SUPERVISOR_MODE_BIT, r2, 1f /* If in kernel mode */
+
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 2f
+	or.u	r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
+ br 3f
+ 2:
+ or.u r31, r0, hi16(_kstack)
+ ld r31, r31, lo16(_kstack)
+ addu r31, r31, USIZE /* point at proper end */
+ br 3f
+ 1:
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 3f /* no, we will stay on kern stack */
+	or.u	r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
+ /* This label is here for debugging */
+ exception_handler_has_ksp: global exception_handler_has_ksp
+ 3: /*
+ here - r30 holds a pointer to the exception frame.
+ r31 is a pointer to the kernel stack/interrupt stack.
+ */
+ subu r31, r31, 8 /* make some breathing space */
+ st r30, r31, 0 /* store frame pointer on the stack */
+#if DDB
+ st r30, r31, 4 /* store it again for the debugger to recognize */
+#endif /* DDB */
+
+ ld r2, r30, REG_OFF(EF_VECTOR)
+ bcnd.n eq0, r2, return_to_calling_exception_handler /* is error */
+ ld r14, r30, REG_OFF(EF_RET)
+ cmp r3, r2, 1 /* interrupt is exception #1 ;Is an interrupt? */
+ bb1.n eq, r3, return_to_calling_exception_handler /* skip if so */
+
+#if DDB
+ cmp r3, r2, 130 /* DDB break exception */
+ bb1.n eq, r3, return_to_calling_exception_handler
+
+ cmp r3, r2, 132 /* DDB entry exception */
+ bb1.n eq, r3, return_to_calling_exception_handler
+#endif
+
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ stcr r2, PSR
+#if DDB
+ FLUSH_PIPELINE
+#endif
+
+	/* service any outstanding data pipeline stuff
+	   - check dmt0: anything outstanding? */
+
+ ld r3, r30, REG_OFF(EF_DMT0)
+ bb0 DMT_VALID_BIT, r3, return_to_calling_exception_handler
+
+/*
+ r30 can be clobbered by calls. So stuff its value into a
+ preserved register, say r15. R14 is in use (see return_to_... below).
+ */
+ or r15, r0, r30
+
+ CALL(_trap, T_DATAFLT, r15)
+ CALL(_data_access_emulation, r15, r0)
+
+/* restore it... */
+ or r30, r0, r15
+
+ /* clear the dmt0 word in the E.F */
+ st r0, r30, REG_OFF(EF_DMT0)
+
+ _LABEL(return_to_calling_exception_handler)
+ jmp r14 /* loaded above */
+
+
+
+/*
+ * ##########################################################################
+ * ##########################################################################
+ * ##########################################################################
+ */
+
+LABEL(return_from_exception_handler)
+LABEL(_return_from_main)
+ /*
+	 * Regs r1-r30 are free. R31 is pointing at the word
+	 * on the kernel stack where our pointer to the exception frame
+	 * is stored.  Reload it now.
+ *
+ * At this point, if EF_DMT0 is not zero, then
+ * this must have been an interrupt where the fault didn't
+ * get corrected above. We'll do that now.
+ *
+ * We load it into r14 since it is preserved across function
+ * calls, and we may have to call some routines from within here.
+ *
+	 * Control is transferred here from obvious places in this file
+	 * and from thread_bootstrap in luna88k/locore.c.
+ *
+ */
+#define FPTR r14
+ ld FPTR, r31, 0 /* grab exception frame pointer */
+ ld r3, FPTR, REG_OFF(EF_DMT0)
+ bb0 DMT_VALID_BIT, r3, _check_ast /*[Oh well, nothing to do here] */
+
+#if 1
+ /*
+	 * This might happen for non-interrupts if the user sets DMT0
+	 * in an exception handler.
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 eq, r2, 1f
+ LABEL(oops)
+ or.u r4, r0, hi16(2f)
+ or r4, r4, lo16(2f)
+#if DDB
+ CALL(_db_printf, r4, r0)
+ tb0 0, r0, 132
+#endif
+ br 1f
+ data
+ 2: string "OOPS: DMT0 not zero and not interrupt.\n\000"
+ align 4
+ text
+ 1:
+#endif
+ /*
+	 * If it's the interrupt exception, enable interrupts.
+	 * Take care of any data access exception.  90/8/15 added by yama
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 ne, r2, 1f /* If not so, skip */
+
+ /* if EPSR has interrupts disabled, skip also */
+ ld r2, FPTR, REG_OFF(EF_EPSR)
+ bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if disabled */
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ FLUSH_PIPELINE
+ stcr r2, PSR
+ 1:
+ ld r2, FPTR, REG_OFF(EF_DMT0)
+ bb0 DMT_VALID_BIT, r2, 2f
+
+ /*
+ * if there happens to be a data fault that hasn't been serviced yet,
+ * go off and service that...
+ */
+ CALL(_trap, T_DATAFLT, r30)
+ CALL(_data_access_emulation, r30, r0) /* really only 2 args */
+
+ /* clear the dmt0 word in the E.F. */
+ st r0 , FPTR, REG_OFF(EF_DMT0)
+ 2:
+
+LABEL(_check_ast)
+
+ ldcr r1, PSR /* get current PSR */
+ set r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT> /* set for disable intr. */
+ stcr r1, PSR /* install new PSR */
+ FLUSH_PIPELINE
+
+ /*
+ *
+ * This code (including a bit above) is more or less:
+ *
+ * check_ast:
+ *
+ * Disable interrupts
+ * if (exception was from user mode && want_ast)
+ * {
+ * trap(AST, frame)
+ * goto check_ast
+ * }
+ *
+ * We want to service AST's only if returning to user space.
+ */
+
+ ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
+ bb1 PSR_SUPERVISOR_MODE_BIT, r2, no_ast /*skip if in system mode */
+
+ /* get and check want_ast */
+ or.u r2, r0, hi16(_want_ast)
+ ld r3, r2, lo16(_want_ast)
+ bcnd eq0, r3, no_ast
+ /*
+ * trap(AST,...) will service
+ * software interrupts and
+ * network interrupts
+ */
+ CALL(_trap, T_ASTFLT, FPTR) /* enter with interrupts disabled */
+ subu r31, r31, 40 /* return with interrupts enabled */
+ addu r31, r31, 40
+ br _check_ast /* and check again..... */
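+	/*
+	 * Illustration only -- the check_ast loop above, sketched in C.
+	 * "frame", "USERMODE()" and "disable_interrupts()" are made-up
+	 * names for this comment; _want_ast and _trap are the symbols
+	 * actually used above.
+	 *
+	 *	for (;;) {
+	 *		disable_interrupts();
+	 *		if (!USERMODE(frame->epsr) || want_ast == 0)
+	 *			break;
+	 *		trap(T_ASTFLT, frame);	(returns with interrupts on)
+	 *	}
+	 */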
+
+_LABEL(no_ast)
+
+ /* now ready to return....*/
+
+ /*
+ * Transfer the frame pointer to r31, since we no longer need a stack.
+ * No page faults here, and interrupts are disabled.
+ */
+
+ or r31, r0, FPTR
+
+
+/* restore r1 later */
+ ld.d r2 , r31, GENREG_OFF(2)
+ ld.d r4 , r31, GENREG_OFF(4)
+ ld.d r6 , r31, GENREG_OFF(6)
+ ld.d r8 , r31, GENREG_OFF(8)
+ ld.d r10, r31, GENREG_OFF(10)
+ ld.d r12, r31, GENREG_OFF(12)
+ ld.d r14, r31, GENREG_OFF(14)
+ ld.d r16, r31, GENREG_OFF(16)
+ ld.d r18, r31, GENREG_OFF(18)
+ ld.d r20, r31, GENREG_OFF(20)
+ ld.d r22, r31, GENREG_OFF(22)
+ ld.d r24, r31, GENREG_OFF(24)
+ ld.d r26, r31, GENREG_OFF(26)
+ ld.d r28, r31, GENREG_OFF(28)
+ /* restore r1, r30, r31 later */
+
+
+ /* disable shadowing (interrupts already disabled above) */
+ ldcr r1, PSR
+ set r1, r1, 1<PSR_SHADOW_FREEZE_BIT>
+ FLUSH_PIPELINE
+ stcr r1, PSR
+
+ /* reload the control regs*/
+
+ /*
+	 * Note: no need to restore the SXIP;
+	 * the "rte" causes execution to continue
+	 * first with the instruction pointed to by the NIP
+	 * and then the FIP.
+ *
+ * See MC88100 Risc Processor User's Manual, 2nd Edition,
+ * section 6.4.3.1.2-4
+ */
+ ld r30, r31, REG_OFF(EF_SNIP)
+ ld r1, r31, REG_OFF(EF_SFIP)
+ stcr r0, SSBR
+ stcr r30, SNIP
+ stcr r1, SFIP
+
+ ld r30, r31, REG_OFF(EF_EPSR)
+ ld r1, r31, REG_OFF(EF_MODE)
+ stcr r30, EPSR
+
+ /* Now restore r1, r30, and r31 */
+ ld r1, r31, GENREG_OFF(1)
+ ld.d r30, r31, GENREG_OFF(30)
+
+ _LABEL(return_from_exception)
+ RTE
+
+/***********************************************************************/
+/***********************************************************************/
+/***********************************************************************/
diff --git a/sys/arch/mvme88k/m88k/exception_return.s b/sys/arch/mvme88k/m88k/exception_return.s
new file mode 100644
index 00000000000..9c79a5ba083
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/exception_return.s
@@ -0,0 +1,255 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Exception handler return routines.
+ */
+/*
+ * HISTORY
+ * $Log: exception_return.s,v $
+ * Revision 1.1 1995/10/18 10:54:27 deraadt
+ * Initial revision
+ *
+ * Revision 2.6 93/01/26 18:00:53 danner
+ * conditionalized define of ASSEMBLER.
+ * [93/01/22 jfriedl]
+ *
+ * Revision 2.5 92/08/03 17:51:58 jfriedl
+ * Update includes, changed to new style manifiest constants [danner]
+ *
+ * Revision 2.4 92/05/04 11:28:03 danner
+ * Remove debugging cruft. Leave argument save area in call to
+ * ast_taken.
+ * [92/05/03 danner]
+ * Remove debugging cruft.
+ * [92/04/12 danner]
+ * [92/04/12 16:25:32 danner]
+ *
+ * In the case of an ast on a return from exception, a random value
+ * was stored into R2. Fixed.
+ * [92/04/12 danner]
+ *
+ * Revision 2.3 92/04/01 10:56:17 rpd
+ * Corrections to the ast handling code.
+ * [92/03/20 danner]
+ * Corrected typo in ast_taken register reload code.
+ * [92/03/03 danner]
+ *
+ * Revision 2.2 92/02/18 18:03:30 elf
+ * Created.
+ * [92/02/01 danner]
+ *
+ */
+
+#include <mach_kdb.h>
+
+#ifndef ASSEMBLER
+# define ASSEMBLER /* this is required for some of the include files */
+#endif
+
+#include <assym.s> /* for PCB_KSP, etc */
+#include <machine/asm.h>
+#include <motorola/m88k/m88100/m88100.h>
+#include <motorola/m88k/m88100/psl.h>
+#include <motorola/m88k/trap.h> /* for T_ defines */
+
+/*
+ * Return from exception - all registers need to be restored.
+ * R30 points to the exception frame.
+ * R31 is the kernel stack pointer.
+ * Any interrupt status is acceptable on entry.
+ * All other registers are scratch.
+ * Any data and fp faults must be cleared up before this routine
+ * is called.
+ */
+ENTRY(return_from_exception)
+ ld r10, r30, REG_OFF(EF_EPSR) ; get old epsr
+ ldcr r2, PSR
+ set r2, r2, 1<PSR_IND_LOG>
+ stcr r2, PSR ; disable interrupts
+ FLUSH_PIPELINE
+ bb1 PSR_IND_LOG, r10, 1f ; no need to check
+ bsr ast_check
+1:
+/* current status -
+
+ interrupts disabled. Asts checked for.
+ Ready to restore registers and return from the exception.
+ R30 points to the exception frame.
+*/
+ /* reload r2-r13 */
+ ld.d r2 , r30, GENREG_OFF(2)
+ ld.d r4 , r30, GENREG_OFF(4)
+ ld.d r6 , r30, GENREG_OFF(6)
+ ld.d r8 , r30, GENREG_OFF(8)
+ ld.d r10, r30, GENREG_OFF(10)
+ br.n return_common
+ ld.d r12, r30, GENREG_OFF(12)
+
+/*
+ * Return from syscall - registers r3-r13 need not be restored.
+ * R30 points to the exception frame.
+ * R31 is the kernel stack pointer.
+ * All other registers are scratch.
+ * Any interrupt status is acceptable on entry.
+ */
+
+ENTRY(return_from_syscall)
+/* turn off interrupts, check ast */
+ ldcr r3, PSR
+ set r3, r3, 1<PSR_IND_LOG>
+ stcr r3, PSR ; disable interrupts
+ FLUSH_PIPELINE
+ bsr ast_check
+ /* restore r2 */
+ ld r2, r30, GENREG_OFF(2)
+ /* current status -
+ interrupts disabled. Asts checked for.
+ Ready to restore registers and return from the exception.
+ R30 holds the frame pointer
+ */
+ /* br return_common */
+
+
+LABEL(return_common)
+/*
+ R30 points to the exception frame.
+ Interrupts off.
+ r2-r13 need to be preserved.
+*/
+ /* restore r14-r29 */
+ ld.d r14, r30, GENREG_OFF(14)
+ ld.d r16, r30, GENREG_OFF(16)
+ ld.d r18, r30, GENREG_OFF(18)
+ ld.d r20, r30, GENREG_OFF(20)
+ ld.d r22, r30, GENREG_OFF(22)
+ ld.d r24, r30, GENREG_OFF(24)
+ ld.d r26, r30, GENREG_OFF(26)
+ ld.d r28, r30, GENREG_OFF(28)
+ ; restore r1, r30, r31 later
+	/* turn off shadowing - we are about to trash
+	   our kernel stack pointer, which means this code
+	   cannot be tracked by a debugger */
+ ; disable shadowing (interrupts already disabled above)
+ ldcr r1, PSR
+ set r1, r1, 1<PSR_SFRZ_LOG>
+ stcr r1, PSR
+ FLUSH_PIPELINE
+
+ ; reload the control regs
+ /*
+	 * Note: no need to restore the SXIP;
+	 * the "rte" causes execution to continue
+	 * first with the instruction pointed to by the NIP
+	 * and then the FIP.
+ *
+ * See MC88100 Risc Processor User's Manual, 2nd Edition,
+ * section 6.4.3.1.2-4
+ */
+ ld r31, r30, REG_OFF(EF_SNIP)
+ ld r1, r30, REG_OFF(EF_SFIP)
+ stcr r0, SSBR
+ stcr r31, SNIP
+ stcr r1, SFIP
+
+ ld r31, r30, REG_OFF(EF_EPSR)
+ ld r1, r30, REG_OFF(EF_MODE)
+ stcr r31, EPSR
+
+ /*
+ * restore the mode (cpu flags).
+ * This can't be done directly, because the flags include the
+ * CPU number. We might now be on a different CPU from when we
+ * first entered the exception handler (due to having been blocked
+ * and then restarted on a different CPU). Thus, we'll grab the
+ * old flags and put the current cpu number there.
+ */
+ clr r1, r1, FLAG_CPU_FIELD_WIDTH <0> /* clear bits 0..WIDTH */
+ ldcr r31, SR1
+ clr r31, r31, 0<FLAG_CPU_FIELD_WIDTH> /* clear bits WIDTH..31 */
+ or r31, r1, r31
+ stcr r31, SR1 ; restore old flags with (maybe new) CPU number
+
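+	/*
+	 * Illustration only -- the merge above in C, where CPU_FIELD_MASK
+	 * is a made-up name for the low FLAG_CPU_FIELD_WIDTH bits:
+	 *
+	 *	new_sr1 = (saved_mode & ~CPU_FIELD_MASK)
+	 *		| (current_sr1 & CPU_FIELD_MASK);
+	 */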
+ /* Now restore r1, r30, and r31 */
+ ld r1, r30, GENREG_OFF(1)
+ ld.d r30, r30, GENREG_OFF(30)
+
+ _LABEL(return_from_exception)
+ RTE
+
+
+LABEL(ast_check)
+ /* enter here with interrupts disabled */
+ /*
+ *
+ * ast_check:
+ *
+ * if (exception was from user mode && need_ast[cpu_number()])
+ * {
+	 *       call ast_taken() (turns interrupts back on, clears need_ast)
+	 *       disable_interrupts
+	 *       goto ast_check
+ * }
+ * return (with interrupts off)
+ *
+ * Upon entry,
+ * R30 is the exception frame pointer
+ * R31 is the kernel stack pointer
+ * R1 is the return address
+ *
+ * Upon entry to this function, all user register state
+ * must be up to date in the pcb. In particular, the return
+ * value for thread_syscall_return has to have been saved.
+ *
+ * If we block, we will return through thread_exception_return.
+ *
+ * This routine clobbers r2-r29.
+ *
+ */
+ ld r3, r30, REG_OFF(EF_EPSR)
+ bb1 PSR_MODE_LOG, r3, 1f
+ ldcr r3, SR1
+	mak	r3, r3, FLAG_CPU_FIELD_WIDTH <2>	; r3 = cpu# * 4 (byte offset)
+ or.u r3, r3, hi16(_need_ast)
+ ld r4, r3, lo16(_need_ast) ; r4 now need_ast[cpu#]
+ bcnd eq0, r4, 1f
+ /* preserve r1, r30 */
+ subu r31, r31, 40
+ st r1, r31, 32
+ bsr.n _ast_taken ; no arguments
+ st r30, r31, 36
+ /* turn interrupts back off */
+ ldcr r1, PSR ; get current PSR
+ set r1, r1, 1<PSR_IND_LOG> ; set for disable intr.
+ stcr r1, PSR ; install new PSR
+ FLUSH_PIPELINE
+ /* restore register state */
+ ld r30, r31, 36
+ ld r1, r31, 32
+ br.n ast_check ; check again
+ addu r31, r31, 40
+1:
+ /* no ast. Return back to caller */
+ jmp r1
diff --git a/sys/arch/mvme88k/m88k/genassym.c b/sys/arch/mvme88k/m88k/genassym.c
new file mode 100644
index 00000000000..3511defe6c3
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/genassym.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)genassym.c 7.8 (Berkeley) 5/7/91
+ * $Id: genassym.c,v 1.1 1995/10/18 10:54:25 deraadt Exp $
+ */
+
+#ifndef KERNEL
+#define KERNEL
+#endif /* KERNEL */
+
+#include <sys/param.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <machine/cpu.h>
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/vmparam.h>
+#include <sys/syscall.h>
+#include <vm/vm.h>
+#include <sys/user.h>
+
+#define pair(TOKEN, ELEMENT) \
+ printf("#define " TOKEN " %u\n", (unsigned)(ELEMENT))
+
+#define int_offset_of_element(ELEMENT) (((unsigned)&(ELEMENT))/sizeof(int))
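+/*
+ * Illustration only: if, say, ss->fpsr happened to sit 16 bytes into the
+ * saved-state structure, then
+ *
+ *	pair("EF_FPSR", int_offset_of_element(ss->fpsr));
+ *
+ * would print "#define EF_FPSR 4", i.e. a word index rather than a byte
+ * offset, which the assembly sources presumably scale back up (REG_OFF()
+ * and friends).  The 16-byte offset here is hypothetical.
+ */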
+
+main()
+{
+ register struct proc *p = (struct proc *)0;
+ struct m88100_saved_state *ss = (struct m88100_saved_state *) 0;
+ register struct vmmeter *vm = (struct vmmeter *)0;
+ register struct user *up = (struct user *)0;
+ register struct rusage *rup = (struct rusage *)0;
+ struct vmspace *vms = (struct vmspace *)0;
+ pmap_t pmap = (pmap_t)0;
+ struct pcb *pcb = (struct pcb *)0;
+ register unsigned i;
+
+ printf("#ifndef __GENASSYM_INCLUDED\n");
+ printf("#define __GENASSYM_INCLUDED 1\n\n");
+
+ printf("#ifdef ASSEMBLER\n"
+ "#define NEWLINE \\\\ \n"
+ "#endif\n");
+
+ printf("#define\tP_FORW %d\n", &p->p_forw);
+ printf("#define\tP_BACK %d\n", &p->p_back);
+ printf("#define\tP_VMSPACE %d\n", &p->p_vmspace);
+ printf("#define\tP_ADDR %d\n", &p->p_addr);
+ printf("#define\tP_PRIORITY %d\n", &p->p_priority);
+ printf("#define\tP_STAT %d\n", &p->p_stat);
+ printf("#define\tP_WCHAN %d\n", &p->p_wchan);
+ printf("#define\tSRUN %d\n", SRUN);
+
+ printf("#define\tVM_PMAP %d\n", &vms->vm_pmap);
+ printf("#define\tV_INTR %d\n", &vm->v_intr);
+
+ printf("#define\tUPAGES %d\n", UPAGES);
+ printf("#define\tPGSHIFT %d\n", PGSHIFT);
+
+ printf("#define\tU_PROF %d\n", &up->u_stats.p_prof);
+ printf("#define\tU_PROFSCALE %d\n", &up->u_stats.p_prof.pr_scale);
+ printf("#define\tPCB_ONFAULT %d\n", &pcb->pcb_onfault);
+ printf("#define\tSIZEOF_PCB %d\n", sizeof(struct pcb));
+
+ printf("#define\tSYS_exit %d\n", SYS_exit);
+ printf("#define\tSYS_execve %d\n", SYS_execve);
+ printf("#define\tSYS_sigreturn %d\n", SYS_sigreturn);
+
+ pair("EF_R0", int_offset_of_element(ss->r[0]));
+ pair("EF_R31", int_offset_of_element(ss->r[31]));
+ pair("EF_FPSR", int_offset_of_element(ss->fpsr));
+ pair("EF_FPCR", int_offset_of_element(ss->fpcr));
+ pair("EF_EPSR", int_offset_of_element(ss->epsr));
+ pair("EF_SXIP", int_offset_of_element(ss->sxip));
+ pair("EF_SFIP", int_offset_of_element(ss->sfip));
+ pair("EF_SNIP", int_offset_of_element(ss->snip));
+ pair("EF_SSBR", int_offset_of_element(ss->ssbr));
+ pair("EF_DMT0", int_offset_of_element(ss->dmt0));
+ pair("EF_DMD0", int_offset_of_element(ss->dmd0));
+ pair("EF_DMA0", int_offset_of_element(ss->dma0));
+ pair("EF_DMT1", int_offset_of_element(ss->dmt1));
+ pair("EF_DMD1", int_offset_of_element(ss->dmd1));
+ pair("EF_DMA1", int_offset_of_element(ss->dma1));
+ pair("EF_DMT2", int_offset_of_element(ss->dmt2));
+ pair("EF_DMD2", int_offset_of_element(ss->dmd2));
+ pair("EF_DMA2", int_offset_of_element(ss->dma2));
+ pair("EF_FPECR", int_offset_of_element(ss->fpecr));
+ pair("EF_FPHS1", int_offset_of_element(ss->fphs1));
+ pair("EF_FPLS1", int_offset_of_element(ss->fpls1));
+ pair("EF_FPHS2", int_offset_of_element(ss->fphs2));
+ pair("EF_FPLS2", int_offset_of_element(ss->fpls2));
+ pair("EF_FPPT", int_offset_of_element(ss->fppt));
+ pair("EF_FPRH", int_offset_of_element(ss->fprh));
+ pair("EF_FPRL", int_offset_of_element(ss->fprl));
+ pair("EF_FPIT", int_offset_of_element(ss->fpit));
+ pair("EF_VECTOR", int_offset_of_element(ss->vector));
+ pair("EF_MASK", int_offset_of_element(ss->mask));
+ pair("EF_MODE", int_offset_of_element(ss->mode));
+
+ pair("EF_RET", int_offset_of_element(ss->scratch1));
+ pair("EF_NREGS", sizeof(*ss)/sizeof(int));
+
+ /* make a sanity check */
+ if (sizeof(*ss) & 7)
+ {
+ /*
+ * This contortion using write instead of fputs(stderr)
+ * is necessary because we can't include stdio.h in here.
+ */
+ static char buf[] =
+ "Exception frame not a multiple of double words\n";
+ write(2 /* stderr */,buf,sizeof(buf));
+ exit(1);
+ }
+ pair("SIZEOF_EF", sizeof(*ss));
+ printf("\n#endif /* __GENASSYM_INCLUDED */\n");
+ exit(0);
+}
diff --git a/sys/arch/mvme88k/m88k/locore.S b/sys/arch/mvme88k/m88k/locore.S
new file mode 100644
index 00000000000..e0c2ad9c862
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/locore.S
@@ -0,0 +1,496 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/* $RCSfile: locore.S,v $ -- asm boot routines
+ *
+ **********************************************************************
+ *****************************************************************RCS**/
+
+#ifndef ASSEMBLER /* predefined by ascpp, at least */
+#define ASSEMBLER
+#endif
+
+#include "machine/locore.h"
+#include "machine/m88100.h"
+#include "machine/trap.h"
+#include "machine/asm.h"
+#include "machine/board.h" /* lots of stuff (OBIO_PIO*, SYSV_BASE, etc)*/
+#include "machine/vmparam.h" /* INTSTACK_SIZE */
+#include "assym.s"
+
+/***********************************************************************/
+
+/*
+ * Arrange for the include file version number to appear directly in
+ * the namelist.
+ */
+global _INCLUDE_VERSION
+def _INCLUDE_VERSION, INCLUDE_VERSION
+#ifndef NBPG
+#define NBPG 4096
+#endif /* NBPG */
+
+#ifndef UADDR
+#define UADDR 0xFFEE0000 /* address of u */
+#endif /* UADDR */
+#ifndef USIZE
+#define USIZE (UPAGES * NBPG)
+#endif /* USIZE */
+/*
+ * The memory looks like:
+ * 0x00000 - 0x01000 : trap vectors
+ * 0x01000 - 0x10000 : first 64k used by BUG
+ * 0x10000 == start : Boot loader jumps here. (for now, this can
+ * handle only NMAGIC - screwy linker)
+ *
+ ***********************************************************************/
+ text
+
+LABEL(_kernelstart)
+LABEL(_start)
+LABEL(start)
+ br _start_text
+#if 0
+ .align 4096 ; VBR points to page aligned list
+ _LABEL(vector_list) /* references memory BELOW this line */
+ #include "machine/exception_vectors.h"
+ word END_OF_VECTOR_LIST
+
+ _LABEL(_msgsw)
+ word 0 /* Bits here turn on/off debugging somewhere. */
+#endif
+/*
+ * Do a dump. Called by auto-restart.
+ */
+
+ global _dumpsys
+LABEL(_doadump)
+ bsr _dumpsys
+ bsr _doboot
+ /*NOTREACHED*/
+
+/**************************************************************************/
+LABEL(_start_text) /* This is the *real* start upon poweron or reset */
+ /*
+ * Args passed by boot loader
+ * r2 howto
+ * r3 first_addr (first available address)
+ * r4 ((Clun << 8) | Dlun & FF) -> bootdev
+ * r5 esym
+ * r6 miniroot
+ */
+ or.u r13, r0, hi16(_boothowto)
+ st r2, r13, lo16(_boothowto)
+ or.u r13, r0, hi16(_first_addr)
+ st r3, r13, lo16(_first_addr)
+#if 0
+ or.u r13, r0, hi16(_bootdev)
+ st r4, r13, lo16(_bootdev)
+#endif
+ or.u r13, r0, hi16(_esym)
+ st r5, r13, lo16(_esym)
+ or.u r13, r0, hi16(_miniroot)
+ st r6, r13, lo16(_miniroot)
+
+ /*
+ * CPU Initialization
+ *
+ * Every CPU starts from here..
+ * (well, from 'start' above, which just jumps here).
+ *
+ * I use r11 and r22 here 'cause they're easy to not
+ * get mixed up -- r10, for example, looks too similar
+ * to r0 when not being careful....
+ *
+ * Ensure that the PSR is as we like:
+ * supervisor mode
+ * big-endian byte ordering
+ * concurrent operation allowed
+ * carry bit clear (I don't think we really care about this)
+ * FPU enabled
+ * misaligned access raises an exception
+ * interrupts disabled
+ * shadow registers frozen
+ *
+ * The manual says not to disable interrupts and freeze shadowing
+	 * at the same time because interrupts are not actually disabled
+ * until after the next instruction. Well, if an interrupt
+ * occurs now, we're in deep anyway, so I'm going to do
+ * the two together.
+ *
+ * Upon a reset (or poweron, I guess), the PSR indicates:
+ * supervisor mode
+	 *	interrupts, shadowing, FPU, misaligned exception: all disabled
+ *
+ * We'll just construct our own turning on what we want.
+ *
+ * jfriedl@omron.co.jp
+ */
+ stcr r0, SSBR /* clear this for later */
+
+ /* XXX We can use SR0-SR3 for any purpose */
+ set r11, r0, 1<PSR_SUPERVISOR_MODE_BIT>
+ set r11, r11, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r11, PSR
+	/* shadowing, FPU, misaligned access exception: all enabled now. */
+#if 0
+ or.u r11, r0, hi16(_vector_list)
+ or r11, r11, lo16(_vector_list)
+ stcr r11, VBR
+#endif /* 0 */
+ stcr r0, VBR
+
+/************************************************************************/
+
+#if defined(RAW_PRINTF) && RAW_PRINTF
+ bsr replace_mayput_with_rawputchar
+#endif
+
+ /*
+ * switch to interrupt stack
+ */
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+
+ /*
+ * Want to make the call:
+ * vector_init(VBR, vector_list)
+ */
+ or.u r3, r0, hi16(_vector_list)
+ or r3, r3, lo16(_vector_list)
+ bsr.n _vector_init
+ ldcr r2, VBR
+
+#if 0
+ /* clear BSS. Boot loader might have already done this... */
+ or.u r2, r0, hi16(_edata)
+ or r2, r2, lo16(_edata)
+ or.u r4, r0, hi16(_end)
+ or r4, r4, lo16(_end)
+ bsr.n _bzero /* bzero(edata, end-edata) */
+ subu r3, r4, r2
+#endif
+
+ /* still on int stack */
+ bsr.n _m187_bootstrap
+ subu r31, r31, 40
+ addu r31, r31, 40
+
+ /* switch to proc0 uarea */
+
+ or.u r10, r0, hi16(UADDR)
+ or r31, r10,lo16(UADDR)
+ addu r31, r31, USIZE
+
+ /* make the call: main() */
+ bsr.n _main
+ subu r31, r31, 40
+ addu r31, r31, 40
+ br _return_from_main
+
+/*****************************************************************************/
+
+ data
+ .align 4096 ; VBR points to page aligned list
+ global _vector_list
+_vector_list: ; references memory BELOW this line
+ #include "machine/exception_vectors.h"
+ word END_OF_VECTOR_LIST
+
+ global _msgsw
+_msgsw:
+ word 0 ;Bits here turn on/off debugging somewhere.
+ .align 4096
+ global _intstack
+ global _intstack_end
+_intstack:
+ space 4 * NBPG /* 16K */
+_intstack_end:
+
+/*
+ * When a process exits and its u. area goes away, we set curpcb to point
+ * to this `u.', leaving us with something to use for an interrupt stack,
+ * and letting all the register save code have a pcb_uw to examine.
+ * This is also carefully arranged (to come just before u0, so that
+ * process 0's kernel stack can quietly overrun into it during bootup, if
+ * we feel like doing that).
+ * Should be page aligned.
+ */
+ global _idle_u
+_idle_u:
+ space UPAGES * NBPG
+
+/*
+ * Process 0's u.
+ *
+ * This must be page aligned
+ */
+ global _u0
+ align 4096
+_u0: space UPAGES * NBPG
+estack0:
+
+/*
+ * UPAGES get mapped to kstack
+ */
+
+ global _kstack
+_kstack:
+ word UADDR
+
+#ifdef DDB
+ global _esym
+_esym:
+ word 0
+#endif /* DDB */
+
+ global _proc0paddr /* move to C code */
+_proc0paddr:
+ word _u0 /* KVA of proc0 uarea */
+
+/*
+ * _curpcb points to the current pcb (and hence u. area).
+ * Initially this is the special one.
+ */
+/*
+ * pcb is composed of kernel state + user state
+ * I may have to change curpcb to u0 + PCB_USER based on what
+ * other parts expect XXX
+ */
+ global _curpcb /* move to C code */
+_curpcb: word _u0 /* curpcb = &u0 */
+
+/*
+ * Trampoline code. Gets copied to the top of
+ * user stack in exec.
+ */
+ global _sigcode
+_sigcode:
+ /* r31 points to sigframe */
+ ld r2, r31, 0 /* signo */
+ ld r3, r31, 4 /* code */
+ ld r4, r31, 8 /* sigcontext* */
+ or r5, r0, 0 /* addr = 0 for now */
+ ld r6, r31, 12 /* handler */
+ jsr.n r6
+ addu r31, r31, 40
+ subu r31, r31, 40
+ ld r2, r31, 8 /* sigcontext* */
+ or r9, r0, SYS_sigreturn
+ tb0 0, r0, 128 /* syscall trap, calling sigreturn */
+ or r0, r0, 0
+ or r0, r0, 0
+ /* sigreturn will not return unless it fails */
+ or r9, r0, SYS_exit
+ tb0 0, r0, 128 /* syscall trap, exit */
+ or r0, r0, 0
+ or r0, r0, 0
+ global _esigcode
+_esigcode:
+
+#if 0
+/*
+ * thread_bootstrap:
+ *
+ * Bootstrap a new thread using the thread state that has been
+ * placed on the stack. Our fp has been set up for us, we only need
+ * to fix up a few things in the saved frame, then get into
+ * usermode.
+ */
+ENTRY(thread_bootstrap)
+ /*
+ * Here r31 should point to the place on our stack which
+ * contains a pointer to our exception frame.
+ */
+#if DDB
+ ENTRY_ASM
+#endif
+ br return_from_exception_handler
+
+/*
+ * save_context
+ */
+ENTRY(save_context)
+ subu r31,r31,40 /* allocate stack for r1 and args */
+ st r1,r31,36 /* save return address */
+ bsr _spl /* get the current interrupt mask */
+ ld r1,r31,36 /* recover return address */
+ addu r31,r31,40 /* put stack pointer back */
+ ldcr r10,SR0 /* r10 <- current_thread() */
+ ld r10,r10,THREAD_PCB /* r10 <- pcb */
+#if (PCB_KERNEL!=0)
+ addu r10, r10, PCB_KERNEL /* point to kernel save region */
+#endif
+ st r1,r10,0 /* do setjmp */ /* save return address */
+ st r14,r10,4
+ st r15,r10,2*4
+ st r16,r10,3*4
+ st r17,r10,4*4
+ st r18,r10,5*4
+ st r19,r10,6*4
+ st r20,r10,7*4
+ st r21,r10,8*4
+ st r22,r10,9*4
+ st r23,r10,10*4
+ st r24,r10,11*4
+ st r25,r10,12*4
+ /* In principle, registers 26-29 are never manipulated in the
+ kernel. Maybe we can skip saving them? */
+ st r26,r10,13*4
+ st r27,r10,14*4
+ st r28,r10,15*4
+ st r29,r10,16*4
+ st r30,r10,17*4 /* save frame pointer */
+ st r31,r10,18*4 /* save stack pointer */
+ st r2,r10,19*4 /* save interrupt mask */
+ /* we need to switch to the interrupt stack here */
+ or.u r31, r0, hi16(_intstack)
+ or r31, r31, lo16(_intstack)
+ addu r31, r31, INTSTACK_SIZE /* end of stack */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+ jmp.n r1
+ or r2,r0,r0
+#endif /* 0 */
+
+/* ------------------------------------------------------------------------ */
+/*
+ * unsigned measure_pause(volatile int *flag)
+ *
+ * Count cycles executed until *flag becomes nonzero.
+ * Return the number of cycles counted.
+ */
+ENTRY(measure_pause)
+ /* R2 is pointer to flag */
+ def GRANULAIRTY, 10000
+
+ or r3, r0, 1 /* r3 is my counter, this is the first */
+
+ measure_pause_outer_loop:
+ or r4, r0, GRANULAIRTY
+
+ measure_pause_inner_loop:
+ /*
+ * Execute a tight loop of a known number of cycles.
+ * This assumes, of course, that the instruction cache is on.
+ * This loop takes two cycles per iteration.
+ */
+ bcnd.n ne0, r4, measure_pause_inner_loop
+ subu r4, r4, 1
+
+
+ /*
+ * Now add the number of cycles done above (plus the overhead
+ * of the outer loop) to the total count.
+ * Also, check the *flag and exit the outer loop if it's non-zero.
+ *
+ * The overhead is really unknown because it's not known how
+	 * the memory system will treat the access to *flag, so we just
+ * take a guess.
+ */
+ ld r4, r2, r0 /* get the flag */
+ addu r3, r3, (GRANULAIRTY * 2 + 10) /* account for the cost */
+ bcnd eq0, r4, measure_pause_outer_loop /* continue or exit the loop*/
+
+ jmp.n r1
+ or r2, r3, r0 /* pass count back */
+
+/*
+ * void delay_in_microseconds(int count)
+ *
+ * The processor loops (busy waits) for the given number of microseconds;
+ * thus, delay_in_microseconds(1000000) will delay for one second.
+ *
+ * REGISTER USAGE:
+ * IN r1 - return address
+ * IN r2 - (signed int) number of microseconds
+ * r3 - (float) number of microseconds
+ * r4/5 - (double) number of cycles per microsecond
+ * r6 - (float) number of cycles to delay
+ * r7 - (signed) number of cycles to delay
+ */
+ENTRY(delay_in_microseconds)
+ENTRY(delay)
+ flt.ss r3, r2 /* convert microseconds from signed int to float */
+ or.u r4, r0, hi16(_cycles_per_microsecond)
+ ld.d r4, r4, lo16(_cycles_per_microsecond)
+ fmul.ssd r6, r3, r4 /* convert microseconds to cycles */
+ int.ss r7, r6 /* convert cycles from float to signed int */
+ subu r7, r7, 25 /* subtract for overhead of above instruction */
+
+ /* now loop for the given number of cycles */
+ pause_loop:
+ bcnd.n gt0, r7, pause_loop
+ subu r7, r7, 2 /* two cycles per iteration */
+
+ jmp r1 /* return */
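+/*
+ * Illustration only -- the calculation above in C.  Each loop iteration
+ * costs two cycles, and the 25-cycle subtraction is a rough allowance for
+ * the conversion overhead.  "usec" is a made-up name for this comment.
+ *
+ *	int cycles = (int)(usec * cycles_per_microsecond) - 25;
+ *	while (cycles > 0)
+ *		cycles -= 2;
+ */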
+
+#if 0
+/*
+ * void switch_to_shutdown_context(thread_t thread,
+ * void (*routine)(processor_t),
+ * processor_t processor)
+ *
+ * saves the kernel context of the thread,
+ * switches to the interrupt stack,
+ * continues the thread (with thread_dispatch),
+ * then runs routine on the interrupt stack.
+ *
+ */
+
+ENTRY(switch_to_shutdown_context)
+/* call save_context to save the thread state */
+ subu r31, r31, 40
+ or r25, r3, r0 /* save arguments */
+ or r24, r4, r0
+ bsr.n _save_context
+ st r1, r31, 36
+ addu r31, r31, 40
+ ldcr r10, SR0 /* r10 <- current_thread() */
+ st r31, r10, THREAD_KERNEL_STACK /* save stack pointer */
+ st r0, r10, THREAD_SWAP_FUNC /* null continuation */
+ ldcr r11, SR1
+	mak	r11, r11, FLAG_CPU_FIELD_WIDTH<0>	/* r11 = cpu # */
+	or.u	r12, r0,  hi16(_interrupt_stack)
+	or	r12, r12, lo16(_interrupt_stack)
+	ld	r31, r12 [r11]
+ addu r31, r31, INTSTACK_SIZE /* end of stack */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+ /* save the thread; switched to the interrupt stack; now call thread
+ dispatch to get rid of this thread */
+ or r2, r10, r0
+ bsr.n _thread_dispatch
+ subu r31, r31, 40
+ /* call the continuation routine */
+ jsr.n r25
+ or r2, r24, r0
+ /* panic if here */
+ or.u r2, r0, hi16(1f)
+ bsr.n _panic
+ or r2, r2, lo16(1f)
+1:
+ string "switch_to_shutdown_context"
+#endif /* 0 */
diff --git a/sys/arch/mvme88k/m88k/locore2.c b/sys/arch/mvme88k/m88k/locore2.c
new file mode 100644
index 00000000000..e8e6a57e86f
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/locore2.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)locore2.c 8.4 (Berkeley) 12/10/93
+ *
+ * from: Header: locore2.c,v 1.8 92/11/26 03:05:01 mccanne Exp (LBL)
+ * $Id: locore2.c,v 1.1 1995/10/18 10:54:27 deraadt Exp $
+ */
+
+/*
+ * Primitives which are in locore.s on other machines,
+ * but which have no reason to be assembly-coded on SPARC.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/resourcevar.h>
+
+int whichqs;
+
+/*
+ * Put process p on the run queue indicated by its priority.
+ * Calls should be made at splstatclock(), and p->p_stat should be SRUN.
+ */
+void
+setrunqueue(p)
+ register struct proc *p;
+{
+ register struct prochd *q;
+ register struct proc *oldlast;
+ register int which = p->p_priority >> 2;
+
+ if (p->p_back != NULL)
+ panic("setrunqueue");
+ q = &qs[which];
+ whichqs |= 1 << which;
+ p->p_forw = (struct proc *)q;
+ p->p_back = oldlast = q->ph_rlink;
+ q->ph_rlink = p;
+ oldlast->p_forw = p;
+}
+
+/*
+ * Remove process p from its run queue, which should be the one
+ * indicated by its priority. Calls should be made at splstatclock().
+ */
+remrq(p)
+ register struct proc *p;
+{
+ register int which = p->p_priority >> 2;
+ register struct prochd *q;
+
+ if ((whichqs & (1 << which)) == 0)
+ panic("remrq");
+ p->p_forw->p_back = p->p_back;
+ p->p_back->p_forw = p->p_forw;
+ p->p_back = NULL;
+ q = &qs[which];
+ if (q->ph_link == (struct proc *)q)
+ whichqs &= ~(1 << which);
+}
diff --git a/sys/arch/mvme88k/m88k/locore_asm_routines.S b/sys/arch/mvme88k/m88k/locore_asm_routines.S
new file mode 100644
index 00000000000..73ac642a3d3
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/locore_asm_routines.S
@@ -0,0 +1,1668 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/* locore_asm_routines.c
+ *
+ **********************************************************************
+ * This file created by Omron Corporation, 1990.
+ *
+ * HISTORY
+ *
+ **************************************************************RCS*****/
+
+#ifndef ASSEMBLER
+# define ASSEMBLER
+#endif
+
+#include <machine/asm.h>
+#include <machine/locore.h>
+#include <machine/trap.h>
+#include <machine/board.h>
+#include <sys/errno.h>
+
+
+#undef ENTRY /* don't want anything to do with a G?PROF ENTRY() */
+#ifdef __STDC__
+# define ENTRY(name) align 4 NEWLINE _ ## name: global _ ## name
+#else
+# define ENTRY(name) align 4 NEWLINE _/**/name: global _/**/name
+#endif
+
+
+/*****************************************************************************
+ * DO_LOAD_WORD
+ *
+ * unsigned int do_load_word(address, supervisor_mode)
+ * vm_offset_t address; \\ in r2
+ * boolean_t supervisor_mode; \\ in r3
+ *
+ * Return the word at ADDRESS (from user space if SUPERVISOR_MODE is zero,
+ * supervisor space if non-zero).
+ *
+ */
+
+ENTRY(do_load_word) /* do_load_word(address, supervisor) */
+ bcnd ne0,r3,1f
+#if ERRATA__XXX_USR
+ NOP
+ ld.usr r2,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.usr r2,r2,r0
+#endif
+ br 2f
+1: ld r2,r2,r0
+2: jmp r1
+
+ENTRY(do_load_half) /* do_load_half(address, supervisor) */
+ bcnd ne0,r3,1f
+#if ERRATA__XXX_USR
+ NOP
+ ld.h.usr r2,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.h.usr r2,r2,r0
+#endif
+ br 2f
+1: ld.h r2,r2,r0
+2: jmp r1
+
+ENTRY(do_load_byte) /* do_load_byte(address, supervisor) */
+ bcnd ne0,r3,1f
+#if ERRATA__XXX_USR
+ NOP
+ ld.b.usr r2,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.b.usr r2,r2,r0
+#endif
+ br 2f
+1: ld.b r2,r2,r0
+2: jmp r1
+
+ENTRY(do_store_word) /* do_store_word(address, data, supervisor) */
+ bcnd ne0,r4,1f
+#if ERRATA__XXX_USR
+ NOP
+ st.usr r3,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ st.usr r3,r2,r0
+#endif
+ br 2f
+1: st r3,r2,r0
+2: jmp r1
+
+ENTRY(do_store_half) /* do_store_half(address, data, supervisor) */
+ bcnd ne0,r4,1f
+#if ERRATA__XXX_USR
+ NOP
+ st.h.usr r3,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ st.h.usr r3,r2,r0
+#endif
+ br 2f
+1: st.h r3,r2,r0
+2: jmp r1
+
+ENTRY(do_store_byte) /* do_store_byte(address, data, supervisor) */
+ bcnd ne0,r4,1f
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r3,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ st.b.usr r3,r2,r0
+#endif
+ br 2f
+1: st.b r3,r2,r0
+2: jmp r1
+
+ENTRY(do_xmem_word) /* do_xmem_word(address, data, supervisor) */
+ bcnd ne0,r4,1f
+#if ERRATA__XXX_USR
+ NOP
+ xmem.usr r3,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ xmem.usr r3,r2,r0
+#endif
+ br 2f
+1: xmem r3,r2,r0
+2: jmp r1
+
+ENTRY(do_xmem_byte) /* do_xmem_byte(address, data, supervisor) */
+ bcnd ne0,r4,1f
+#if ERRATA__XXX_USR
+ NOP
+ xmem.bu.usr r3,r2,r0
+ NOP
+ NOP
+ NOP
+#else
+ xmem.bu.usr r3,r2,r0
+#endif
+ br 2f
+1: xmem.bu r3,r2,r0
+2: jmp r1
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** void enable_interrupt(void)
+ **
+ ** Enables processor interrupts (for the executing cpu).
+ **/
+#undef enable_interrupt
+ENTRY(enable_interrupt)
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r2, PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+#if DDB
+/* a version of enable_interrupt for the debugger; should never
+   have breakpoints set in it.  Keep it consistent with
+   enable_interrupt above */
+ENTRY(db_enable_interrupt)
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r2, PSR
+ FLUSH_PIPELINE
+ jmp r1
+#endif /* DDB */
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** unsigned long disable_interrupt(void)
+ **
+ ** Disables processor interrupts (for the executing CPU) and returns
+ ** the *previous* PSR.
+ **
+ ** if ((oldPSR & 0x02) == 0)
+ ** interrupts_were_previously_on = 1;
+ **/
+#undef disable_interrupt
+ENTRY(disable_interrupt)
+ ldcr r2, PSR
+ set r3, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* set disable bit*/
+ stcr r3, PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+/* a version of disable_interrupt for the kernel debugger. Should never
+ have breakpoints set in it. Make sure it stays consistent with
+ disable_interrupt */
+
+#if DDB
+ENTRY(db_disable_interrupt)
+ ldcr r2, PSR
+ set r3, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* set disable bit*/
+ stcr r3, PSR
+ FLUSH_PIPELINE
+ jmp r1
+#endif /* DDB */
+
+/* version for the debugger */
+
+#if DDB
+
+ENTRY(db_are_interrupts_disabled)
+ ldcr r2, PSR /* get the processor status word */
+ set r3, r0, 1<PSR_INTERRUPT_DISABLE_BIT> /* set mask */
+ jmp.n r1 /* delayed return */
+ and r2, r2, r3 /* r2 = r3 & r2 */
+#endif /* DDB */
+
+LABEL(_FAULT_ERROR)
+ or r2,r0,1 /* bad copy */
+ jmp r1
+
+;LABEL(_ALLOW_FAULT_START)
+
+/*
+ * Fetch from user space
+ * r2 == address in user space
+ */
+
+ENTRY(fuword)
+ENTRY(fuiword)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ ld.usr r5, r0, r2
+ NOP
+ NOP
+ NOP
+#else
+ ld.usr r5, r0, r2
+#endif
+ or r2, r0, r5
+ br fusu_ret
+fusu_fault:
+ subu r2, r0, 1
+fusu_ret:
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ st r0, r6, PCB_ONFAULT ; pcb_onfault = 0
+
+ jmp r1
+
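+/*
+ * Illustration only -- the fault protocol shared by the fu*()/su*()
+ * routines above, sketched in C ("user_addr" is a made-up name):
+ *
+ *	curpcb->pcb_onfault = fusu_fault;
+ *	value = *(int *)user_addr;		(ld.usr; may fault)
+ *	curpcb->pcb_onfault = 0;
+ *	return value;				(-1 if fusu_fault was taken)
+ */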
+ENTRY(fusword)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ ld.h.usr r5, r0, r2
+ NOP
+ NOP
+ NOP
+#else
+ ld.h.usr r5, r0, r2
+#endif
+ or r2, r0, r5
+ br fusu_ret
+
+ENTRY(fubyte)
+ENTRY(fuibyte)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ ld.b.usr r5, r0, r2
+ NOP
+ NOP
+ NOP
+#else
+ ld.b.usr r5, r0, r2
+#endif
+ or r2, r0, r5
+ br fusu_ret
+
+ENTRY(fuswintr)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(_fubail)
+ or r5, r5, lo16(_fubail)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fubail
+#if ERRATA__XXX_USR
+ NOP
+ ld.h.usr r5, r2, r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.h.usr r5, r2, r0
+#endif
+ or r2, r0, r5
+ br fusu_ret
+
+ENTRY(fubail)
+ subu r2, r0, 1
+ br fusu_ret
+
+/*
+ * store to user space.
+ * r2 == address in user space
+ * r3 == byte/short/word
+ */
+
+ENTRY(suword)
+ENTRY(suiword)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ st.usr r3, r2, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.usr r3, r2, r0
+#endif
+ or r2, r0, r0 /* return success */
+ br fusu_ret
+
+ENTRY(susword)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ st.h.usr r3, r2, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.h.usr r3, r2, r0
+#endif
+ or r2, r0, r0 /* return success */
+ br fusu_ret
+
+ENTRY(subyte)
+ENTRY(suibyte)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = fusu_fault
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r3, r2, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.b.usr r3, r2, r0
+#endif
+ or r2, r0, r0 /* return success */
+ br fusu_ret
+
+ENTRY(suswintr)
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(_subail)
+ or r5, r5, lo16(_subail)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = subail
+#if ERRATA__XXX_USR
+ NOP
+ st.h.usr r3, r2, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.h.usr r3, r2, r0
+#endif
+ or r2, r0, r0 /* return success */
+ br fusu_ret
+
+ENTRY(subail)
+ subu r2, r0, 1
+ br fusu_ret
+
+#if 0
+/*
+ * copystr(fromaddr, toaddr, maxlength, &lencopied)
+ *
+ * Copy a null terminated string from one point to another in
+ * the kernel address space.
+ * NOTE: maxlength must be < 64K
+ */
+ENTRY(copystr)
+ movl sp@(4),a0 | a0 = fromaddr
+ movl sp@(8),a1 | a1 = toaddr
+ moveq #0,d0
+ movw sp@(14),d0 | d0 = maxlength
+ jlt Lcsflt1 | negative count, error
+ jeq Lcsdone | zero count, all done
+ subql #1,d0 | set up for dbeq
+Lcsloop:
+ movb a0@+,a1@+ | copy a byte
+ dbeq d0,Lcsloop | if !null and more, continue
+ jne Lcsflt2 | ran out of room, error
+ moveq #0,d0 | got a null, all done
+Lcsdone:
+ tstl sp@(16) | return length desired?
+ jeq Lcsret | no, just return
+ subl sp@(4),a0 | determine how much was copied
+ movl sp@(16),a1 | return location
+ movl a0,a1@ | stash it
+Lcsret:
+ rts
+Lcsflt1:
+ moveq #EFAULT,d0 | copy fault
+ jra Lcsdone
+Lcsflt2:
+ moveq #ENAMETOOLONG,d0 | ran out of space
+ jra Lcsdone
+
+#endif /* 0 */
+ jmp r1
+/*
+ * Copy specified amount of data from user space into the kernel
+ * copyin(from, to, len)
+ * r2 == from (user source address)
+ * r3 == to (kernel destination address)
+ * r4 == length
+ * (r1=return addr)
+ */
+
+#define SRC r2
+#define DEST r3
+#define LEN r4
+
+ENTRY(copyin)
+ /* set up fault handler */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(.Lciflt)
+ or r5, r5, lo16(.Lciflt)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = .Lciflt
+
+ ;bcnd ne0, LEN, 1f ; XXX optimize len = 0 case
+ ;or r2, r0, 0
+ ;br .Lcidone
+ ;1: ;bcnd lt0, LEN, .Lciflt ; EFAULT if len < 0
+
+ /* If it's a small length (less than 8), then do byte-by-byte */
+ cmp r9, LEN, 8
+ bb1 lt, r9, copyin_byte_only
+
+ /* If they're not aligned similarly, use byte only... */
+ xor r9, SRC, DEST
+ mask r8, r9, 0x3
+ bcnd ne0, r8, copyin_byte_only
+
+ /*
+ * At this point, we don't know if they're word aligned or not,
+ * but we know that what needs to be done to one to align
+ * it is what's needed for the other.
+ */
+ bb1 0, SRC, copyin_left_align_to_halfword
+copyin_left_aligned_to_halfword:
+ bb1 1, SRC, copyin_left_align_to_word
+copyin_left_aligned_to_word:
+ bb1 0, LEN, copyin_right_align_to_halfword
+copyin_right_aligned_to_halfword:
+ bb1 1, LEN, copyin_right_align_to_word
+copyin_right_aligned_to_word:
+
+ /* At this point, both SRC and DEST are aligned to a word */
+ /* boundary, and LEN is an even multiple of 4. */
+ bb1.n 2, LEN, copyin_right_align_to_doubleword
+ or r7, r0, 4
+
+copyin_right_aligned_to_doubleword:
+#if ERRATA__XXX_USR
+ NOP
+ ld.usr r5, SRC, r0
+ NOP
+ NOP
+ NOP
+ ld.usr r6, SRC, r7
+ NOP
+ NOP
+ NOP
+#else
+ ld.usr r5, SRC, r0
+ ld.usr r6, SRC, r7
+#endif
+ subu LEN, LEN, 8
+ st r5, DEST, r0
+ addu SRC, SRC, 8
+ st r6, DEST, r7
+ bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
+ addu DEST, DEST, 8
+ or r2, r0, r0 /* successful return */
+ br .Lcidone
+
+ /***************************************************/
+
+copyin_left_align_to_halfword:
+#if ERRATA__XXX_USR
+ NOP
+ ld.b.usr r5, SRC, r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.b.usr r5, SRC, r0
+#endif
+ subu LEN, LEN, 1
+ st.b r5, DEST, r0
+ addu SRC, SRC, 1
+ br.n copyin_left_aligned_to_halfword
+ addu DEST, DEST, 1
+
+copyin_left_align_to_word:
+#if ERRATA__XXX_USR
+ NOP
+ ld.h.usr r5, SRC, r0
+ NOP
+ NOP
+ NOP
+#else
+ ld.h.usr r5, SRC, r0
+#endif
+ subu LEN, LEN, 2
+ st.h r5, DEST, r0
+ addu SRC, SRC, 2
+ br.n copyin_left_aligned_to_word
+ addu DEST, DEST, 2
+
+copyin_right_align_to_halfword:
+ subu LEN, LEN, 1
+#if ERRATA__XXX_USR
+ NOP
+ ld.b.usr r5, SRC, LEN
+ NOP
+ NOP
+ NOP
+#else
+ ld.b.usr r5, SRC, LEN
+#endif
+ br.n copyin_right_aligned_to_halfword
+ st.b r5, DEST, LEN
+
+copyin_right_align_to_word:
+ subu LEN, LEN, 2
+#if ERRATA__XXX_USR
+ NOP
+ ld.h.usr r5, SRC, LEN
+ NOP
+ NOP
+ NOP
+#else
+ ld.h.usr r5, SRC, LEN
+#endif
+ br.n copyin_right_aligned_to_word
+ st.h r5, DEST, LEN
+
+copyin_right_align_to_doubleword:
+ subu LEN, LEN, 4
+#if ERRATA__XXX_USR
+ NOP
+ ld.usr r5, SRC, LEN
+ NOP
+ NOP
+ NOP
+#else
+ ld.usr r5, SRC, LEN
+#endif
+ bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
+ st r5, DEST, LEN
+ or r2, r0, r0 /* successful return */
+ br .Lcidone
+
+copyin_byte_only:
+ bcnd eq0, LEN, 2f
+ 1:
+ subu LEN, LEN, 1
+#if ERRATA__XXX_USR
+ NOP
+ ld.b.usr r5, SRC, LEN
+ NOP
+ NOP
+ NOP
+#else
+ ld.b.usr r5, SRC, LEN
+#endif
+ bcnd.n ne0, LEN, 1b
+ st.b r5, DEST, LEN
+ 2: or r2, r0, r0 /* successful return */
+ br .Lcidone
+.Lcidone:
+ or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT
+ jmp r1
+.Lciflt:
+ or r2, r0, EFAULT /* return fault */
+ br .Lcidone
+
+#undef SRC
+#undef DEST
+#undef LEN
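The comments above describe the strategy copyin uses (copyout below mirrors it on the store side): lengths under 8 bytes, or pointers whose low two bits differ, take the byte-only path; otherwise the left edge is peeled a byte and/or halfword at a time until both pointers are word aligned, the right edge is peeled until the remaining length is a multiple of 8, and the main loop then moves two words per iteration. A rough, hypothetical C rendering of that flow (it omits the .usr access mode and the pcb_onfault recovery, which only the assembly expresses):

#include <stddef.h>
#include <stdint.h>

static void
copy_strategy_sketch(const char *src, char *dst, size_t len)
{
	if (len < 8 || (((uintptr_t)src ^ (uintptr_t)dst) & 3)) {
		while (len--)			/* copyin_byte_only */
			*dst++ = *src++;
		return;
	}
	while ((uintptr_t)src & 3) {		/* left_align_to_halfword/word */
		*dst++ = *src++;
		len--;
	}
	while (len & 7) {			/* right_align_to_halfword/word/doubleword */
		len--;
		dst[len] = src[len];
	}
	while (len != 0) {			/* right_aligned_to_doubleword loop */
		*(uint32_t *)dst = *(const uint32_t *)src;
		*(uint32_t *)(dst + 4) = *(const uint32_t *)(src + 4);
		src += 8;
		dst += 8;
		len -= 8;
	}
}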
+/*######################################################################*/
+/*######################################################################*/
+
+/*
+ * Copy a null terminated string from the user space to the kernel
+ * address space.
+ *
+ * copyinstr(from, to, maxlen, &lencopied)
+ * r2 == from
+ * r3 == to
+ * r4 == maxlen
+ * r5 == len actually transferred
+ * r6 & r7 - used as temporaries
+ */
+#define SRC r2
+#define DEST r3
+#define CNT r4
+#define LEN r5
+
+ENTRY(copyinstr)
+ /* setup fault handler */
+ or.u r6, r0, hi16(_curpcb)
+ ld r7, r6, lo16(_curpcb)
+ or.u r6, r0, hi16(.Lcisflt)
+ or r6, r6, lo16(.Lcisflt)
+ st r6, r7, PCB_ONFAULT
+ bcnd lt0, CNT, .Lcisflt
+ bcnd eq0, CNT, .Lcisdone
+ or r6, r0, 0
+ 1:
+#if ERRATA__XXX_USR
+ NOP
+ ld.bu.usr r7, SRC, r6
+ NOP
+ NOP
+ NOP
+#else
+ ld.bu.usr r7, SRC, r6
+#endif
+ st.b r7, DEST, r6
+ bcnd eq0, r7, 2f ; all done
+ addu r6, r6, 1
+ cmp r7, r6, CNT
+ bb1 lt, r7, 1b
+ or r2, r0, ENAMETOOLONG ; overflow
+ br .Lcisdone
+ 2: ; all done
+ or r2, r0, 0
+ br .Lcisdone
+
+.Lcisdone:
+ bcnd eq0, LEN, 3f
+ st r6, r0, LEN
+ 3: or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
+.Lcisflt:
+ or r2, r0, EFAULT /* return fault */
+ br .Lcisdone
+
+#undef SRC
+#undef DEST
+#undef CNT
+#undef LEN
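copyinstr (and copyoutstr further down) implement the usual bounded string copy: copy bytes until a NUL has been stored or maxlen bytes have been moved, return 0 or ENAMETOOLONG accordingly, and, if the length pointer is non-zero, report how far the loop got. A hedged C sketch of the same loop (it mirrors the register-level code above, including the fact that the reported length does not count the terminating NUL; the ld.bu.usr access and fault handling are omitted):

#include <stddef.h>
#include <errno.h>

static int
copystr_sketch(const char *from, char *to, size_t maxlen, size_t *lencopied)
{
	size_t i = 0;
	int error = ENAMETOOLONG;

	while (i < maxlen) {
		to[i] = from[i];
		if (from[i] == '\0') {	/* terminator copied: success */
			error = 0;
			break;
		}
		i++;
	}
	if (lencopied != NULL)
		*lencopied = i;		/* index reached, as in the loop above */
	return error;
}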
+
+/*
+ * Copy specified amount of data from kernel to the user space
+ * copyout(from, to, len)
+ * r2 == from (kernel source address)
+ * r3 == to (user destination address)
+ * r4 == length
+ */
+
+#define SRC r2
+#define DEST r3
+#define LEN r4
+
+ENTRY(copyout)
+ /* setup fault handler */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(.Lcoflt)
+ or r5, r5, lo16(.Lcoflt)
+ st r5, r6, PCB_ONFAULT ; pcb_onfault = .Lcoflt
+ ;bcnd ne0, LEN, 1f ; XXX optimize len = 0 case
+ ;or r2, r0, 0
+ ;br .Lcodone
+ ;1: ;bcnd lt0, LEN, .Lcoflt ; EFAULT if len < 0
+ /* If it's a small length (less than 8), then do byte-by-byte */
+ cmp r9, LEN, 8
+ bb1 lt, r9, copyout_byte_only
+
+ /* If they're not aligned similarly, use byte only... */
+ xor r9, SRC, DEST
+ mask r8, r9, 0x3
+ bcnd ne0, r8, copyout_byte_only
+
+ /*
+ * At this point, we don't know if they're word aligned or not,
+ * but we know that what needs to be done to one to align
+ * it is what's needed for the other.
+ */
+ bb1 0, SRC, copyout_left_align_to_halfword
+copyout_left_aligned_to_halfword:
+ bb1 1, SRC, copyout_left_align_to_word
+copyout_left_aligned_to_word:
+ bb1 0, LEN, copyout_right_align_to_halfword
+copyout_right_aligned_to_halfword:
+ bb1 1, LEN, copyout_right_align_to_word
+copyout_right_aligned_to_word:
+
+ /*
+ * At this point, both SRC and DEST are aligned to a word
+ * boundary, and LEN is an even multiple of 4.
+ */
+ bb1.n 2, LEN, copyout_right_align_to_doubleword
+ or r7, r0, 4
+
+copyout_right_aligned_to_doubleword:
+ ld r5, SRC, r0
+ ld r6, SRC, r7
+ subu LEN, LEN, 8
+#if ERRATA__XXX_USR
+ NOP
+ st.usr r5, DEST, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.usr r5, DEST, r0
+#endif
+ addu SRC, SRC, 8
+#if ERRATA__XXX_USR
+ NOP
+ st.usr r6, DEST, r7
+ NOP
+ NOP
+ NOP
+#else
+ st.usr r6, DEST, r7
+#endif
+ bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
+ addu DEST, DEST, 8
+ or r2, r0, r0 /* successful return */
+ br .Lcodone
+
+ /***************************************************/
+copyout_left_align_to_halfword:
+ ld.b r5, SRC, r0
+ subu LEN, LEN, 1
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r5, DEST, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.b.usr r5, DEST, r0
+#endif
+ addu SRC, SRC, 1
+ br.n copyout_left_aligned_to_halfword
+ addu DEST, DEST, 1
+
+copyout_left_align_to_word:
+ ld.h r5, SRC, r0
+ subu LEN, LEN, 2
+#if ERRATA__XXX_USR
+ NOP
+ st.h.usr r5, DEST, r0
+ NOP
+ NOP
+ NOP
+#else
+ st.h.usr r5, DEST, r0
+#endif
+ addu SRC, SRC, 2
+ br.n copyout_left_aligned_to_word
+ addu DEST, DEST, 2
+
+copyout_right_align_to_halfword:
+ subu LEN, LEN, 1
+ ld.b r5, SRC, LEN
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r5, DEST, LEN
+ NOP
+ NOP
+ NOP
+ br copyout_right_aligned_to_halfword
+#else
+ br.n copyout_right_aligned_to_halfword
+ st.b.usr r5, DEST, LEN
+#endif
+
+copyout_right_align_to_word:
+ subu LEN, LEN, 2
+ ld.h r5, SRC, LEN
+#if ERRATA__XXX_USR
+ NOP
+ st.h.usr r5, DEST, LEN
+ NOP
+ NOP
+ NOP
+ br copyout_right_aligned_to_word
+#else
+ br.n copyout_right_aligned_to_word
+ st.h.usr r5, DEST, LEN
+#endif
+
+copyout_right_align_to_doubleword:
+ subu LEN, LEN, 4
+ ld r5, SRC, LEN
+#if ERRATA__XXX_USR
+ NOP
+ st.usr r5, DEST, LEN
+ NOP
+ NOP
+ NOP
+ bcnd ne0, LEN, copyout_right_aligned_to_doubleword
+#else
+ bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
+ st.usr r5, DEST, LEN
+#endif
+ or r2, r0, r0 /* successful return */
+ br .Lcodone
+
+_LABEL(copyout_byte_only)
+ bcnd eq0, LEN, 2f
+ 1:
+ subu LEN, LEN, 1
+ ld.b r5, SRC, LEN
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r5, DEST, LEN
+ NOP
+ NOP
+ NOP
+ bcnd ne0, LEN, 1b
+# else
+ bcnd.n ne0, LEN, 1b
+ st.b.usr r5, DEST, LEN
+# endif
+
+ 2: or r2, r0, r0 /* successful return */
+ br .Lcodone
+
+.Lcodone:
+ or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
+.Lcoflt:
+ or r2, r0, EFAULT /* return fault */
+ br .Lcodone
+
+#undef SRC
+#undef DEST
+#undef LEN
+
+/*
+ * Copy a null terminated string from the kernel space to the user
+ * address space.
+ *
+ * copyoutstr(from, to, maxlen, &lencopied)
+ * r2 == from
+ * r3 == to
+ * r4 == maxlen that can be copied
+ * r5 == len actually copied
+ */
+
+#define SRC r2
+#define DEST r3
+#define CNT r4
+#define LEN r5
+
+ENTRY(copyoutstr)
+ /* setup fault handler */
+ or.u r6, r0, hi16(_curpcb)
+ ld r7, r6, lo16(_curpcb)
+ or.u r6, r0, hi16(.Lcosflt)
+ or r6, r6, lo16(.Lcosflt)
+ st r6, r7, PCB_ONFAULT
+ bcnd lt0, CNT, .Lcosflt
+ bcnd eq0, CNT, .Lcosdone
+ or r6, r0, 0
+ 1:
+ ld.bu r7, SRC, r6
+#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r7, DEST, r6
+ NOP
+ NOP
+ NOP
+#else
+ st.b.usr r7, DEST, r6
+#endif
+ bcnd eq0, r7, 2f ; all done
+ addu r6, r6, 1
+ cmp r7, r6, CNT
+ bb1 lt, r7, 1b
+ or r2, r0, ENAMETOOLONG ; overflow
+ br .Lcosdone
+ 2: ; all done
+ or r2, r0, 0
+ br .Lcosdone
+
+.Lcosflt:
+ or r2, r0, EFAULT /* return fault */
+ br .Lcosdone
+
+.Lcosdone:
+ bcnd eq0, LEN, 3f
+ st r6, r0, LEN
+ 3: or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
+
+#undef SRC
+#undef DEST
+#undef CNT
+#undef LEN
+
+/*######################################################################*/
+;LABEL(_ALLOW_FAULT_END)
+;word 0 /* to separate from routine below */
+/*######################################################################*/
+
+/*
+ * Gcc 2 generates calls to memcpy for bcopies of unknown size. memcpy
+ * can simply be implemented as ovbcopy, but the src and dst arguments
+ * (r2 and r3) need to be swapped.
+ */
+/*
+ * void memcpy(dest, source, count)
+ *
+ */
+ENTRY(memcpy)
+ or r5, r0, r2 /* dst -> tmp */
+ or r2, r0, r3 /* src -> 1st arg */
+ br.n _ovbcopy /* call ovbcopy */
+ or r3, r0, r5 /* dst -> 2nd arg */
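The four instructions above just swap the first two arguments and tail-branch to ovbcopy, since memcpy takes (dest, src, len) while bcopy/ovbcopy take (src, dest, len); note that, unlike ANSI memcpy, the stub makes no attempt to return the destination pointer. In C the relationship is simply the following (sketch only, using the userland bcopy prototype):

#include <strings.h>	/* bcopy() */

static void
memcpy_as_bcopy(void *dst, const void *src, unsigned long len)
{
	bcopy(src, dst, len);	/* same copy, first two arguments swapped */
}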
+
+
+/*
+ * void bcopy(source, destination, count)
+ *
+ * copy count bytes of data from source to destination
+ * Don Harper (don@omron.co.jp), Omron Corporation.
+ *
+ */
+
+ENTRY(bcopy)
+ENTRY(ovbcopy)
+ bcnd le0,r4,bcopy_out /* nothing to do if count <= 0 */
+/*
+ * check position of source and destination data
+ */
+ cmp r9,r2,r3 /* compare source address to destination */
+ bb1 eq,r9,bcopy_out /* nothing to do if addresses are equal */
+ bb1 lo,r9,bcopy_reverse /* copy in reverse if src < destination */
+/*
+ * source address is greater than destination address, copy forward
+ */
+ cmp r9,r4,16 /* see if we have at least 16 bytes */
+ bb1 lt,r9,f_byte_copy /* copy bytes for small data length */
+/*
+ * determine copy strategy based on alignment of source and destination
+ */
+ mask r6,r2,3 /* get 2 low order bits of source address */
+ mask r7,r3,3 /* get 2 low order bits of destination addr */
+ mak r6,r6,0<4> /* convert source bits to table offset */
+ mak r7,r7,0<2> /* convert destination bits to table offset */
+ or.u r12,r0,hi16(f_strat) /* forward strategy table address (high) */
+ or r12,r12,lo16(f_strat) /* forward strategy table address (low) */
+ addu r6,r6,r7 /* compute final table offset for strategy */
+ ld r12,r12,r6 /* load the strategy routine */
+ jmp r12 /* branch to strategy routine */
+
+
+/*
+ * Copy three bytes from src to destination then copy words
+ */
+_LABEL(f_3byte_word_copy)
+ ld.bu r6,r2,0 /* load byte from source */
+ ld.bu r7,r2,1 /* load byte from source */
+ ld.bu r8,r2,2 /* load byte from source */
+ st.b r6,r3,0 /* store byte to destination */
+ st.b r7,r3,1 /* store byte to destination */
+ st.b r8,r3,2 /* store byte to destination */
+ addu r2,r2,3 /* increment source pointer */
+ addu r3,r3,3 /* increment destination pointer */
+ br.n f_word_copy /* copy full words */
+ subu r4,r4,3 /* decrement length */
+
+/*
+ * Copy 1 halfword from src to destination then copy words
+ */
+_LABEL(f_1half_word_copy)
+ ld.hu r6,r2,0 /* load half-word from source */
+ st.h r6,r3,0 /* store half-word to destination */
+ addu r2,r2,2 /* increment source pointer */
+ addu r3,r3,2 /* increment destination pointer */
+ br.n f_word_copy /* copy full words */
+ subu r4,r4,2 /* decrement remaining length */
+
+/*
+ * Copy 1 byte from src to destination then copy words
+ */
+_LABEL(f_1byte_word_copy)
+ ld.bu r6,r2,0 /* load 1 byte from source */
+ st.b r6,r3,0 /* store 1 byte to destination */
+ addu r2,r2,1 /* increment source pointer */
+ addu r3,r3,1 /* increment destination pointer */
+ subu r4,r4,1 /* decrement remaining length */
+ /* fall through to word copy */
+/*
+ * Copy as many full words as possible, 4 words per loop
+ */
+_LABEL(f_word_copy)
+ cmp r10,r4,16 /* see if we have 16 bytes remaining */
+ bb1 lo,r10,f_byte_copy /* not enough left, copy bytes */
+ ld r6,r2,0 /* load first word */
+ ld r7,r2,4 /* load second word */
+ ld r8,r2,8 /* load third word */
+ ld r9,r2,12 /* load fourth word */
+ st r6,r3,0 /* store first word */
+ st r7,r3,4 /* store second word */
+ st r8,r3,8 /* store third word */
+ st r9,r3,12 /* store fourth word */
+ addu r2,r2,16 /* increment source pointer */
+ addu r3,r3,16 /* increment destination pointer */
+ br.n f_word_copy /* branch to copy another block */
+ subu r4,r4,16 /* decrement remaining length */
+
+_LABEL(f_1byte_half_copy)
+ ld.bu r6,r2,0 /* load 1 byte from source */
+ st.b r6,r3,0 /* store 1 byte to destination */
+ addu r2,r2,1 /* increment source pointer */
+ addu r3,r3,1 /* increment destination pointer */
+ subu r4,r4,1 /* decrement remaining length */
+ /* fall through to half copy */
+
+_LABEL(f_half_copy)
+ cmp r10,r4,16 /* see if we have 16 bytes remaining */
+ bb1 lo,r10,f_byte_copy /* not enough left, copy bytes */
+ ld.hu r6,r2,0 /* load first half-word */
+ ld.hu r7,r2,2 /* load second half-word */
+ ld.hu r8,r2,4 /* load third half-word */
+ ld.hu r9,r2,6 /* load fourth half-word */
+ ld.hu r10,r2,8 /* load fifth half-word */
+ ld.hu r11,r2,10 /* load sixth half-word */
+ ld.hu r12,r2,12 /* load seventh half-word */
+ ld.hu r13,r2,14 /* load eighth half-word */
+ st.h r6,r3,0 /* store first half-word */
+ st.h r7,r3,2 /* store second half-word */
+ st.h r8,r3,4 /* store third half-word */
+ st.h r9,r3,6 /* store fourth half-word */
+ st.h r10,r3,8 /* store fifth half-word */
+ st.h r11,r3,10 /* store sixth half-word */
+ st.h r12,r3,12 /* store seventh half-word */
+ st.h r13,r3,14 /* store eighth half-word */
+ addu r2,r2,16 /* increment source pointer */
+ addu r3,r3,16 /* increment destination pointer */
+ br.n f_half_copy /* branch to copy another block */
+ subu r4,r4,16 /* decrement remaining length */
+
+_LABEL(f_byte_copy)
+ bcnd eq0,r4,bcopy_out /* branch if nothing left to copy */
+ ld.bu r6,r2,0 /* load byte from source */
+ st.b r6,r3,0 /* store byte in destination */
+ addu r2,r2,1 /* increment source pointer */
+ addu r3,r3,1 /* increment destination pointer */
+ br.n f_byte_copy /* branch for next byte */
+ subu r4,r4,1 /* decrement remaining length */
+
+/*
+ * source address is less than destination address, copy in reverse
+ */
+_LABEL(bcopy_reverse)
+/*
+ * start copy pointers at end of data
+ */
+ addu r2,r2,r4 /* start source at end of data */
+ addu r3,r3,r4 /* start destination at end of data */
+/*
+ * check for short data
+ */
+ cmp r9,r4,16 /* see if we have at least 16 bytes */
+ bb1 lt,r9,r_byte_copy /* copy bytes for small data length */
+/*
+ * determine copy strategy based on alignment of source and destination
+ */
+ mask r6,r2,3 /* get 2 low order bits of source address */
+ mask r7,r3,3 /* get 2 low order bits of destination addr */
+ mak r6,r6,0<4> /* convert source bits to table offset */
+ mak r7,r7,0<2> /* convert destination bits to table offset */
+ or.u r12,r0,hi16(r_strat) /* reverse strategy table address (high) */
+ or r12,r12,lo16(r_strat) /* reverse strategy table address (low) */
+ addu r6,r6,r7 /* compute final table offset for strategy */
+ ld r12,r12,r6 /* load the strategy routine */
+ jmp r12 /* branch to strategy routine */
+
+/*
+ * Copy three bytes from src to destination then copy words
+ */
+_LABEL(r_3byte_word_copy)
+ subu r2,r2,3 /* decrement source pointer */
+ subu r3,r3,3 /* decrement destination pointer */
+ ld.bu r6,r2,0 /* load byte from source */
+ ld.bu r7,r2,1 /* load byte from source */
+ ld.bu r8,r2,2 /* load byte from source */
+ st.b r6,r3,0 /* store byte to destination */
+ st.b r7,r3,1 /* store byte to destination */
+ st.b r8,r3,2 /* store byte to destination */
+ br.n r_word_copy /* copy full words */
+ subu r4,r4,3 /* decrement length */
+
+/*
+ * Copy 1 halfword from src to destination then copy words
+ */
+_LABEL(r_1half_word_copy)
+ subu r2,r2,2 /* decrement source pointer */
+ subu r3,r3,2 /* decrement destination pointer */
+ ld.hu r6,r2,0 /* load half-word from source */
+ st.h r6,r3,0 /* store half-word to destination */
+ br.n r_word_copy /* copy full words */
+ subu r4,r4,2 /* decrement remaining length */
+
+/*
+ * Copy 1 byte from src to destination then copy words
+ */
+_LABEL(r_1byte_word_copy)
+ subu r2,r2,1 /* decrement source pointer */
+ subu r3,r3,1 /* decrement destination pointer */
+ ld.bu r6,r2,0 /* load 1 byte from source */
+ st.b r6,r3,0 /* store 1 byte to destination */
+ subu r4,r4,1 /* decrement remaining length */
+ /* fall through to word copy */
+/*
+ * Copy as many full words as possible, 4 words per loop
+ */
+_LABEL(r_word_copy)
+ cmp r10,r4,16 /* see if we have 16 bytes remaining */
+ bb1 lo,r10,r_byte_copy /* not enough left, copy bytes */
+ subu r2,r2,16 /* decrement source pointer */
+ subu r3,r3,16 /* decrement destination pointer */
+ ld r6,r2,0 /* load first word */
+ ld r7,r2,4 /* load second word */
+ ld r8,r2,8 /* load third word */
+ ld r9,r2,12 /* load fourth word */
+ st r6,r3,0 /* store first word */
+ st r7,r3,4 /* store second word */
+ st r8,r3,8 /* store third word */
+ st r9,r3,12 /* store fourth word */
+ br.n r_word_copy /* branch to copy another block */
+ subu r4,r4,16 /* decrement remaining length */
+
+_LABEL(r_1byte_half_copy)
+ subu r2,r2,1 /* decrement source pointer */
+ subu r3,r3,1 /* decrement destination pointer */
+ ld.bu r6,r2,0 /* load 1 byte from source */
+ st.b r6,r3,0 /* store 1 byte to destination */
+ subu r4,r4,1 /* decrement remaining length */
+ /* fall through to half copy */
+
+_LABEL(r_half_copy)
+ cmp r10,r4,16 /* see if we have 16 bytes remaining */
+ bb1 lo,r10,r_byte_copy /* not enough left, copy bytes */
+ subu r2,r2,16 /* decrement source pointer */
+ subu r3,r3,16 /* decrement destination pointer */
+ ld.hu r6,r2,0 /* load first half-word */
+ ld.hu r7,r2,2 /* load second half-word */
+ ld.hu r8,r2,4 /* load third half-word */
+ ld.hu r9,r2,6 /* load fourth half-word */
+ ld.hu r10,r2,8 /* load fifth half-word */
+ ld.hu r11,r2,10 /* load sixth half-word */
+ ld.hu r12,r2,12 /* load seventh half-word */
+ ld.hu r13,r2,14 /* load eighth half-word */
+ st.h r6,r3,0 /* store first half-word */
+ st.h r7,r3,2 /* store second half-word */
+ st.h r8,r3,4 /* store third half-word */
+ st.h r9,r3,6 /* store fourth half-word */
+ st.h r10,r3,8 /* store fifth half-word */
+ st.h r11,r3,10 /* store sixth half-word */
+ st.h r12,r3,12 /* store seventh half-word */
+ st.h r13,r3,14 /* store eighth half-word */
+ br.n r_half_copy /* branch to copy another block */
+ subu r4,r4,16 /* decrement remaining length */
+
+_LABEL(r_byte_copy)
+ bcnd eq0,r4,bcopy_out /* branch if nothing left to copy */
+ subu r2,r2,1 /* decrement source pointer */
+ subu r3,r3,1 /* decrement destination pointer */
+ ld.bu r6,r2,0 /* load byte from source */
+ st.b r6,r3,0 /* store byte in destination */
+ br.n r_byte_copy /* branch for next byte */
+ subu r4,r4,1 /* decrement remaining length */
+
+_LABEL(bcopy_out)
+ jmp r1 /* all done, return to caller */
+
+ data
+ align 4
+_LABEL(f_strat)
+ word f_word_copy
+ word f_byte_copy
+ word f_half_copy
+ word f_byte_copy
+ word f_byte_copy
+ word f_3byte_word_copy
+ word f_byte_copy
+ word f_1byte_half_copy
+ word f_half_copy
+ word f_byte_copy
+ word f_1half_word_copy
+ word f_byte_copy
+ word f_byte_copy
+ word f_1byte_half_copy
+ word f_byte_copy
+ word f_1byte_word_copy
+
+_LABEL(r_strat)
+ word r_word_copy
+ word r_byte_copy
+ word r_half_copy
+ word r_byte_copy
+ word r_byte_copy
+ word r_1byte_word_copy
+ word r_byte_copy
+ word r_1byte_half_copy
+ word r_half_copy
+ word r_byte_copy
+ word r_1half_word_copy
+ word r_byte_copy
+ word r_byte_copy
+ word r_1byte_half_copy
+ word r_byte_copy
+ word r_3byte_word_copy
+
+ text
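The f_strat and r_strat tables above are indexed by the two low-order bits of the source and destination addresses: the source bits are scaled by 16 and the destination bits by 4, and since each table entry is a 4-byte pointer this amounts to entry (src & 3) * 4 + (dst & 3). Reading the forward table with that in mind: both addresses congruent to 1 (mod 4) select f_3byte_word_copy, both congruent to 2 select f_1half_word_copy, and any pair of addresses that differ in bit 0 can only use f_byte_copy. A small hypothetical helper showing just the index computation:

#include <stdint.h>

static unsigned
bcopy_strategy_index(uintptr_t src, uintptr_t dst)
{
	/*
	 * mak r6,r6,0<4> scales the source bits by 16 and mak r7,r7,0<2>
	 * scales the destination bits by 4; over 4-byte table entries that
	 * is array index (src & 3) * 4 + (dst & 3).
	 */
	return (unsigned)((src & 3) * 4 + (dst & 3));
}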
+
+/*######################################################################*/
+/*######################################################################*/
+
+/*
+ * April 1990, Omron Corporation
+ * jfriedl@nff.ncl.omron.co.jp
+ *
+ * void bzero(destination, length)
+ *
+ * Clear (set to zero) LENGTH bytes of memory starting at DESTINATION.
+ * Note that there is no return value.
+ *
+ * This is fast. Really fast. Especially for long lengths.
+ */
+#define R_dest r2
+#define R_len r3
+
+#define R_bytes r4
+#define R_mark_address r5
+#define R_addr r6 /* R_addr && R_temp SHARE */
+#define R_temp r6 /* R_addr && R_temp SHARE */
+
+
+ENTRY(blkclr)
+ENTRY(bzero)
+ /*
+ * If the destination is not word aligned, we'll word align
+ * it first to make things easier.
+ *
+ * We'll check to see first if bit #0 is set and then bit #1
+ * (of the destination address). If either are set, it's
+ * not word aligned.
+ */
+ bb1 0, R_dest, not_initially_word_aligned
+ bb1 1, R_dest, not_initially_word_aligned
+
+ now_word_aligned:
+ /*
+ * before we get into the main loop, grab the
+ * address of the label "mark" below.
+ */
+ or.u R_mark_address, r0, hi16(mark)
+ or R_mark_address, R_mark_address, lo16(mark)
+
+ top_of_main_loop:
+# define MAX_AT_ONE_TIME 128
+ /*
+ * Now we find out how many words we can zero-fill in a row.
+ * We do this by doing something like:
+ *
+ * bytes &= 0xfffffffc;
+ * if (bytes > MAX_AT_ONE_TIME)
+ * bytes = MAX_AT_ONE_TIME;
+ */
+
+ /*
+ * Clear lower two bits of length to give us the number of bytes
+ * ALIGNED TO THE WORD LENGTH remaining to move.
+ */
+ clr R_bytes, R_len, 2<0>
+
+ /* if we're done clearing WORDS, jump out */
+ bcnd eq0, R_bytes, done_doing_words
+
+ /* if the number of bytes > MAX_AT_ONE_TIME, do only the max */
+ cmp R_temp, R_bytes, MAX_AT_ONE_TIME
+ bb1 lt, R_temp, 1f
+
+ /*
+ * Since we're doing the max, we know exactly where we're
+ * jumping (the first one in the list!), so we can jump
+ * right there. However, we've still got to adjust
+ * the length, so we'll jump to where we adjust the length
+ * which just happens to fall through to the first store zero
+ * in the list.
+ *
+ * Note, however, that we're jumping to an instruction that
+ * would be in the delay slot for the jump in front of it,
+ * so if you change things here, WATCH OUT.
+ */
+ br.n do_max
+ or R_bytes, r0, MAX_AT_ONE_TIME
+
+ 1:
+
+ /*
+ * Now we have the number of bytes to zero during this iteration,
+ * (which, as it happens, is the last iteration if we're here).
+ * We'll calculate the proper place to jump and then jump there,
+ * after adjusting the length. NOTE that there is a label between
+ * the "jmp.n" and the "subu" below... the "subu" is NOT always
+ * executed in the delay slot of the "jmp.n".
+ */
+ subu R_addr, R_mark_address, R_bytes
+
+ /* and go there (after adjusting the length via ".n") */
+ jmp.n R_addr
+do_max: subu R_len, R_len, R_bytes /* NOTE: this is in the delay slot! */
+
+ st r0, R_dest, 0x7c /* 128 */
+ st r0, R_dest, 0x78 /* 124 */
+ st r0, R_dest, 0x74 /* 120 */
+ st r0, R_dest, 0x70 /* 116 */
+ st r0, R_dest, 0x6c /* 112 */
+ st r0, R_dest, 0x68 /* 108 */
+ st r0, R_dest, 0x64 /* 104 */
+ st r0, R_dest, 0x60 /* 100 */
+ st r0, R_dest, 0x5c /* 96 */
+ st r0, R_dest, 0x58 /* 92 */
+ st r0, R_dest, 0x54 /* 88 */
+ st r0, R_dest, 0x50 /* 84 */
+ st r0, R_dest, 0x4c /* 80 */
+ st r0, R_dest, 0x48 /* 76 */
+ st r0, R_dest, 0x44 /* 72 */
+ st r0, R_dest, 0x40 /* 68 */
+ st r0, R_dest, 0x3c /* 64 */
+ st r0, R_dest, 0x38 /* 60 */
+ st r0, R_dest, 0x34 /* 56 */
+ st r0, R_dest, 0x30 /* 52 */
+ st r0, R_dest, 0x2c /* 48 */
+ st r0, R_dest, 0x28 /* 44 */
+ st r0, R_dest, 0x24 /* 40 */
+ st r0, R_dest, 0x20 /* 36 */
+ st r0, R_dest, 0x1c /* 32 */
+ st r0, R_dest, 0x18 /* 28 */
+ st r0, R_dest, 0x14 /* 24 */
+ st r0, R_dest, 0x10 /* 20 */
+ st r0, R_dest, 0x0c /* 16 */
+ st r0, R_dest, 0x08 /* 12 */
+ st r0, R_dest, 0x04 /* 8 */
+ st r0, R_dest, 0x00 /* 4 */
+
+ mark:
+ br.n top_of_main_loop
+ addu R_dest, R_dest, R_bytes /* bump up the dest address */
+
+
+
+ done_doing_words:
+ bcnd ne0, R_len, finish_up_last_bytes
+ jmp r1 /* RETURN */
+
+ finish_up_last_bytes:
+ subu R_len, R_len, 1
+ bcnd.n ne0, R_len, finish_up_last_bytes
+ st.b r0, R_dest, R_len
+
+ leave:
+ jmp r1 /* RETURN */
+
+ not_initially_word_aligned:
+ /*
+ * Bzero to word-align the address (at least if the length allows it).
+ */
+ bcnd eq0, R_len, leave
+ st.b r0, R_dest, 0
+ addu R_dest, R_dest, 1
+ mask R_temp, R_dest, 0x3
+ bcnd.n eq0, R_temp, now_word_aligned
+ subu R_len, R_len, 1
+ br not_initially_word_aligned
+
+#undef R_dest
+#undef R_len
+#undef R_bytes
+#undef R_mark_address
+#undef R_addr
+#undef R_temp
+#undef MAX_AT_ONE_TIME
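The core trick in bzero above is the computed jump into the run of 32 word stores: the word-aligned byte count is clamped to MAX_AT_ONE_TIME (128) and the code jumps R_bytes bytes back from the label mark, so exactly R_bytes/4 of the st instructions execute on each pass. The same structure in plain (and much slower) C, as a hypothetical sketch:

#include <stddef.h>
#include <stdint.h>

static void
bzero_sketch(void *dest, size_t len)
{
	char *p = dest;
	size_t bytes, i;

	while (((uintptr_t)p & 3) != 0 && len != 0) {	/* not_initially_word_aligned */
		*p++ = 0;
		len--;
	}
	while (len >= 4) {				/* top_of_main_loop */
		bytes = len & ~(size_t)3;		/* clr R_bytes, R_len, 2<0> */
		if (bytes > 128)			/* MAX_AT_ONE_TIME */
			bytes = 128;
		for (i = 0; i < bytes; i += 4)		/* the unrolled stores */
			*(uint32_t *)(p + i) = 0;
		p += bytes;
		len -= bytes;
	}
	while (len-- != 0)				/* finish_up_last_bytes */
		*p++ = 0;
}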
+
+/**********************************************************************/
+/**********************************************************************/
+/**********************************************************************/
+
+/*
+ * non-local goto
+ */
+ global _setjmp
+_setjmp:
+ st r1,r2,0
+ st r14,r2,4
+ st r15,r2,2*4
+ st r16,r2,3*4
+ st r17,r2,4*4
+ st r18,r2,5*4
+ st r19,r2,6*4
+ st r20,r2,7*4
+ st r21,r2,8*4
+ st r22,r2,9*4
+ st r23,r2,10*4
+ st r24,r2,11*4
+ st r25,r2,12*4
+ st r26,r2,13*4
+ st r27,r2,14*4
+ st r28,r2,15*4
+ st r29,r2,16*4
+ st r30,r2,17*4
+ st r31,r2,18*4
+ jmp.n r1
+ or r2,r0,r0
+
+ global _longjmp
+_longjmp:
+ ld r1,r2,0
+ ld r14,r2,4
+ ld r15,r2,2*4
+ ld r16,r2,3*4
+ ld r17,r2,4*4
+ ld r18,r2,5*4
+ ld r19,r2,6*4
+ ld r20,r2,7*4
+ ld r21,r2,8*4
+ ld r22,r2,9*4
+ ld r23,r2,10*4
+ ld r24,r2,11*4
+ ld r25,r2,12*4
+ ld r26,r2,13*4
+ ld r27,r2,14*4
+ ld r28,r2,15*4
+ ld r29,r2,16*4
+ ld r30,r2,17*4
+ ld r31,r2,18*4
+ jmp.n r1
+ or r2,r3,r0
+
+ENTRY(longjmp_int_enable)
+ ld r1,r2,0
+ ld r14,r2,4
+ ld r15,r2,2*4
+ ld r16,r2,3*4
+ ld r17,r2,4*4
+ ld r18,r2,5*4
+ ld r19,r2,6*4
+ ld r20,r2,7*4
+ ld r21,r2,8*4
+ ld r22,r2,9*4
+ ld r23,r2,10*4
+ ld r24,r2,11*4
+ ld r25,r2,12*4
+ ld r26,r2,13*4
+ ld r27,r2,14*4
+ ld r28,r2,15*4
+ ld r29,r2,16*4
+ ld r30,r2,17*4
+ ld r31,r2,18*4
+ or r2,r3,r0
+ ldcr r10,PSR
+ clr r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ jmp r1
+
+ENTRY(getsp)
+ or r2, r0, r31
+ jmp r1
+
+ENTRY(spln)
+ ldcr r10,PSR
+ or r11,r0,r10
+ set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ or.u r3,r0,hi16(INT_MASK_LEVEL)
+ or r4,r3,lo16(INT_MASK_LEVEL)
+ xmem.bu r2,r4,r0
+ stcr r11,PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+ENTRY(spl)
+ ldcr r10,PSR
+ or r11,r0,r10
+ set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ or.u r3,r0,hi16(INT_MASK_LEVEL)
+ ld.b r2,r3,lo16(INT_MASK_LEVEL)
+ stcr r11,PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+/*
+ * invalidate_pte(pte)
+ *
+ * This function atomically invalidates the specified pte, so that
+ * a write-back of the used and/or modified bits cannot slip into
+ * that pte. It also returns the pte found in the table.
+ */
+ENTRY(invalidate_pte)
+ or r3,r0,r0
+ xmem r3,r2,r0
+ tb1 0,r0,0
+ jmp.n r1
+ or r2,r3,r0
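invalidate_pte leans on the m88k xmem instruction, which atomically exchanges a register with a memory word: swapping zero into the PTE both invalidates it and returns its previous contents in one indivisible step, so a used/modified-bit write-back cannot land in between. A hedged C equivalent using a compiler builtin as a stand-in for xmem (the pt_entry_t name here is an assumption):

#include <stdint.h>

typedef uint32_t pt_entry_t;		/* assumed 32-bit PTE representation */

static pt_entry_t
invalidate_pte_sketch(volatile pt_entry_t *pte)
{
	/* Atomically swap 0 into the PTE and return the old value. */
	return __atomic_exchange_n(pte, (pt_entry_t)0, __ATOMIC_SEQ_CST);
}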
+
+#if DDB
+
+ENTRY(db_spln)
+ ldcr r10,PSR
+ or r11,r0,r10
+ set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ or.u r3,r0,hi16(INT_MASK_LEVEL)
+ or r4,r3,lo16(INT_MASK_LEVEL)
+ xmem.bu r2,r4,r0
+ stcr r11,PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+ENTRY(db_spl)
+ ldcr r10,PSR
+ or r11,r0,r10
+ set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ or.u r3,r0,hi16(INT_MASK_LEVEL)
+ ld.b r2,r3,lo16(INT_MASK_LEVEL)
+ stcr r11,PSR
+ FLUSH_PIPELINE
+ jmp r1
+
+ENTRY(db_flush_pipeline)
+ FLUSH_PIPELINE
+ jmp r1
+#endif /* DDB */
+
+ENTRY(read_processor_identification_register)
+ jmp.n r1
+ ldcr r2, PID
+
+#if 0
+/*
+ * call rom abort (called when non-maskable interrupt detected)
+ */
+ENTRY(call_rom_abort)
+ tcnd eq0, r0, EVN_ROM_ABORT /* trap to ROM */
+ jmp r1 /* and return */
+
+#endif /* 0 */
+;------------------------------------------------------------------------
+
+#ifdef JUNK
+/* JEFF_DEBUG stuff */
+ align 8
+raw_xpr_stack_top:
+ zero 0x40
+raw_xpr_inital_stack_frame:
+ zero 0x60
+raw_xpr_stack_bottom:
+
+user_raw_xpr: global user_raw_xpr
+ ldcr r5, cr17
+ ld r5, r5, THREAD_TASK
+ /* get a stack ... can use r9 no problem */
+ or.u r9, r0, hi16(raw_xpr_inital_stack_frame)
+ or r9, r9, lo16(raw_xpr_inital_stack_frame)
+ st r31, r9, 0x38
+ st r1, r9, 0x3c
+ or r31, r9, r0
+ bsr __raw_xpr
+ ld r1, r31, 0x3c
+ ld r31, r31, 0x38
+ rte
+;--------------------------------------------------------------
+_raw_xpr: global _raw_xpr
+ or.u r5, r0, hi16(_kernel_task)
+ ld r5, r5, lo16(_kernel_task)
+ br __raw_xpr
+#endif /* JUNK */
diff --git a/sys/arch/mvme88k/m88k/locore_c_routines.c b/sys/arch/mvme88k/m88k/locore_c_routines.c
new file mode 100644
index 00000000000..4f07bee8506
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/locore_c_routines.c
@@ -0,0 +1,391 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ *****************************************************************RCS**/
+/* This file created by Omron Corporation, 1990. */
+
+#include <machine/m88100.h> /* DMT_VALID */
+#include <assym.s> /* EF_NREGS, etc. */
+#include <machine/locore.h> /* END_OF_VECTOR_LIST, etc. */
+#ifdef DDB
+ #include <ddb/db_output.h> /* db_printf() */
+#endif /* DDB */
+
+
+#if defined(DDB) && defined(JEFF_DEBUG)
+# define DATA_DEBUG 1
+#endif
+
+
+#if DDB
+# define DEBUG_MSG db_printf
+#else
+# define DEBUG_MSG printf
+#endif /* DDB */
+
+/*
+ * data access emulation for M88100 exceptions
+ */
+#define DMT_BYTE 1
+#define DMT_HALF 2
+#define DMT_WORD 4
+
+static struct
+{
+ unsigned char offset;
+ unsigned char size;
+} dmt_en_info[16] =
+{
+ {0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
+ {1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
+};
+
+#if DATA_DEBUG
+ int data_access_emulation_debug = 0;
+ static char *bytes[] =
+ {
+ "____", "___x", "__x_", "__xx",
+ "_x__", "_x_x", "_xx_", "_xxx",
+ "x___", "x__x", "x_x_", "x_xx",
+ "xx__", "xx_x", "xxx_", "xxxx",
+ };
+ #define DAE_DEBUG(stuff) { \
+ if ((data_access_emulation_debug != 0) && ( \
+ data_access_emulation_debug == 0xffffffff)) { stuff ;} }
+#else
+ #define DAE_DEBUG(stuff)
+#endif
+
+void data_access_emulation(unsigned *eframe)
+{
+ register int x;
+ register struct dmt_reg *dmtx;
+ register unsigned dmax, dmdx;
+ register unsigned v, reg;
+
+ if (!(eframe[EF_DMT0] & DMT_VALID))
+ return;
+
+ for (x = 0; x < 3; x++)
+ {
+ dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
+
+ if (!dmtx->dmt_valid)
+ continue;
+
+ dmdx = eframe[EF_DMD0+x*3];
+ dmax = eframe[EF_DMA0+x*3];
+
+ DAE_DEBUG
+ (
+ if (dmtx->dmt_write)
+ DEBUG_MSG("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmdx, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ else
+ DEBUG_MSG("[DMT%d=%x: ld.%c r%d<-%x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ )
+
+ dmax += dmt_en_info[dmtx->dmt_en].offset;
+ reg = dmtx->dmt_dreg;
+
+ if ( ! dmtx->dmt_lockbar)
+ {
+ /* the fault is not during an XMEM */
+
+ if (x == 2 && dmtx->dmt_doub1)
+ {
+ /* pipeline 2 (earliest stage) for a double */
+
+ if (dmtx->dmt_write)
+ {
+ /* STORE DOUBLE WILL BE RE-INITIATED BY rte */
+ }
+ else
+ {
+ /* EMULATE ld.d INSTRUCTION */
+ v = do_load_word(dmax, dmtx->dmt_das);
+ if (reg != 0)
+ eframe[EF_R0 + reg] = v;
+ v = do_load_word(dmax ^ 4, dmtx->dmt_das);
+ if (reg != 31)
+ eframe[EF_R0 + reg + 1] = v;
+ }
+ }
+ else /* not pipeline #2 with a double */
+ {
+ if (dmtx->dmt_write) switch (dmt_en_info[dmtx->dmt_en].size)
+ {
+ case DMT_BYTE:
+ DAE_DEBUG(DEBUG_MSG("[byte %x -> [%x(%c)]\n",
+ dmdx & 0xff, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_byte(dmax, dmdx, dmtx->dmt_das);
+ break;
+ case DMT_HALF:
+ DAE_DEBUG(DEBUG_MSG("[half %x -> [%x(%c)]\n",
+ dmdx & 0xffff, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_half(dmax, dmdx, dmtx->dmt_das);
+ break;
+ case DMT_WORD:
+ DAE_DEBUG(DEBUG_MSG("[word %x -> [%x(%c)]\n",
+ dmdx, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_word(dmax, dmdx, dmtx->dmt_das);
+ break;
+ }
+ else /* else it's a read */
+ {
+ switch (dmt_en_info[dmtx->dmt_en].size)
+ {
+ case DMT_BYTE:
+ v = do_load_byte(dmax, dmtx->dmt_das);
+ if (!dmtx->dmt_signed)
+ v &= 0x000000ff;
+ break;
+ case DMT_HALF:
+ v = do_load_half(dmax, dmtx->dmt_das);
+ if (!dmtx->dmt_signed)
+ v &= 0x0000ffff;
+ break;
+ case DMT_WORD:
+ default: /* 'default' just to shut up lint */
+ v = do_load_word(dmax, dmtx->dmt_das);
+ break;
+ }
+ if (reg == 0) {
+ DAE_DEBUG(DEBUG_MSG("[no write to r0 done]\n"));
+ }
+ else
+ {
+ DAE_DEBUG(DEBUG_MSG("[r%d <- %x]\n",
+ reg, v));
+ eframe[EF_R0 + reg] = v;
+ }
+ }
+ }
+ }
+ else /* if lockbar is set... it's part of an XMEM */
+ {
+ /*
+ * According to Motorola's "General Information",
+ * the dmt_doub1 bit is never set in this case, as it should be.
+ * They call this "general information" - I call it a f*cking bug!
+ *
+ * Anyway, if lockbar is set (as it is if we're here) and if
+ * the write is not set, then it's the same as if doub1
+ * was set...
+ */
+ if ( ! dmtx->dmt_write)
+ {
+ if (x != 2)
+ {
+ /* RERUN xmem WITH DMD(x+1) */
+ x++;
+ dmdx = eframe[EF_DMD0 + x*3];
+ }
+ else
+ {
+ /* RERUN xmem WITH DMD2 */
+ }
+
+ if (dmt_en_info[dmtx->dmt_en].size == DMT_WORD)
+ v = do_xmem_word(dmax, dmdx, dmtx->dmt_das);
+ else
+ v = do_xmem_byte(dmax, dmdx, dmtx->dmt_das);
+ eframe[EF_R0 + reg] = v;
+ }
+ else
+ {
+ if (x == 0)
+ {
+ eframe[EF_R0 + reg] = dmdx;
+ eframe[EF_SFIP] = eframe[EF_SNIP];
+ eframe[EF_SNIP] = eframe[EF_SXIP];
+ eframe[EF_SXIP] = 0;
+ /* xmem RERUN ON rte */
+ eframe[EF_DMT0] = 0;
+ return;
+ }
+ }
+ }
+ }
+ eframe[EF_DMT0] = 0;
+}
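The dmt_en field consulted above is a four-bit byte-enable mask describing which byte lanes of the word at DMAx the faulting access touched; dmt_en_info turns it into a byte offset within the word plus an access size, and the effective address is then DMAx plus that offset. A small standalone check of that mapping (it simply restates the table from the top of this file):

#include <assert.h>

#define DMT_BYTE 1
#define DMT_HALF 2
#define DMT_WORD 4

static const struct { unsigned char offset, size; } en_info[16] = {
	{0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
	{1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
	{0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
	{0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
};

int
main(void)
{
	/* en == 0x3: the two low-order byte lanes, i.e. a halfword at offset 2 */
	assert(en_info[0x3].offset == 2 && en_info[0x3].size == DMT_HALF);
	/* en == 0x8: a single enabled lane mapping to byte offset 0 */
	assert(en_info[0x8].offset == 0 && en_info[0x8].size == DMT_BYTE);
	/* en == 0xf: the whole word */
	assert(en_info[0xf].offset == 0 && en_info[0xf].size == DMT_WORD);
	return 0;
}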
+
+/*
+ ***********************************************************************
+ ***********************************************************************
+ */
+#define SIGSYS_MAX 501
+#define SIGTRAP_MAX 511
+
+#define EMPTY_BR 0xC0000000U /* empty "br" instruction */
+#define NO_OP 0xf4005800U /* "or r0, r0, r0" */
+
+typedef struct
+{
+ unsigned word_one,
+ word_two;
+} m88k_exception_vector_area;
+
+#define BRANCH(FROM, TO) (EMPTY_BR | ((unsigned)(TO) - (unsigned)(FROM)) >> 2)
+
+#define SET_VECTOR(NUM, to, VALUE) { \
+ unsigned _NUM = (unsigned)(NUM); \
+ unsigned _VALUE = (unsigned)(VALUE); \
+ vector[_NUM].word_one = NO_OP; \
+ vector[_NUM].word_two = BRANCH(&vector[_NUM].word_two, _VALUE); \
+}
+
+
+/*
+ * vector_init(vector, vector_init_list)
+ *
+ * This routine sets up the m88k vector table for the running processor.
+ * It is called with very little stack and with interrupts disabled,
+ * so don't call any other functions!
+ */
+void vector_init(
+ m88k_exception_vector_area *vector,
+ unsigned *vector_init_list)
+{
+ register unsigned num;
+ register unsigned vec;
+ extern void sigsys(), sigtrap(), stepbpt(), userbpt();
+
+ for (num = 0; (vec = vector_init_list[num]) != END_OF_VECTOR_LIST; num++)
+ {
+ if (vec != PREDEFINED_BY_ROM)
+ SET_VECTOR(num, to, vec);
+ }
+
+ while (num < 496)
+ SET_VECTOR(num++, to, sigsys);
+ num++; /* skip 496, BUG ROM vector */
+#if 0
+ while (num <= SIGSYS_MAX)
+ SET_VECTOR(num++, to, sigsys);
+
+ while (num <= SIGTRAP_MAX)
+ SET_VECTOR(num++, to, sigtrap);
+
+ SET_VECTOR(504, to, stepbpt);
+ SET_VECTOR(511, to, userbpt);
+ vector[496].word_one = 496 * 4;
+ vector[497].word_two = 497 * 4;
+#endif
+}
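Each entry that SET_VECTOR writes is two words: a no-op followed by a br whose displacement field holds the signed word distance from the branch itself to the handler, which is what BRANCH computes with (TO - FROM) >> 2 before ORing it into the empty 0xC0000000 opcode. A tiny worked example with made-up addresses (sketch only):

#include <stdio.h>

int
main(void)
{
	unsigned from = 0x00001008U;	/* address of the entry's second word */
	unsigned to = 0x00012340U;	/* address of the handler */
	unsigned word = 0xC0000000U | ((to - from) >> 2);

	printf("br encoding: 0x%08x (%u words forward)\n",
	    word, (to - from) >> 2);
	return 0;
}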
+
+/* JEFF_DEBUG stuff */
+#include <machine/asm_macro.h>
+
+#ifdef JUNK
+#define MAX_XPR_COUNT 1000
+struct {
+ task_t task;
+ char *fmt;
+ unsigned arg1;
+ unsigned arg2;
+} raw_xpr_data[MAX_XPR_COUNT];
+unsigned volatile raw_xpr_lock = 0;
+unsigned raw_xpr_index = 0;
+
+void _raw_xpr(char *fmt, unsigned b, unsigned c, task_t t)
+{
+ unsigned myindex;
+ m88k_psr_type psr = disable_interrupts_return_psr();
+ simple_lock(&raw_xpr_lock);
+ if (raw_xpr_index < (MAX_XPR_COUNT - 1)) {
+ myindex = raw_xpr_index++;
+ } else {
+ myindex = 0;
+ raw_xpr_index = 1;
+ }
+ simple_unlock(&raw_xpr_lock);
+ set_psr(psr);
+
+ raw_xpr_data[myindex].task = t;
+ raw_xpr_data[myindex].fmt = fmt;
+ raw_xpr_data[myindex].arg1 = b;
+ raw_xpr_data[myindex].arg2 = c;
+}
+
+void raw_xpr_dump(int skipcount)
+{
+ int i, index = raw_xpr_index + 1;
+
+ raw_xpr_lock = 1; /* forcefully grab the lock */
+
+ if (index >= MAX_XPR_COUNT)
+ index = 0;
+ else if (raw_xpr_data[index].task == 0)
+ index = 0; /* hasn't wrapped yet, so start at the beginning */
+
+ for (i = 1; i < MAX_XPR_COUNT; i++) {
+ if (raw_xpr_data[index].task == 0 || raw_xpr_data[index].fmt == 0)
+ break; /* all done */
+ if (skipcount-- <= 0)
+ {
+ db_printf("%04d: ", i);
+ if (db_lookup_task(raw_xpr_data[index].task) < 0)
+ {
+ /* task no longer valid */
+ db_printf("<task %x, fmt %x, arg %x, arg %x>\n",
+ raw_xpr_data[index].task,
+ raw_xpr_data[index].fmt,
+ raw_xpr_data[index].arg1,
+ raw_xpr_data[index].arg2);
+ } else {
+ char buffer[120];
+ buffer[0] = '\0';
+ db_read_bytes(raw_xpr_data[index].fmt,
+ sizeof(buffer),
+ buffer,
+ raw_xpr_data[index].task);
+ buffer[sizeof(buffer)-2] = '\n';
+ buffer[sizeof(buffer)-1] = '\0';
+
+ db_printf(buffer,
+ raw_xpr_data[index].arg1,
+ raw_xpr_data[index].arg2);
+ }
+ }
+ if (++index >= MAX_XPR_COUNT)
+ index = 0;
+ }
+
+ raw_xpr_lock = 0;
+}
+#endif /* JUNK */
diff --git a/sys/arch/mvme88k/m88k/m1x7_init.c b/sys/arch/mvme88k/m88k/m1x7_init.c
new file mode 100644
index 00000000000..705221fc098
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/m1x7_init.c
@@ -0,0 +1,205 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ */
+
+/*
+ * Basic initialization for vme187.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/reboot.h>
+#include <sys/exec.h>
+#include <vm/pmap.h>
+#include <machine/vmparam.h>
+#include <machine/cpu.h>
+#include <machine/bug.h>
+
+#define INITIAL_MHZ_GUESS 25.0
+
+struct bugenv bugargs;
+struct kernel{
+ void *entry;
+ void *symtab;
+ void *esym;
+ int bflags;
+ int bdev;
+ char *kname;
+ void *smini;
+ void *emini;
+ void *end_load;
+}kflags;
+char *esym;
+
+int boothowto; /* read in kern/bootstrap */
+int machineid;
+
+#ifndef roundup
+#define roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
+#endif /* roundup */
+
+vm_size_t mem_size;
+vm_size_t rawmem_size;
+vm_offset_t first_addr = 0;
+vm_offset_t last_addr = 0;
+
+vm_offset_t avail_start, avail_next, avail_end;
+vm_offset_t virtual_avail, virtual_end;
+
+void *end_loaded;
+int bootdev;
+int no_symbols;
+vm_offset_t miniroot;
+
+struct proc *lastproc;
+pcb_t curpcb;
+
+void cmmu_init(void);
+
+double cycles_per_microsecond = INITIAL_MHZ_GUESS;
+
+extern struct user *proc0paddr;
+
+int bcd2int __P((unsigned int));
+
+/*
+ * Called from locore.S during boot,
+ * this is the first C code that's run.
+ */
+
+void
+m187_bootstrap(void)
+{
+ extern char version[];
+ extern char *edata, *end;
+ extern int cold;
+ extern int kernelstart;
+ extern vm_offset_t size_memory(void);
+ struct bugbrdid brdid;
+
+ cold = 1; /* we are still booting */
+
+ bugbrdid(&brdid);
+ machineid = brdid.brdno;
+
+ vm_set_page_size();
+
+#if 0
+ esym = kflags.esym;
+ boothowto = kflags.bflags;
+ bootdev = kflags.bdev;
+#endif /* 0 */
+
+#if 0
+ end_loaded = kflags.end_load;
+ if (esym != NULL) {
+ end = (char *)((int)(kflags.symtab));
+ } else {
+ first_addr = (vm_offset_t)&end;
+ }
+#endif
+
+ first_addr = m88k_round_page(first_addr);
+
+ if (!no_symbols)
+ boothowto |= RB_KDB;
+
+ printf("about to probe\n");
+#if 1
+ last_addr = size_memory();
+#else
+ last_addr = (vm_offset_t)0x01000000;
+ physmem = btoc(last_addr);
+#endif
+
+ printf("probing done\n");
+ cmmu_init();
+
+ avail_start = first_addr;
+ avail_end = last_addr;
+ printf("%s",version);
+ printf("M187 boot: memory from 0x%x to 0x%x\n", avail_start, avail_end);
+
+ /*
+ * Steal one page at the top of physical memory for msgbuf
+ */
+
+ avail_end -= PAGE_SIZE;
+
+ pmap_bootstrap((vm_offset_t)&kernelstart - GOOFYLDOFFSET /* loadpt */,
+ &avail_start, &avail_end, &virtual_avail,
+ &virtual_end);
+ printf("returned from pmap_bootstrap\n");
+
+ /*
+ * Must initialize p_addr before autoconfig or
+ * the fault handler will get a NULL reference.
+ */
+ proc0.p_addr = proc0paddr;
+ curproc = &proc0;
+ curpcb = &proc0paddr->u_pcb;
+
+ /* Initialize cached PTEs for u-area mapping. */
+ save_u_area(&proc0, proc0paddr);
+
+ /*
+ * Map proc0's u-area at the standard address (UADDR).
+ */
+ load_u_area(&proc0);
+
+ /* Initialize the "u-area" pages. */
+ bzero((caddr_t)UADDR, UPAGES*NBPG);
+ printf("returning from init\n");
+}
+
+#ifdef notneeded
+ipow(int base, int i)
+{
+ int cnt = 1;
+ while (i--) {
+ cnt *= base;
+ }
+ return cnt;
+}
+
+int
+bcd2int(unsigned int i)
+{
+ unsigned val = 0;
+ int cnt = 0;
+ while (i) {
+ val += (i&0xf) * ipow(10,cnt);
+ cnt++;
+ i >>= 4;
+ }
+ return val;
+}
+#endif /* notneeded */
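bcd2int above peels one packed-BCD digit at a time and weights it with ipow, so 0x1995 decodes as 5*1 + 9*10 + 9*100 + 1*1000 = 1995. A standalone restatement of the same arithmetic, purely as an illustration:

#include <assert.h>

static unsigned
bcd2int_sketch(unsigned i)
{
	unsigned val = 0, scale = 1;

	while (i != 0) {
		val += (i & 0xf) * scale;	/* low BCD digit */
		scale *= 10;
		i >>= 4;
	}
	return val;
}

int
main(void)
{
	assert(bcd2int_sketch(0x1995) == 1995);
	assert(bcd2int_sketch(0x0) == 0);
	return 0;
}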
diff --git a/sys/arch/mvme88k/m88k/m88100_fp.S b/sys/arch/mvme88k/m88k/m88100_fp.S
new file mode 100644
index 00000000000..c51a862ace8
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/m88100_fp.S
@@ -0,0 +1,2463 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * HISTORY
+ */
+
+/* Floating point trouble routines */
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#define __LUNA_SUB_H__
+
+#ifndef NDEBUG /* no debugging */
+#define NDEBUG
+#endif
+
+#ifdef NDEBUG
+# define _LABEL(NAME) NAME:
+#else
+# define _LABEL(NAME) NAME: global NAME
+#endif
+# define LABEL(NAME) NAME: global NAME
+
+#define psr cr1
+#define spsr cr2
+#define ssb cr3
+#define scip cr4
+#define snip cr5
+#define sfip cr6
+#define vbr cr7
+#define dmt0 cr8
+#define scratch1 cr18
+#define scratch2 cr20
+#define fpecr fcr0
+#define s1hi fcr1
+#define s1lo fcr2
+#define s2hi fcr3
+#define s2lo fcr4
+#define pcr fcr5
+#define manthi fcr6
+#define mantlo fcr7
+#define impcr fcr8
+#define fpsr fcr62
+#define fpcr fcr63
+#define valid 1
+#define exception 0
+#define exc_disable 0
+#define FP_disable 3
+#define dexc 27
+#define serial 29
+#define destsize 10
+#define inexact 0
+#define overflow 1
+#define underflow 2
+#define divzero 3
+#define oper 4
+#define sign 31
+#define s1size 9
+#define s2size 7
+#define dsize 5
+#define full 1
+#define fault 0
+#define FADDop 0x05
+#define FSUBop 0x06
+#define FCMPop 0x07
+#define FMULop 0x00
+#define FDIVop 0x0e
+#define FSQRTop 0x0f
+#define FLTop 0x04
+#define INTop 0x09
+#define NINTop 0x0a
+#define TRNCop 0x0b
+#define mode 31
+#define s1sign 9
+#define s2sign 8
+#define s1nan 7
+#define s2nan 6
+#define s1inf 5
+#define s2inf 4
+#define s1zero 3
+#define s2zero 2
+#define s1denorm 1
+#define s2denorm 0
+#define sigbit 19
+#define sigbits 22
+#define sigbitd 19
+#define nc 0
+#define cp 1
+#define eq 2
+#define ne 3
+#define gt 4
+#define le 5
+#define lt 6
+#define ge 7
+#define ou 8
+#define ib 9
+#define in 10
+#define ob 11
+#define FRAMESIZE 200
+#define SWITCHUSER 128
+#if 0
+#define XR1 4
+#define XR2 8
+#define XR3 12
+#define XR4 16
+#define XR5 20
+#define XR6 24
+#define XR7 28
+#define XR8 32
+#define XR9 36
+#define XR10 40
+#define XR11 44
+#define XR12 48
+#define XR13 52
+#define XR14 56
+#define XR15 60
+#define XR16 64
+#define XR17 68
+#define XR18 72
+#define XR19 76
+#define XR20 80
+#define XR21 84
+#define XR22 88
+#define XR23 92
+#define XR24 96
+#define XR25 100
+#define XR26 104
+#define XR27 108
+#define XR28 112
+#define XR29 116
+#define XR30 120
+#define XR31 124
+#define XFPSR 128
+#define XFPCR 132
+#define XFPECR 136
+#define XS1HI 140
+#define XS1LO 144
+#define XS2HI 148
+#define XS2LO 152
+#define XPCR 156
+#define XMANTHI 140
+#define XMANTLO 144
+#define XIMPCR 148
+#define XSPSR 160
+#define XSSB 164
+#define XSNIP 168
+#define XSFIP 172
+#define XRETADDR 176
+#define XHANDRETADDR 180
+#define XHANDFPECR 184
+#define XHANDPR 188
+#define XHANDIMP 192
+#endif
+#define STKSTATE 196
+#define handtrap 134
+#define modehi 30
+#define modelo 29
+#define rndhi 15
+#define rndlo 14
+#define efunf 7
+#define efovf 6
+#define efinx 5
+#define hiddens 23
+#define hiddend 20
+#define NUMFRAME 10
+#define SIGILL 4
+#define SIGFPEPR 8
+#define u_sfu1full 0x4
+#define u_xcpt 0x8
+#define USIZE 0x0ff0
+#endif
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef LOCORE
+#define LOCORE
+
+#ifndef ASSEMBLER /* predefined by ascpp, at least */
+#define ASSEMBLER
+#endif
+
+#include <machine/trap.h>
+#include "assym.s"
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+#endif
+#define MARK or r21, r0, __LINE__
+
+ text
+ align 4
+ global _Xfp_precise
+_Xfp_precise:
+ or r29, r3, r0 ; r29 is now the E.F.
+ subu r31, r31, 40
+ st r1, r31, 32
+ st r29, r31, 36
+
+ ld r2, r29, EF_FPSR * 4
+ ld r3, r29, EF_FPCR * 4
+ ld r4, r29, EF_FPECR * 4
+ ld r5, r29, EF_FPHS1 * 4
+ ld r6, r29, EF_FPLS1 * 4
+ ld r7, r29, EF_FPHS2 * 4
+ ld r8, r29, EF_FPLS2 * 4
+ ld r9, r29, EF_FPPT * 4
+
+
+ ;Load into r1 the return address for the exception handlers. Looking
+ ;at FPECR, branch to the appropriate exception handler. However,
+ ;if none of the exception bits are enabled, then a floating point
+ ;instruction was issued with the floating point unit disabled. This
+ ;will cause an unimplemented opcode exception.
+
+ or.u r1,r0,hi16(wrapup) ;load return address of function
+ or r1,r1,lo16(wrapup)
+2: bb0 6,r4, 3f ;branch to FPunimp if bit set
+ br FPuimp
+3: bb0 7,r4, 4f ;branch to FPintover if bit set
+ br _FPintover
+4: ; bb0 5,r4, 5f ;branch to FPpriviol if bit set
+ ; br _FPpriviol
+5: bb0 4,r4, 6f ;branch to FPresoper if bit set
+ br _FPresoper
+6: bb0 3,r4, 7f ;branch to FPdivzero if bit set
+ br _FPdivzero
+7:
+ or.u r4, r4, 0xffff
+
+FPuimp: global FPuimp
+fp_p_trap:
+ subu r31,r31,40 /* allocate stack */
+ st r1,r31,36 /* save return address */
+ st r3,r31,32 /* save exception frame */
+ or r2,r0,T_FPEPFLT /* load trap type */
+ or r3, r29, r0
+ bsr _trap /* trap */
+ ld r1,r31,36 /* recover return address */
+ addu r31,r31,40 /* deallocate stack */
+ br fp_p_return
+
+ ;To write back the results to the user registers, disable exceptions
+ ;and the floating point unit. Write FPSR and FPCR and load the SNIP
+ ;and SFIP.
+ ;r5 will contain the upper word of the result
+ ;r6 will contain the lower word of the result
+
+wrapup: global wrapup
+ tb1 0,r0,0 ;make sure all floating point operations
+ ;have finished
+ ldcr r10, cr1 ;load the PSR
+ or r10, r10, 0x2 ;disable interrupts
+ stcr r10, cr1
+#if 0
+Why is this done? -jfriedl
+ or r10, r10, 0x8 ;set SFU 1 disable bit, disable SFU 1
+ stcr r10, cr1
+#endif
+ ld r1, r31, 32
+ ld r29, r31, 36
+ addu r31, r31, 40
+
+ fstcr r2, fpsr ;write revised value of FPSR
+ fstcr r3, fpcr ;write revised value of FPCR
+
+ ;result writeback routine
+ addu r3, r29, EF_R0 * 4
+ extu r2, r9, 5<0> ;get 5 bits of destination register
+ bb0 5, r9, writesingle ;branch if destination is single
+
+;writedouble here
+ st r5, r3 [r2] ;write high word
+ add r2, r2, 1 ;for double, the low word is the
+ ;unspecified register
+ clr r2, r2, 27<5> ;perform equivalent of mod 32
+writesingle:
+ st r6, r3 [r2] ;write low word into memory
+
+fp_p_return:
+ jmp r1
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ text
+ global _FPdivzero
+
+
+;Check if the numerator is zero. If the numerator is zero, then handle
+;this instruction as you would a 0/0 invalid operation.
+
+_FPdivzero:
+ st r1,r31,0 ;save return address
+ bb1 s1size,r9,1f ;branch if numerator double
+/* single number */
+ clr r10,r5,1<sign> ;clear sign bit
+ extu r11,r6,3<29> ;grab upper bits of lower word
+ or r10,r10,r11 ;combine ones of mantissa
+ bcnd eq0,r10,resoper ;numerator is zero, handle reserved
+ ;operand
+ br setbit ;set divzero bit
+1:
+/* double number */
+ clr r10,r5,1<sign> ;clear sign bit
+ or r10,r10,r6 ;or high and low words
+ bcnd ne0,r10,setbit ;set divzero bit
+
+;The numerator is zero, so handle the invalid operation by setting the
+;invalid operation bit and branching to the user handler if there is one
+;or writing a quiet NaN to the destination.
+
+resoper:
+ set r2,r2,1<oper> ;set bit in FPSR
+#ifdef HANDLER
+ bb0 oper,r3,noreshand ;branch to execute default handling for
+ ;reserved operands
+ bsr _handler ;branch to user handler
+ br FP_div_return ;return from function
+#endif
+
+noreshand:
+ set r5,r0,0<0> ;put a NaN in high word
+ set r6,r0,0<0> ;put a NaN in low word
+ br FP_div_return ;return from subroutine
+ ;writing to a word which may be ignored
+ ;is just as quick as checking the precision
+ ;of the destination
+
+;The operation is divide by zero, so set the divide by zero bit in the
+;FPSR. If the user handler is set, then go to the user handler, else
+;go to the default mode.
+
+setbit:
+ set r2,r2,1<divzero> ;set bit in FPSR
+#ifdef HANDLER
+ bb0 divzero,r3,default ;go to default routine if no handler
+ bsr _handler ;execute handler routine
+ br FP_div_return ;return from subroutine
+#endif
+
+
+;Considering the sign of the numerator and zero, write a correctly
+;signed infinity of the proper precision into the destination.
+
+default:
+ bb1 dsize,r9,FPzero_double ;branch to handle double result
+FPzero_single:
+ clr r10,r5,31<0> ;clear all of S1HI except sign bit
+ xor r10,r7,r10 ;xor the sign bits of the operands
+ or.u r6,r0,0x7f80 ;load single precision infinity
+ br.n FP_div_return ;return from subroutine
+ or r6,r6,r10 ;load correctly signed infinity
+
+FPzero_double:
+ clr r10,r5,31<0> ;clear all of S1HI except sign bit
+ xor r10,r7,r10 ;xor the sign bits of the operands
+ or.u r5,r0,0x7ff0 ;load double precision infinity
+ or r5,r5,r10 ;load correctly signed infinity
+ or r6,r0,r0 ;clear lower word of double
+
+FP_div_return:
+ ld r1,r31,0 ;load return address
+ jmp r1 ;return from subroutine
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ text
+
+
+;Both NINT and TRNC require a certain rounding mode, so check which
+;instruction caused the integer conversion overflow. Use a substitute
+;FPCR in r1, and modify the rounding mode if the instruction is NINT or TRNC.
+
+_FPintover: global _FPintover
+ extu r10,r9,5<11> ;extract opcode
+ cmp r11,r10,INTop ;see if instruction is INT
+ st r1,r31,0 ;save return address
+ bb1.n eq,r11,checksize ;instruction is INT, do not modify
+ ;rounding mode
+ or r1,r0,r3 ;load FPCR into r1
+ cmp r11,r10,NINTop ;see if instruction is NINT
+ bb1 eq,r11,NINT ;instruction is NINT
+
+TRNC: clr r1,r1,2<rndlo> ;clear rounding mode bits,
+ ;instruction is TRNC
+ br.n checksize ;branch to check size
+ set r1,r1,1<rndlo> ;make rounding mode round towards zero
+
+NINT: clr r1,r1,2<rndlo> ;make rounding mode round to nearest
+
+
+;See whether the source is single or double precision.
+
+checksize: bb1 s2size,r9,checkdoub ;S2 is double, branch to see if there
+;is a false alarm
+
+
+;An integer has more bits than the mantissa of a single precision floating
+;point number, so to check for false alarms (i.e. valid conversion), simply
+;check the exponents. False alarms are detected for 2**30 to (2**30) - 1 and
+;-2**30 to -2**31. Only seven bits need to be looked at since an exception
+;will not occur for the other half of the numbering system.
+;To speed up the processing, first check to see if the exponent is 32 or
+;greater.
+
+;This code was originally written for the exponent in the control
+;register to have the most significant bit (8 - single, 11 - double)
+;flipped and sign extended. For precise exceptions, however, the most
+;significant bit is only sign extended. Therefore, the code was chopped
+;up so that it would work for positive values of real exponent which were
+;only sign extended.
+
+checksing: extu r10,r7,7<20> ;internal representation for single
+;precision is IEEE 8 bits sign extended
+;to 11 bits; for real exp. = 30, the
+;above instruction gives a result exp.
+;that has the MSB flipped and sign
+;extended like in the IMPCR
+ cmp r11,r10,31 ;compare to 32,but exp. off by 1
+;these 2 instructions speed up the
+;execution of valid cases
+ bb1 ge,r11,overflw ;valid case, perform overflow routine
+ bb1 sign,r7,checksingn ;source operand is negative
+
+;If the number is positive and the exponent is greater than 30, then it is
+;overflow.
+
+checksingp: cmp r10,r10,29 ;compare to 30, but exp. off by 1
+ bb1 gt,r10,overflw ;no false alarm, its overflow
+ br conversionsp ;finish single precision conversion
+
+;If the number is negative, and the exponent is 30, or 31 with a mantissa
+;of 0, then it is a false alarm.
+
+checksingn: cmp r11,r10,30 ;compare to 31,but exp. off by 1
+ bb1 lt,r11,conversionsn ;exp. less than 31, so convert
+ extu r10,r8,3<29> ;get upper three bits of lower mantissa
+ mak r12,r7,20<3> ;get upper 20 bits of mantissa
+ or r10,r10,r12 ;form complete mantissa
+ bcnd eq0,r10,conversionsn ;complete conversion if mantissa is 0
+ br overflw ;no false alarm, its overflow
+
+
+;False alarms are detected for 2**30 to (2**31) - 1 and
+;-2**30 to -2**31. Only seven bits need to be looked at since an exception
+;will not occur for the other half of the numbering system.
+;To speed up the processing, first check to see if the exponent is 32 or
+;greater. Since there are more mantissa bits than integer bits, rounding
+;could cause overflow. (2**31) - 1 needs to be checked so that it does
+;not round to 2**31, and -2**31 needs to be checked in case it rounds to
+;-((2**31) + 1).
+
+checkdoub: extu r10,r7,10<20> ;internal representation for double
+;precision is the same IEEE 11 bits
+;for real exp. = 30, the
+;above instruction gives a result exp.
+;that has the MSB flipped and sign
+;extended like in the IMPCR
+ cmp r11,r10,31 ;compare to 32,but exp. off by 1
+;these 2 instructions speed up the
+;execution of valid cases
+ bb1 ge,r11,overflw ;valid case, perform overflow routine
+ bb1 sign,r7,checkdoubn ;source operand is negative
+
+;If the exponent is not 31, then the floating point number will be rounded
+;before the conversion is done.  A branch table is set up with bits 4 and 3
+;being the rounding mode, bit 2 the LSB of the would-be integer, and bits 1
+;and 0 the guard and sticky bits.
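+;(For example, with round to nearest -- mode bits 00 -- the only entries in
+;the table that add one are those with guard set and at least one of sticky
+;or the LSB set (indices 00011, 00110, 00111), which is the usual round to
+;nearest even rule; round toward zero and, for this positive source, round
+;toward minus infinity never add one.)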
+
+checkdoubp: cmp r11,r10,30 ;compare to 31, but exponent off by 1
+ bb1 eq,r11,overflw ;no false alarm, its overflow
+ extu r12,r8,1<22> ;get LSB for integer with exp. = 30
+ mak r12,r12,1<2> ;start to set up field for branch table
+ extu r11,r8,1<21> ;get guard bit
+ mak r11,r11,1<1> ;set up field for branch table
+ or r12,r11,r12 ;set up field for branch table
+ extu r11,r8,21<0> ;get bits for sticky bit
+ bcnd eq0,r11,nostickyp ;do not set sticky
+ set r12,r12,1<0> ;set sticky bit
+nostickyp: rot r11,r1,0<rndlo> ;shift rounding mode to 2 LSB''s
+ mak r11,r11,2<3> ;set up field, clear other bits
+ or r12,r11,r12 ;set up field for branch table
+ lda r12,r0[r12] ;scale r12
+ or.u r12,r12,hi16(ptable) ;load pointer into table
+ addu r12,r12,lo16(ptable)
+ jmp r12 ;jump into branch table
+
+ptable: br conversiondp
+p00001: br conversiondp
+p00010: br conversiondp
+p00011: br paddone
+p00100: br conversiondp
+p00101: br conversiondp
+p00110: br paddone
+p00111: br paddone
+p01000: br conversiondp
+p01001: br conversiondp
+p01010: br conversiondp
+p01011: br conversiondp
+p01100: br conversiondp
+p01101: br conversiondp
+p01110: br conversiondp
+p01111: br conversiondp
+p10000: br conversiondp
+p10001: br conversiondp
+p10010: br conversiondp
+p10011: br conversiondp
+p10100: br conversiondp
+p10101: br conversiondp
+p10110: br conversiondp
+p10111: br conversiondp
+p11000: br conversiondp
+p11001: br paddone
+p11010: br paddone
+p11011: br paddone
+p11100: br conversiondp
+p11101: br paddone
+p11110: br paddone
+p11111: br paddone
+
+;Add one to the bit of the mantissa which corresponds to the LSB of an
+;integer. If the mantissa overflows, then there is a valid integer
+;overflow conversion; otherwise, the mantissa can be converted to the integer.
+
+paddone: or r10,r0,r0 ;clear r10
+ set r10,r10,1<22> ;set LSB bit to 1 for adding
+ addu.co r8,r8,r10 ;add the 1 obtained from rounding
+ clr r11,r7,12<20> ;clear exponent and sign
+ addu.ci r11,r0,r11 ;add carry
+ bb1 20,r11,overflw ;overflow to 2**31, abort the rest
+ br.n conversiondp ;since the exp. was 30, and the exp.
+ ;did not round up to 31, the largest
+ ;number that S2 could become is 2**31-1
+ or r7,r0,r11 ;store r11 into r7 for conversion
+
+;Now check for negative double precision sources. If the exponent is 30,
+;then convert the false alarm. If the exponent is 31, then check the mantissa
+;bits which correspond to integer bits. If any of them are a one, then there
+;is overflow. If they are zero, then check the guard, round, and sticky bits.
+;Round toward zero and positive will not cause a roundup, but round toward
+;nearest and negative may, so perform those roundings. If there is no overflow,
+ ;then convert and return from subroutine.
+
+checkdoubn: cmp r11,r10,29 ;compare to 30, but exp. off by 1
+ bb1 eq,r11,conversiondn ;false alarm if exp. = 30
+ extu r10,r8,11<21> ;check upper bits of lower mantissa
+ bcnd ne0,r10,overflw ;one of the bits is a 1, so overflow
+ extu r10,r7,20<0> ;check upper bits of upper mantissa
+ bcnd ne0,r10,overflw ;one of the bits is a 1, so overflow
+ bb0 rndlo,r1,possround ;rounding mode is either round near or
+ ;round negative, which may cause a
+ ;round
+ br.n FPintov_return ;round positive, which will not cause a
+ ;round
+ set r6,r0,1<sign> ;rounding mode is either round zero or
+possround: extu r12,r8,1<20> ;get guard bit
+ extu r11,r8,20<0> ;get bits for sticky bit
+ bcnd.n eq0,r11,nostickyn ;do not set sticky
+ mak r12,r12,1<1> ;set up field for branch table
+ set r12,r12,1<0> ;set sticky bit
+nostickyn: bb1 rndhi,r1,negative ;rounding mode is negative
+nearest: cmp r12,r12,3 ;are both guard and sticky set
+ bb1 eq,r12,overflw ;both guard and sticky are set,
+ ;so signal overflow
+ or r6,r0,r0 ;clear destination register r6
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<sign> ;set the sign bit and take care of
+ ;this special case
+negative: bcnd ne0,r12,overflw ;-2**31 will be rounded to -(2**31+1),
+ ;so signal overflow
+ or r6,r0,r0 ;clear destination register r6
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<sign> ;set the sign bit and take care of
+ ;this special case
+
+ ;since the exp. was 30, and there was
+ ;no round-up, the largest number that
+ ;S2 could have been was 2**31 - 1
+
+
+ ;Convert the single precision positive floating point number.
+
+conversionsp: extu r6,r8,3<29> ;extract lower bits of integer
+ mak r6,r6,3<7> ;shift left to correct place in integer
+ mak r10,r7,20<10> ;shift left upper bits of integer
+ or r6,r6,r10 ;form most of integer
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<30> ;set hidden one
+
+
+ ;Convert the single precision negative floating point number.
+
+conversionsn: bb1 eq,r11,exp31s ;use old r11 to see if exp. is 31
+ extu r6,r8,3<29> ;extract lower bits of mantissa
+ mak r6,r6,3<7> ;shift left to correct place in integer
+ mak r10,r7,20<10> ;shift left upper bits of integer
+ or r6,r6,r10 ;form most of integer
+ set r6,r6,1<30> ;set hidden one
+ or.c r6,r0,r6 ;negate result
+ br.n FPintov_return ;return from subroutine
+ addu r6,r6,1 ;add 1 to get 2''s complement
+exp31s: or r6,r0,r0 ;clear r6
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<sign> ;set sign bit
+
+
+ ;Convert the double precision positive floating point number.
+
+conversiondp: extu r6,r8,10<22> ;extract lower bits of integer
+ mak r10,r7,20<10> ;shift left upper bits of integer
+ or r6,r6,r10 ;form most of integer
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<30> ;set hidden one
+
+
+ ;Convert the double precision negative floating point number. The number,
+ ;whose exponent is 30, must be rounded before converting. Bits 4 and 3 are
+ ;the rounding mode, bit 2 is the LSB of the would-be integer, and bits 1
+ ;and 0 are the guard and sticky bits for the branch table.
+
+conversiondn: extu r12,r8,1<22> ;get LSB for integer with exp. = 30
+ mak r12,r12,1<2> ;start to set up field for branch table
+ extu r11,r8,1<21> ;get guard bit
+ mak r11,r11,1<1> ;set up field for branch table
+ or r12,r11,r12 ;set up field for branch table
+ extu r11,r8,21<0> ;get bits for sticky bit
+ bcnd eq0,r11,nostkyn ;do not set sticky
+ set r12,r12,1<0> ;set sticky bit
+nostkyn: rot r11,r1,0<rndlo> ;shift rounding mode to 2 LSB''s
+ mak r11,r11,2<3> ;set up field, clear other bits
+ or r12,r11,r12 ;set up field for branch table
+ lda r12,r0[r12] ;scale r12
+ or.u r12,r12,hi16(ntable);load pointer into table
+ addu r12,r12,lo16(ntable)
+ jmp r12 ;jump into branch table
+
+ntable: br nnoaddone
+n00001: br nnoaddone
+n00010: br nnoaddone
+n00011: br naddone
+n00100: br nnoaddone
+n00101: br nnoaddone
+n00110: br naddone
+n00111: br naddone
+n01000: br nnoaddone
+n01001: br nnoaddone
+n01010: br nnoaddone
+n01011: br nnoaddone
+n01100: br nnoaddone
+n01101: br nnoaddone
+n01110: br nnoaddone
+n01111: br nnoaddone
+n10000: br nnoaddone
+n10001: br naddone
+n10010: br naddone
+n10011: br naddone
+n10100: br nnoaddone
+n10101: br naddone
+n10110: br naddone
+n10111: br naddone
+n11000: br nnoaddone
+n11001: br nnoaddone
+n11010: br nnoaddone
+n11011: br nnoaddone
+n11100: br nnoaddone
+n11101: br nnoaddone
+n11110: br nnoaddone
+n11111: br nnoaddone
+
+
+ ;Add one to the mantissa, and check to see if it overflows to -2**31.
+;The conversion is done in nnoaddone:.
+
+naddone: or r10,r0,r0 ;clear r10
+ set r10,r10,1<22> ;set LSB bit to 1 for adding
+ add.co r8,r8,r10 ;add the 1 obtained from rounding
+ clr r7,r7,12<20> ;clear exponent and sign
+ add.ci r7,r0,r7 ;add carry
+ bb1 20,r7,maxneg ;rounded to -2**31,handle separately
+ ;the exponent was originally 30
+nnoaddone: extu r6,r8,11<22> ;extract lower bits of integer
+ mak r10,r7,20<10> ;shift left upper bits of integer
+ or r6,r6,r10 ;form most of integer
+ set r6,r6,1<30> ;set hidden one
+ or.c r6,r0,r6 ;negate integer
+ br.n FPintov_return ;return from subroutine
+ addu r6,r6,1 ;add 1 to get 2''s complement
+
+maxneg: or r6,r0,r0 ;clear integer
+ br.n FPintov_return ;return from subroutine
+ set r6,r6,1<sign> ;set sign bit
+
+
+ ;For valid overflows, check to see if the integer overflow user handler is
+ ;set. If it is set, then go to user handler, else write the correctly
+ ;signed largest integer.
+
+overflw:
+#ifdef HANDLER
+ bb0.n oper,r3,nohandler ;do not go to user handler routine
+ set r2,r2,1<oper> ;set invalid operand bit
+ bsr _handler ;go to user handler routine
+ br FPintov_return ;return from subroutine
+nohandler:
+#endif
+ bb0.n sign,r7,FPintov_return ;if positive then return from subroutine
+ set r6,r6,31<0> ;set result to largest positive integer
+ or.c r6,r0,r6 ;negate r6,giving largest negative int.
+
+FPintov_return: ld r1,r31,0 ;load return address from memory
+ jmp r1 ;return from subroutine
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ text
+
+
+;Some instructions only have an S2 operand, so clear S1HI and S1LO
+;for those instructions so that the previous contents of S1HI and S1LO
+;do not influence this instruction.
+
+LABEL(_FPresoper)
+ st r1, r31, 0
+ extu r10,r9,5<11> ;extract opcode
+; cmp r11,r10,FSQRTop ;compare to FSQRT
+; bb1 eq,r11,S1clear ;clear S1 if instruction only had S2 operand
+ cmp r11,r10,INTop ;compare to INT
+ bb1 eq,r11,S1clear ;clear S1 if instruction only had S2 operand
+ cmp r11,r10,NINTop ;compare to NINT
+ bb1 eq,r11,S1clear ;clear S1 if instruction only had S2 operand
+ cmp r11,r10,TRNCop ;compare to TRNC
+ bb0 eq,r11,opercheck ;check for reserved operands
+
+_LABEL(S1clear)
+ or r5,r0,r0 ;clear any NaN''s, denorms, or infinities
+ or r6,r0,r0 ;that may be left in S1HI,S1LO from a
+ ;previous instruction
+
+;r12 contains the following flags:
+; bit 9 -- s1sign
+; bit 8 -- s2sign
+; bit 7 -- s1nan
+; bit 6 -- s2nan
+; bit 5 -- s1inf
+; bit 4 -- s2inf
+; bit 3 -- s1zero
+; bit 2 -- s2zero
+; bit 1 -- s1denorm
+; bit 0 -- s2denorm
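+;
+;(For instance, a value of 0x90 in r12 would mean that S1 is a NaN and S2 is
+;an infinity.)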
+
+;Using code for both single and double precision, check if S1 is either
+;a NaN or infinity and set the appropriate flags in r12. Then check if
+;S2 is a NaN or infinity. If it is a NaN, then branch to the NaN routine.
+
+
+_LABEL(opercheck)
+ extu r10,r5,11<20> ;internal representation for double
+ bb1.n s1size,r9,S1NaNdoub ;S1 is double precision
+ or r12,r0,r0 ;clear operand flag register
+_LABEL(S1NaNsing)
+ xor r10,r10,0x0080 ;internal representation for single
+ ext r10,r10,8<0> ;precision is IEEE 8 bits sign extended
+ ;to 11 bits; for real exp. > 0, the
+ ;above instructions gives a result exp.
+ ;that has the MSB flipped and sign
+ ;extended like in the IMPCR
+ cmp r11,r10,127 ;Is exponent equal to IEEE 255 (internal 127)
+ bb1 ne,r11,S2NaN ;source 1 is not a NaN or infinity
+ mak r10,r5,20<0> ;load r10 with upper bits of S1 mantissa
+ extu r11,r6,3<29> ;get 3 upper bits of lower word
+ or r11,r10,r11 ;combine any existing 1''s
+ bcnd eq0,r11,noS1NaNs ;since r11 can only hold 0 or a positive
+ ;number, branch to noS1NaN when eq0
+ br.n S2NaN ;see if S2 has a NaN
+ set r12,r12,1<s1nan> ;indicate that S1 has a NaN
+_LABEL(noS1NaNs)
+ br.n S2NaN ;check contents of S2
+ set r12,r0,1<s1inf> ;indicate that S1 has an infinity
+
+_LABEL(S1NaNdoub)
+ xor r10,r10,0x0400 ;precision is the same IEEE 11 bits
+ ;The
+ ;above instructions gives a result exp.
+ ;that has the MSB flipped and sign
+ ;extended like in the IMPCR
+ cmp r11,r10,1023 ;Is exp. equal to IEEE 2047 (internal 1023)
+ bb1 ne,r11,S2NaN ;source 1 is not a NaN or infinity
+ mak r10,r5,20<0> ;load r10 with upper bits of S1 mantissa
+ or r11,r6,r10 ;combine existing 1''s of mantissa
+ bcnd eq0,r11,noS1NaNd ;since r11 can only hold 0 or a positive
+ ;number, branch to noS1NaN when eq0
+ br.n S2NaN ;see if S2 has a NaN
+ set r12,r12,1<s1nan> ;indicate that S1 has a NaN
+_LABEL(noS1NaNd)
+ set r12,r0,1<s1inf> ;indicate that S1 has an infinity
+
+_LABEL(S2NaN)
+ bb1.n s2size,r9,S2NaNdoub ;S1 is double precision
+ extu r10,r7,11<20> ;internal representation for double
+_LABEL(S2NaNsing)
+ xor r10,r10,0x0080 ;internal representation for single
+ ext r10,r10,8<0> ;precision is IEEE 8 bits sign extended
+ ;to 11 bits; for real exp. > 0, the
+ ;above instruction gives a result exp.
+ ;that has the MSB flipped and sign
+ ;extended like in the IMPCR
+ cmp r11,r10,127 ;Is exponent equal to IEEE 255 (internal 127)
+ bb1 ne,r11,inf ;source 2 is not a NaN or infinity
+ mak r10,r7,20<0> ;load r10 with upper bits of S1 mantissa
+ extu r11,r8,3<29> ;get 3 upper bits of lower word
+ or r11,r10,r11 ;combine any existing 1''s
+ bcnd eq0,r11,noS2NaNs ;since r11 can only hold 0 or a positive
+ ;number, branch to noS2NaNs when eq0
+ br.n _NaN ;branch to NaN routine
+ set r12,r12,1<s2nan> ;indicate that s2 has a NaN
+_LABEL(noS2NaNs)
+ bb0 s1nan,r12, 1f ;branch to NaN if S1 is a NaN
+ br _NaN
+1: br.n _infinity ;If S1 had a NaN we would have already
+ ;branched, and S2 does not have a NaN, but
+ ;it does have an infinity, so branch to
+ ;handle the infinity
+ set r12,r12,1<s2inf> ;indicate that S2 has an infinity
+
+_LABEL(S2NaNdoub)
+ xor r10,r10,0x0400 ;precision is the same IEEE 11 bits
+ ;The
+ ;above instruction gives a result exp.
+ ;that has the MSB flipped and sign
+ ;extended like in the IMPCR
+ cmp r11,r10,1023 ;Is exp. equal to IEEE 2047 (internal 1023)
+ bb1 ne,r11,inf ;source 2 is not a NaN or infinity
+ mak r10,r7,20<0> ;load r10 with upper bits of S2 mantissa
+ or r11,r8,r10 ;combine existing 1''s of mantissa
+ bcnd eq0,r11,noS2NaNd ;since r11 can only hold 0 or a positive
+ ;number, branch to noS2NaNd when eq0
+ br.n _NaN ;branch to NaN routine
+ set r12,r12,1<s2nan> ;indicate that s2 has a NaN
+_LABEL(noS2NaNd)
+ bb0 s1nan,r12,1f ;branch to NaN if S1 is a NaN
+ br _NaN
+1: br.n _infinity ;If S1 had a NaN we would have already
+ ;branched, and S2 does not have a NaN, but
+ ;it does have an infinity, so branch to
+ ;handle the infinity
+ set r12,r12,1<s2inf> ;indicate that S2 has an infinity
+
+
+;If S2 was a NaN, the routine would have already branched to NaN. If S1
+;is a NaN, then branch to NaN. If S1 is not a NaN and S2 is infinity, then
+;we would have already branched to infinity. If S1 is infinity, then branch.
+;If the routine still has not branched, then branch to denorm, the only
+;reserved operand left.
+
+_LABEL(inf)
+ bb0 s1nan,r12,1f ;branch if S1 has a NaN and S2 does not
+ br _NaN
+1: bb0 s1inf,r12,2f ;Neither S1 nor S2 has a NaN, and we would
+ ;have branched already if S2 had an
+ ;infinity, so branch if S1 is infinity
+/*
+ * The above "bb0 s1inf, r12,2f" had been a "bb1", but it just didn't make
+ * sense (and didn't work, either), so I changed it.
+ * jfriedl Dec 1, 1989.
+ */
+ br _infinity
+2:
+
+ br _denorm ;branch to denorm, the only remaining
+ ;alternative
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+;function _FPunderflow --
+;The documentation for this release gives an overall description of this code.
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ global _FPunderflow
+ text
+
+;First check for an underflow user handler. If there is not one, then
+;branch to the routine to make a denormalized number. Before branching
+;to the underflow user handler, add 192 to a single precision exponent
+;and 1536 to a double precision exponent.
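+;(192 and 1536 are the exponent adjustments IEEE 754 recommends for trapped
+;underflow -- 3 * 2**(n-2) with n = 8 exponent bits for single and n = 11
+;for double -- so the value handed to the user handler is the true result
+;scaled up by 2**192 or 2**1536, which puts it back in normalized range.
+;With the exponent field sitting in bits 20..31 of the IMPCR image in r12,
+;adding 0x0c000000 or 0x60000000 below bumps that field by 192 or 1536.)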
+
+_FPunderflow: st r1,r31,0 ;save return address
+#ifdef HANDLER
+ bb0 efunf,r12,denorm ;jump to default procedure
+ bb1.n destsize,r12,doubleprec ;double precision destination
+ set r2,r2,1<underflow> ;set underflow flag in FPSR
+singleprec: or.u r6,r0,0x0c00 ;load exponent adjust 192
+ br.n callundhand ;branch to call handler for user handler
+ add r12,r6,r12 ;adjust single precision exponent
+doubleprec: or.u r6,r0,0x6000 ;load exponent adjust 1536
+ add r12,r6,r12 ;adjust double precision exponent
+callundhand: bsr _handler ;call handler for user handler
+ br Ureturn ;return from subroutine
+#endif
+
+;Now the floating point number, which has an exponent smaller than what
+;IEEE allows, must be denormalized. Denormalization is done by calculating
+;the difference between a denormalized exponent and an underflow exponent and
+;shifting the mantissa by that amount. A one may need to be subtracted from
+;the LSB if a one was added during rounding.
+;r9 is used to contain the guard, round, sticky, and an inaccuracy bit in
+;case some bits were shifted off the mantissa during denormalization.
+;r9 will contain: bit 4 -- new addone if one added during rounding
+; after denormalization
+; bit 3 -- inaccuracy flag caused by denormalization
+; or pre-denormalization inexactness
+; bit 2 -- guard bit of result
+; bit 1 -- round bit of result
+; bit 0 -- sticky bit of result
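+;
+;(Conceptually the denormalization below behaves like this sketch, except
+;that the code special-cases the shift counts instead of looping:
+;      while (exponent < denormalized minimum) {
+;              sticky |= round;  round = guard;  guard = mantissa & 1;
+;              mantissa >>= 1;   exponent++;
+;      }
+;so any bit shifted off the end is remembered in the sticky bit and the
+;final rounding still sees that the result was inexact.)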
+
+denorm: bb1.n destsize,r12,Udouble ;denorm for double
+ extu r9,r10,3<26> ;load r9 with grs
+Usingle: mak r5,r10,21<3> ;extract high 21 bits of mantissa
+ extu r6,r11,3<29> ;extract low 3 bits of mantissa
+ or r11,r5,r6 ;form 24 bits of mantissa
+
+;See if the addone bit is set and unround if it is.
+ bb0.n 25,r10,nounrounds ;do not unround if addone bit clear
+ extu r6,r12,12<20> ;extract signed exponent from IMPCR
+unrounds: subu r11,r11,1 ;subtract 1 from mantissa
+;If the hidden bit is cleared after subtracting the one, then the one added
+;during the rounding must have propagated through the mantissa. The exponent
+;will need to be decremented.
+ bb1 23,r11,nounrounds ;if hidden bit is set,then exponent does
+ ;not need to be decremented
+decexps: sub r6,r6,1 ;decrement exponent 1
+ set r11,r11,1<23> ;set the hidden bit
+
+;For both single and double precision, there are cases where it is easier
+;and quicker to handle the shift as a special case: when the shift amount
+;is only 1 or 2, when all of the mantissa is shifted off, when the shift
+;amount exceeds the mantissa width entirely, or, in the case of doubles,
+;when the shift amount falls near the boundary of MANTLO and MANTHI.
+
+nounrounds: or r8,r0,lo16(0x00000f81) ;load r8 with -127 in decimal
+ ;for lowest 12 bits
+ sub r7,r8,r6 ;find difference between two exponents,
+ ;this amount is the shift amount
+ cmp r6,r7,3 ;check to see if r7 contains 3 or more
+ bb1 ge,r6,threesing ;br to code that handles shifts of >=3
+ cmp r6,r7,2 ;check to see if r7 contains 2
+ bb1 eq,r6,twosing ;br to code that handles shifts of 2
+one: rot r9,r9,0<1> ;rotate roundoff register once, this places
+ ;guard in round and round in sticky
+ bb0 31,r9,nosticky1s;do not or round and sticky if sticky is
+ ;0, this lost bit will be cleared later
+ set r9,r9,1<0> ;or round and sticky
+nosticky1s: bb0 0,r11,guardclr1s ;do not set guard bit if LSB = 0
+ set r9,r9,1<2> ;set guard bit
+guardclr1s: extu r11,r11,31<1> ;shift mantissa right 1
+ br.n round ;round result
+ mak r9,r9,3<0> ;clear bits lost during rotation
+
+twosing: rot r9,r9,0<2> ;rotate roundoff register twice, this places
+ ;guard in sticky
+ bb0 30,r9,nosticky2s ;do not or guard and sticky if sticky is 0
+ ;this lost bit will be cleared later
+ br.n noround2s ;skip or old guard and old round if old
+ ;sticky set
+ set r9,r9,1<0> ;or guard and sticky
+nosticky2s: bb0 31,r9,noround2s ;do not or guard and round if round is 0
+ ;this lost bit will be cleared later
+ set r9,r9,1<0> ;or guard and round
+noround2s: bb0 0,r11,roundclr2s ;do not set round bit if LSB = 0
+ set r9,r9,1<1> ;set round bit
+roundclr2s: bb0 1,r11,guardclr2s ;do not set guard bit if LSB + 1 = 0
+ set r9,r9,1<2> ;set guard bit
+guardclr2s: extu r11,r11,30<2> ;shift mantissa right 2
+ br.n round ;round result
+ mak r9,r9,3<0> ;clear bits lost during rotation
+
+threesing: bb1 0,r9,noguard3s ;check sticky initially
+ ;sticky is set, forget most of the oring
+nosticky3s: bb0 1,r9,noround3s ;check round initially, do not set sticky
+ br.n noguard3s ;forget most of the rest of oring
+ set r9,r9,1<0> ;round was set, so set sticky
+noround3s: bb0.n 2,r9,noguard3s ;check guard initially, do not set sticky
+ clr r9,r9,2<1> ;clear the original guard and round for when
+ ;you get to round section
+ set r9,r9,1<0> ;guard was set, so set sticky
+noguard3s: cmp r6,r7,23 ;check if # of shifts is <=23
+ bb1 gt,r6,s24 ;branch to see if shifts = 24
+ sub r6,r7,2 ;get number of bits to check for sticky
+ mak r6,r6,5<5> ;shift width into width field
+ mak r8,r11,r6 ;mask off shifted bits -2
+ ff1 r8,r8 ;see if r8 has any ones
+ bb1 5,r8,nostky23 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky23: or r8,r0,34 ;start code to get new mantissa plus two
+ ;extra bits for new round and new guard bits
+ subu r8,r8,r7
+ mak r8,r8,5<5> ;shift field width into second five bits
+ extu r6,r6,5<5> ;shift previous shifted -2 into offset field
+ or r6,r6,r8 ;complete field
+ extu r11,r11,r6 ;form new mantissa with two extra bits
+
+ bb0 0,r11,nornd3s ;do not set new round bit
+ set r9,r9,1<1> ;set new round bit
+nornd3s: bb0 1,r11,nogrd3s ;do not set new guard bit
+ set r9,r9,1<2> ;set new guard bit
+nogrd3s: br.n round ;round mantissa
+ extu r11,r11,30<2> ;shift off remaining two bits
+
+s24: cmp r6,r7,24 ;check to see if # of shifts is 24
+ bb1 gt,r6,s25 ;branch to see if shifts = 25
+ bb1 0,r9,nostky24 ;skip checking if old sticky set
+ extu r8,r11,22<0> ;prepare to check bits that will be shifted
+ ;into the sticky
+ ff1 r8,r8 ;see if there are any 1''s
+ bb1 5,r8,nostky24 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky24: bb0 22,r11,nornd24 ;do not set new round bit
+ set r9,r9,1<1> ;set new round bit
+nornd24: set r9,r9,1<2> ;set new guard bit,this is hidden bit
+ br.n round ;round mantissa
+ or r11,r0,r0 ;clear r11, all of mantissa shifted off
+
+s25: cmp r6,r7,25 ;check to see if # of shifts is 25
+ bb1 gt,r6,s26 ;branch to execute for shifts >= 26
+ bb1 0,r9,nostky25 ;skip checking if old sticky set
+ extu r8,r11,23<0> ;prepare to check bits that will be shifted
+ ;into the sticky
+ ff1 r8,r8 ;see if there are any 1''s
+ bb1 5,r8,nostky25 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky25: set r9,r9,1<1> ;set new round bit,this is hidden bit
+ clr r9,r9,1<2> ;clear guard bit since nothing shifted in
+ br.n round ;round and assemble result
+ or r11,r0,r0 ;clear r11, all of mantissa shifted off
+
+s26: set r9,r9,1<0> ;set sticky bit,this contains hidden bit
+ clr r9,r9,2<1> ;clear guard and round bits since nothing
+ ;shifted in
+ br.n round ;round and assemble result
+ or r11,r0,r0 ;clear mantissa
+
+Udouble: mak r5,r10,21<0> ;extract upper bits of mantissa
+ bb0.n 25,r10,nounroundd ;do not unround if addone bit clear
+ extu r6,r12,12<20>;extract signed exponent from IMPCR
+unroundd: or r8,r0,1
+ subu.co r11,r11,r8 ;subtract 1 from mantissa
+ subu.ci r5,r5,r0 ;subtract borrow from upper word
+ bb1 20,r5,nounroundd ;if hidden bit is set, then exponent does
+ ;not need to be decremented
+decexpd: sub r6,r6,1 ;decrement exponent 1
+ set r5,r5,1<20> ;set the hidden bit
+
+nounroundd: or r8,r0,lo16(0x00000c01) ;load r8 with -1023 in decimal
+ ;for lowest 12 bits
+ sub r7,r8,r6 ;find difference between two exponents,
+ ;this amount is the shift amount
+ cmp r6,r7,3 ;check to see if r7 contains 3 or more
+ bb1 ge,r6,threedoub ;br to code that handles shifts of >=3
+ cmp r6,r7,2 ;check to see if r7 contains 2
+ bb1 eq,r6,twodoub ;br to code that handles shifts of 2
+
+onedoub: rot r9,r9,0<1> ;rotate roundoff register once, this places
+ ;guard in round and round in sticky
+ bb0 31,r9,nosticky1d;do not or round and sticky if sticky is 0
+ ;this lost bit will be cleared later
+ set r9,r9,1<0> ;or old round and old sticky into new sticky
+nosticky1d: bb0 0,r11,guardclr1d ;do not set new guard bit if old LSB = 0
+ set r9,r9,1<2> ;set new guard bit
+guardclr1d: extu r11,r11,31<1> ;shift lower mantissa over 1
+ mak r6,r5,1<31> ;shift off low bit of high mantissa
+ or r11,r6,r11 ;load high bit onto lower mantissa
+ extu r5,r5,20<1> ;shift right once upper 20 bits of mantissa
+ br.n round ;round mantissa and assemble result
+ mak r9,r9,3<0> ;clear bits lost during rotation
+
+twodoub: rot r9,r9,0<2> ;rotate roundoff register twice, this places
+ ;old guard into sticky
+ bb0 30,r9,nosticky2d ;do not or old guard and old sticky if
+ ;old sticky is 0
+ br.n noround2d ;skip or of old guard and old round if old
+ ;sticky set
+ set r9,r9,1<0> ;or old guard and old sticky into new sticky
+nosticky2d: bb0 31,r9,noround2d ;do not or old guard and old round if
+ ;old round is 0
+ set r9,r9,1<0> ;or old guard and old round into new sticky
+noround2d: bb0 0,r11,roundclr2d ;do not set round bit if old LSB = 0
+ set r9,r9,1<1> ;set new round bit
+roundclr2d: bb0 1,r11,guardclr2d ;do not set guard bit if old LSB + 1 = 0
+ set r9,r9,1<2> ;set new guard bit
+guardclr2d: extu r11,r11,30<2> ;shift lower mantissa over 2
+ mak r6,r5,2<30> ;shift off low bits of high mantissa
+ or r11,r6,r11 ;load high bit onto lower mantissa
+ extu r5,r5,19<2> ;shift right twice upper 19 bits of mantissa
+ br.n round ;round mantissa and assemble result
+ mak r9,r9,3<0> ;clear bits lost during rotation
+
+threedoub: bb1 0,r9,noguard3d ;check sticky initially
+ ;sticky is set, forget most of rest of oring
+nosticky3d: bb0 1,r9,noround3d ;check old round, do not set sticky if
+ ;old round is clear, set otherwise
+ br.n noguard3d ;sticky is set, forget most of rest of oring
+ set r9,r9,1<0> ;set sticky if old round is set
+noround3d: bb0 2,r9,noguard3d ;check old guard, do not set sticky if 0
+ clr r9,r9,2<1> ;clear the original guard and round for when
+ ;you get to round section
+ set r9,r9,1<0> ;set sticky if old guard is set
+noguard3d: cmp r6,r7,32 ;do I need to work with a 1 or 2 word mant.
+ ;when forming sticky, round and guard
+ bb1 gt,r6,d33 ;jump to code that handles 2 word mantissas
+ sub r6,r7,2 ;get number of bits to check for sticky
+ mak r6,r6,5<5> ;shift width into width field
+ mak r8,r11,r6 ;mask off shifted bits -2
+ ff1 r8,r8 ;see if r8 has any ones
+ bb1 5,r8,nostky32 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky32: or r8,r0,34 ;start code to get new mantissa plus two
+ ;extra bits for new round and new guard bits,
+ ;the upper word bits will be shifted after
+ ;the round and guard bits are handled
+ subu r8,r8,r7
+ mak r8,r8,5<5> ;shift field width into second five bits
+ extu r6,r6,5<5> ;shift previous shifted -2 into offset field
+ or r6,r6,r8 ;complete bit field
+ extu r11,r11,r6 ;partially form new low mantissa with 2 more
+ ;bits
+ bb0 0,r11,nornd32d ;do not set new round bit
+ set r9,r9,1<1> ;set new round bit
+nornd32d: bb0 1,r11,nogrd32d ;do not set new guard bit
+ set r9,r9,1<2> ;set new guard bit
+nogrd32d: extu r11,r11,30<2> ;shift off remaining two bits
+ mak r6,r7,5<5> ;shift field width into second 5 bits, if the
+ ;width is 32, then these bits will be 0
+ or r8,r0,32 ;load word length into r8
+ sub r8,r8,r7 ;form offset for high bits moved to low word
+ or r6,r6,r8 ;form complete bit field
+ mak r6,r5,r6 ;get shifted bits of high word
+ or r11,r6,r11 ;form new low word of mantissa
+ bcnd ne0,r8,regular33 ;do not adjust for special case of r8
+ br.n round ;containing zeros, which would cause
+ or r5,r0,r0 ;all of the bits to be extracted under
+ ;the regular method
+regular33: mak r6,r7,5<0> ;place lower 5 bits of shift into r6
+ mak r8,r8,5<5> ;shift r8 into width field
+ or r6,r6,r8 ;form field for shifting of upper bits
+ br.n round ;round and assemble result
+ extu r5,r5,r6 ;form new high word mantissa
+
+d33: cmp r6,r7,33 ;is the number of bits to be shifted 33?
+ bb1 gt,r6,d34 ;check to see if # of bits is 34
+ bb1 0,r9,nostky33 ;skip checking if old sticky set
+ mak r6,r11,31<0> ;check bits that will be shifted into sticky
+ ff1 r8,r6 ;see if r6 has any ones
+ bb1 5,r8,nostky33 ;do not set sticky if there are no ones
+ set r9,r9,1<0> ;set new sticky bit
+nostky33: bb0 31,r11,nornd33 ;do not set round if bit is not a 1
+ set r9,r9,1<1> ;set new round bit
+nornd33: bb0 0,r5,nogrd33 ;do not set guard bit if bit is not a 1
+ set r9,r9,1<2> ;set new guard bit
+nogrd33: extu r11,r5,31<1> ;shift high bits into low word
+ br.n round ;round and assemble result
+ or r5,r0,r0 ;clear high word
+
+d34: cmp r6,r7,34 ;is the number of bits to be shifted 34?
+ bb1 gt,r6,d35 ;check to see if # of bits is >= 35
+ bb1 0,r9,nostky34 ;skip checking if old sticky set
+ ff1 r8,r11 ;check bits that will be shifted into sticky
+ bb1 5,r8,nostky34 ;do not set sticky if there are no ones
+ set r9,r9,1<0> ;set new sticky bit
+nostky34: bb0 0,r5,nornd34 ;do not set round if bit is not a 1
+ set r9,r9,1<1> ;set new round bit
+nornd34: bb0 1,r5,nogrd34 ;do not set guard bit if bit is not a 1
+ set r9,r9,1<2> ;set new guard bit
+nogrd34: extu r11,r5,30<2> ;shift high bits into low word
+ br.n round ;round and assemble result
+ or r5,r0,r0 ;clear high word
+
+d35: cmp r6,r7,52 ;see if # of shifts is 35 <= X <= 52
+ bb1 gt,r6,d53 ;check to see if # of shifts is 52
+ bb1.n 0,r9,nostky35 ;skip checking if old sticky set
+ sub r7,r7,34 ;subtract 32 from # of shifts so that opera-
+ ;tions can be done on the upper word, and
+ ;then subtract two more checking guard and
+ ;sticky bits
+ ff1 r8,r11 ;see if lower word has a bit for sticky
+ bb1 5,r8,stkycheck35 ;see if upper word has any sticky bits
+ br.n nostky35 ;quit checking for sticky
+ set r9,r9,1<0> ;set sticky bit
+stkycheck35: mak r6,r7,5<5> ;place width into width field
+ mak r8,r5,r6 ;mask off shifted bits - 2
+ ff1 r8,r8 ;see if r8 has any ones
+ bb1 5,r8,nostky35 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky35: or r8,r0,32 ;look at what does not get shifted off plus
+ ;round and sticky, remember that the r7 value
+ ;was adjusted so that it did not include
+ ;new round or new sticky in shifted off bits
+ subu r8,r8,r7 ;complement width
+ mak r8,r8,5<5> ;shift width into width field
+ or r8,r7,r8 ;add offset field
+ extu r11,r5,r8 ;extract upper bits into low word
+ bb0 0,r11,nornd35 ;do not set new round bit
+ set r9,r9,1<1> ;set new round bit
+nornd35: bb0 1,r11,nogrd35 ;do not set new guard bit
+ set r9,r9,1<2> ;set new guard bit
+nogrd35: extu r11,r11,30<2> ;shift off remaining guard and round bits
+ br.n round ;round and assemble result
+ or r5,r0,r0 ;clear high word
+
+d53: cmp r6,r7,53 ;check to see if # of shifts is 53
+ bb1 gt,r6,d54 ;branch to see if shifts = 54
+ bb1 0,r9,nostky53 ;skip checking if old sticky set
+ ff1 r8,r11 ;see if lower word has a bit for sticky
+ bb1 5,r8,stkycheck53 ;see if upper word has any sticky bits
+ br.n nostky53 ;quit checking for sticky
+ set r9,r9,1<0> ;set sticky bit
+stkycheck53: mak r6,r5,19<0> ;check bits that are shifted into sticky
+ ff1 r8,r6 ;see if r6 has any ones
+ bb1 5,r8,nostky53 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky53: bb0 19,r5,nornd53 ;do not set new round bit
+ set r9,r9,1<1> ;set new round bit
+nornd53: set r9,r9,1<2> ;set new guard bit,this is hidden bit
+ or r5,r0,r0 ;clear high word
+ br.n round ;round and assemble result
+ or r11,r0,r0 ;clear low word
+
+d54: cmp r6,r7,54 ;check to see if # of shifts is 54
+ bb1 gt,r6,d55 ;branch to execute for shifts >= 55
+ bb1 0,r9,nostky54 ;skip checking if old sticky set
+ ff1 r8,r11 ;see if lower word has a bit for sticky
+ bb1 5,r8,stkycheck54 ;see if upper word has any sticky bits
+ br.n nostky54 ;quit checking for sticky
+ set r9,r9,1<0> ;set sticky bit
+stkycheck54: mak r6,r5,20<0> ;check bits that are shifted into sticky
+ ff1 r8,r6 ;see if r6 has any ones
+ bb1 5,r8,nostky54 ;do not set sticky if no ones found
+ set r9,r9,1<0> ;set sticky bit
+nostky54: set r9,r9,1<1> ;set new round bit,this is hidden bit
+ clr r9,r9,1<2> ;clear guard bit since nothing shifted in
+ or r5,r0,r0 ;clear high word
+ br.n round ;round and assemble result
+ or r11,r0,r0 ;clear low word
+
+d55: set r9,r9,1<0> ;set new sticky bit,this contains hidden bit
+ clr r9,r9,2<1> ;clear guard and round bits since nothing
+ ;shifted in
+ or r5,r0,r0 ;clear high word
+ or r11,r0,r0 ;clear low word
+
+
+;The first thing the rounding code does is see whether guard, round, or
+;sticky is set.  If all are clear, then there is no denormalization loss and
+;no need to round, so branch straight to assembling the answer.
+;For rounding, a branch table is set up.  The two leftmost bits are the
+;rounding mode.  The third bit is either the LSB of the mantissa or the
+;sign bit, depending on the rounding mode.  The three LSB''s are the guard,
+;round and sticky bits.
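+;(For example, with round to nearest the entries below that add one are
+;exactly those where guard is set and at least one of round, sticky, or the
+;LSB is set -- round to nearest even.  With the directed modes, bit 3 holds
+;the sign, and a one is added only when rounding away from zero, i.e. round
+;toward plus infinity on a positive result or round toward minus infinity
+;on a negative result, with some of guard, round, or sticky set.)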
+
+round: ff1 r8,r9 ;see if there is denormalization loss
+ bb1 5,r8,assemble ;no denormalization loss or inexactness
+ extu r6,r10,2<modelo> ;extract rounding mode
+ bb1.n modehi,r10,signext ;use sign bit instead of LSB
+ mak r6,r6,2<4> ;shift over rounding mode
+ extu r7,r11,1<0> ;extract LSB
+ br.n grs ;skip sign extraction
+ mak r7,r7,1<3> ;shift over LSB
+signext: extu r7,r10,1<31> ;extract sign bit
+ mak r7,r7,1<3> ;shift sign bit over
+grs: or r6,r6,r7
+ or r6,r6,r9 ;or in guard, round, and sticky
+ or.u r1,r0,hi16(roundtable) ;form address of branch table
+ or r1,r1,lo16(roundtable)
+ lda r6,r1[r6] ;scale offset into branch table
+ jmp.n r6 ;jump to branch table
+ set r9,r9,1<3> ;set inexact flag in r9
+
+roundtable: br noaddone
+r000001: br noaddone
+r000010: br noaddone
+r000011: br noaddone
+r000100: br noaddone
+r000101: br addone
+r000110: br addone
+r000111: br addone
+r001000: br noaddone
+r001001: br noaddone
+r001010: br noaddone
+r001011: br noaddone
+r001100: br addone
+r001101: br addone
+r001110: br addone
+r001111: br addone
+r010000: br noaddone
+r010001: br noaddone
+r010010: br noaddone
+r010011: br noaddone
+r010100: br noaddone
+r010101: br noaddone
+r010110: br noaddone
+r010111: br noaddone
+r011000: br noaddone
+r011001: br noaddone
+r011010: br noaddone
+r011011: br noaddone
+r011100: br noaddone
+r011101: br noaddone
+r011110: br noaddone
+r011111: br noaddone
+r100000: br noaddone
+r100001: br noaddone
+r100010: br noaddone
+r100011: br noaddone
+r100100: br noaddone
+r100101: br noaddone
+r100110: br noaddone
+r100111: br noaddone
+r101000: br noaddone
+r101001: br addone
+r101010: br addone
+r101011: br addone
+r101100: br addone
+r101101: br addone
+r101110: br addone
+r101111: br addone
+r110000: br noaddone
+r110001: br addone
+r110010: br addone
+r110011: br addone
+r110100: br addone
+r110101: br addone
+r110110: br addone
+r110111: br addone
+r111000: br noaddone
+r111001: br noaddone
+r111010: br noaddone
+r111011: br noaddone
+r111100: br noaddone
+r111101: br noaddone
+r111110: br noaddone
+r111111: br noaddone
+
+;Round by adding a one to the LSB of the mantissa.
+addone: or r6,r0,1 ;load a 1 into r6 so that add.co can be used
+ add.co r11,r11,r6 ;add a one to the lower word of result
+ bb0.n destsize,r12,noaddone ;single result,forget carry
+ set r9,r9,1<4> ;indicate that a 1 has been added
+ add.ci r5,r5,r0 ;propagate carry into high word
+
+
+;Branch to inexact user handler if there is one.
+
+noaddone:
+#ifdef HANDLER
+ bb1.n efinx,r12,modformdef ;branch to modify form for user
+ ;handler
+ or r2,r2,5 ;set inexact and underflow flags
+#endif
+
+
+;Assemble the result of the denormalization routine for writeback to the
+;destination register. The exponent of a denormalized number is zero,
+;so simply assemble the sign and the new mantissa.
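+;(A denormalized result therefore looks like sign | 000...0 | fraction; for
+;example, the single precision pattern 0x00000001 is the smallest positive
+;denormal, 2**-149.)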
+
+assemble: bb1 destsize,r12,doubassem ;assemble double result
+ bb0 sign,r10,exassems ;exit assemble if sign is zero
+ set r11,r11,1<sign> ;make result negative
+exassems: br Ureturn ;return from subroutine
+
+doubassem: bb0.n sign,r10,signclr ;do not set sign in r10
+ or r10,r5,r0 ;load high word from r5 into r10
+ set r10,r10,1<sign> ;high word with sign loaded
+signclr: br Ureturn ;return from subroutine
+
+
+;modformdef modifies the result of denormalization to the input format of
+;the inexact user handler. This input format is the same format that
+;MANTHI, MANTLO, and IMPCR were initially loaded with.
+
+#ifdef HANDLER
+modformdef: clr r12,r12,12<20> ;clear result exponent,IMPCR complete
+ clr r10,r10,4<25> ;clear old guard,round,sticky,and addone
+ mak r5,r9,3<26> ;make grs field
+ bb0.n 4,r9,newaddone ;do not set new addone in MANTHI
+ or r10,r5,r10 ;or in new grs field
+ set r10,r10,1<25> ;set new addone
+newaddone: bb1.n destsize,r12,moddefd ;branch to handle double precision
+ clr r10,r10,21<0> ;clear upper bits of old mantissa
+moddefs: extu r5,r11,20<3> ;extract upper bits
+ or r10,r5,r10 ;MANTHI complete
+ bsr.n _handler ;execute user handler for inexact
+ rot r11,r11,0<3> ;MANTLO complete
+ br Ureturn ;return from subroutine
+moddefd: bsr.n _handler ;execute user handler for inexact
+ or r10,r5,r10 ;MANTHI complete,r5 should be set to OR
+#endif
+
+
+;Return to fpui.
+
+Ureturn: ld r1,r31,0 ;load return address
+ jmp r1 ;return from subroutine
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+;function _FPoverflow --
+;The documentation for this release gives an overall description of this code.
+data
+align 4
+msg2: string "here at line %d, r1 is %x\n\0"
+text
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+#line 23
+ global _FPoverflow
+ text
+
+
+;If the overflow user handler bit is not set, then the inexact bit in the
+;FPSR is set, and the inexact user handler bit is checked. If it is set,
+;then the inexact user handler is executed, else the default routine for
+;overflow is executed.
+
+_FPoverflow:
+ st r1,r31,0 ;save return address
+#ifdef HANDLER
+ set r2,r2,1<overflow> ;set overflow bit in r2 which holds FPSR
+ bb1 efovf,r12,hand ;go to user handler if bit set for overflow
+ set r2,r2,1<inexact> ;set inexact bit in r2 since overflow bit
+ ;in FPCR is not set
+ bb0 efinx,r12,nohandler;if userhandler for inexact not set,then
+ ;round result
+ br callhandler ;branch to user handler for inexact
+
+;Before the overflow user handler is executed, the exponent is modified
+;by subtracting 192 for single precision and 1536 for double precision.
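+;(These are the same IEEE 754 trapped-exception scale factors used by the
+;underflow code, applied in the other direction: the handler sees the true
+;result divided by 2**192 or 2**1536.  Subtracting 0x0c000000 or 0x60000000
+;from r12 below takes 192 or 1536 off the exponent field in bits 20..31.)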
+
+hand: bb1 10,r12,doubleprec ;double precision result
+singleprec: or.u r5,r0,0x0c00 ;load exponent adjust
+ br.n callhandler ;prepare to call user handler
+ subu r12,r12,r5 ;adjust single precision exponent
+doubleprec: or.u r5,r0,0x6000 ;load exponent adjust
+ subu r12,r12,r5 ;adjust double precision exponent
+callhandler: bsr _handler ;branch to common handler routine
+ br return ;return from overflow subroutine
+#endif
+
+;Determine which rounding mode to use for the default procedure.
+
+nohandler: bb1 modehi,r10,signed ;mode is either round toward pos. or neg.
+ bb0 modelo,r10,OFnearest ;rounding mode is round nearest
+ br OFzero ;rounding mode is round zero
+signed: bb0 modelo,r10,OFnegative ;rounding mode is round negative
+ br positive ;rounding mode is round positive
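+
+;(The default results produced below follow the usual IEEE 754 overflow
+;table, where MAX is 0x7f7fffff for single and 0x7fefffff ffffffff for
+;double precision:
+;      mode            positive result         negative result
+;      nearest         +infinity               -infinity
+;      toward zero     +MAX                    -MAX
+;      toward +inf     +infinity               -MAX
+;      toward -inf     +MAX                    -infinity       )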
+
+
+;In the round toward nearest mode, positive values are rounded to
+;positive infinity and negative values are rounded to negative infinity.
+;The appropriate single or double precision value is then built and returned.
+
+OFnearest:
+ bb1.n destsize,r12,neardouble ;branch to neardouble of
+ ;double result
+ mask.u r5,r10,0x8000 ;mask off sign bit from MANTHI
+ or.u r11,r0,hi16(0x7f800000) ;load single infinity constant
+ or r11,r11,lo16(0x7f800000)
+ br.n return ;return with result
+ or r11,r5,r11 ;adjust sign
+neardouble:
+ or r11,r0,r0 ;load lower word of infinity
+ or.u r10,r0,hi16(0x7ff00000) ;load upper word of infinity
+ or r10,r10,lo16(0x7ff00000)
+ br.n return ;return with result
+ or r10,r5,r10 ;adjust sign
+
+
+;In the round toward zero mode, positive values are rounded to the largest
+;positive finite number and negative values are rounded toward the largest
+;negative finite number.
+;The appropriate single or double precision value is then built and returned.
+
+OFzero:
+ bb1.n destsize,r12,zerodouble ;branch to zerodouble of
+ ;double result
+ mask.u r5,r10,0x8000 ;mask off sign bit from MANTHI
+ or.u r11,r0,hi16(0x7f7fffff) ;load single finite number constant
+ or r11,r11,lo16(0x7f7fffff)
+ br.n return ;return with result
+ or r11,r5,r11 ;adjust sign
+zerodouble:
+ set r11,r0,0<0> ;load lower word of finite number
+ or.u r10,r0,hi16(0x7fefffff) ;load upper word of finite number
+ or r10,r10,lo16(0x7fefffff)
+ br.n return ;return with result
+ or r10,r5,r10 ;adjust sign
+
+
+;In the round toward positive mode, positive values are rounded to
+;positive infinity and negative values are rounded toward the largest
+;negative finite number.
+;The appropriate single or double precision value is then built and returned.
+
+positive:
+ bb1 destsize,r12,posdouble ;branch to section for double result
+possingle:
+ bb1 sign,r10,possingleneg ;branch to section for negatives
+possinglepos:
+ or.u r11,r0,hi16(0x7f800000) ;load single infinity constant
+ br.n return ;return with result
+ or r11,r11,lo16(0x7f800000)
+possingleneg:
+ or.u r11,r0,hi16(0x7f7fffff) ;load single finite number constant
+ or r11,r11,lo16(0x7f7fffff)
+ br.n return ;return with result
+ set r11,r11,1<sign> ;set sign for negative
+posdouble:
+ bb1 sign,r10,posdoubleneg ;branch to negative double results
+posdoublepos:
+ or r11,r0,r0 ;load lower word of double infinity
+ or.u r10,r0,hi16(0x7ff00000) ;load upper word of infinity
+ br.n return ;return with result
+ or r10,r10,lo16(0x7ff00000)
+posdoubleneg:
+ set r11,r0,0<0> ;load lower word of finite number
+ or.u r10,r0,hi16(0x7fefffff) ;load upper word of finite number
+ or r10,r10,lo16(0x7fefffff)
+ br.n return ;return with result
+ set r10,r10,1<sign> ;set sign for negative
+
+
+;In the round toward negative mode, positive values are rounded to the largest
+;positive finite number and negative values are rounded to negative infinity.
+;The appropriate single or double precision value is then built and returned.
+
+OFnegative:
+ bb1 destsize,r12,negdouble ;branch to section for double result
+negsingle:
+ bb1 sign,r10,negsingleneg ;branch to section for negatives
+negsinglepos:
+ or.u r11,r0,hi16(0x7f7fffff) ;load single finite number constant
+ br.n return ;return with result
+ or r11,r11,lo16(0x7f7fffff)
+negsingleneg:
+ or.u r11,r0,hi16(0x7f800000) ;load single infinity constant
+ or r11,r11,lo16(0x7f800000)
+ br.n return ;return with result
+ set r11,r11,1<sign> ;set sign for negative
+negdouble:
+ bb1 sign,r10,negdoubleneg ;branch to negative double results
+negdoublepos:
+ set r11,r0,0<0> ;load lower word of finite number
+ or.u r10,r0,hi16(0x7fefffff) ;load upper word of finite number
+ br.n return ;return with result
+ or r10,r10,lo16(0x7fefffff)
+negdoubleneg:
+ or r11,r0,r0 ;load lower word of double infinity
+ or.u r10,r0,hi16(0x7ff00000) ;load upper word of infinity
+ or r10,r10,lo16(0x7ff00000)
+ set r10,r10,1<sign> ;set sign for negative
+
+return:
+ ld r1,r31,0 ;ld return address
+ jmp r1 ;return from subroutine
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ text
+
+;If either S1 or S2 is a signalling NaN, then set the invalid operation
+;bit of the FPSR. If the invalid operation user handler flag is set and
+;the NaN is signalling, then branch to the handler routine to go to the
+;user handler.
+;If S1 is the only NaN or one of two NaN''s, then write
+;a quiet S1 to the result. A signalling NaN must be made quiet before
+;it can be written, but a signalling S2 is not modified in this routine
+;if S1 is a NaN.
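+;(Recall the NaN encoding assumed throughout: an all-ones exponent with a
+;non-zero mantissa, where the most significant mantissa bit -- sigbit below
+;-- is 1 for a quiet NaN and 0 for a signalling one.  Quieting a NaN is
+;therefore just setting that bit, e.g. single 0x7f800001 becomes 0x7fc00001.)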
+
+LABEL(_NaN)
+ bb0.n s1nan,r12,S2sigcheck ;S1 is not a NaN
+ st r1,r31,0 ;save return address
+ bb1 sigbit,r5,S2sigcheck ;S1 is not a signaling NaN
+ set r2,r2,1<oper> ;set invalid operation bit in FPSR
+#ifdef JEFF_DEBUGxxxxxxx
+ /*
+ * Generate a signal to the offending process.
+ * This uses hardcoded constants from mach/exception.h
+ * and mach/machine/exception.h.
+ */
+ ldcr r2, cr17 /* first arg: current_thread() */
+ or r3, r0, 3 /* second arg: EXC_ARITHMETIC */
+ or r4, r0, 3 /* third arg: EXC_M88K_FLOAT_P */
+ or r5, r0, r0
+ subu r31, r31, 48
+ bsr.n _thread_doexception
+ st r1, r31, 44
+ ld r1, r31, 44
+ br.n FPnan_return
+ addu r31, r31, 48
+#endif
+#ifdef HANDLER
+ bb0 oper,r3,S1nohandler ;branch if no user handler
+ bsr _handler ;branch to handler
+ br FPnan_return
+_LABEL(S1nohandler)
+#endif
+ br.n S1write ;FPSR bit already set, S1 is made quiet,
+ ;and since we always write S1 if it is a
+ ;NaN, write S1 and skip rest of routine
+ set r5,r5,1<sigbit> ;make S1 a quiet NaN
+
+_LABEL(S2sigcheck)
+ bb0 s2nan,r12,S1write ;S2 is not a NaN
+ bb1 sigbit,r7,S1write ;S2 is not a signaling NaN
+ set r2,r2,1<oper> ;set invalid operation bit in FPSR
+#ifdef HANDLER
+ bb0 oper,r3,S2nohandler ;branch if no user handler
+ bsr _handler ;branch to handler
+ br FPnan_return
+#endif
+
+_LABEL(S2nohandler)
+ set r7,r7,1<sigbit> ;make S2 a quiet NaN
+
+
+;Write a single or double precision quiet NaN unless the operation is FCMP.
+;If the operation is FCMP, then set the not comparable bit in the result.
+
+_LABEL(S1write)
+ bb0 s1nan,r12,S2write ;do not write S1 if it is not a NaN
+ extu r10,r9,5<11> ;extract opcode
+ cmp r11,r10,FCMPop ;compare to FCMP
+ bb1 ne,r11,S1noFCMP ;operation is not FCMP
+ set r6,r0,1<nc> ;set the not comparable bit
+ br.n FPnan_return ;return from subroutine
+ set r6,r6,1<ne> ;set the not equal bit
+_LABEL(S1noFCMP)
+ bb1.n dsize,r9,wrdoubS1 ;double destination
+ set r5,r5,11<20> ;set all exponent bits to 1
+;The single result will be formed the same way whether S1 is a single or double
+_LABEL(wrsingS1)
+ mak r10,r5,28<3> ;wipe out extra exponent bits
+ extu r11,r6,3<29> ;get lower three bits of mantissa
+ or r10,r10,r11 ;combine all of result except sign
+ clr r6,r5,31<0> ;clear all but sign
+ br.n FPnan_return ;return from function
+ or r6,r6,r10 ;form result
+
+_LABEL(wrdoubS1)
+;;;;;; bb1 s1size,r9,wrdoubS1d ;write double source to double dest.
+/* took out the above instruction -- don't see why it's there.... jfriedl */
+_LABEL(wrdoubS1s)
+ set r6,r6,29<0> ;set extra bits of lower word
+_LABEL(wrdoubS1d)
+ br FPnan_return ;no modification necessary for writing
+ ;double to double, so return from function
+
+_LABEL(S2write)
+ extu r10,r9,5<11> ;extract opcode
+ cmp r11,r10,FCMPop ;compare to FCMP
+ bb1.n ne,r11,S2noFCMP ;operation is not FCMP
+ set r7,r7,11<20> ;set all exponent bits to 1
+ set r6,r0,1<nc> ;set the not comparable bit
+ br.n FPnan_return ;return from subroutine
+ set r6,r6,1<ne> ;set the not equal bit
+_LABEL(S2noFCMP)
+ bb1.n dsize,r9,wrdoubS2 ;double destination
+ /*
+ * In the original, the ".n" above and the "set r5..." below
+ * were omitted here. Since they're in the S1 stuff above,
+ * and since this isn't working right now (r5 isn't being set
+ * to it's part of the nan), I'll try this...
+ * jfriedl Dec 1, 1989
+ */
+ set r5,r5,11<20> ;set all exponent bits to 1
+;The single result will be formed the same way whether S2 is a single or double
+_LABEL(wrsingS2)
+ mak r10,r7,28<3> ;wipe out extra exponent bits
+ extu r11,r8,3<29> ;get lower three bits of mantissa
+ or r10,r10,r11 ;combine all of result except sign
+ clr r6,r7,31<0> ;clear all but sign
+ br.n FPnan_return ;return from function
+ or r6,r6,r10 ;form result
+
+_LABEL(wrdoubS2)
+
+;;;; bb1 s2size,r9,FPnan_return ;write double source to double dest.
+ /*
+ * I took out the above branch because I just don't see how it
+ * makes sense. jfriedl Dec 1, '89
+ */
+_LABEL(wrdoubS2s)
+ set r6,r8,29<0> ;set extra bits of lower word
+
+
+;Return from this subroutine with the result.
+
+_LABEL(FPnan_return)
+ ;no modification necessary for writing
+ ;double to double, so return from function
+ ld r1,r31, 0 ;retrieve return address
+ jmp r1 ;return from function
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+;function _infinity --
+;See the documentation of this release for an overall description of this
+;code.
+
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ global _infinity
+ text
+
+;Extract the opcode, compare to a constant, and branch to the code
+;for the instruction.
+
+_infinity: extu r10,r9,5<11> ;extract opcode
+ cmp r11,r10,FADDop ;compare to FADD
+ bb1.n eq,r11,FADD ;operation is FADD
+ st r1,r31,0 ;save return address
+ cmp r11,r10,FSUBop ;compare to FSUB
+ bb1 eq,r11,FSUB ;operation is FSUB
+ cmp r11,r10,FCMPop ;compare to FCMP
+ bb1 eq,r11,FCMP ;operation is FCMP
+ cmp r11,r10,FMULop ;compare to FMUL
+ bb1 eq,r11,FMUL ;operation is FMUL
+ cmp r11,r10,FDIVop ;compare to FDIV
+ bb1 eq,r11,FDIV ;operation is FDIV
+; cmp r11,r10,FSQRTop;compare to FSQRT
+; bb1 eq,r11,FSQRT ;operation is FSQRT
+ cmp r11,r10,INTop ;compare to INT
+ bb1 eq,r11,FP_inf_overflw ;operation is INT
+ cmp r11,r10,NINTop ;compare to NINT
+ bb1 eq,r11,FP_inf_overflw ;operation is NINT
+ cmp r11,r10,TRNCop ;compare to TRNC
+ bb1 eq,r11,FP_inf_overflw ;operation is TRNC
+
+
+;Adding infinities of opposite signs will cause an exception,
+;but all other operands will result in a correctly signed infinity.
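+;(That is:  (+inf) + (+inf) = +inf,  (-inf) + (-inf) = -inf,
+;(+inf) + (-inf) raises the invalid operation exception, and
+;infinity plus a finite value keeps the sign of the infinity.)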
+
+FADD: bb0 s1inf,r12,addS2write ;branch if S1 not infinity
+ bb0 s2inf,r12,addS1write ;S2 is not inf., so branch to write S1
+ bb1 sign,r5,addS1neg ;handle case of S1 negative
+addS1pos: bb1 sign,r7,excpt ;adding infinities of different signs
+ ;causes an exception
+ br poswrinf ;branch to write positive infinity
+addS1neg: bb0 sign,r7,excpt ;adding infinities of different signs
+ ;causes an exception
+ br negwrinf ;branch to write negative infinity
+addS1write: bb0 sign,r5,poswrinf ;branch to write positive infinity
+ br negwrinf ;branch to write negative infinity
+addS2write: bb0 sign,r7,poswrinf ;branch to write positive infinity
+ br negwrinf ;branch to write negative infinity
+
+
+;Subtracting infinities of the same sign will cause an exception,
+;but all other operands will result in a correctly signed infinity.
+
+FSUB: bb0 s1inf,r12,subS2write ;branch if S1 not infinity
+ bb0 s2inf,r12,subS1write ;S2 is not inf., so branch to write S1
+ bb1 sign,r5,subS1neg ;handle case of S1 negative
+subS1pos: bb0 sign,r7,excpt ;subtracting infinities of the same sign
+ ;causes an exception
+ br poswrinf ;branch to write positive infinity
+subS1neg: bb1 sign,r7,excpt ;subtracting infinities of the same sign
+ ;causes an exception
+ br negwrinf ;branch to write negative infinity
+subS1write: bb0 sign,r5,poswrinf ;branch to write positive infinity
+ br negwrinf ;branch to write negative infinity
+subS2write: bb1 sign,r7,poswrinf ;branch to write positive infinity
+ br negwrinf ;branch to write negative infinity
+
+
+;Compare the operands, at least one of which is infinity, and set the
+;correct bits in the destination register.
+
+FCMP: bb0.n s1inf,r12,FCMPS1f ;branch for finite S1
+ set r4,r0,1<cp> ;since neither S1 nor S2 is a NaN, set cp
+FCMPS1i: bb1 sign,r5,FCMPS1ni ;branch to negative S1i
+FCMPS1pi: bb0 s2inf,r12,FCMPS1piS2f ;branch to finite S2 with S1pi
+FCMPS1piS2i: bb1 sign,r7,FCMPS1piS2ni ;branch to negative S2i with S1pi
+FCMPS1piS2pi: set r4,r4,1<eq> ;set eq bit
+ set r4,r4,1<le> ;set le bit
+ set r4,r4,1<ge> ;set ge bit
+ set r4,r4,1<ib> ;set ib bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1piS2ni: set r4,r4,1<ne> ;set ne bit
+ set r4,r4,1<gt> ;set gt bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ge> ;set ge bit
+FCMPS1piS2f: set r4,r4,1<ne> ;set ne bit
+ set r4,r4,1<gt> ;set gt bit
+ bsr.n _zero ;see if any of the operands are zero
+ set r4,r4,1<ge> ;set ge bit
+ bb0 s2zero,r12,FCMPS1piS2nz ;check for negative if s2 not zero
+ set r4,r4,1<ou> ;set ou bit
+ br.n move
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1piS2nz: bb1 sign,r7,move ;return from subroutine if s2 is neg.
+FCMPS1piS2pf: set r4,r4,1<ou> ;set ou bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1ni: bb0 s2inf,r12,FCMPS1niS2f ;branch to finite S2 with S1ni
+FCMPS1niS2i: bb1 sign,r7,FCMPS1niS2ni ;branch to negative S2i with S1ni
+FCMPS1niS2pi: set r4,r4,1<ne> ;set ne bit
+ set r4,r4,1<le> ;set le bit
+ set r4,r4,1<lt> ;set lt bit
+ set r4,r4,1<ou> ;set ou bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1niS2ni: set r4,r4,1<eq> ;set eq bit
+ set r4,r4,1<le> ;set le bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ge> ;set ge bit
+FCMPS1niS2f: set r4,r4,1<ne> ;set ne bit
+ set r4,r4,1<le> ;set le bit
+ bsr.n _zero ;see if any of the operands are zero
+ set r4,r4,1<lt> ;set lt bit
+ bb0 s2zero,r12,FCMPS1niS2nz ;branch if s2 is not zero
+ set r4,r4,1<ou> ;set ou bit
+ br.n move
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1niS2nz: bb1 sign,r7,move ;return from subroutine if s2 is neg.
+ set r4,r4,1<ou> ;set ou bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1f: bb1 sign,r5,FCMPS1nf ;branch to negative S1f
+FCMPS1pf: bb1.n sign,r7,FCMPS1pfS2ni ;branch to negative S2i with S1pf
+ set r4,r4,1<ne> ;set ne bit
+FCMPS1pfS2pi: set r4,r4,1<le> ;set le bit
+ set r4,r4,1<lt> ;set lt bit
+ bsr.n _zero
+ set r4,r4,1<ib> ;set ib bit
+ bb0 s1zero,r12,FCMPS1pfS2pinozero
+FCMPS1pfS2pizero: br.n move
+ set r4,r4,1<ob> ;set ob bit
+FCMPS1pfS2pinozero: br.n move
+ set r4,r4,1<in> ;set in bit
+FCMPS1pfS2ni: set r4,r4,1<gt> ;set gt bit
+ br.n move ;return from subroutine
+ set r4,r4,1<ge> ;set ge bit
+FCMPS1nf: bb1.n sign,r7,FCMPS1nfS2ni ;branch to negative S2i with S1nf
+ set r4,r4,1<ne> ;set ne bit
+ set r4,r4,1<le> ;set le bit
+ set r4,r4,1<lt> ;set lt bit
+ bsr.n _zero ;see which of the operands are zero
+ set r4,r4,1<ob> ;set ob bit
+ bb0 s1zero,r12,FCMPS1nfS2pinozero ;no ls and lo
+FCMPS1nfS2pizero: br.n move
+ set r4,r4,1<ib> ;set ib bit
+FCMPS1nfS2pinozero: br.n move
+ set r4,r4,1<ou> ;set ou bit
+FCMPS1nfS2ni: set r4,r4,1<gt> ;set gt bit
+ set r4,r4,1<ge> ;set ge bit
+
+move: br.n inf_return ;return from subroutine
+ or r6,r0,r4 ;transfer answer to r6
+
+
+;Multiplying infinity and zero causes an exception, but all other
+;operand combinations produce a correctly signed infinity.
+
+FMUL: bsr _zero ;see if any of the operands are zero
+ bb1 s1zero,r12,excpt ;infinity X 0 causes an exception
+ bb1 s2zero,r12,excpt ;infinity X 0 causes an exception
+ bb1 sign,r5,FMULS1neg ;handle negative cases of S1
+ bb0 sign,r7,poswrinf ;+ X + = +
+ br negwrinf ;+ X - = -
+FMULS1neg: bb1 sign,r7,poswrinf ;- X - = +
+ br negwrinf ;- X + = -
+
+
+;Dividing infinity by infinity causes an exception, but dividing
+;infinity by a finite yields a correctly signed infinity, and
+;dividing a finite by an infinity produces a correctly signed zero.
+
+FDIV: bb1 s1inf,r12,FDIVS1inf ;handle case of S1 being infinity
+ bb1 sign,r5,FDIVS1nf ;handle cases of S1 being neg. non-inf.
+ bb1 sign,r7,FDIVS1pfS2mi ;handle case of negative S2
+FDIVS1pfS2pi: br poswrzero ;+f / +inf = +0
+FDIVS1pfS2mi: br negwrzero ;+f / -inf = -0
+FDIVS1nf: bb1 sign,r7,FDIVS1nfS2mi ;handle case of negative S2
+FDIVS1nfS2pi: br negwrzero ;-f / +inf = -0
+FDIVS1nfS2mi: br poswrzero ;-f / -inf = +0
+FDIVS1inf: bb1 s2inf,r12,excpt ;inf / inf = exception
+ bb1 sign,r5,FDIVS1mi ;handle cases of S1 being neg. inf.
+ bb1 sign,r7,FDIVS1piS2nf ;handle case of negative S2
+FDIVS1piS2pf: br poswrinf ;+inf / +f = +inf
+FDIVS1piS2nf: br negwrinf ;+inf / -f = -inf
+FDIVS1mi: bb1 sign,r7,FDIVS1miS2nf ;handle case of negative S2
+FDIVS1miS2pf: br negwrinf ;-inf / +f = -inf
+FDIVS1miS2nf: br poswrinf ;-inf / -f = +inf
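+
+;Note that in the FMUL and FDIV cases above the sign of any non-exceptional
+;result is simply the exclusive-or of the two operand signs; the branches
+;merely enumerate the four sign combinations.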
+
+
+;The square root of positive infinity is positive infinity,
+;but the square root of negative infinity is a NaN
+
+;FSQRT: bb0 sign,r7,poswrinf ;write sqrt(inf) = inf
+; br excpt ;write sqrt(-inf) = NaN
+
+excpt:
+ set r2,r2,1<oper> ;set invalid operation bit of FPSR
+#ifdef HANDLER
+ bb0 oper,r3,nohandler ;branch if no user handler
+ bsr _handler ;branch to interface with user handler
+ br inf_return ;return from function
+nohandler:
+#endif
+ set r5,r0,0<0> ;write NaN into r5
+ br.n inf_return ;return from subroutine
+	set r6,r0,0<0>		;write NaN into r6; writing NaN's into
+ ;both of these registers is quicker than
+ ;checking for single or double precision
+
+
+;Write positive infinity of the correct precision
+
+poswrinf: bb1 dsize,r9,poswrinfd ;branch to write double precision inf.
+ br.n inf_return ;return from subroutine
+ or.u r6,r0,0x7f80 ;load r6 with single precision pos inf.
+poswrinfd: or.u r5,r0,0x7ff0 ;load double precision pos inf.
+ br.n inf_return ;return from subroutine
+ or r6,r0,r0
+
+
+;Write negative infinity of the correct precision
+
+negwrinf: bb1 dsize,r9,negwrinfd ;branch to write double precision inf.
+ br.n inf_return ;return from subroutine
+	or.u r6,r0,0xff80	;load r6 with single precision neg inf.
+negwrinfd: or.u r5,r0,0xfff0	;load double precision neg inf.
+ br.n inf_return ;return from subroutine
+ or r6,r0,r0
+
+
+;Write a positive zero disregarding precision.
+
+poswrzero: or r5,r0,r0 ;write to both high word and low word now
+ br.n inf_return ;it does not matter that both are written
+ or r6,r0,r0
+
+
+;Write a negative zero of the correct precision.
+
+negwrzero: or r6,r0,r0 ;clear low word
+ bb1 dsize,r9,negwrzerod ;branch to write double precision zero
+ br.n inf_return ;return from subroutine
+ set r6,r6,1<31> ;set sign bit
+negwrzerod: or r5,r0,r0 ;clear high word
+ br.n inf_return ;return from subroutine
+ set r5,r5,1<31> ;set sign bit
+
+FP_inf_overflw:
+ set r2,r2,1<oper> ;set invalid operand bit
+#ifdef HANDLER
+ bb0 oper,r3,nohandlero ;do not go to user handler routine
+ bsr _handler ;go to user handler routine
+ br inf_return ;return from subroutine
+#endif
+
+nohandlero: bb0.n sign,r7,inf_return ;if positive then return from subroutine
+
+ set r6,r6,31<0> ;set result to largest positive integer
+ or.c r6,r0,r6 ;negate r6,giving largest negative int.
+
+inf_return: ld r1,r31,0 ;load return address
+ jmp r1 ;return from subroutine
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#define FADD denorm_FADD
+#define FSUB denorm_FSUB
+#define FCMP denorm_FCMP
+#define FMUL denorm_FMUL
+#define FDIV denorm_FDIV
+#define NINT denorm_NINT
+#define TRNC denorm_TRNC
+#define return denorm_return
+;function _denorm --
+;See the documentation for this release for an overall description of this
+;code.
+
+ global _denorm
+ text
+
+;Check to see if either S1 or S2 is a denormalized number. First
+;extract the exponent to see if it is zero, and then check to see if
+;the mantissa is not zero. If the number is denormalized, then set
+;bit 1 (for S1) or bit 0 (for S2) in r12.
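+;(A denormalized IEEE value has an all-zero exponent field but a non-zero
+;mantissa, while a true zero has both fields zero; that is why the mantissa
+;bits are or'ed together and compared against zero below.)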
+
+_denorm: st r1,r31,0 ;save return address
+dnmcheckS1: extu r10,r5,11<20> ;extract exponent
+ bcnd ne0,r10,dnmsetS2 ;S1 is not a denorm, so S2 must be
+ bb1.n 9,r9,dnmcheckS1d ;S1 is double precision
+ mak r10,r5,20<3> ;mak field with only mantissa bits
+ ;into final result
+dnmcheckS1s: extu r11,r6,3<29> ;get three low bits of mantissa
+ or r10,r10,r11 ;assemble all of the mantissa bits
+ bcnd eq0,r10,dnmsetS2 ;S1 is not a denorm, so S2 must be
+ br dnmsetS1 ;S1 is a denorm
+
+dnmcheckS1d: or r10,r6,r10 ;or all of mantissa bits
+ bcnd eq0,r10,dnmsetS2 ;S1 is not a denorm, so S2 must be
+dnmsetS1: set r12,r12,1<1> ;S1 is a denorm
+
+dnmcheckS2: extu r10,r7,11<20> ;extract exponent
+ bcnd ne0,r10,S1form ;S2 is not a denorm
+ bb1.n 7,r9,dnmcheckS2d ;S2 is double precision
+ mak r10,r7,20<3> ;mak field with only mantissa bits
+dnmcheckS2s: extu r11,r8,3<29> ;get three low bits of mantissa
+ or r10,r10,r11 ;assemble all of the mantissa bits
+ bcnd eq0,r10,S1form ;S2 is not a denorm
+	br dnmsetS2		;S2 is a denorm
+dnmcheckS2d: or r10,r8,r10	;or all of mantissa bits
+ bcnd eq0,r10,S1form ;S2 is not a denorm
+dnmsetS2: set r12,r12,1<0> ;S2 is a denorm
+
+
+;Since the operations are going to be reperformed with modified denorms,
+;the operands which were initially single precision need to be modified
+;back to single precision.
+
+S1form: bb1 9,r9,S2form ;S1 is double precision, so do not
+ ;modify S1 into single format
+	mak r11,r5,28<3>	;mak field over final exponent and mantissa
+ ;eliminating extra 3 bits of exponent
+ extu r6,r6,3<29> ;get low 3 bits of mantissa
+ or r11,r6,r11 ;form complete mantissa and exponent
+	extu r10,r5,1<31>	;get bit 31 (the sign bit)
+	mak r10,r10,1<31>	;place bit 31 in the correct position
+	or r6,r10,r11		;or bit 31, exponent, and all of mantissa
+
+S2form: bb1 7,r9,checkop ;S2 is double precision, so do not
+ ;modify S2 into single format
+	mak r11,r7,28<3>	;mak field over final exponent and mantissa
+ ;eliminating extra 3 bits of exponent
+ extu r8,r8,3<29> ;get low 3 bits of mantissa
+ or r11,r8,r11 ;form complete mantissa and exponent
+	extu r10,r7,1<31>	;get bit 31 (the sign bit)
+	mak r10,r10,1<31>	;place bit 31 in the correct position
+	or r8,r10,r11		;or bit 31, exponent, and all of mantissa
+
+
+;Extract the opcode, compare to a constant, and branch to the code that
+;deals with that opcode.
+
+checkop: extu r10,r9,5<11> ;extract opcode
+ cmp r11,r10,0x05 ;compare to FADD
+ bb1 2,r11,FADD ;operation is FADD
+ cmp r11,r10,0x06 ;compare to FSUB
+ bb1 2,r11,FSUB ;operation is FSUB
+ cmp r11,r10,0x07 ;compare to FCMP
+ bb1 2,r11,FCMP ;operation is FCMP
+ cmp r11,r10,0x00 ;compare to FMUL
+ bb1 2,r11,FMUL ;operation is FMUL
+ cmp r11,r10,0x0e ;compare to FDIV
+ bb1 2,r11,FDIV ;operation is FDIV
+; cmp r11,r10,0x0f;compare to FSQRT
+; bb1 2,r11,FSQRT ;operation is FSQRT
+ cmp r11,r10,0x09 ;compare to INT
+ bb1 2,r11,INT ;operation is INT
+ cmp r11,r10,0x0a ;compare to NINT
+ bb1 2,r11,NINT ;operation is NINT
+ cmp r11,r10,0x0b ;compare to TRNC
+ bb1 2,r11,TRNC ;operation is TRNC
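+;(Each cmp followed by a "bb1 2,r11,..." above branches to the named
+;handler when the extracted opcode equals the constant just compared.)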
+
+
+;For all the following operations, the denormalized number is set to
+;zero and the operation is reperformed with the correct destination and
+;source sizes.
+
+FADD: bb0 1,r12,FADDS2dnm ;S1 is not denorm, so S2 must be
+ or r5,r0,r0 ;set S1 to zero
+ or r6,r0,r0
+FADDS2chk: bb0 0,r12,FADDcalc ;S2 is not a denorm
+FADDS2dnm: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+FADDcalc: bb1 5,r9,FADDdD ;branch for double precision destination
+FADDsD: bb1 9,r9,FADDsDdS1 ;branch for double precision S1
+FADDsDsS1: bb1 7,r9,FADDsDsS1dS2 ;branch for double precision S2
+FADDsDsS1sS2: br.n return ;return from subroutine
+	fadd.sss r6,r6,r8	;add the two sources and place result in S1
+FADDsDsS1dS2: br.n return ;return from subroutine
+	fadd.ssd r6,r6,r7	;add the two sources and place result in S1
+FADDsDdS1: bb1 7,r9,FADDsDdS1dS2 ;branch for double precision S2
+FADDsDdS1sS2: br.n return ;return from subroutine
+	fadd.sds r6,r5,r8	;add the two sources and place result in S1
+FADDsDdS1dS2: br.n return ;return from subroutine
+	fadd.sdd r6,r5,r7	;add the two sources and place result in S1
+FADDdD: bb1 9,r9,FADDdDdS1 ;branch for double precision S1
+FADDdDsS1: bb1 7,r9,FADDdDsS1dS2 ;branch for double precision S2
+FADDdDsS1sS2: br.n return ;return from subroutine
+	fadd.dss r5,r6,r8	;add the two sources and place result in S1
+FADDdDsS1dS2: br.n return ;return from subroutine
+	fadd.dsd r5,r6,r7	;add the two sources and place result in S1
+FADDdDdS1: bb1 7,r9,FADDdDdS1dS2 ;branch for double precision S2
+FADDdDdS1sS2: br.n return ;return from subroutine
+	fadd.dds r5,r5,r8	;add the two sources and place result in S1
+FADDdDdS1dS2: br.n return ;return from subroutine
+	fadd.ddd r5,r5,r7	;add the two sources and place result in S1
+
+FSUB: bb0 1,r12,FSUBS2dnm ;S1 is not denorm, so S2 must be
+ or r5,r0,r0 ;set S1 to zero
+ or r6,r0,r0
+FSUBS2chk: bb0 0,r12,FSUBcalc ;S2 is not a denorm
+FSUBS2dnm: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+FSUBcalc: bb1 5,r9,FSUBdD ;branch for double precision destination
+FSUBsD: bb1 9,r9,FSUBsDdS1 ;branch for double precision S1
+FSUBsDsS1: bb1 7,r9,FSUBsDsS1dS2 ;branch for double precision S2
+FSUBsDsS1sS2: br.n return ;return from subroutine
+	fsub.sss r6,r6,r8	;subtract S2 from S1 and place result in S1
+FSUBsDsS1dS2: br.n return ;return from subroutine
+	fsub.ssd r6,r6,r7	;subtract S2 from S1 and place result in S1
+FSUBsDdS1: bb1 7,r9,FSUBsDdS1dS2 ;branch for double precision S2
+FSUBsDdS1sS2: br.n return ;return from subroutine
+	fsub.sds r6,r5,r8	;subtract S2 from S1 and place result in S1
+FSUBsDdS1dS2: br.n return ;return from subroutine
+	fsub.sdd r6,r5,r7	;subtract S2 from S1 and place result in S1
+FSUBdD: bb1 9,r9,FSUBdDdS1 ;branch for double precision S1
+FSUBdDsS1: bb1 7,r9,FSUBdDsS1dS2 ;branch for double precision S2
+FSUBdDsS1sS2: br.n return ;return from subroutine
+	fsub.dss r5,r6,r8	;subtract S2 from S1 and place result in S1
+FSUBdDsS1dS2: br.n return ;return from subroutine
+	fsub.dsd r5,r6,r7	;subtract S2 from S1 and place result in S1
+FSUBdDdS1: bb1 7,r9,FSUBdDdS1dS2 ;branch for double precision S2
+FSUBdDdS1sS2: br.n return ;return from subroutine
+	fsub.dds r5,r5,r8	;subtract S2 from S1 and place result in S1
+FSUBdDdS1dS2: br.n return ;return from subroutine
+	fsub.ddd r5,r5,r7	;subtract S2 from S1 and place result in S1
+
+FCMP: bb0 1,r12,FCMPS2dnm ;S1 is not denorm, so S2 must be
+ or r5,r0,r0 ;set S1 to zero
+ or r6,r0,r0
+FCMPS2chk: bb0 0,r12,FCMPcalc ;S2 is not a denorm
+FCMPS2dnm: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+FCMPcalc: bb1 9,r9,FCMPdS1 ;branch for double precision S1
+FCMPsS1: bb1 7,r9,FCMPsS1dS2 ;branch for double precision S2
+FCMPsS1sS2: br.n return ;return from subroutine
+	fcmp.sss r6,r6,r8	;compare the two sources and place result in S1
+FCMPsS1dS2: br.n return ;return from subroutine
+	fcmp.ssd r6,r6,r7	;compare the two sources and place result in S1
+FCMPdS1: bb1 7,r9,FCMPdS1dS2 ;branch for double precision S2
+FCMPdS1sS2: br.n return ;return from subroutine
+	fcmp.sds r6,r5,r8	;compare the two sources and place result in S1
+FCMPdS1dS2: br.n return ;return from subroutine
+	fcmp.sdd r6,r5,r7	;compare the two sources and place result in S1
+
+FMUL: bb0 1,r12,FMULS2dnm ;S1 is not denorm, so S2 must be
+ or r5,r0,r0 ;set S1 to zero
+ or r6,r0,r0
+FMULS2chk: bb0 0,r12,FMULcalc ;S2 is not a denorm
+FMULS2dnm: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+FMULcalc: bb1 5,r9,FMULdD ;branch for double precision destination
+FMULsD: bb1 9,r9,FMULsDdS1 ;branch for double precision S1
+FMULsDsS1: bb1 7,r9,FMULsDsS1dS2 ;branch for double precision S2
+FMULsDsS1sS2: br.n return ;return from subroutine
+	fmul.sss r6,r6,r8	;multiply the two sources and place result in S1
+FMULsDsS1dS2: br.n return ;return from subroutine
+	fmul.ssd r6,r6,r7	;multiply the two sources and place result in S1
+FMULsDdS1: bb1 7,r9,FMULsDdS1dS2 ;branch for double precision S2
+FMULsDdS1sS2: br.n return ;return from subroutine
+	fmul.sds r6,r5,r8	;multiply the two sources and place result in S1
+FMULsDdS1dS2: br.n return ;return from subroutine
+	fmul.sdd r6,r5,r7	;multiply the two sources and place result in S1
+FMULdD: bb1 9,r9,FMULdDdS1 ;branch for double precision S1
+FMULdDsS1: bb1 7,r9,FMULdDsS1dS2 ;branch for double precision S2
+FMULdDsS1sS2: br.n return ;return from subroutine
+	fmul.dss r5,r6,r8	;multiply the two sources and place result in S1
+FMULdDsS1dS2: br.n return ;return from subroutine
+	fmul.dsd r5,r6,r7	;multiply the two sources and place result in S1
+FMULdDdS1: bb1 7,r9,FMULdDdS1dS2 ;branch for double precision S2
+FMULdDdS1sS2: br.n return ;return from subroutine
+	fmul.dds r5,r5,r8	;multiply the two sources and place result in S1
+FMULdDdS1dS2: br.n return ;return from subroutine
+	fmul.ddd r5,r5,r7	;multiply the two sources and place result in S1
+
+FDIV: bb0 1,r12,FDIVS2dnm ;S1 is not denorm, so S2 must be
+ or r5,r0,r0 ;set S1 to zero
+ or r6,r0,r0
+FDIVS2chk: bb0 0,r12,FDIVcalc ;S2 is not a denorm
+FDIVS2dnm: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+FDIVcalc: bb1 5,r9,FDIVdD ;branch for double precision destination
+FDIVsD: bb1 9,r9,FDIVsDdS1 ;branch for double precision S1
+FDIVsDsS1: bb1 7,r9,FDIVsDsS1dS2 ;branch for double precision S2
+FDIVsDsS1sS2: fdiv.sss r6,r6,r8	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVsDsS1dS2: fdiv.ssd r6,r6,r7	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVsDdS1: bb1 7,r9,FDIVsDdS1dS2 ;branch for double precision S2
+FDIVsDdS1sS2: fdiv.sds r6,r5,r8	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVsDdS1dS2: fdiv.sdd r6,r5,r7	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVdD: bb1 9,r9,FDIVdDdS1 ;branch for double precision S1
+FDIVdDsS1: bb1 7,r9,FDIVdDsS1dS2 ;branch for double precision S2
+FDIVdDsS1sS2: fdiv.dss r5,r6,r8	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVdDsS1dS2: fdiv.dsd r5,r6,r7	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVdDdS1: bb1 7,r9,FDIVdDdS1dS2 ;branch for double precision S2
+FDIVdDdS1sS2: fdiv.dds r5,r5,r8	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+FDIVdDdS1dS2: fdiv.ddd r5,r5,r7	;divide S1 by S2 and place result in S1
+ br return ;return from subroutine
+
+;FSQRT: or r7,r0,r0 ;set S2 to zero
+; or r8,r0,r0
+;FSQRTcalc: bb1 5,r9,FSQRTdD ;branch for double precision destination
+;FSQRTsD: bb1 7,r9,FSQRTsDdS2 ;branch for double precision S2
+;FSQRTsDsS2: br.n return ;return from subroutine
+ ;fsqrt.ss r6,r8 ;add the two sources and place result 10 S1
+;FSQRTsDdS2: br.n return ;return from subroutine
+ ;fsqrt.sd r6,r7 ;add the two sources and place result 10 S1
+;FSQRTdD: bb1 7,r9,FSQRTdDdS2 ;branch for double precision S2
+;FSQRTdDsS2: br.n return ;return from subroutine
+ ;fsqrt.ds r5,r8 ;add the two sources and place result 10 S1
+;FSQRTdDdS2: br.n return ;return from subroutine
+ ;fsqrt.dd r5,r7 ;add the two sources and place result 10 S1
+
+INT: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+INTcalc: bb1 7,r9,INTdS2 ;branch for double precision S2
+INTsS2: br.n return ;return from subroutine
+	int.ss r6,r8		;convert S2 to integer and place result in S1
+INTdS2: br.n return ;return from subroutine
+	int.sd r6,r7		;convert S2 to integer and place result in S1
+
+NINT: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+NINTcalc: bb1 7,r9,NINTdS2 ;branch for double precision S2
+NINTsS2: br.n return ;return from subroutine
+	nint.ss r6,r8		;round S2 to nearest integer and place result in S1
+NINTdS2: br.n return ;return from subroutine
+	nint.sd r6,r7		;round S2 to nearest integer and place result in S1
+
+TRNC: or r7,r0,r0 ;set S2 to zero
+ or r8,r0,r0
+TRNCcalc: bb1 7,r9,TRNCdS2 ;branch for double precision S2
+TRNCsS2: br.n return ;return from subroutine
+	trnc.ss r6,r8		;truncate S2 to integer and place result in S1
+TRNCdS2: trnc.sd r6,r7		;truncate S2 to integer and place result in S1
+
+
+;Return to the routine that detected the reserved operand.
+
+return: ld r1,r31,0 ;load return address
+ jmp r1 ;return from subroutine
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+#ifndef __LUNA_SUB_H__
+#include "luna_sub.h"
+#endif
+ global _zero
+ text
+
+;S1 and/or S2 is an infinity, and the other operand may be a zero.
+;Knowing which operands are infinity, check the remaining operands for zeros.
+
+_zero: bb0 s1inf,r12,S1noinf ;see if S1 is zero
+ bb0 s2inf,r12,S2noinf ;see if S2 is zero
+ jmp r1 ;return from function
+
+;See if S1 is zero. Whether or not S1 is a zero, being in this routine
+;implies that S2 is infinity, so return to subroutine infinity after
+;completing this code. Set the s1zero flag in r12 if S1 is zero.
+
+S1noinf: bb1 s1size,r9,S1noinfd ;work with double precision operand
+S1noinfs: or r10,r0,r5 ;load high word into r10
+ clr r10,r10,1<sign> ;clear the sign bit
+ extu r11,r6,3<29> ;extract lower 3 bits of mantissa
+ or r10,r10,r11 ;or these 3 bits with high word
+ bcnd ne0,r10,operation ;do not set zero flag
+ jmp.n r1 ;since this operand was not infinity,
+ ;S2 must have been, so return from
+ ;function
+ set r12,r12,1<s1zero> ;set zeroflag
+S1noinfd: clr r10,r5,1<sign> ;clear the sign bit
+ or r10,r6,r10 ;or high and low word
+ bcnd ne0,r10,operation ;do not set zero flag
+ jmp.n r1 ;since this operand was not infinity,
+ ;S2 must have been, so return from
+ ;function
+ set r12,r12,1<s1zero> ;set zeroflag
+
+
+;Check S2 for zero. If it is zero, then set the s2zero flag in r12.
+
+S2noinf: bb1 s2size,r9,S2noinfd ;work with double precision operand
+S2noinfs: or r10,r0,r7 ;load high word into r10
+ clr r10,r10,1<sign> ;clear the sign bit
+ extu r11,r8,3<29> ;extract lower 3 bits of mantissa
+ or r10,r10,r11 ;or these 3 bits with high word
+ bcnd ne0,r10,operation ;do not set zero flag
+ jmp.n r1 ;since this operand was not infinity,
+ ;S1 must have been, so return from
+ ;function
+ set r12,r12,1<s2zero> ;set zeroflag
+S2noinfd: clr r10,r7,1<sign> ;clear the sign bit
+ or r10,r8,r10 ;or high and low word
+ bcnd ne0,r10,operation ;do not set zero flag
+ set r12,r12,1<s2zero> ;set zeroflag
+ ;since this operand was not infinity,
+ ;S1 must have been, so return from
+ ;function
+operation: jmp r1 ;return from function
+
+ data
+;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;
+
+ text
+ align 4
+; input: r3 is the exception frame
+_Xfp_imprecise: global _Xfp_imprecise
+ or r29, r3, r0 ; r29 is now the E.F.
+ subu r31, r31, 40
+ st r1, r31, 32
+ st r29, r31, 36
+
+ ld r2 , r29, EF_FPSR * 4
+ ld r3 , r29, EF_FPCR * 4
+ ld r4 , r29, EF_FPECR * 4
+ ld r10, r29, EF_FPRH * 4
+ ld r11, r29, EF_FPRL * 4
+ ld r12, r29, EF_FPIT * 4
+
+;Load into r1 the return address for the exception handlers. Looking
+;at FPECR, branch to the appropriate exception handler.
+
+ or.u r1,r0,hi16(fpui_wrapup);load return address of functions
+ or r1,r1,lo16(fpui_wrapup)
+
+ bb0 2,r4,2f ;branch to FPunderflow if bit set
+ br _FPunderflow
+ 2: bb0 1,r4,3f ;branch to FPoverflow if bit set
+ br _FPoverflow
+ 3:
+#ifdef HANDLER
+ br _handler ;branch to handler since bit will be set
+ ;for inexact
+#endif
+ /* should never get here!!!! */
+ data
+ align 4
+	1:	string "error in imprecise fp exception handler, r4 is 0x%08x\n\0"
+ align 4
+ text
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ or r3, r4, r0
+ bsr _printf
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ bsr _panic
+
+fpui_wrapup:
+ tb1 0,r0,0 ;make sure all floating point operations
+ ldcr r5, psr ;load the PSR
+ ;have finished
+ or r5, r5, 0x2 ;disable interrupts
+ stcr r5, psr
+#if 0
+Why is this done? -- it screws up things later.
+ or r5, r5, 0x8 ;set SFU 1 disable bit, disable SFU 1
+ stcr r5, psr
+#endif
+ ld r1, r31, 32
+ ld r29,r31, 36
+ addu r31, r31, 40
+
+ ; write back the results
+ extu r2, r12, 5<0>
+ addu r3, r29, EF_R0*4
+ bb0 destsize, r12, Iwritesingle
+ st r10, r3 [r2]
+ addu r2, r2, 1
+ clr r2, r2, 27<5>
+Iwritesingle:
+ st r11, r3 [r2]
+;Return..
+ jmp r1
diff --git a/sys/arch/mvme88k/m88k/machdep.c b/sys/arch/mvme88k/m88k/machdep.c
new file mode 100644
index 00000000000..28eb27967ff
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/machdep.c
@@ -0,0 +1,1360 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/kernel.h>
+#include <sys/map.h>
+#include <sys/proc.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/clist.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/msgbuf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/mount.h>
+#include <sys/user.h>
+#include <sys/exec.h>
+#include <sys/vnode.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#ifdef SYSVMSG
+#include <sys/msg.h>
+#endif
+#ifdef SYSVSEM
+#include <sys/sem.h>
+#endif
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
+
+#include <machine/cpu.h>
+#include <machine/reg.h>
+#include <machine/psl.h>
+#include <machine/locore.h>
+#include <machine/board.h>
+#include <machine/trap.h>
+#include <machine/bug.h>
+
+#include <dev/cons.h>
+
+#include <vm/vm.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#define __IS_MACHDEP_C__
+#include <assym.s> /* EF_EPSR, etc. */
+#include <machine/m88100.h> /* DMT_VALID */
+#include <machine/m882xx.h> /* CMMU stuff */
+#if DDB
+# include <machine/db_machdep.h>
+#endif /* DDB */
+
+#if 0
+#include <machine/m88100.h> /* DMT_VALID */
+#include <machine/m882xx.h> /* CMMU stuff */
+#include <vm/vm.h>
+#include <vm/vm_kern.h> /* kernel_map */
+#include <sys/param.h>
+#include <sys/msgbuf.h>
+#include <sys/buf.h>
+#include <machine/locore.h> /* USERMODE */
+/*
+#include <machine/nvram.h>
+*/
+#include <sys/types.h>
+#endif /* 0 */
+
+static int waittime = -1;
+
+static void level0_intr(int, unsigned *);
+static void level1_intr(int, unsigned *);
+static void level2_intr(int, unsigned *);
+static void level3_intr(int, unsigned *);
+static void level4_intr(int, unsigned *);
+static void level5_intr(int, unsigned *);
+static void level6_intr(int, unsigned *);
+static void level7_intr(int, unsigned *);
+
+unsigned char *ivec[] = {
+ (unsigned char *)0xFFFE007,
+ (unsigned char *)0xFFFE00B,
+ (unsigned char *)0xFFFE00F,
+ (unsigned char *)0xFFFE013,
+ (unsigned char *)0xFFFE017,
+ (unsigned char *)0xFFFE01B,
+ (unsigned char *)0xFFFE01F,
+};
+
+static void (*int_handler[8])() =
+{
+ level0_intr,
+ level1_intr,
+ level2_intr,
+ level3_intr,
+ level4_intr,
+ level5_intr,
+ level6_intr,
+ level7_intr,
+};
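+/*
+ * int_handler[] is indexed by the interrupt priority level read from
+ * INT_PRI_LEVEL in ext_int() below.
+ */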
+
+unsigned char *int_mask_level = (unsigned char *)INT_MASK_LEVEL;
+unsigned char *int_pri_level = (unsigned char *)INT_PRI_LEVEL;
+unsigned char *iackaddr;
+
+int physmem; /* available physical memory, in pages */
+int cold;
+vm_offset_t avail_end, avail_start, avail_next;
+int msgbufmapped = 0;
+int foodebug = 0;
+int longformat = 0;
+
+extern char kstack[]; /* kernel stack - actually this is == UADDR */
+extern char *cpu_string;
+extern short exframesize[];
+
+/*
+ * Declare these as initialized data so we can patch them.
+ */
+int nswbuf = 0;
+#ifdef NBUF
+int nbuf = NBUF;
+#else
+int nbuf = 0;
+#endif
+#ifdef BUFPAGES
+int bufpages = BUFPAGES;
+#else
+int bufpages = 0;
+#endif
+int *nofault;
+
+caddr_t allocsys __P((caddr_t));
+
+/*
+ * Info for CTL_HW
+ */
+char machine[] = "MVME187"; /* cpu "architecture" */
+char cpu_model[120];
+extern char version[];
+
+ /*
+ * Console initialization: called early on from main,
+ * before vm init or startup. Do enough configuration
+ * to choose and initialize a console.
+ */
+void
+consinit()
+{
+
+ /*
+ * Initialize the console before we print anything out.
+ */
+ cninit();
+
+#if defined (DDB)
+ kdb_init();
+ if (boothowto & RB_KDB)
+ Debugger();
+#endif
+}
+
+/*
+ * Figure out how much real memory is available.
+ * Start looking from the megabyte after the end of the kernel data,
+ * until we find non-memory.
+ */
+vm_offset_t
+size_memory(void)
+{
+ volatile unsigned int *look;
+ unsigned int *max;
+ extern char end[];
+ #define PATTERN 0x5a5a5a5a
+ #define STRIDE (4*1024) /* 4k at a time */
+ #define Roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
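+	/* e.g. Roundup(0x10001, STRIDE) == 0x11000, the next 4k boundary */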
+
+ /*
+ * count it up.
+ */
+ max = (void*)MAXPHYSMEM;
+ for (look = (void*)Roundup(end, STRIDE); look < max;
+ look = (int*)((unsigned)look + STRIDE)) {
+ unsigned save;
+
+ /* if can't access, we've reached the end */
+ if (foodebug)
+ printf("%x\n", look);
+ if (badwordaddr((vm_offset_t)look)) {
+ printf("%x\n", look);
+ look = (int *)((int)look - STRIDE);
+ break;
+ }
+
+#if 1
+ /*
+ * If we write a value, we expect to read the same value back.
+ * We'll do this twice, the 2nd time with the opposite bit
+ * pattern from the first, to make sure we check all bits.
+ */
+ save = *look;
+ if (*look = PATTERN, *look != PATTERN)
+ break;
+ if (*look = ~PATTERN, *look != ~PATTERN)
+ break;
+ *look = save;
+#endif
+ }
+
+ physmem = btoc(trunc_page((unsigned)look)); /* in pages */
+ return(trunc_page((unsigned)look));
+}
+
+void
+identifycpu()
+{
+ /* XXX -take this one out. It can be done in m187_bootstrap() */
+ strcpy(cpu_model, "Motorola M88K");
+ printf("Model: %s\n", cpu_model);
+}
+
+/* The following two functions assume UPAGES == 3 */
+#if UPAGES != 3
+#error "UPAGES changed?"
+#endif
+
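+/*
+ * save_u_area() records the three PTEs that map the process u-area at va;
+ * load_u_area() copies them into the PTEs for UADDR so that UADDR always
+ * maps the current process's upages, then flushes the stale TLB entries.
+ */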
+void
+save_u_area(struct proc *p, vm_offset_t va)
+{
+ p->p_md.md_upte[0] = kvtopte(va)->bits;
+ p->p_md.md_upte[1] = kvtopte(va + NBPG)->bits;
+ p->p_md.md_upte[2] = kvtopte(va + NBPG + NBPG)->bits;
+}
+
+void
+load_u_area(struct proc *p)
+{
+ pte_template_t *t;
+
+ t = kvtopte(UADDR);
+ t->bits = p->p_md.md_upte[0];
+ t = kvtopte(UADDR + NBPG);
+ t->bits = p->p_md.md_upte[1];
+ t = kvtopte(UADDR + NBPG + NBPG);
+ t->bits = p->p_md.md_upte[2];
+ cmmu_flush_tlb(1, UADDR, 3 * NBPG);
+}
+
+
+void
+cpu_startup()
+{
+ caddr_t v;
+ int sz, i;
+ vm_size_t size;
+ int base, residual;
+ vm_offset_t minaddr, maxaddr, uarea_pages;
+ extern vm_offset_t miniroot;
+
+ /*
+ * Initialize error message buffer (at end of core).
+ * avail_end was pre-decremented in m1x7_init.
+ */
+ for (i = 0; i < btoc(sizeof(struct msgbuf)); i++)
+ pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
+ avail_end + i * NBPG, VM_PROT_ALL, TRUE);
+ msgbufmapped = 1;
+
+ printf(version);
+ identifycpu();
+ printf("real mem = %d\n", ctob(physmem));
+
+ /*
+ * Find out how much space we need, allocate it,
+ * and then give everything true virtual addresses.
+ */
+ sz = (int)allocsys((caddr_t)0);
+ if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
+ panic("startup: no room for tables");
+ if (allocsys(v) - v != sz)
+ panic("startup: table size inconsistency");
+
+ /*
+ * Grab UADDR virtual address
+ */
+
+ uarea_pages = UADDR;
+
+ vm_map_find(kernel_map, vm_object_allocate(PAGE_SIZE * UPAGES), 0,
+ (vm_offset_t *)&uarea_pages, PAGE_SIZE * UPAGES, TRUE);
+
+ if (uarea_pages != UADDR) {
+ printf("uarea_pages %x: UADDR not free\n", uarea_pages);
+ panic("bad UADDR");
+ }
+ /*
+ * Now allocate buffers proper. They are different than the above
+ * in that they usually occupy more virtual memory than physical.
+ */
+ size = MAXBSIZE * nbuf;
+ buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
+ &maxaddr, size, TRUE);
+ minaddr = (vm_offset_t)buffers;
+ if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
+ (vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS)
+ panic("startup: cannot allocate buffers");
+ if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
+ /* don't want to alloc more physical mem than needed */
+ bufpages = btoc(MAXBSIZE) * nbuf;
+ }
+ base = bufpages / nbuf;
+ residual = bufpages % nbuf;
+ for (i = 0; i < nbuf; i++) {
+ vm_size_t curbufsize;
+ vm_offset_t curbuf;
+
+ /*
+ * First <residual> buffers get (base+1) physical pages
+ * allocated for them. The rest get (base) physical pages.
+ *
+ * The rest of each buffer occupies virtual space,
+ * but has no physical memory allocated for it.
+ */
+ curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
+ curbufsize = CLBYTES * (i < residual ? base+1 : base);
+ vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
+ vm_map_simplify(buffer_map, curbuf);
+ }
+
+ /*
+ * Allocate a submap for exec arguments. This map effectively
+ * limits the number of processes exec'ing at any time.
+ */
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16*NCARGS, TRUE);
+
+ /*
+ * Allocate a map for IO.
+ */
+ phys_map = vm_map_create(kernel_pmap, IO_SPACE_START,
+ IO_SPACE_END, TRUE);
+ if (phys_map == NULL)
+ panic("cpu_startup: unable to create physmap");
+
+ /*
+ * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
+ * we use the more space efficient malloc in place of kmem_alloc.
+ */
+ mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
+ M_MBUF, M_NOWAIT);
+ bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
+ mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, FALSE);
+
+ /*
+ * Initialize callouts
+ */
+ callfree = callout;
+ for (i = 1; i < ncallout; i++)
+ callout[i-1].c_next = &callout[i];
+ callout[i-1].c_next = NULL;
+
+ printf("avail mem = %d\n", ptoa(cnt.v_free_count));
+ printf("using %d buffers containing %d bytes of memory\n",
+ nbuf, bufpages * CLBYTES);
+
+ mfs_initminiroot(miniroot);
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+
+ /*
+ * Configure the system.
+ */
+ nofault = NULL;
+ configure();
+
+ dumpconf();
+}
+
+/*
+ * Allocate space for system data structures. We are given
+ * a starting virtual address and we return a final virtual
+ * address; along the way we set each data structure pointer.
+ *
+ * We call allocsys() with 0 to find out how much space we want,
+ * allocate that much and fill it with zeroes, and then call
+ * allocsys() again with the correct base virtual address.
+ */
+caddr_t
+allocsys(v)
+ register caddr_t v;
+{
+
+#define valloc(name, type, num) \
+ v = (caddr_t)(((name) = (type *)v) + (num))
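+/*
+ * valloc(name, type, num) carves num * sizeof(type) bytes out of the
+ * region starting at v, points `name' at it, and advances v past it.
+ */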
+
+#ifdef REAL_CLISTS
+ valloc(cfree, struct cblock, nclist);
+#endif
+ valloc(callout, struct callout, ncallout);
+ valloc(swapmap, struct map, nswapmap = maxproc * 2);
+#ifdef SYSVSHM
+ valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
+#endif
+#ifdef SYSVSEM
+ valloc(sema, struct semid_ds, seminfo.semmni);
+ valloc(sem, struct sem, seminfo.semmns);
+ /* This is pretty disgusting! */
+ valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
+#endif
+#ifdef SYSVMSG
+ valloc(msgpool, char, msginfo.msgmax);
+ valloc(msgmaps, struct msgmap, msginfo.msgseg);
+ valloc(msghdrs, struct msg, msginfo.msgtql);
+ valloc(msqids, struct msqid_ds, msginfo.msgmni);
+#endif
+
+ /*
+	 * Determine how many buffers to allocate (10% of physical memory
+	 * below 2MB, 5% otherwise, and at least 16 buffers).
+ * Allocate 1/2 as many swap buffer headers as file i/o buffers.
+ */
+ if (bufpages == 0)
+ if (physmem < btoc(2 * 1024 * 1024))
+ bufpages = (physmem / 10) / CLSIZE;
+ else
+ bufpages = (physmem / 20) / CLSIZE;
+ if (nbuf == 0) {
+ nbuf = bufpages;
+ if (nbuf < 16)
+ nbuf = 16;
+ }
+ if (nswbuf == 0) {
+ nswbuf = (nbuf / 2) &~ 1; /* force even */
+ if (nswbuf > 256)
+ nswbuf = 256; /* sanity */
+ }
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
+ return v;
+}
+
+/*
+ * Set registers on exec.
+ * Clear all except sp and pc.
+ */
+/* ARGSUSED */
+void
+setregs(p, pack, stack, retval)
+ struct proc *p;
+ struct exec_package *pack;
+ u_long stack;
+ int retval[2];
+{
+ register struct trapframe *tf = p->p_md.md_tf;
+ register int psr;
+
+ /*
+ * The syscall will ``return'' to snip; set it.
+ * Set the rest of the registers to 0 except for r31 (stack pointer,
+ * built in exec()) and psr (supervisor bit).
+ */
+ psr = tf->epsr & PSR_SUPERVISOR_MODE_BIT;
+#if 0
+ /*
+ I don't think I need to mess with fpstate on 88k because
+ we make sure the floating point pipeline is drained in
+ locore.s. Should check on this later. Nivas.
+ */
+
+ if ((fs = p->p_md.md_fpstate) != NULL) {
+ /*
+ * We hold an FPU state. If we own *the* FPU chip state
+ * we must get rid of it, and the only way to do that is
+ * to save it. In any case, get rid of our FPU state.
+ */
+ if (p == fpproc) {
+ savefpstate(fs);
+ fpproc = NULL;
+ }
+ free((void *)fs, M_SUBPROC);
+ p->p_md.md_fpstate = NULL;
+ }
+#endif /* 0 */
+ bzero((caddr_t)tf, sizeof *tf);
+ tf->epsr = psr;
+ tf->snip = pack->ep_entry & ~3;
+ tf->sfip = tf->snip + 4;
+ tf->r[31] = stack;
+ retval[1] = 0;
+}
+
+/*
+ * WARNING: code in locore.s assumes the layout shown for sf_signo
+ * thru sf_handler so... don't screw with them!
+ */
+struct sigframe {
+ int sf_signo; /* signo for handler */
+ int sf_code; /* additional info for handler */
+ struct sigcontext *sf_scp; /* context ptr for handler */
+ sig_t sf_handler; /* handler addr for u_sigc */
+ struct sigcontext sf_sc; /* actual context */
+};
+
+#ifdef DEBUG
+int sigdebug = 0;
+int sigpid = 0;
+#define SDB_FOLLOW 0x01
+#define SDB_KSTACK 0x02
+#define SDB_FPSTATE 0x04
+#endif
+
+/*
+ * Send an interrupt to process.
+ */
+void
+sendsig(catcher, sig, mask, code)
+ sig_t catcher;
+ int sig, mask;
+ unsigned long code;
+{
+ register struct proc *p = curproc;
+ register struct trapframe *tf;
+ register struct sigacts *psp = p->p_sigacts;
+ struct sigframe *fp;
+ int oonstack, fsize;
+ struct sigframe sf;
+ int addr;
+ extern char sigcode[], esigcode[];
+
+#define szsigcode (esigcode - sigcode)
+
+ tf = p->p_md.md_tf;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in data space, the
+ * call to grow() is a nop, and the copyout()
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ fsize = sizeof(struct sigframe);
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
+ psp->ps_sigstk.ss_size - fsize);
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
+ } else
+ fp = (struct sigframe *)(tf->r[31] - fsize);
+ if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
+ (void)grow(p, (unsigned)fp);
+#ifdef DEBUG
+ if ((sigdebug & SDB_FOLLOW) ||
+ (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
+ p->p_pid, sig, &oonstack, fp, &fp->sf_sc);
+#endif
+ /*
+ * Build the signal context to be used by sigreturn.
+ */
+ sf.sf_signo = sig;
+ sf.sf_code = code;
+ sf.sf_scp = &fp->sf_sc;
+ sf.sf_sc.sc_onstack = oonstack;
+ sf.sf_sc.sc_mask = mask;
+ /*
+ * Copy the whole user context into signal context that we
+ * are building.
+ */
+
+ bcopy((caddr_t)tf->r, (caddr_t)sf.sf_sc.sc_regs,
+ sizeof(sf.sf_sc.sc_regs));
+ sf.sf_sc.sc_xip = tf->sxip;
+ sf.sf_sc.sc_nip = tf->snip;
+ sf.sf_sc.sc_fip = tf->sfip;
+ sf.sf_sc.sc_ps = tf->epsr;
+ sf.sf_sc.sc_sp = tf->r[31];
+ sf.sf_sc.sc_fpsr = tf->fpsr;
+ sf.sf_sc.sc_fpcr = tf->fpcr;
+ sf.sf_sc.sc_ssbr = tf->ssbr;
+ sf.sf_sc.sc_dmt0 = tf->dmt0;
+ sf.sf_sc.sc_dmd0 = tf->dmd0;
+ sf.sf_sc.sc_dma0 = tf->dma0;
+ sf.sf_sc.sc_dmt1 = tf->dmt1;
+ sf.sf_sc.sc_dmd1 = tf->dmd1;
+ sf.sf_sc.sc_dma1 = tf->dma1;
+ sf.sf_sc.sc_dmt2 = tf->dmt2;
+ sf.sf_sc.sc_dmd2 = tf->dmd2;
+ sf.sf_sc.sc_dma2 = tf->dma2;
+ sf.sf_sc.sc_fpecr = tf->fpecr;
+ sf.sf_sc.sc_fphs1 = tf->fphs1;
+ sf.sf_sc.sc_fpls1 = tf->fpls1;
+ sf.sf_sc.sc_fphs2 = tf->fphs2;
+ sf.sf_sc.sc_fpls2 = tf->fpls2;
+ sf.sf_sc.sc_fppt = tf->fppt;
+ sf.sf_sc.sc_fprh = tf->fprh;
+ sf.sf_sc.sc_fprl = tf->fprl;
+ sf.sf_sc.sc_fpit = tf->fpit;
+	if (copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf)) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ sig = sigmask(SIGILL);
+ p->p_sigignore &= ~sig;
+ p->p_sigcatch &= ~sig;
+ p->p_sigmask &= ~sig;
+ psignal(p, SIGILL);
+ return;
+ }
+ /*
+ * Build the argument list for the signal handler.
+ * Signal trampoline code is at base of user stack.
+ */
+ addr = (int)PS_STRINGS - szsigcode;
+ tf->snip = addr & ~3;
+ tf->sfip = tf->snip + 4;
+ tf->r[31] = (unsigned)fp;
+#ifdef DEBUG
+ if ((sigdebug & SDB_FOLLOW) ||
+ (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): sig %d returns\n",
+ p->p_pid, sig);
+#endif
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psl to gain improper privileges or to cause
+ * a machine fault.
+ */
+struct sigreturn_args {
+ struct sigcontext *scp;
+};
+/* ARGSUSED */
+sigreturn(p, uap, retval)
+ struct proc *p;
+ struct sigreturn_args *uap;
+ int *retval;
+{
+ register struct sigcontext *scp;
+ register struct trapframe *tf;
+ struct sigcontext ksc;
+ int error;
+
+ scp = uap->scp;
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
+#endif
+ if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
+ return (EINVAL);
+ tf = p->p_md.md_tf;
+ /*
+ * xip, nip and fip must be multiples of 4. This is all
+ * that is required; if it holds, just do it.
+ */
+ if (((scp->sc_xip | scp->sc_nip | scp->sc_fip) & 3) != 0)
+ return (EINVAL);
+ bcopy((caddr_t)scp->sc_regs, (caddr_t)tf->r,
+ sizeof(scp->sc_regs));
+ tf->sxip = scp->sc_xip;
+ tf->snip = scp->sc_nip;
+ tf->sfip = scp->sc_fip;
+ tf->epsr = scp->sc_ps;
+ tf->r[31] = scp->sc_sp;
+ tf->fpsr = scp->sc_fpsr;
+ tf->fpcr = scp->sc_fpcr;
+ tf->ssbr = scp->sc_ssbr;
+ tf->dmt0 = scp->sc_dmt0;
+ tf->dmd0 = scp->sc_dmd0;
+ tf->dma0 = scp->sc_dma0;
+ tf->dmt1 = scp->sc_dmt1;
+ tf->dmd1 = scp->sc_dmd1;
+ tf->dma1 = scp->sc_dma1;
+ tf->dmt2 = scp->sc_dmt2;
+ tf->dmd2 = scp->sc_dmd2;
+ tf->dma2 = scp->sc_dma2;
+ tf->fpecr = scp->sc_fpecr;
+ tf->fphs1 = scp->sc_fphs1;
+ tf->fpls1 = scp->sc_fpls1;
+ tf->fphs2 = scp->sc_fphs2;
+ tf->fpls2 = scp->sc_fpls2;
+ tf->fppt = scp->sc_fppt;
+ tf->fprh = scp->sc_fprh;
+ tf->fprl = scp->sc_fprl;
+ tf->fpit = scp->sc_fpit;
+
+ tf->epsr = scp->sc_ps;
+
+ /*
+ * Restore the user supplied information
+ */
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+ p->p_sigmask = scp->sc_mask &~ sigcantmask;
+ return (EJUSTRETURN);
+}
+
+void
+bootsync(void)
+{
+ if (waittime < 0) {
+ register struct buf *bp;
+ int iter, nbusy;
+
+ waittime = 0;
+ (void) spl0();
+ printf("syncing disks... ");
+ /*
+ * Release vnodes held by texts before sync.
+ */
+ if (panicstr == 0)
+ vnode_pager_umount(NULL);
+ sync(&proc0, (void *)NULL, (int *)NULL);
+
+ for (iter = 0; iter < 20; iter++) {
+ nbusy = 0;
+ for (bp = &buf[nbuf]; --bp >= buf; )
+ if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
+ nbusy++;
+ if (nbusy == 0)
+ break;
+ printf("%d ", nbusy);
+ delay(40000 * iter);
+ }
+ if (nbusy)
+ printf("giving up\n");
+ else
+ printf("done\n");
+ /*
+ * If we've been adjusting the clock, the todr
+ * will be out of synch; adjust it now.
+ */
+ resettodr();
+ }
+}
+
+doboot()
+{
+ bugreturn();
+}
+
+void
+boot(howto)
+ register int howto;
+{
+ /* take a snap shot before clobbering any registers */
+ if (curproc)
+ savectx(curproc->p_addr, 0);
+
+ boothowto = howto;
+ if ((howto&RB_NOSYNC) == 0)
+ bootsync();
+ splhigh(); /* extreme priority */
+ if (howto&RB_HALT) {
+ printf("halted\n\n");
+ bugreturn();
+ } else {
+ if (howto & RB_DUMP)
+ dumpsys();
+ doboot();
+ /*NOTREACHED*/
+ }
+ /*NOTREACHED*/
+}
+
+unsigned dumpmag = 0x8fca0101; /* magic number for savecore */
+int dumpsize = 0; /* also for savecore */
+long dumplo = 0;
+
+dumpconf()
+{
+ int nblks;
+
+ dumpsize = physmem;
+ if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
+ nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
+ if (dumpsize > btoc(dbtob(nblks - dumplo)))
+ dumpsize = btoc(dbtob(nblks - dumplo));
+ else if (dumplo == 0)
+ dumplo = nblks - btodb(ctob(physmem));
+ }
+ /*
+ * Don't dump on the first CLBYTES (why CLBYTES?)
+ * in case the dump device includes a disk label.
+ */
+ if (dumplo < btodb(CLBYTES))
+ dumplo = btodb(CLBYTES);
+}
+
+/*
+ * Doadump comes here after turning off memory management and
+ * getting on the dump stack, either when called above, or by
+ * the auto-restart code.
+ */
+dumpsys()
+{
+
+ msgbufmapped = 0;
+ if (dumpdev == NODEV)
+ return;
+ /*
+ * For dumps during autoconfiguration,
+ * if dump device has already configured...
+ */
+ if (dumpsize == 0)
+ dumpconf();
+ if (dumplo < 0)
+ return;
+ printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
+ printf("dump ");
+ switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
+
+ case ENXIO:
+ printf("device bad\n");
+ break;
+
+ case EFAULT:
+ printf("device not ready\n");
+ break;
+
+ case EINVAL:
+ printf("area improper\n");
+ break;
+
+ case EIO:
+ printf("i/o error\n");
+ break;
+
+ default:
+ printf("succeeded\n");
+ break;
+ }
+}
+
+/*
+ * Return the best possible estimate of the time in the timeval
+ * to which tvp points. We do this by returning the current time
+ * plus the amount of time since the last clock interrupt (clock.c:clkread).
+ *
+ * Check that this time is no less than any previously-reported time,
+ * which could happen around the time of a clock adjustment. Just for fun,
+ * we guarantee that the time will be greater than the value obtained by a
+ * previous call.
+ */
+void
+microtime(tvp)
+ register struct timeval *tvp;
+{
+ int s = splhigh();
+ static struct timeval lasttime;
+
+ *tvp = time;
+ tvp->tv_usec += clkread();
+ while (tvp->tv_usec > 1000000) {
+ tvp->tv_sec++;
+ tvp->tv_usec -= 1000000;
+ }
+ if (tvp->tv_sec == lasttime.tv_sec &&
+ tvp->tv_usec <= lasttime.tv_usec &&
+ (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
+ tvp->tv_sec++;
+ tvp->tv_usec -= 1000000;
+ }
+ lasttime = *tvp;
+ splx(s);
+}
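+
+#if 0
+/*
+ * Usage sketch only (hypothetical helper, not referenced anywhere):
+ * because of the monotonicity guarantee above, the delta computed from
+ * two successive microtime() calls is never negative.
+ */
+static long
+elapsed_usec(struct timeval *then)
+{
+	struct timeval now;
+
+	microtime(&now);
+	return ((now.tv_sec - then->tv_sec) * 1000000 +
+	    (now.tv_usec - then->tv_usec));
+}
+#endif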
+
+#ifdef PGINPROF
+/*
+ * Return the difference (in microseconds)
+ * between the current time and a previous
+ * time as represented by the arguments.
+ * If there is a pending clock interrupt
+ * which has not been serviced due to high
+ * ipl, return error code.
+ */
+unsigned vmtime(int otime, int olbolt, int oicr)
+{
+ return ((time.tv_sec-otime)*60 + lbolt-olbolt)*16667;
+}
+#endif /* PGINPROF */
+
+badwordaddr(void *addr)
+{
+ return badaddr((vm_offset_t)addr, 4);
+}
+
+/* returns positive if memory is not there; */
+unsigned check_memory(void *addr, unsigned flag)
+{
+ return badaddr((vm_offset_t)addr, 1);
+}
+
+void start_clock(void)
+{
+ printf("Start clock\n");
+}
+
+static void
+level0_intr(int level, unsigned *frame)
+{
+ printf("Spurious interrupt\n");
+}
+
+static void
+level1_intr(int level, unsigned *frame)
+{
+ register char vec;
+ iackaddr = ivec[level];
+
+ /* generate IACK and get the vector */
+ asm volatile ("ld.b %0,%1" : "=r" (vec) : "" (iackaddr));
+}
+#if 0
+static void
+level1_intr(int level, unsigned *frame)
+{
+ register char vec;
+ iackaddr = ivec[level];
+
+ /* generate IACK and get the vector */
+ asm volatile ("ld.b %0,%1" : "=r" (vec) : "" (iackaddr));
+}
+#endif
+
+static void
+level2_intr(int level, unsigned *frame)
+{
+ iackaddr = ivec[level];
+}
+
+static void
+level3_intr(int level, unsigned *frame)
+{
+ iackaddr = ivec[level];
+}
+
+static void
+level4_intr(int level, unsigned *frame)
+{
+ iackaddr = ivec[level];
+}
+
+static void
+level5_intr(int level, unsigned *frame)
+{
+ iackaddr = ivec[level];
+}
+
+static void
+level6_intr(int level, unsigned *frame)
+{
+ register char vec;
+ struct clockframe clkframe;
+ iackaddr = ivec[level];
+
+ /* generate IACK and get the vector */
+ asm volatile("ld.b %0,%1" : "=r" (vec) : "" (iackaddr));
+ switch (vec){
+ case TIMER1IRQ:
+ break;
+ case TIMER2IRQ:
+ /*
+ * build clockframe and pass to the clock
+ * interrupt handler
+ */
+ clkframe.pc = frame[EF_SXIP] & ~3;
+ clkframe.sr = frame[EF_EPSR];
+ clkframe.ipl = frame[EF_MASK];
+ clockintr(&clkframe);
+ break;
+ }
+}
+
+static void
+level7_intr(int level, unsigned *frame)
+{
+ iackaddr = ivec[level];
+}
+
+/*
+ * Device interrupt handler
+ *
+ * when we enter, interrupts are disabled;
+ * when we leave, they should be disabled,
+ * but they need not be enabled throughout
+ * the routine.
+ */
+
+void
+ext_int(unsigned vec, unsigned *eframe)
+{
+ register unsigned char mask, level;
+ register int s; /* XXX */
+
+ asm volatile ("ld.b %0,%1" : "=r" (mask) : "" (int_mask_level));
+ asm volatile ("ld.b %0,%1" : "=r" (level) : "" (int_pri_level));
+
+ /* get the mask and stash it away in the trap frame */
+ eframe[EF_MASK] = mask;
+ /* and block ints level or lower */
+ spln((char)mask);
+ enable_interrupt();
+ (*int_handler[level])(level,eframe);
+ /*
+ * process any remaining data access exceptions before
+ * returning to assembler
+ */
+ disable_interrupt();
+	if (eframe[EF_DMT0] & DMT_VALID)
+ {
+ trap(T_DATAFLT, eframe);
+ data_access_emulation(eframe);
+ }
+ mask = eframe[EF_MASK];
+ asm volatile ("st.b %0,%1" : "=r" (mask) : "" (int_mask_level));
+}
+
+/*
+ * check a word wide address.
+ * write < 0 -> check for write access.
+ * otherwise read.
+ */
+int wprobe(void *addr, unsigned int write)
+{
+ /* XXX only checking reads */
+ return badaddr((vm_offset_t)addr, sizeof(int));
+}
+
+cpu_exec_aout_makecmds(p, epp)
+ struct proc *p;
+ struct exec_package *epp;
+{
+ int error = ENOEXEC;
+
+#ifdef COMPAT_SUNOS
+ extern sun_exec_aout_makecmds __P((struct proc *, struct exec_package *));
+ if ((error = sun_exec_aout_makecmds(p, epp)) == 0)
+ return 0;
+#endif
+ return error;
+}
+
+#if NOTYET
+/*
+ * nvram_read(BUF, ADDRESS, SIZE)
+ * nvram_write(BUF, ADDRESS, SIZE)
+ *
+ * Read and write non-volatile RAM.
+ * Only one byte from each word in the NVRAM area is accessible.
+ * ADDRESS points to the virtual starting address, which is some address
+ * after the nvram start (NVRAM_ADDR). SIZE refers to virtual size.
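+ * (So the byte at virtual offset N within the area is accessed at
+ * physical location NVRAM_ADDR + N * 4.)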
+ */
+void nvram_read(char *buf, vm_offset_t address, unsigned size)
+{
+ unsigned index = (unsigned)address - NVRAM_ADDR;
+ unsigned char *source = (char*)(NVRAM_ADDR + index * 4);
+
+ while (size-- > 0)
+ {
+ *buf++ = *source;
+ source += 4; /* bump up to point to next readable byte */
+ }
+}
+
+void nvram_write(char *buf, vm_offset_t address, unsigned size)
+{
+ unsigned index = (unsigned)address - NVRAM_ADDR;
+ unsigned char *source = (char*)(NVRAM_ADDR + index * 4);
+
+ while (size-- > 0)
+ {
+ *source = *buf++;
+ source += 4; /* bump up to point to next readable byte */
+ }
+}
+#endif /* NOTYET */
+
+struct sysarch_args {
+ int op;
+ char *parms;
+};
+
+sysarch(p, uap, retval)
+ struct proc *p;
+ register struct sysarch_args *uap;
+ int *retval;
+{
+ int error = 0;
+
+ switch(uap->op) {
+ default:
+ error = EINVAL;
+ break;
+ }
+ return(error);
+}
+
+/*
+ * machine dependent system variables.
+ */
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+
+	/* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ default:
+ return (EOPNOTSUPP);
+ }
+ /*NOTREACHED*/
+}
+
+/*
+ * insert an element into a queue
+ */
+#undef _insque
+_insque(element, head)
+ register struct prochd *element, *head;
+{
+ element->ph_link = head->ph_link;
+ head->ph_link = (struct proc *)element;
+ element->ph_rlink = (struct proc *)head;
+ ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
+}
+
+/*
+ * remove an element from a queue
+ */
+#undef _remque
+_remque(element)
+ register struct prochd *element;
+{
+ ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
+ ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
+ element->ph_rlink = (struct proc *)0;
+}
+
+#if 0
+/*
+ * Below written in C to allow access to debugging code
+ */
+copyinstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
+ void *toaddr, *fromaddr;
+{
+ int c,tally;
+
+ tally = 0;
+ while (maxlength--) {
+ c = fubyte(fromaddr++);
+ if (c == -1) {
+ if(lencopied) *lencopied = tally;
+ return(EFAULT);
+ }
+ tally++;
+ *(char *)toaddr++ = (char) c;
+ if (c == 0){
+ if(lencopied) *lencopied = (u_int)tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = (u_int)tally;
+ return(ENAMETOOLONG);
+}
+
+copyoutstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
+ void *fromaddr, *toaddr;
+{
+ int c;
+ int tally;
+
+ tally = 0;
+ while (maxlength--) {
+ c = subyte(toaddr++, *(char *)fromaddr);
+ if (c == -1) return(EFAULT);
+ tally++;
+ if (*(char *)fromaddr++ == 0){
+ if(lencopied) *lencopied = tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = tally;
+ return(ENAMETOOLONG);
+}
+
+#endif /* 0 */
+
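+/*
+ * Copy a NUL-terminated string from fromaddr to toaddr, at most maxlength
+ * bytes. Returns 0 on success or ENAMETOOLONG if no NUL was seen, and
+ * reports the number of bytes copied through lencopied when it is non-NULL.
+ */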
+copystr(fromaddr, toaddr, maxlength, lencopied)
+ u_int *lencopied, maxlength;
+ void *fromaddr, *toaddr;
+{
+ u_int tally;
+
+ tally = 0;
+ while (maxlength--) {
+ *(u_char *)toaddr = *(u_char *)fromaddr++;
+ tally++;
+ if (*(u_char *)toaddr++ == 0) {
+ if(lencopied) *lencopied = tally;
+ return(0);
+ }
+ }
+ if(lencopied) *lencopied = tally;
+ return(ENAMETOOLONG);
+}
+
+void
+putchar(char c)
+{
+ bugoutchr(c);
+}
+/* dummies for now */
+
+bugsyscall()
+{
+}
+
+mmrw()
+{
+}
+
+netintr()
+{
+}
+
+MY_info(f, p, flags, s)
+struct trapframe *f;
+caddr_t p;
+int flags;
+char *s;
+{
+ regdump(f);
+ printf("proc %x flags %x type %s\n", p, flags, s);
+}
+
+MY_info_done(f, flags)
+struct trapframe *f;
+int flags;
+{
+ regdump(f);
+}
+
+regdump(struct trapframe *f)
+{
+#define R(i) f->r[i]
+ printf("R00-05: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(0),R(1),R(2),R(3),R(4),R(5));
+ printf("R06-11: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(6),R(7),R(8),R(9),R(10),R(11));
+ printf("R12-17: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(12),R(13),R(14),R(15),R(16),R(17));
+ printf("R18-23: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(18),R(19),R(20),R(21),R(22),R(23));
+ printf("R24-29: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(24),R(25),R(26),R(27),R(28),R(29));
+ printf("R30-31: 0x%08x 0x%08x\n",R(30),R(31));
+ printf("sxip %x snip %x sfip %x\n", f->sxip, f->snip, f->sfip);
+ if (f->vector == 0x3) { /* print dmt stuff for data access fault */
+ printf("dmt0 %x dmd0 %x dma0 %x\n", f->dmt0, f->dmd0, f->dma0);
+ printf("dmt1 %x dmd1 %x dma1 %x\n", f->dmt1, f->dmd1, f->dma1);
+ printf("dmt2 %x dmd2 %x dma2 %x\n", f->dmt2, f->dmd2, f->dma2);
+ }
+ if (longformat) {
+ printf("fpsr %x", f->fpsr);
+ printf("fpcr %x", f->fpcr);
+ printf("epsr %x", f->epsr);
+ printf("ssbr %x\n", f->ssbr);
+ printf("dmt0 %x", f->dmt0);
+ printf("dmd0 %x", f->dmd0);
+ printf("dma0 %x", f->dma0);
+ printf("dmt1 %x", f->dmt1);
+ printf("dmd1 %x", f->dmd1);
+ printf("dma1 %x", f->dma1);
+ printf("dmt2 %x", f->dmt2);
+ printf("dmd2 %x", f->dmd2);
+ printf("dma2 %x\n", f->dma2);
+ printf("fpecr %x", f->fpecr);
+ printf("fphs1 %x", f->fphs1);
+ printf("fpls1 %x", f->fpls1);
+ printf("fphs2 %x", f->fphs2);
+ printf("fpls2 %x", f->fpls2);
+ printf("fppt %x", f->fppt);
+ printf("fprh %x", f->fprh);
+ printf("fprl %x", f->fprl);
+ printf("fpit %x\n", f->fpit);
+ printf("vector %x", f->vector);
+ printf("mask %x", f->mask);
+ printf("mode %x", f->mode);
+ printf("scratch1 %x", f->scratch1);
+ printf("pad %x\n", f->pad);
+ }
+}
+
+#if DDB
+inline int
+db_splhigh(void)
+{
+ return (db_spln(6));
+}
+
+inline int
+db_splx(int s)
+{
+ return (db_spln(s));
+}
+#endif /* DDB */
diff --git a/sys/arch/mvme88k/m88k/misc.s b/sys/arch/mvme88k/m88k/misc.s
new file mode 100644
index 00000000000..a627f6a28da
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/misc.s
@@ -0,0 +1,64 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *
+ * HISTORY
+ * $Log: misc.s,v $
+ * Revision 1.1 1995/10/18 10:54:27 deraadt
+ * Initial revision
+ *
+ * Revision 2.3 93/01/26 18:01:25 danner
+ * Conditionalied "#define ASSEMBLER".
+ * [93/01/25 jfriedl]
+ *
+ * Revision 2.2 92/08/03 17:52:14 jfriedl
+ * created [danner]
+ *
+ */
+
+#ifndef ASSEMBLER
+ #define ASSEMBLER
+#endif
+
+#include <m88k/asm.h>
+
+LABEL(_ff1)
+ jmp.n r1
+ ff1 r2, r2
+
+/*
+ * invalidate_pte(pte)
+ *
+ * This function will invalidate the specified pte indivisibly
+ * to avoid the write-back of used-bit and/or modify-bit into
+ * that pte. It also returns the pte found in the table.
+ */
+LABEL(_invalidate_pte)
+ or r3,r0,r0
+ xmem r3,r2,0
+ tb1 0,r0,0
+ jmp.n r1
+ or r2,r3,r0
diff --git a/sys/arch/mvme88k/m88k/pmap.c b/sys/arch/mvme88k/m88k/pmap.c
new file mode 100644
index 00000000000..adfdc4c5099
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/pmap.c
@@ -0,0 +1,5538 @@
+/*
+ * HISTORY
+ */
+
+/* don't want to make them general yet. */
+#ifdef luna88k
+# define OMRON_PMAP
+#endif
+# define OMRON_PMAP
+
+#include <sys/types.h>
+#include <machine/board.h>
+#include <vm/pmap.h>
+#include <machine/m882xx.h>/* CMMU stuff */
+#include <vm/vm_kern.h> /* vm/vm_kern.h */
+#include <assym.s>
+
+/*#ifdef luna88k*/
+# define splblock splhigh
+/*#endif */
+
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <machine/assert.h>
+
+
+ /*
+ * VM externals
+ */
+extern vm_offset_t avail_start, avail_next, avail_end;
+extern vm_offset_t virtual_avail, virtual_end;
+
+#if 0
+/*
+ * Machine configuration stuff
+ */
+pmap_table_t pmap_table_build();
+#endif /* 0 */
+
+/*
+ * Static variables, functions and variables for debugging
+ */
+#ifdef DEBUG
+#define static
+
+boolean_t code_cache_enable = TRUE;
+boolean_t data_cache_enable = TRUE;
+boolean_t kernel_text_ro = FALSE; /* If TRUE kernel text set READ ONLY */
+
+/*
+ * conditional debugging
+ */
+
+#define CD_NORM 0x01
+#define CD_FULL 0x02
+
+#define CD_ACTIVATE 0x0000004 /* _pmap_activate */
+#define CD_KMAP 0x0000008 /* pmap_expand_kmap */
+#define CD_MAP 0x0000010 /* pmap_map */
+#define CD_MAPB 0x0000020 /* pmap_map_batc */
+#define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
+#define CD_BOOT 0x0000080 /* pmap_bootstrap */
+#define CD_INIT 0x0000100 /* pmap_init */
+#define CD_CREAT 0x0000200 /* pmap_create */
+#define CD_FREE 0x0000400 /* pmap_free_tables */
+#define CD_DESTR 0x0000800 /* pmap_destroy */
+#define CD_RM 0x0001000 /* pmap_remove */
+#define CD_RMAL 0x0002000 /* pmap_remove_all */
+#define CD_COW 0x0004000 /* pmap_copy_on_write */
+#define CD_PROT 0x0008000 /* pmap_protect */
+#define CD_EXP 0x0010000 /* pmap_expand */
+#define CD_ENT 0x0020000 /* pmap_enter */
+#define CD_UPD 0x0040000 /* pmap_update */
+#define CD_COL 0x0080000 /* pmap_collect */
+#define CD_CMOD 0x0100000 /* pmap_clear_modify */
+#define CD_IMOD 0x0200000 /* pmap_is_modified */
+#define CD_CREF 0x0400000 /* pmap_clear_reference */
+#define CD_PGMV 0x0800000 /* pagemove */
+#define CD_CHKPV 0x1000000 /* check_pv_list */
+#define CD_CHKPM 0x2000000 /* check_pmap_consistency */
+#define CD_CHKM 0x4000000 /* check_map */
+#define CD_ALL 0x0FFFFFC
+
+int pmap_con_dbg = CD_FULL|CD_NORM;
+
+#endif /* DEBUG */
+
+int cmmumap = 0;
+int mapallio = 1;
+int mapextra = 1;
+int mydebug = 0;
+extern proc0paddr;
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+caddr_t CADDR1, CADDR2, vmmap;
+u_int *CMAP1, *CMAP2, *vmpte, *msgbufmap;
+
+/*
+ * PHYS_TO_VM_PAGE and vm_page_set_modified, called by pmap_remove_range
+ * and pmap_remove_all, are still stubbed out.
+ *
+ * The VM routines would otherwise keep track of the page status by
+ * calling pmap_is_modified.
+ */
+
+#ifndef PHYS_TO_VM_PAGE
+#define PHYS_TO_VM_PAGE(pa)
+#endif
+
+#ifndef vm_page_set_modified
+#define vm_page_set_modified(m)
+#endif
+
+static struct pmap kernel_pmap_store;
+pmap_t kernel_pmap = &kernel_pmap_store;
+
+typedef struct kpdt_entry *kpdt_entry_t;
+struct kpdt_entry {
+ kpdt_entry_t next;
+ vm_offset_t phys;
+};
+#define KPDT_ENTRY_NULL ((kpdt_entry_t)0)
+
+static kpdt_entry_t kpdt_free;
+
+/*
+ * MAX_KERNEL_VA_SIZE must fit into the virtual address space between
+ * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS.
+ */
+#define MAX_KERNEL_VA_SIZE (256*1024*1024)
+
+
+/*
+ * Size of kernel page tables, which is enough to map MAX_KERNEL_VA_SIZE
+ */
+#define MAX_KERNEL_PDT_SIZE (M88K_BTOP(MAX_KERNEL_VA_SIZE) * sizeof(pt_entry_t))
+
+
+/*
+ * Two pages of scratch space.
+ * Used in copy_to_phys(), pmap_copy_page() and pmap_zero_page().
+ */
+vm_offset_t phys_map_vaddr1, phys_map_vaddr2;
+
+int ptes_per_vm_page; /* number of M88K ptes required to map one VM page */
+
+
+#define PMAP_MAX 512
+
+/*
+ * The Modify List
+ *
+ * This is an array, one byte per physical page, which keeps track
+ * of modified flags for pages which are no longer contained in any
+ * pmap. (For mapped pages, the modified flags are kept in the PTE.)
+ */
+char *pmap_modify_list;
+
+
+/* The PV (Physical to virtual) List.
+ *
+ * For each vm_page_t, pmap keeps a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t; the list is the pv_table.
+ * This is used by things like pmap_remove, when we must find and remove all
+ * mappings for a particular physical page.
+ */
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+static pv_entry_t pv_head_table; /* array of entries, one per page */
+
+/*
+ * Index into pv_head table, its lock bits, and the modify bits
+ * starting at pmap_phys_start.
+ */
+#define PFIDX(pa) (atop(pa - pmap_phys_start))
+#define PFIDX_TO_PVH(pfidx) (&pv_head_table[pfidx])
+
+
+/*
+ * Locking and TLB invalidation primitives
+ */
+
+/*
+ * Locking Protocols:
+ *
+ * There are two structures in the pmap module that need locking:
+ * the pmaps themselves, and the per-page pv_lists (which are locked
+ * by locking the pv_lock_table entry that corresponds to the pv_head
+ * for the list in question.) Most routines want to lock a pmap and
+ * then do operations in it that require pv_list locking -- however
+ * pmap_remove_all and pmap_copy_on_write operate on a physical page
+ * basis and want to do the locking in the reverse order, i.e. lock
+ * a pv_list and then go through all the pmaps referenced by that list.
+ * To protect against deadlock between these two cases, the pmap_lock
+ * is used. There are three different locking protocols as a result:
+ *
+ * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
+ * the pmap.
+ *
+ * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
+ * lock on the pmap_lock (shared read), then lock the pmap
+ * and finally the pv_lists as needed [i.e. pmap lock before
+ * pv_list lock.]
+ *
+ * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
+ * Get a write lock on the pmap_lock (exclusive write); this
+ *		also guarantees exclusive access to the pv_lists. Lock the
+ * pmaps as needed.
+ *
+ * At no time may any routine hold more than one pmap lock or more than
+ * one pv_list lock. Because interrupt level routines can allocate
+ * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
+ * kernel_pmap can only be held at splvm.
+ */
+/* DCR: 12/18/91 - The above explanation is no longer true. The pmap
+ * system lock has been removed in favor of a backoff strategy to
+ * avoid deadlock. Now, pv_list-based operations first get the
+ * pv_list lock, then try to get the pmap lock, but if they can't,
+ * they release the pv_list lock and retry the whole operation.
+ */
+
+#define SPLVM(spl) { spl = splvm(); }
+#define SPLX(spl) { splx(spl); }
+
+#define PMAP_LOCK(pmap, spl) SPLVM(spl)
+#define PMAP_UNLOCK(pmap, spl) SPLX(spl)
+
+#define PV_LOCK_TABLE_SIZE(n) 0
+#define LOCK_PVH(index)
+#define UNLOCK_PVH(index)
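+
+#if 0
+/*
+ * A minimal sketch of the backoff described above, for illustration
+ * only: LOCK_PVH/UNLOCK_PVH are compiled away in this file, and
+ * 'pv_based_op_sketch' and simple_lock_try() are assumptions here,
+ * not code used by this pmap.
+ */
+static void
+pv_based_op_sketch(pv_entry_t pvl, int pfi)
+{
+	pmap_t pmap;
+
+	for (;;) {
+		LOCK_PVH(pfi);			/* pv_list lock first */
+		pmap = pvl->pmap;
+		if (simple_lock_try(&pmap->lock))
+			break;			/* got both locks */
+		UNLOCK_PVH(pfi);		/* back off, then retry */
+	}
+	/* ... operate on the mappings of this physical page ... */
+	simple_unlock(&pmap->lock);
+	UNLOCK_PVH(pfi);
+}
+#endif /* 0 */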
+
+/*
+ * First and last physical address for which we maintain any
+ * information. Initialized to zero so that pmap operations done before
+ * pmap_init won't touch any non-existent structures.
+ */
+
+static vm_offset_t pmap_phys_start = (vm_offset_t) 0;
+static vm_offset_t pmap_phys_end = (vm_offset_t) 0;
+
+#define PMAP_MANAGED(pa) ((pa) >= pmap_phys_start && (pa) < pmap_phys_end)
+
+/*
+ * This variable was taken from the VAX pmap.c.
+ * pmap_verify_free refers to it.
+ * pmap_init initializes it.
+ * '90.7.17 Fuzzy
+ */
+boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
+
+/*
+ * Consistency checks.
+ * These checks are disabled by default; they are enabled by setting
+ * the CD_CHKPV and/or CD_CHKPM bits in pmap_con_dbg.
+ */
+#ifdef DEBUG
+#define CHECK_PV_LIST(phys,pv_h,who) \
+ if (pmap_con_dbg & CD_CHKPV) check_pv_list(phys,pv_h,who)
+#define CHECK_PMAP_CONSISTENCY(who) \
+ if (pmap_con_dbg & CD_CHKPM) check_pmap_consistency(who)
+#else
+#define CHECK_PV_LIST(phys,pv_h,who)
+#define CHECK_PMAP_CONSISTENCY(who)
+#endif /* DEBUG */
+
+/*
+ * number of BATC entries used
+ */
+int batc_used;
+
+/*
+ * keep track of BATC mappings
+ */
+batc_entry_t batc_entry[BATC_MAX];
+
+int maxcmmu_pb = 4;	/* max number of CMMUs per processor's pbus */
+int n_cmmus_pb = 1;	/* number of CMMUs per processor's pbus */
+
+#define cpu_number() 0 /* just being lazy, should be taken out -nivas*/
+
+vm_offset_t kmapva = 0;
+
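+/*
+ * flush_atc_entry: flush the cached translation for 'va' from the
+ * TLBs of the cpus indicated by 'users' (kernel or user TLB selected
+ * by 'kernel'). For now only cpu 0 is ever flushed; see the note
+ * inside.
+ */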
+static void flush_atc_entry(unsigned users, vm_offset_t va, int kernel)
+{
+ /* always flush cpu 0 TLB till we understand if this
+ is required XXX -nivas */
+/* if (users) */
+ cmmu_flush_remote_tlb(cpu_number(), kernel, va, M88K_PGBYTES);
+}
+
+/*
+ * Routine: _PMAP_ACTIVATE
+ *
+ * Author: N. Sugai
+ *
+ * Function:
+ * Binds the given physical map to the given processor.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * p pointer to proc structure
+ * cpu CPU number
+ *
+ * If the specified pmap is not kernel_pmap, this routine builds an APR
+ * template and stores it into the UAPR (user area pointer register) in the
+ * CMMUs connected to the specified CPU.
+ *
+ * If kernel_pmap is specified, only flushes the TLBs mapping kernel
+ * virtual space, in the CMMUs connected to the specified CPU.
+ *
+ * NOTE:
+ *	All of the code of this function was extracted from the macro
+ *	PMAP_ACTIVATE to make debugging easier. Accordingly, PMAP_ACTIVATE
+ *	simply calls _pmap_activate.
+ *
+ */
+void
+_pmap_activate(
+ register pmap_t pmap,
+ register pcb_t pcb,
+ register int my_cpu)
+{
+ register apr_template_t apr_data;
+ register int n;
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_ACTIVATE | CD_FULL)) == (CD_ACTIVATE | CD_NORM))
+ printf("(_pmap_activate :%x) pmap 0x%x\n", curproc, (unsigned)pmap);
+#endif
+
+ if (pmap != kernel_pmap) {
+ /*
+ * Lock the pmap to put this cpu in its active set.
+ */
+ simple_lock(&pmap->lock);
+
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(pmap->sdt_paddr);
+ apr_data.field.wt = 0;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1;
+#ifdef OMRON_PMAP
+ /*
+ * cmmu_pmap_activate will set the uapr and the batc entries, then
+ * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE
+	 * BATC ENTRIES, THE SUPERVISOR TLBs SHOULD BE FLUSHED AS WELL.
+ */
+ cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc);
+ for (n = 0; n < BATC_MAX; n++)
+ *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
+#else
+ cmmu_set_uapr(apr_data.bits);
+ cmmu_flush_tlb(0, 0, -1);
+#endif
+
+ /*
+ * Mark that this cpu is using the pmap.
+ */
+ simple_unlock(&pmap->lock);
+
+ } else {
+
+ /*
+ * kernel_pmap must be always active.
+		 * kernel_pmap must always be active.
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
+ printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc);
+#endif
+
+ }
+} /* _pmap_activate */
+
+/*
+ * Routine: _PMAP_DEACTIVATE
+ *
+ * Author: N. Sugai
+ *
+ * Function:
+ *	Unbinds the given physical map from the given processor.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * th pointer to thread structure
+ * cpu CPU number
+ *
+ * _pmap_deactivate simply clears the cpus_using field in the given pmap structure.
+ *
+ * NOTE:
+ *	All of the code of this function was extracted from the macro
+ *	PMAP_DEACTIVATE to make debugging easier. Accordingly,
+ *	PMAP_DEACTIVATE simply calls _pmap_deactivate.
+ *
+ */
+void
+_pmap_deactivate(
+ register pmap_t pmap,
+ register pcb_t pcb,
+ register int my_cpu)
+{
+ if (pmap != kernel_pmap) {
+ /* Nothing to do */
+ }
+}
+
+/*
+ * Author: Joe Uemura
+ * Convert machine-independent protection code to M88K protection bits.
+ *
+ * History:
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ */
+
+static int unsigned m88k_protection(
+ pmap_t map,
+ vm_prot_t prot)
+{
+ register pte_template_t p;
+
+ p.bits = 0;
+ p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
+
+ return(p.bits);
+
+} /* m88k_protection */
+
+
+/*
+ * Routine: PMAP_PTE
+ *
+ * Author: Joe Uemura
+ *
+ * Function:
+ * Given a map and a virtual address, compute a (virtual) pointer
+ *	to the page table entry (PTE) which maps the address.
+ * If the page table associated with the address does not
+ * exist, PT_ENTRY_NULL is returned (and the map may need to grow).
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * virt virtual address for which page table entry is desired
+ *
+ * Otherwise the page table address is extracted from the segment table,
+ * the page table index is added, and the result is returned.
+ *
+ * Calls:
+ * SDTENT
+ * SDT_VALID
+ * PDT_IDX
+ *
+ * History:
+ * 90/9/12 Fuzzy if pmap == PMAP_NULL, panic
+ */
+pt_entry_t * pmap_pte(
+ pmap_t map,
+ vm_offset_t virt)
+{
+ sdt_entry_t *sdt;
+
+ if (map == PMAP_NULL)
+ panic("pmap_pte: pmap is NULL");
+
+ sdt = SDTENT(map,virt);
+
+ /*
+	 * Check whether the page table exists or not.
+ */
+ if (!SDT_VALID(sdt))
+ return(PT_ENTRY_NULL);
+ else
+ return((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) + PDTIDX(virt));
+
+} /* pmap_pte */
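+
+/*
+ * Callers in this file typically pair pmap_pte() with pmap_expand_kmap()
+ * for kernel addresses, e.g. (as in pmap_map() below):
+ *
+ *	if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ *		pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE);
+ */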
+
+
+/*
+ * Routine: PMAP_EXPAND_KMAP (internal)
+ *
+ * History:
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.27 Fuzzy
+ * allocated pte entry clear
+ * '90.8.28 Fuzzy
+ * Bug: No free kernel page table process
+ * panic("pmap_expand_kmap:...");
+ * --> #ifdef DBG
+ * printf("Warnning: Ran out of page table entry VALID\n");
+ * #endif
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ * Author: Fuzzy
+ *
+ * Function:
+ *	Allocate a page descriptor table (pte_table) and validate the associated
+ *	segment table entry, returning a pointer to the page table entry. This is
+ * much like 'pmap_expand', except that table space is acquired
+ * from an area set up by pmap_bootstrap, instead of through
+ * kmem_alloc. (Obviously, because kmem_alloc uses the kernel map
+ * for allocation - which we can't do when trying to expand the
+ * kernel map!) Note that segment tables for the kernel map were
+ * all allocated at pmap_bootstrap time, so we only need to worry
+ * about the page table here.
+ *
+ * Parameters:
+ * virt VA for which translation tables are needed
+ * prot protection attributes for segment entries
+ *
+ * Extern/Global:
+ * kpdt_free kernel page table free queue
+ *
+ * Calls:
+ * m88k_protection
+ * SDTENT
+ * SDT_VALID
+ * PDT_IDX
+ *
+ * This routine simply dequeues a table from the kpdt_free list,
+ * initializes all its entries (invalidates them), and sets the
+ * corresponding segment table entry to point to it. If the kpdt_free
+ * list is empty, a warning is printed and PT_ENTRY_NULL is returned
+ * (there is nowhere else to get memory from; running out indicates
+ * that pmap_bootstrap is not allocating enough table space for the
+ * kernel virtual address space).
+ *
+ */
+
+static pt_entry_t * pmap_expand_kmap(
+ vm_offset_t virt,
+ vm_prot_t prot)
+{
+ int aprot;
+ sdt_entry_t *sdt;
+ kpdt_entry_t kpdt_ent;
+ pmap_t map = kernel_pmap;
+
+#if DEBUG
+ if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
+ printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
+#endif
+
+ aprot = m88k_protection (map, prot);
+
+	/* segment table entry derived from map and virt. */
+ sdt = SDTENT(map, virt);
+ if (SDT_VALID(sdt))
+ panic("pmap_expand_kmap: segment table entry VALID");
+
+ kpdt_ent = kpdt_free;
+ if (kpdt_ent == KPDT_ENTRY_NULL) {
+ printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
+ return(PT_ENTRY_NULL);
+ }
+ kpdt_free = kpdt_free->next;
+
+ ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
+ (unsigned)(kpdt_ent->phys) = 0;
+ (unsigned)(kpdt_ent->next) = 0;
+
+ return((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
+}/* pmap_expand_kmap() */
+
+/*
+ * Routine: PMAP_MAP
+ *
+ * Function:
+ *	Map memory at initialization. The physical addresses being
+ * mapped are not managed and are never unmapped.
+ *
+ * Parameters:
+ * virt virtual address of range to map (IN)
+ * start physical address of range to map (IN)
+ * end physical address of end of range (IN)
+ * prot protection attributes (IN)
+ *
+ * Calls:
+ * pmap_pte
+ * pmap_expand_kmap
+ *
+ * Special Assumptions
+ * For now, VM is already on, only need to map the specified
+ * memory. Used only by pmap_bootstrap() and vm_page_startup().
+ *
+ * For each page that needs mapping:
+ * pmap_pte is called to obtain the address of the page table
+ *	pmap_pte is called to obtain the address of the page table
+ *	entry (PTE). If the page table does not exist,
+ * entry is set to point to the physical page.
+ *
+ *
+ * initialize template with paddr, prot, dt
+ * look for number of phys pages in range
+ * {
+ * pmap_pte(virt) - expand if necessary
+ * stuff pte from template
+ * increment virt one page
+ * increment template paddr one page
+ * }
+ *
+ *
+ * History:
+ * 90/09/12 Fuzzy calculation of allocating page table entry number
+ *	90/09/12	Fuzzy	When an already mapped VA is mapped again, output a warning message.
+ *
+ */
+vm_offset_t pmap_map(
+ register vm_offset_t virt,
+ register vm_offset_t start,
+ register vm_offset_t end,
+ register vm_prot_t prot
+#ifdef OMRON_PMAP
+ , register unsigned cmode
+#endif /* OMRON */
+ )
+{
+ int aprot;
+ unsigned npages;
+ unsigned num_phys_pages;
+ pt_entry_t *pte;
+ pte_template_t template;
+
+#if DEBUG
+ if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
+ printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x\n",
+ curproc, start, end, virt, prot);
+#endif
+
+ if (start > end)
+ panic("pmap_map: start greater than end address");
+
+ aprot = m88k_protection (kernel_pmap, prot);
+
+#ifdef OMRON_PMAP
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
+#else /* OMRON */
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID;
+#endif /* OMRON */
+
+ npages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+
+ for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
+
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map: Cannot allocate pte table");
+
+#ifdef DEBUG
+ if (pmap_con_dbg & CD_MAP)
+ if (pte->dtype)
+ printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+#endif
+
+ *pte = template.pte;
+ virt += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ }
+
+ return(virt);
+
+} /* pmap_map() */
+
+/*
+ * Routine: PMAP_MAP_BATC
+ *
+ * Function:
+ *	Map memory using BATC at initialization. The physical addresses being
+ * mapped are not managed and are never unmapped.
+ *
+ * Parameters:
+ * virt virtual address of range to map (IN)
+ * start physical address of range to map (IN)
+ * end physical address of end of range (IN)
+ * prot protection attributes (IN)
+ * cmode cache control attributes (IN)
+ *
+ * External & Global:
+ * batc_used number of BATC used (IN/OUT)
+ *
+ * Calls:
+ * m88k_protection
+ * BATC_BLK_ALIGNED
+ * cmmu_store
+ * pmap_pte
+ * pmap_expand_kmap
+ *
+ *
+ * For each page that needs mapping:
+ * If both virt and phys are on the BATC block boundary, map using BATC.
+ * Else make mapping in the same manner as pmap_map.
+ *
+ * initialize BATC and pte template
+ * look for number of phys pages in range
+ * {
+ * if virt and phys are on BATC block boundary
+ * {
+ * map using BATC
+ * increment virt and phys one BATC block
+ * continue outer loop
+ * }
+ * pmap_pte(virt) - expand if necessary
+ * stuff pte from template
+ * increment virt one page
+ * increment template paddr one page
+ * }
+ *
+ * Author: Sugai
+ *	Oct 25 '90	Initial version
+ *
+ */
+vm_offset_t
+pmap_map_batc (
+ register vm_offset_t virt,
+ register vm_offset_t start,
+ register vm_offset_t end,
+ register vm_prot_t prot,
+ register unsigned cmode)
+{
+ int aprot;
+ unsigned num_phys_pages;
+ vm_offset_t phys;
+ pt_entry_t *pte;
+ pte_template_t template;
+ batc_template_t batctmp;
+ register int i;
+
+#if DEBUG
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ printf ("(pmap_map_batc :%x) phys address from %x to %x mapped at virtual %x, prot %x\n", curproc,
+ start, end, virt, prot);
+#endif
+
+ if (start > end)
+ panic("pmap_map_batc: start greater than end address");
+
+ aprot = m88k_protection (kernel_pmap, prot);
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
+ phys = start;
+ batctmp.bits = 0;
+ batctmp.field.sup = 1; /* supervisor */
+ batctmp.field.wt = template.pte.wt; /* write through */
+ batctmp.field.g = template.pte.g; /* global */
+ batctmp.field.ci = template.pte.ci; /* cache inhibit */
+ batctmp.field.wp = template.pte.prot; /* protection */
+ batctmp.field.v = 1; /* valid */
+
+ num_phys_pages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+
+ while (num_phys_pages > 0) {
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
+ num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
+#endif
+
+ if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
+ num_phys_pages >= BATC_BLKBYTES/M88K_PGBYTES &&
+ batc_used < BATC_MAX ) {
+
+ /*
+ * map by BATC
+ */
+ batctmp.field.lba = M88K_BTOBLK(virt);
+ batctmp.field.pba = M88K_BTOBLK(phys);
+
+ cmmu_set_pair_batc_entry(0, batc_used, batctmp.bits);
+
+ batc_entry[batc_used] = batctmp.field;
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
+ printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
+ }
+ if (pmap_con_dbg & CD_MAPB) {
+
+ for (i = 0; i < BATC_BLKBYTES; i += M88K_PGBYTES ) {
+ pte = pmap_pte(kernel_pmap, virt+i);
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
+ }
+ }
+#endif
+ batc_used++;
+ virt += BATC_BLKBYTES;
+ phys += BATC_BLKBYTES;
+ template.pte.pfn = M88K_BTOP(phys);
+ num_phys_pages -= BATC_BLKBYTES/M88K_PGBYTES;
+ continue;
+ }
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map_batc: Cannot allocate pte table");
+
+#ifdef DEBUG
+ if (pmap_con_dbg & CD_MAPB)
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+#endif
+
+ *pte = template.pte;
+ virt += M88K_PGBYTES;
+ phys += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ num_phys_pages--;
+ }
+
+ return(M88K_ROUND_PAGE(virt));
+
+} /* pmap_map_batc() */
+
+/*
+ * Routine: PMAP_CACHE_CONTROL
+ *
+ * Author: Sugai 90/09/07
+ *
+ * Function:
+ *	Set the cache-control bits in the page table entries (PTEs) which map
+ *	the specified virtual address range.
+ *
+ * mode
+ * writethrough 0x200
+ * global 0x80
+ * cache inhibit 0x40
+ *
+ * Parameters:
+ * pmap_t map
+ * vm_offset_t s
+ * vm_offset_t e
+ * unsigned mode
+ *
+ * Calls:
+ * PMAP_LOCK
+ * PMAP_UNLOCK
+ * pmap_pte
+ * invalidate_pte
+ * flush_atc_entry
+ * dcachefall
+ *
+ * This routine sequences through the pages of the specified range.
+ * For each, it calls pmap_pte to acquire a pointer to the page table
+ *	entry (PTE). If the PTE is invalid, or non-existent, nothing is done.
+ * Otherwise, the cache-control bits in the PTE's are adjusted as specified.
+ *
+ */
+void pmap_cache_ctrl(
+ pmap_t pmap,
+ vm_offset_t s,
+ vm_offset_t e,
+ unsigned mode)
+{
+ int spl, spl_sav;
+ pt_entry_t *pte;
+ vm_offset_t va;
+ int kflush;
+ int cpu;
+ register pte_template_t opte;
+
+#ifdef DEBUG
+ if ( mode & CACHE_MASK ) {
+ printf("(cache_ctrl) illegal mode %x\n",mode);
+ return;
+ }
+ if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
+ printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
+ }
+#endif /* DEBUG */
+
+ if ( pmap == PMAP_NULL ) {
+ panic("pmap_cache_ctrl: pmap is NULL");
+ }
+
+ PMAP_LOCK(pmap, spl);
+
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ for (va = s; va < e; va += M88K_PGBYTES) {
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ continue;
+#ifdef DEBUG
+ printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
+#endif /* DEBUG */
+
+ /*
+		 * Invalidate the pte temporarily to avoid having the
+		 * modified and/or referenced bits written back by another cpu.
+ * XXX
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
+ flush_atc_entry(0, va, kflush);
+ splx(spl_sav);
+
+ /*
+ * Data cache should be copied back and invalidated.
+ */
+ cmmu_flush_remote_cache(0, M88K_PTOB(pte->pfn), M88K_PGBYTES);
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+
+} /* pmap_cache_ctrl */
+
+
+/*
+ * Routine: PMAP_BOOTSTRAP
+ *
+ * Author: Fuzzy '90.7.12
+ *
+ * 90.7.23. JU - changed blkclr to bzero
+ *
+ *
+ * Function:
+ *	Bootstrap the system enough to run with virtual memory.
+ * Map the kernel's code and data, allocate the kernel
+ * translation table space, and map control registers
+ * and other IO addresses.
+ *
+ * Parameters:
+ * load_start PA where kernel was loaded (IN)
+ * &phys_start PA of first available physical page (IN/OUT)
+ * &phys_end PA of last available physical page (IN)
+ * &virtual_avail VA of first available page (after kernel bss)
+ * &virtual_end VA of last available page (end of kernel address space)
+ *
+ * Extern/Global:
+ *
+ * PAGE_SIZE VM (software) page size (IN)
+ * kernelstart start symbol of kernel text (IN)
+ * etext end of kernel text (IN)
+ * phys_map_vaddr1 VA of page mapped arbitrarily for debug/IO (OUT)
+ * phys_map_vaddr2 VA of page mapped arbitrarily for debug/IO (OUT)
+ *
+ * Calls:
+ * simple_lock_init
+ * pmap_map
+ * pmap_map_batc
+ *
+ * The physical address 'load_start' is mapped at
+ * VM_MIN_KERNEL_ADDRESS, which maps the kernel code and data at the
+ * virtual address for which it was (presumably) linked. Immediately
+ *	following the end of the kernel code/data, sufficient pages of
+ * physical memory are reserved to hold translation tables for the kernel
+ * address space. The 'phys_start' parameter is adjusted upward to
+ * reflect this allocation. This space is mapped in virtual memory
+ * immediately following the kernel code/data map.
+ *
+ * A pair of virtual pages are reserved for debugging and IO
+ * purposes. They are arbitrarily mapped when needed. They are used,
+ * for example, by pmap_copy_page and pmap_zero_page.
+ *
+ * For m88k, we have to map BUG memory also. This is a read only
+ * mapping for 0x10000 bytes. We will end up having load_start as
+ * 0 and VM_MIN_KERNEL_ADDRESS as 0 - yes sir, we have one-to-one
+ * mapping!!!
+ */
+
+void
+pmap_bootstrap(
+ vm_offset_t load_start, /* IN */
+ vm_offset_t *phys_start, /* IN/OUT */
+ vm_offset_t *phys_end, /* IN */
+ vm_offset_t *virt_start, /* OUT */
+ vm_offset_t *virt_end) /* OUT */
+{
+ kpdt_entry_t kpdt_virt;
+ sdt_entry_t *kmap;
+ vm_offset_t vaddr,
+ virt,
+ kpdt_phys,
+ s_text,
+ e_text,
+ kernel_pmap_size;
+ apr_template_t apr_data;
+ pt_entry_t *pte;
+ int i;
+ extern char *kernelstart, *etext;
+#if 0
+ pmap_table_t ptable;
+#endif /* 0 */
+
+ printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
+ ptes_per_vm_page = PAGE_SIZE >> M88K_PGSHIFT;
+ if (ptes_per_vm_page == 0)
+ panic("pmap_bootstrap: VM page size < MACHINE page size");
+
+ if ( ! PAGE_ALIGNED(load_start)) {
+ printf("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x\n", load_start);
+ }
+
+ /*
+ * Allocate the kernel page table from the front of available
+ * physical memory,
+ * i.e. just after where the kernel image was loaded.
+ */
+ /*
+ * The calling sequence is
+ * ...
+ * pmap_bootstrap(&kernelstart,...)
+ * kernelstart is the first symbol in the load image.
+ * We link the kernel such that &kernelstart == 0x10000 (size of
+ * BUG ROM)
+ * The expression (&kernelstart - load_start) will end up as
+ * 0, making *virt_start == *phys_start, giving a 1-to-1 map)
+ */
+
+ *phys_start = M88K_ROUND_PAGE(*phys_start);
+ *virt_start = *phys_start + ((unsigned)&kernelstart - GOOFYLDOFFSET - load_start);
+
+ /*
+	 * Initialize kernel_pmap structure
+ */
+ kernel_pmap->ref_count = 1;
+ kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start);
+ kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start);
+ kmapva = *virt_start;
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kernel_pmap->sdt_paddr = %x\n",kernel_pmap->sdt_paddr);
+ printf("kernel_pmap->sdt_vaddr = %x\n",kernel_pmap->sdt_vaddr);
+ }
+ /* init double-linked list of pmap structure */
+ kernel_pmap->next = kernel_pmap;
+ kernel_pmap->prev = kernel_pmap;
+#endif
+
+ /*
+ * Reserve space for segment table entries.
+	 * One for the regular segment table and one for the shadow table.
+	 * The shadow table keeps track of the virtual addresses of page
+	 * tables. This is used by the virtual-to-physical address translation
+	 * functions. Remember, the MMU cares only about the physical
+	 * addresses of segment and page tables. For kernel page tables, we
+ * really don't need this virtual stuff (since the kernel will
+ * be mapped 1-to-1) but for user page tables, this is required.
+ * Just to be consistent, we will maintain the shadow table for
+ * kernel pmap also.
+ */
+
+ kernel_pmap_size = 2*SDT_SIZE;
+
+ /* save pointers to where page table entries start in physical memory */
+ kpdt_phys = (*phys_start + kernel_pmap_size);
+ kpdt_virt = (kpdt_entry_t)(*virt_start + kernel_pmap_size);
+ kernel_pmap_size += MAX_KERNEL_PDT_SIZE;
+ *phys_start += kernel_pmap_size;
+ *virt_start += kernel_pmap_size;
+
+	/* init all segment and page descriptors to zero */
+ bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size);
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kpdt_phys = %x\n",kpdt_phys);
+ printf("kpdt_virt = %x\n",kpdt_virt);
+ printf("end of kpdt at (virt)0x%08x ; (phys)0x%08x\n",
+ *virt_start,*phys_start);
+ }
+#endif
+ /*
+ * init the kpdt queue
+ */
+ kpdt_free = kpdt_virt;
+ for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) {
+ kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
+ kpdt_virt->phys = kpdt_phys;
+ kpdt_virt = kpdt_virt->next;
+ kpdt_phys += PDT_SIZE;
+ }
+ kpdt_virt->next = KPDT_ENTRY_NULL; /* terminate the list */
+
+ /*
+ * Map the kernel image into virtual space
+ */
+
+ s_text = load_start; /* paddr of text */
+ e_text = load_start + ((unsigned)&etext - (unsigned)&kernelstart - GOOFYLDOFFSET); /* paddr of end of text section*/
+ e_text = M88K_ROUND_PAGE(e_text);
+
+ #ifdef OMRON_PMAP
+ #define PMAPER pmap_map
+ #else
+ #define PMAPER pmap_map_batc
+ #endif
+
+	/* map the first 64k (BUG ROM), cache inhibited */
+ vaddr = PMAPER(
+ 0,
+ 0,
+ 0x10000,
+ VM_PROT_READ|VM_PROT_WRITE,
+ CACHE_INH);
+
+ assert(vaddr == (unsigned)&kernelstart - GOOFYLDOFFSET);
+
+ vaddr = PMAPER(
+ (vm_offset_t)((unsigned)&kernelstart - GOOFYLDOFFSET),
+ s_text,
+ e_text,
+ VM_PROT_WRITE | VM_PROT_READ, /* shouldn't it be RO? XXX*/
+ CACHE_INH);
+
+ vaddr = PMAPER(
+ vaddr,
+ e_text,
+ (vm_offset_t)kmap,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_GLOBAL);
+
+ /*
+ * Map system segment & page tables - should be cache inhibited.
+ */
+ if (kmapva != vaddr) {
+ printf("(pmap_bootstrap) correcting vaddr\n");
+ while (vaddr < (*virt_start - kernel_pmap_size))
+ vaddr = M88K_ROUND_PAGE(vaddr + 1);
+ }
+
+ vaddr = PMAPER(
+ vaddr,
+ (vm_offset_t)kmap,
+ *phys_start,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_INH);
+
+ if (vaddr != *virt_start) {
+ *virt_start = vaddr;
+ *phys_start = round_page(*phys_start);
+ }
+
+
+ *virt_start = round_page(*virt_start);
+ *virt_end = VM_MAX_KERNEL_ADDRESS;
+
+ /*
+ * Map a few more pages for phys routines and debugger.
+ */
+
+ phys_map_vaddr1 = round_page(*virt_start);
+ phys_map_vaddr2 = phys_map_vaddr1 + PAGE_SIZE;
+
+ /*
+ * To make 1:1 mapping of virt:phys, throw away a few phys pages
+ */
+
+ *phys_start += 2 * PAGE_SIZE;
+ *virt_start += 2 * PAGE_SIZE;
+
+ /*
+ * establish mapping for code and data cmmu
+ */
+
+ if (cmmumap) {
+ PMAPER(
+ CMMU_I,
+ CMMU_I,
+ CMMU_I + 0x1000,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_INH);
+
+ PMAPER(
+ CMMU_D,
+ CMMU_D,
+ CMMU_D + 0x1000,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_INH);
+ }
+#if 0
+ if (mapextra) {
+ PMAPER(
+ 0x01000000,
+ 0x01000000,
+ 0x02000000,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_INH);
+ }
+#endif /* 0 */
+ if (mapallio) {
+ PMAPER(
+ 0xFF800000,
+ 0xFF800000,
+ 0xFFFF0000,
+ VM_PROT_WRITE|VM_PROT_READ,
+ CACHE_INH);
+ }
+
+#if 0
+ ptable = pmap_table_build(avail_end);
+
+ for ( ; ptable->size != 0xffffffffU; ptable++)
+ if (ptable->size)
+ PMAPER(ptable->virt_start,
+ ptable->phys_start,
+ ptable->phys_start + ptable->size,
+ ptable->prot,
+ ptable->cacheability);
+
+#endif /* 0 */
+
+ /*
+ * Allocate all the submaps we need
+ */
+#define SYSMAP(c, p, v, n) \
+({ \
+ v = (c)virt; \
+ if ((p = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL) \
+ pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE); \
+ virt += ((n)*NBPG); \
+})
+
+ virt = *virt_start;
+
+ SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 );
+ SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 );
+ SYSMAP(caddr_t ,vmpte ,vmmap ,1 );
+ SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 );
+
+ *virt_start = virt;
+ /*
+ * Set translation for UPAGES at UADDR. The idea is we want to
+ * have translations set up for UADDR. Later on, the ptes for
+	 * this address will be set so that kstack will refer
+ * to the u area. Make sure pmap knows about this virtual
+ * address by doing vm_findspace on kernel_map.
+ */
+
+ for (i = 0, virt = UADDR; i < UPAGES; i++, virt += PAGE_SIZE) {
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("setting up mapping for Upage %d @ %x\n", i, virt);
+ }
+#endif
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE);
+ }
+ /*
+ * Switch to using new page tables
+ */
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(kernel_pmap->sdt_paddr);
+ apr_data.field.wt = 1;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 1;
+ apr_data.field.te = 1; /* Translation enable */
+
+ /* Invalidate entire kernel TLB. */
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("invalidating tlb %x\n", apr_data.bits);
+ }
+#endif
+ cmmu_flush_remote_tlb(0, 1, 0, -1);
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("done invalidating tlb %x\n", apr_data.bits);
+ }
+#endif
+
+ if (mydebug) {
+ pmap_print(kernel_pmap);
+ pmap_print_trace(kernel_pmap, (vm_offset_t)0xFFF00000, 1);
+ }
+ /* still physical */
+ /* Load supervisor pointer to segment table. */
+ cmmu_remote_set_sapr(0, apr_data.bits);
+ /* virtual now on */
+#ifdef DEBUG
+ printf("running virtual - avail_next 0x%x\n", *phys_start);
+#endif
+ avail_next = *phys_start;
+ if (mydebug) {
+ pmap_print_trace(kernel_pmap, proc0paddr, 1);
+ }
+
+} /* pmap_bootstrap() */
+
+/*
+ * Bootstrap memory allocator. This function allows for early dynamic
+ * memory allocation until the virtual memory system has been bootstrapped.
+ * After that point, either kmem_alloc or malloc should be used. This
+ * function works by stealing pages from the (to be) managed page pool,
+ * stealing virtual address space, then mapping the pages and zeroing them.
+ *
+ * It should be used from pmap_bootstrap till vm_page_startup, afterwards
+ * it cannot be used, and will generate a panic if tried. Note that this
+ * memory will never be freed, and in essence it is wired down.
+ */
+
+void *
+pmap_bootstrap_alloc(int size)
+{
+ register void *mem;
+
+ size = round_page(size);
+ mem = (void *)virtual_avail;
+ virtual_avail = pmap_map(virtual_avail, avail_start,
+ avail_start + size, VM_PROT_READ|VM_PROT_WRITE, CACHE_INH);
+ avail_start += size;
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("pmap_bootstrap_alloc: size %x virtual_avail %x avail_start %x\n",
+ size, virtual_avail, avail_start);
+ }
+#endif
+ bzero((void *)mem, size);
+ return (mem);
+}
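+
+/*
+ * Illustrative use only ('early_buf' is a hypothetical name):
+ *
+ *	caddr_t early_buf;
+ *
+ *	early_buf = (caddr_t)pmap_bootstrap_alloc(PAGE_SIZE);
+ *
+ * The page comes back mapped and zeroed, is in essence wired down,
+ * and is never freed.
+ */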
+
+/*
+ * Routine: PMAP_INIT
+ *
+ * History
+ * June 13 90 Fri. Fuzzy
+ * Rewrite lvl1 --> segment
+ * lvl3 --> page
+ * '90.7.19 Fuzzy sdt_zone unused
+ *
+ * Function:
+ * Initialize the pmap module. It is called by vm_init, to initialize
+ * any structures that the pmap system needs to map virtual memory.
+ *
+ * Parameters:
+ * phys_start physical address of first available page
+ * (was last set by pmap_bootstrap)
+ * phys_end physical address of last available page
+ *
+ * Extern/Globals
+ * pv_head_table (OUT)
+ * pv_lock_table (OUT)
+ * pmap_modify_list (OUT)
+ * pmap_phys_start (OUT)
+ * pmap_phys_end (OUT)
+ * pmap_initialized(OUT)
+ *
+ * Calls:
+ * kmem_alloc
+ * zinit
+ *
+ * This routine does not really have much to do. It allocates space
+ * for the pv_head_table, pv_lock_table, pmap_modify_list; and sets these
+ * pointers. It also initializes zones for pmap structures, pv_entry
+ * structures, and segment tables.
+ *
+ * Last, it sets the pmap_phys_start and pmap_phys_end global
+ *	variables. These define the range of pages 'managed' by pmap. These
+ * are pages for which pmap must maintain the PV list and the modify
+ * list. (All other pages are kernel-specific and are permanently
+ * wired.)
+ *
+ *
+ * kmem_alloc() memory for pv_table
+ * kmem_alloc() memory for modify_bits
+ * zinit(pmap_zone)
+ * zinit(segment zone)
+ *
+ */
+void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
+{
+ register long npages;
+ register vm_offset_t addr;
+ register vm_size_t s;
+ register int i;
+ vm_size_t pvl_table_size;
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
+ printf("(pmap_init) phys_start %x phys_end %x\n", phys_start, phys_end);
+#endif
+
+ /*
+ * Allocate memory for the pv_head_table,
+ * the modify bit array, and the pte_page table.
+ */
+ npages = atop(phys_end - phys_start);
+ pvl_table_size = PV_LOCK_TABLE_SIZE(npages);
+ s = (vm_size_t)(npages * sizeof(struct pv_entry) /* pv_list */
+ #if 0
+ + pvl_table_size /* pv_lock_table */
+ #endif /* 0 */
+ + npages); /* pmap_modify_list */
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
+ printf("(pmap_init) nbr of managed pages = %x\n", npages);
+ printf("(pmap_init) size of pv_list = %x\n",
+ npages * sizeof(struct pv_entry));
+ }
+#endif
+
+ s = round_page(s);
+ addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+
+ pv_head_table = (pv_entry_t)addr;
+ addr = (vm_offset_t)(pv_head_table + npages);
+
+ pmap_modify_list = (char *)addr;
+
+ /*
+ * Only now, when all of the data structures are allocated,
+ * can we set pmap_phys_start and pmap_phys_end. If we set them
+ * too soon, the kmem_alloc above will blow up when it causes
+ * a call to pmap_enter, and pmap_enter tries to manipulate the
+ * (not yet existing) pv_list.
+ */
+ pmap_phys_start = phys_start;
+ pmap_phys_end = phys_end;
+
+ pmap_initialized = TRUE;
+
+} /* pmap_init() */
+
+
+/*
+ * Routine: PMAP_ZERO_PAGE
+ *
+ * History:
+ * '90.7.13 Fuzzy
+ * '90.9.05 Fuzzy
+ * Bug: template page invalid --> template page valid
+ *
+ * template = M88K_TRUNC_PAGE(phys)
+ * | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
+ * | DT_VALID;
+ * ^^^^^^^^ add
+ *
+ * Function:
+ * Zeros the specified (machine independent) page.
+ *
+ * Parameters:
+ * phys PA of page to zero
+ *
+ * Extern/Global:
+ * phys_map_vaddr1
+ *
+ * Calls:
+ * M88K_TRUNC_PAGE
+ * m88k_protection
+ * cmmu_sflush_page
+ * DO_PTES
+ * bzero
+ *
+ * Special Assumptions:
+ * no locking required
+ *
+ *	This routine maps the physical page at the 'phys_map' virtual
+ * address set up in pmap_bootstrap. It flushes the TLB to make the new
+ * mappings effective, and zeros all the bits.
+ */
+void pmap_zero_page(vm_offset_t phys)
+{
+ vm_offset_t srcva;
+ pte_template_t template;
+ unsigned int i;
+ unsigned int spl_sav;
+
+ register int my_cpu = cpu_number();
+ pt_entry_t *srcpte;
+
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+
+ for (i = 0; i < ptes_per_vm_page; i++, phys += M88K_PGBYTES)
+ {
+ template.bits = M88K_TRUNC_PAGE(phys)
+ | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
+ | DT_VALID | CACHE_GLOBAL;
+
+
+ spl_sav = splblock();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+ splx(spl_sav);
+ bzero (srcva, M88K_PGBYTES);
+ /* force the data out */
+ cmmu_flush_remote_data_cache(my_cpu,phys, M88K_PGBYTES);
+ }
+
+} /* pmap_zero_page() */
+
+
+/*
+ * Routine: PMAP_CREATE
+ *
+ * Author: Fuzzy
+ *
+ * History:
+ * '90.7.13 Fuzzy level 1 --> segment exchange
+ * '90.7.16 Fuzzy PT_ALIGNED --> PAGE_ALIGNED exchange
+ * l1_utemplate delete
+ * '90.7.20 Fuzzy kernel segment entries in segment table
+ * entries for user space address delete.
+ * copying kernel segment entries
+ * to user pmap segment entries delete.
+ * all user segment table entries initialize
+ * to zero (invalid).
+ *
+ * Function:
+ * Create and return a physical map. If the size specified for the
+ * map is zero, the map is an actual physical map, and may be referenced
+ * by the hardware. If the size specified is non-zero, the map will be
+ * used in software only, and is bounded by that size.
+ *
+ *	Parameters:
+ * size size of the map
+ *
+ * Calls:
+ * zalloc
+ * simple_lock_init
+ *
+ *	This routine allocates a pmap structure and segment translation
+ *	table from the zones set up by pmap_init. The segment table entries
+ *	for user space addresses are initialized to zero (invalid).
+ *	The pmap structure is initialized with the virtual and physical
+ * addresses of the segment table. The address (virtual) of the
+ * pmap structure is returned.
+ */
+pmap_t pmap_create(vm_size_t size)
+{
+ register pmap_t p;
+
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+ if (size != 0)
+ return(PMAP_NULL);
+
+ CHECK_PMAP_CONSISTENCY("pmap_create");
+
+ p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
+ if (p == PMAP_NULL) {
+ panic("pmap_create: cannot allocate a pmap");
+ }
+
+ bzero(p, sizeof(*p));
+ pmap_pinit(p);
+ return(p);
+
+} /* pmap_create() */
+
+void
+pmap_pinit(pmap_t p)
+{
+ register pmap_statistics_t stats;
+ sdt_entry_t *segdt;
+ int i;
+
+ /*
+ * Allocate memory for *actual* segment table and *shadow* table.
+ */
+ segdt = kmem_alloc(kernel_map, 2 * SDT_SIZE);
+ if (segdt == NULL)
+ panic("pmap_create: kmem_alloc failure");
+
+#if 0
+ /* maybe, we can use bzero to zero out the segdt. */
+ bzero(segdt, 2 * SDT_SIZE); */
+#endif /* 0 */
+ /* use pmap zero page to zero it out */
+ pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt));
+ if (PAGE_SIZE == SDT_SIZE) /* only got half */
+ pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt+PAGE_SIZE));
+ if (PAGE_SIZE < 2*SDT_SIZE) /* get remainder */
+ bzero((vm_offset_t)segdt+PAGE_SIZE, (2*SDT_SIZE)-PAGE_SIZE);
+
+ /*
+ * Initialize pointer to segment table both virtual and physical.
+ */
+ p->sdt_vaddr = segdt;
+ p->sdt_paddr = (sdt_entry_t *)pmap_extract(kernel_pmap,(vm_offset_t)segdt);
+
+ if (!PAGE_ALIGNED(p->sdt_paddr)) {
+ printf("pmap_create: std table = %x\n",(int)p->sdt_paddr);
+ panic("pmap_create: sdt_table not aligned on page boundary");
+ }
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
+ printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
+ curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
+ }
+#endif
+
+ /*
+ * memory for page tables should be CACHE DISABLED
+ */
+ pmap_cache_ctrl(kernel_pmap,
+ (vm_offset_t)segdt,
+ (vm_offset_t)segdt+SDT_SIZE,
+ CACHE_INH);
+ /*
+	 * Initialize SDT_ENTRIES.
+	 */
+	/*
+	 * There is no need to clear the segment table, since kmem_alloc
+	 * provides us with clean pages.
+ */
+
+ /*
+ * Initialize pmap structure.
+ */
+ p->ref_count = 1;
+
+#ifdef OMRON_PMAP
+ /* initialize block address translation cache */
+ for (i = 0; i < BATC_MAX; i++) {
+ p->i_batc[i].bits = 0;
+ p->d_batc[i].bits = 0;
+ }
+#endif
+
+ /*
+ * Initialize statistics.
+ */
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
+
+#ifdef DEBUG
+ /* link into list of pmaps, just after kernel pmap */
+ p->next = kernel_pmap->next;
+ p->prev = kernel_pmap;
+ kernel_pmap->next = p;
+ p->next->prev = p;
+#endif
+
+} /* pmap_pinit() */
+
+/*
+ * Routine: PMAP_FREE_TABLES (internal)
+ *
+ * History:
+ *	'90. 7.16	Fuzzy	level 3 --> page descriptor table
+ *			level 1 --> segment descriptor table
+ * 90/07/20 N.Sugai sdt_zone no longer exist. We must
+ * use kmem_free instead of zfree.
+ * '90. 7.26 Fuzzy VM_MIN_ADDRESS -> VM_MIN_USER_ADDRESS
+ * VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.22 Fuzzy Debugging message add
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ * '90. 9.11 Fuzzy sdt_va: vm_offset_t --> unsigned long
+ *
+ * Internal procedure used by pmap_destroy() to actually deallocate
+ * the tables.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Calls:
+ * pmap_pte
+ * kmem_free
+ * PT_FREE
+ *
+ * Special Assumptions:
+ *	No locking is needed, since this is only called when the
+ *	ref_count field of the pmap structure goes to zero.
+ *
+ * This routine sequences through the user address space, releasing
+ * all translation table space back to the system using PT_FREE.
+ * The loops are indexed by the virtual address space
+ * ranges represented by the table group sizes (PDT_TABLE_GROUP_VA_SPACE).
+ *
+ */
+
+static void pmap_free_tables(pmap_t pmap)
+{
+ unsigned long sdt_va; /* outer loop index */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ unsigned int i,j;
+
+#if DEBUG
+ if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
+ printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
+#endif
+
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+
+ /*
+ This contortion is here instead of the natural loop
+ because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
+ */
+
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
+
+ /* Segment table Loop */
+ for ( ; i < j; i++)
+ {
+ sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
+ if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ printf("(pmap_free_tables :%x) free page table = 0x%x\n", curproc, gdttbl);
+#endif
+ PT_FREE(gdttbl);
+ }
+
+ } /* Segment Loop */
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ printf("(pmap_free_tables :%x) free segment table = 0x%x\n", curproc, sdttbl);
+#endif
+ /*
+ * Freeing both *actual* and *shadow* segment tables
+ */
+ kmem_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
+
+} /* pmap_free_tables() */
+
+
+void
+pmap_release(register pmap_t p)
+{
+ pmap_free_tables(p);
+#ifdef DEBUG
+	if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+		printf("(pmap_release :%x) ref_count = 0\n", curproc);
+	/* unlink from list of pmap structs */
+	p->prev->next = p->next;
+	p->next->prev = p->prev;
+#endif
+
+}
+
+/*
+ * Routine: PMAP_DESTROY
+ *
+ * History:
+ * '90. 7.16 Fuzzy
+ *
+ * Function:
+ * Retire the given physical map from service. Should only be called
+ * if the map contains no valid mappings.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Calls:
+ * CHECK_PMAP_CONSISTENCY
+ * PMAP_LOCK, PMAP_UNLOCK
+ * pmap_free_tables
+ * zfree
+ *
+ * Special Assumptions:
+ * Map contains no valid mappings.
+ *
+ * This routine decrements the reference count in the pmap
+ *	structure. If it goes to zero, pmap_release is called to release
+ *	the translation tables, and the pmap structure itself is then
+ *	freed.
+ */
+void pmap_destroy(register pmap_t p)
+{
+ register int c, s;
+
+ if (p == PMAP_NULL) {
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+ printf("(pmap_destroy :%x) pmap is NULL\n", curproc);
+#endif
+ return;
+ }
+
+ if (p == kernel_pmap) {
+ panic("pmap_destroy: Attempt to destroy kernel pmap");
+ }
+
+ CHECK_PMAP_CONSISTENCY("pmap_destroy");
+
+ PMAP_LOCK(p, s);
+ c = --p->ref_count;
+ PMAP_UNLOCK(p, s);
+
+ if (c == 0) {
+ pmap_release(p);
+ free((caddr_t)p,M_VMPMAP);
+ }
+
+} /* pmap_destroy() */
+
+
+/*
+ * Routine: PMAP_REFERENCE
+ *
+ * Author: Fuzzy
+ *
+ * Function:
+ * Add a reference to the specified pmap.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Calls:
+ * PMAP_LOCK, PMAP_UNLOCK
+ *
+ * Under a pmap read lock, the ref_count field of the pmap structure
+ * is incremented. The function then returns.
+ */
+void pmap_reference(register pmap_t p)
+{
+ int s;
+
+ if (p != PMAP_NULL) {
+ PMAP_LOCK(p, s);
+ p->ref_count++;
+ PMAP_UNLOCK(p, s);
+ }
+
+} /* pmap_reference */
+
+
+/*
+ * Routine: PMAP_REMOVE_RANGE (internal)
+ *
+ * Update:
+ *
+ * July 16, 90 - JUemura initial porting
+ * '90.7.27 Fuzzy Calls: add Macros
+ * '90.8.3 Fuzzy if defined TEST, 'static' undeclared.
+ * '90.8.29 Fuzzy line 112 (if (pte == PT_ENTRY_NULL) { ...)
+ * delete (check sdt invalid).
+ * '90.8.30 Fuzzy delete "if defined TEST, 'static' undeclared."
+ *
+ * Function:
+ * Invalidate page table entries associated with the
+ * given virtual address range. The entries given are the first
+ * (inclusive) and last (exclusive) entries for the VM pages.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * s virtual address of start of range to remove
+ *	e	virtual address of end of range to remove
+ *
+ * External/Global:
+ * pv lists
+ * pmap_modify_list
+ *
+ * Calls:
+ * CHECK_PAGE_ALIGN
+ * SDTENT
+ * SDT_VALID
+ * SDT_NEXT
+ * pmap_pte
+ * PDT_VALID
+ * M88K_PTOB
+ * PMAP_MANAGED
+ * PFIDX
+ * LOCK_PVH
+ * UNLOCK_PVH
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * zfree
+ * invalidate_pte
+ * flush_atc_entry
+ * vm_page_set_modified
+ * PHYS_TO_VM_PAGE
+ *
+ * Special Assumptions:
+ * The pmap must be locked.
+ *
+ * This routine sequences through the pages defined by the given
+ * range. For each page, pmap_pte is called to obtain a (virtual)
+ * pointer to the page table entry (PTE) associated with the page's
+ * virtual address. If the page table entry does not exist, or is invalid,
+ * nothing need be done.
+ *
+ *	If the PTE is valid, the routine must invalidate the entry. The
+ *	'modified' bit, if on, is reflected to the VM through the
+ * 'vm_page_set_modified' macro, and into the appropriate entry in the
+ * pmap_modify_list. Next, the function must find the PV list entry
+ * associated with this pmap/va (if it doesn't exist - the function
+ * panics). The PV list entry is unlinked from the list, and returned to
+ * its zone.
+ */
+
+static void pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
+{
+ int pfi;
+ int pfn;
+ int num_removed = 0,
+ num_unwired = 0;
+ register int i;
+ pt_entry_t *pte;
+ pv_entry_t prev, cur;
+ pv_entry_t pvl;
+ vm_offset_t pa, va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (e <= s)
+ panic("pmap_remove_range: end < start");
+
+ /*
+ * Pmap has been locked by pmap_remove.
+ */
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * Loop through the range in vm_page_size increments.
+	 * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true!?).
+ */
+
+ CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
+
+ for (va = s; va < e; va += PAGE_SIZE) {
+
+ sdt_entry_t *sdt;
+
+ sdt = SDTENT(pmap,va);
+
+ if (!SDT_VALID(sdt)) {
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
+ continue;
+ }
+
+ pte = pmap_pte(pmap,va);
+
+ if (!PDT_VALID(pte)) {
+ continue; /* no page mapping */
+ }
+
+ num_removed++;
+
+ if (pte->wired)
+ num_unwired++;
+
+ pfn = pte->pfn;
+ pa = M88K_PTOB(pfn);
+
+ if (PMAP_MANAGED(pa)) {
+ pfi = PFIDX(pa);
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
+
+ if (pvl->pmap == PMAP_NULL)
+ panic("pmap_remove: null pv_list");
+
+ if (pvl->va == va && pvl->pmap == pmap) {
+
+ /*
+				 * The header is the pv_entry. Copy the next one
+				 * into the header and free the next one (we can't
+				 * free the header).
+ */
+ cur = pvl->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ } else {
+ pvl->pmap = PMAP_NULL;
+ }
+
+ } else {
+
+ for (prev = pvl; (cur = prev->next) != PV_ENTRY_NULL; prev = cur) {
+ if (cur->va == va && cur->pmap == pmap) {
+ break;
+ }
+ }
+ if (cur == PV_ENTRY_NULL) {
+ printf("pmap_remove_range: looking for VA "
+ "0x%x PV list at 0x%x\n", va, (unsigned)pvl);
+ panic("pmap_remove_range: mapping not in pv_list");
+ }
+
+ prev->next = cur->next;
+ free((caddr_t)cur, M_VMPVENT);
+ }
+
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
+
+ } /* if PAGE_MANAGED */
+
+ /*
+ * For each pte in vm_page (NOTE: vm_page, not
+ * M88K (machine dependent) page !! ), reflect
+ * modify bits to pager and zero (invalidate,
+ * remove) the pte entry.
+ */
+ tva = va;
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or referenced bits written back by another cpu.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(0, tva, kflush);
+
+ if (opte.pte.modified) {
+ vm_page_set_modified(PHYS_TO_VM_PAGE(opte.bits & M88K_PGMASK));
+ /* keep track ourselves too */
+ if (PMAP_MANAGED(pa))
+ pmap_modify_list[pfi] = 1;
+ }
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+
+ } /* end for ( va = s; ...) */
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
+
+} /* pmap_remove_range */
+
+/*
+ * Routine: PMAP_REMOVE
+ *
+ * History:
+ * '90.7.16 Fuzzy Unchanged
+ * '90.7.26 Fuzzy VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS
+ * '90.8.23 Fuzzy add Debugging message
+ *
+ * Function:
+ * Remove the given range of addresses from the specified map.
+ * It is assumed that start is properly rounded to the VM page size.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Special Assumptions:
+ * Assumes not all entries must be valid in specified range.
+ *
+ * Calls:
+ * CHECK_PAGE_ALIGN
+ * PMAP_LOCK, PMAP_UNLOCK
+ * pmap_remove_range
+ * panic
+ *
+ * After taking pmap read lock, pmap_remove_range is called to do the
+ * real work.
+ */
+void
+pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
+{
+ int spl;
+
+ if (map == PMAP_NULL) {
+ return;
+ }
+
+#if DEBUG
+ if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
+ printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
+#endif
+
+ CHECK_PAGE_ALIGN(s, "pmap_remove start addr");
+
+ if (s>e)
+ panic("pmap_remove: start greater than end address");
+
+ pmap_remove_range(map, s, e);
+} /* pmap_remove() */
+
+
+/*
+ * Routine: PMAP_REMOVE_ALL
+ *
+ * History:
+ * '90.7.27 Fuzzy 'Calls:' modify
+ * '90.8.28 Fuzzy add Debugging message
+ *
+ * Function:
+ * Removes this physical page from all physical maps in which it
+ * resides. Reflects back modify bits to the pager.
+ *
+ * Parameters:
+ *	phys		physical address of the page which is to
+ * be removed from all maps
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ * pmap_modify_list
+ *
+ * Calls:
+ * PMAP_MANAGED
+ * SPLVM, SPLX
+ * PFIDX
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * simple_lock
+ * M88K_PTOB
+ * PDT_VALID
+ * pmap_pte
+ * vm_page_set_modified
+ * PHYS_TO_VM_PAGE
+ * zfree
+ *
+ * If the page specified by the given address is not a managed page,
+ * this routine simply returns. Otherwise, the PV list associated with
+ * that page is traversed. For each pmap/va pair pmap_pte is called to
+ * obtain a pointer to the page table entry (PTE) associated with the
+ * va (the PTE must exist and be valid, otherwise the routine panics).
+ * The hardware 'modified' bit in the PTE is examined. If it is on, the
+ * pmap_modify_list entry corresponding to the physical page is set to 1.
+ * Then, the PTE is invalidated, and the PV list entry is unlinked and
+ * freed.
+ *
+ * At the end of this function, the PV list for the specified page
+ * will be null.
+ */
+void
+pmap_remove_all(vm_offset_t phys)
+{
+ pv_entry_t pvl, cur;
+ register pt_entry_t *pte;
+ int pfi;
+ register int i;
+ register vm_offset_t va;
+ register pmap_t pmap;
+ int spl;
+ int dbgcnt = 0;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+ /* not a managed page. */
+#ifdef DEBUG
+ if (pmap_con_dbg & CD_RMAL)
+ printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
+#endif
+ return;
+ }
+
+ SPLVM(spl);
+
+ /*
+ * Walk down PV list, removing all mappings.
+ * We have to do the same work as in pmap_remove_pte_page
+ * since that routine locks the pv_head. We don't have
+	 * to lock the pv_head, since we have the entire pmap system locked.
+ */
+remove_all_Retry:
+
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
+
+ /*
+ * Loop for each entry on the pv list
+ */
+ while ((pmap = pvl->pmap) != PMAP_NULL) {
+ va = pvl->va;
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Do a few consistency checks to make sure
+ * the PV list and the pmap are in synch.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
+ (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
+ panic("pmap_remove_all: pte NULL");
+ }
+ if (!PDT_VALID(pte))
+ panic("pmap_remove_all: pte invalid");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_remove_all: pte doesn't point to page");
+ if (pte->wired)
+ panic("pmap_remove_all: removing a wired page");
+
+ pmap->stats.resident_count--;
+
+ if ((cur = pvl->next) != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ }
+ else
+ pvl->pmap = PMAP_NULL;
+
+ /*
+ * Reflect modified pages to pager.
+ */
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or referenced bits written back by another cpu.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(users, va, kflush);
+
+ if (opte.pte.modified) {
+ vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys));
+ /* keep track ourselves too */
+ pmap_modify_list[pfi] = 1;
+ }
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ /*
+ * Do not free any page tables,
+		 * leave that for when VM calls pmap_collect().
+ */
+ dbgcnt++;
+ }
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
+
+ SPLX(spl);
+
+} /* pmap_remove_all() */
+
+
+
+
+/*
+ * Routine: PMAP_COPY_ON_WRITE
+ *
+ * History:
+ * '90. 7.16 Fuzzy level 3 --> page table changed
+ * '90. 7.19 Fuzzy Comment 'Calls' add
+ * '90. 7.26 Fuzzy VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS
+ * '90. 8.18 Fuzzy Add Debugging Message (PA no mappings)
+ *	'90. 8.18	Fuzzy	Bug Fixes
+ * for (i=ptes_per_vm_page; i>0; i++) {
+ * ^^
+ * for (i=ptes_per_vm_page; i>0; i--) {
+ *
+ * Function:
+ * Remove write privileges from all physical maps for this physical page.
+ *
+ * Parameters:
+ * phys physical address of page to be read-protected.
+ *
+ * Calls:
+ * SPLVM, SPLX
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * simple_lock, simple_unlock
+ * panic
+ * PDT_VALID
+ * M88K_PTOB
+ * pmap_pte
+ *
+ * Special Assumptions:
+ *	All mappings of the page are user-space mappings.
+ *
+ * This routine walks the PV list. For each pmap/va pair it locates
+ * the page table entry (the PTE), and sets the hardware enforced
+ * read-only bit. The TLB is appropriately flushed.
+ */
+static void pmap_copy_on_write(vm_offset_t phys)
+{
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ register int i;
+ int spl, spl_sav;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+#ifdef DEBUG
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys);
+#endif
+ return;
+ }
+
+ SPLVM(spl);
+
+ pv_e = PFIDX_TO_PVH(PFIDX(phys));
+ CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
+ if (pv_e->pmap == PMAP_NULL) {
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
+#endif
+
+ SPLX(spl);
+
+ return; /* no mappings */
+ }
+
+ /*
+ * Run down the list of mappings to this physical page,
+ * disabling write privileges on each one.
+ */
+
+ while (pv_e != PV_ENTRY_NULL) {
+ pmap_t pmap;
+ vm_offset_t va;
+
+ pmap = pv_e->pmap;
+ va = pv_e->va;
+
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * Check for existing and valid pte
+ */
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_copy_on_write: pte from pv_list not in map");
+ if (!PDT_VALID(pte))
+ panic("pmap_copy_on_write: invalid pte");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_copy_on_write: pte doesn't point to page");
+
+ /*
+ * Flush TLBs of which cpus using pmap.
+ */
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ pv_e = pv_e->next;
+ }
+ CHECK_PV_LIST(phys, PFIDX_TO_PVH(PFIDX(phys)), "pmap_copy_on_write");
+
+ SPLX(spl);
+
+} /* pmap_copy_on_write */
+
+
+
+/*
+ * Routine: PMAP_PROTECT
+ *
+ * History:
+ * '90.7.16 Fuzzy
+ * '90.7.26 Fuzzy VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS
+ * '90.8.21 Fuzzy Debugging message add
+ *
+ * Function:
+ * Sets the physical protection on the specified range of this map
+ * as requested.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *	s		start address of range
+ *	e		end address of range
+ * prot desired protection attributes
+ *
+ * Calls:
+ * m88k_protection
+ * PMAP_LOCK, PMAP_UNLOCK
+ * CHECK_PAGE_ALIGN
+ * panic
+ * pmap_pte
+ * SDT_NEXT
+ * PDT_VALID
+ *
+ * This routine sequences through the pages of the specified range.
+ * For each, it calls pmap_pte to acquire a pointer to the page table
+ *	entry (PTE). If the PTE is invalid, or non-existent, nothing is done.
+ * Otherwise, the PTE's protection attributes are adjusted as specified.
+ */
+void pmap_protect(
+ pmap_t pmap,
+ vm_offset_t s,
+ vm_offset_t e,
+ vm_prot_t prot)
+{
+ pte_template_t maprot;
+ unsigned ap;
+ int spl, spl_sav;
+ register int i;
+ pt_entry_t *pte;
+ vm_offset_t va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
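+	/*
+	 * If write permission is still being granted there is nothing to
+	 * revoke; if even read access is being taken away, removing the
+	 * range entirely is equivalent to protecting it.
+	 */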
+ if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
+ return;
+ if ((prot & VM_PROT_READ) == 0) {
+ pmap_remove(pmap, s, e);
+ return;
+ }
+ if (s > e)
+		panic("pmap_protect: start greater than end address");
+
+ maprot.bits = m88k_protection(pmap, prot);
+ ap = maprot.pte.prot;
+
+ PMAP_LOCK(pmap, spl);
+
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ CHECK_PAGE_ALIGN(s, "pmap_protect");
+
+ /*
+	 * Loop through the range in vm_page_size increments.
+ * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true ?!).
+ */
+ for (va = s; va <= e; va += PAGE_SIZE) {
+
+ pte = pmap_pte(pmap, va);
+
+ if (pte == PT_ENTRY_NULL) {
+
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
+#endif
+ continue;
+ }
+
+ if (!PDT_VALID(pte)) {
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
+#endif
+ continue; /* no page mapping */
+ }
+
+ tva = va;
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = ap;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(0, tva, kflush);
+ splx(spl_sav);
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+
+} /* pmap_protect() */
+
+
+
+/*
+ * Routine: PMAP_EXPAND
+ *
+ * History:
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.16 Fuzzy
+ * Extern/Global no --> user_pt_map, kernel_pmap
+ * added Debug message
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ * Function:
+ * Expands a pmap to be able to map the specified virtual address.
+ * New kernel virtual memory is allocated for a page table
+ *
+ * Must be called with the pmap system and the pmap unlocked, since
+ * these must be unlocked to use vm_allocate or vm_deallocate (via
+ *	kmem_alloc, zalloc). Thus it must be called in an unlock/lock loop
+ *	that checks whether the map has been expanded enough. (We won't loop
+ *	forever, since page tables aren't shrunk.)
+ *
+ * Parameters:
+ *	map	pointer to map structure
+ * v VA indicating which tables are needed
+ *
+ * Extern/Global:
+ * user_pt_map
+ * kernel_pmap
+ *
+ * Calls:
+ * pmap_pte
+ * kmem_alloc
+ * kmem_free
+ * zalloc
+ * zfree
+ * pmap_extract
+ *
+ * Special Assumptions
+ * no pmap locks held
+ *
+ * 1: This routine immediately allocates space for a page table.
+ *
+ * 2: The page table entries (PTEs) are initialized (set invalid), and
+ * the corresponding segment table entry is set to point to the new
+ * page table.
+ *
+ *
+ * if (kernel_pmap)
+ * pmap_expand_kmap()
+ * ptva = kmem_alloc(user_pt_map)
+ *
+ */
+static void pmap_expand(pmap_t map, vm_offset_t v)
+{
+ int i,
+ spl;
+ vm_offset_t pdt_vaddr,
+ pdt_paddr;
+
+ sdt_entry_t *sdt;
+ pt_entry_t *pte;
+ vm_offset_t pmap_extract();
+
+ if (map == PMAP_NULL) {
+ panic("pmap_expand: pmap is NULL");
+ }
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
+ printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
+#endif
+
+ CHECK_PAGE_ALIGN (v, "pmap_expand");
+
+ /*
+ * Handle kernel pmap in pmap_expand_kmap().
+ */
+ if (map == kernel_pmap) {
+ PMAP_LOCK(map, spl);
+ if (pmap_expand_kmap(v, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
+ panic ("pmap_expand: Cannot allocate kernel pte table");
+ PMAP_UNLOCK(map, spl);
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_EXP | CD_FULL)) == (CD_EXP | CD_FULL))
+ printf("(pmap_expand :%x) kernel_pmap\n", curproc);
+#endif
+ return;
+ }
+
+ /* XXX */
+#ifdef MACH_KERNEL
+ if (kmem_alloc_wired(kernel_map, &pdt_vaddr, PAGE_SIZE) != KERN_SUCCESS)
+		panic("pmap_expand: kmem_alloc failure");
+ pmap_zero_page(pmap_extract(kernel_pmap, pdt_vaddr));
+#else
+ pdt_vaddr = kmem_alloc (kernel_map, PAGE_SIZE);
+#endif
+
+ pdt_paddr = pmap_extract(kernel_pmap, pdt_vaddr);
+
+ /*
+ * the page for page tables should be CACHE DISABLED
+ */
+ pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
+
+ PMAP_LOCK(map, spl);
+
+ if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) {
+ /*
+ * Someone else caused us to expand
+ * during our vm_allocate.
+ */
+ PMAP_UNLOCK(map, spl);
+ /* XXX */
+ kmem_free (kernel_map, pdt_vaddr, PAGE_SIZE);
+#ifdef DEBUG
+ if (pmap_con_dbg & CD_EXP)
+			printf("(pmap_expand :%x) table has already been allocated\n", curproc);
+#endif
+ return;
+ }
+
+ /*
+ * Apply a mask to V to obtain the vaddr of the beginning of
+	 * its containing page 'table group', i.e. the group of
+	 * page tables that fit within a single VM page.
+	 * Using that, obtain the segment table pointer that references the
+	 * first page table in the group, and initialize all the
+	 * segment table descriptors for the page 'table group'.
+ */
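+	/*
+	 * For illustration only (assumed values, not definitions from this
+	 * file): with PG_BITS == 12 (4K pages), PDT_BITS == 10 (1024 PTEs per
+	 * page table) and LOG2_PDT_TABLE_GROUP_SIZE == 0, the mask below
+	 * clears the low 22 bits, i.e. rounds v down to the 4MB of address
+	 * space covered by one page table group.
+	 */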
+ v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
+
+ sdt = SDTENT(map,v);
+
+ /*
+ * Init each of the segment entries to point the freshly allocated
+ * page tables.
+ */
+
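+	/*
+	 * Each iteration appears to fill a pair of entries: the first
+	 * SDT_ENTRIES of the segment table hold the physical page table
+	 * addresses used by the CMMU, while the shadow copy at
+	 * sdt + SDT_ENTRIES holds the matching kernel virtual addresses
+	 * for the pmap code's own use.
+	 */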
+ for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
+ ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
+ sdt++;
+ pdt_paddr += PDT_SIZE;
+ pdt_vaddr += PDT_SIZE;
+ }
+
+ PMAP_UNLOCK(map, spl);
+
+} /* pmap_expand() */
+
+
+
+/*
+ * Routine: PMAP_ENTER
+ *
+ *
+ * Update:
+ * July 13,90 - JUemura
+ * initial porting
+ * *****TO CHECK*****
+ * locks removed since we don't have to allocate
+ * level 2 tables anymore. locks needed?
+ * '90.7.26 Fuzzy VM_MIN_KERNEL_ADDRESS -> VM_MAX_USER_ADDRESS
+ * '90.8.17 Fuzzy Debug message added(PV no mapped at VA)
+ * '90.8.31 Sugai Remove redundant message output
+ *
+ * Function:
+ * Insert the given physical page (p) at the specified virtual
+ *	address (v) in the target physical map with the protection requested.
+ * If specified, the page will be wired down, meaning that the
+ * related pte can not be reclaimed.
+ *
+ *	N.B.: This is the only routine which MAY NOT lazy-evaluate or lose
+ * information. That is, this routine must actually insert this page
+ * into the given map NOW.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * va VA of page to be mapped
+ * pa PA of page to be mapped
+ * prot protection attributes for page
+ * wired wired attribute for page
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ * pmap_modify_list
+ *
+ * Calls:
+ * m88k_protection
+ * pmap_pte
+ * pmap_expand
+ * pmap_remove_range
+ * zfree
+ *
+ * This routine starts off by calling pmap_pte to obtain a (virtual)
+ * pointer to the page table entry corresponding to given virtual
+ * address. If the page table itself does not exist, pmap_expand is
+ * called to allocate it.
+ *
+ * If the page table entry (PTE) already maps the given physical page,
+ * all that is needed is to set the protection and wired attributes as
+ * given. TLB entries are flushed and pmap_enter returns.
+ *
+ * If the page table entry (PTE) maps a different physical page than
+ *	that given, the old mapping is removed by a call to pmap_remove_range,
+ *	and execution of pmap_enter continues.
+ *
+ * To map the new physical page, the routine first inserts a new
+ * entry in the PV list exhibiting the given pmap and virtual address.
+ * It then inserts the physical page address, protection attributes, and
+ * wired attributes into the page table entry (PTE).
+ *
+ *
+ * get machine-dependent prot code
+ * get the pte for this page
+ * if necessary pmap expand(pmap,v)
+ * if (changing wired attribute or protection) {
+ * flush entry from TLB
+ * update template
+ * for (ptes per vm page)
+ * stuff pte
+ * } else if (mapped at wrong addr)
+ * flush entry from TLB
+ * pmap_remove_range
+ * } else {
+ * enter mapping in pv_list
+ * setup template and stuff ptes
+ * }
+ *
+ */
+void pmap_enter(
+ register pmap_t pmap,
+ vm_offset_t va,
+ vm_offset_t pa,
+ vm_prot_t prot,
+ boolean_t wired)
+{
+ int ap;
+ int spl, spl_sav;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t old_pa;
+ pte_template_t template;
+ register int i;
+ int pfi;
+ pv_entry_t pvl;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_enter: pmap is NULL");
+ }
+
+	CHECK_PAGE_ALIGN (va, "pmap_enter - VA");
+	CHECK_PAGE_ALIGN (pa, "pmap_enter - PA");
+
+ /*
+	 *	Range check no longer used, since we use the whole address space
+ */
+
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (pmap == kernel_pmap)
+ printf ("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
+ else
+ printf ("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
+ }
+#endif
+
+ ap = m88k_protection (pmap, prot);
+
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * zalloc may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+ * and allocate one. Then will retry, throwing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
+ Retry:
+
+ PMAP_LOCK(pmap, spl);
+
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough M88K
+ * pages to map one VM page.
+ */
+ while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ /*
+ * Must unlock to expand the pmap.
+ */
+ PMAP_UNLOCK(pmap, spl);
+ pmap_expand(pmap, va);
+ PMAP_LOCK(pmap, spl);
+ }
+
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = M88K_PTOB(pte->pfn);
+ if (old_pa == pa) {
+
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * May be changing its wired attributes or protection
+ */
+
+ if (wired && !pte->wired)
+ pmap->stats.wired_count++;
+ else if (!wired && pte->wired)
+ pmap->stats.wired_count--;
+
+/*#ifdef luna88k*/ /* KLUDGE (or is it?) */ /* is it for dealing with IO mem? */
+ if (pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+/*#endif*/
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL /*XXX*/;
+ if (wired)
+ template.pte.wired = 1;
+
+ /*
+		 * If the mapping is unchanged, we have nothing to do.
+ */
+ if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
+ || (pte->prot != template.pte.prot)) {
+
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+				 * Invalidate the pte temporarily to avoid having the
+				 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ template.pte.modified = opte.pte.modified;
+ *pte++ = template.pte;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ template.bits += M88K_PGBYTES;
+ va += M88K_PGBYTES;
+ }
+ }
+
+ } else { /* if ( pa == old_pa) */
+
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (old_pa != (vm_offset_t) 0) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
+ pmap_remove_range(pmap, va, va + PAGE_SIZE);
+ }
+
+ if (PMAP_MANAGED(pa)) {
+
+ /*
+			 * Enter the mapping in the PV list for this
+ * physical page.
+ */
+ pfi = PFIDX(pa);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (pa, pvl, "pmap_enter before");
+
+ if (pvl->pmap == PMAP_NULL) {
+
+ /*
+ * No mappings yet
+ */
+ pvl->va = va;
+ pvl->pmap = pmap;
+ pvl->next = PV_ENTRY_NULL;
+
+ } else {
+#ifdef DEBUG
+ /*
+ * check that this mapping is not already there
+ */
+ {
+ pv_entry_t e = pvl;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == va)
+ panic ("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
+#endif
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ PMAP_UNLOCK(pmap, spl);
+ pv_e = (pv_entry_t) malloc(sizeof *pv_e, M_VMPVENT,
+ M_NOWAIT);
+ goto Retry;
+ }
+ pv_e->va = va;
+ pv_e->pmap = pmap;
+ pv_e->next = pvl->next;
+ pvl->next = pv_e;
+ /*
+				 * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ }
+
+ /*
+ * And count the mapping.
+ */
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+/*#ifdef luna88k */ /* KLUDGE (or is it?) */
+ if (pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+ /* SHOULDN't THE NEXT THING HAVE CACHE_GLOBAL? */
+/*#endif */
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa);
+
+ if (wired)
+ template.pte.wired = 1;
+
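+		/*
+		 * DO_PTES presumably stores template.bits into each of the
+		 * ptes_per_vm_page hardware PTEs backing this VM page,
+		 * advancing the page frame in the template as it goes.
+		 */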
+ DO_PTES (pte, template.bits);
+
+ } /* if ( pa == old_pa ) ... else */
+
+ PMAP_UNLOCK(pmap, spl);
+
+ if (pv_e != PV_ENTRY_NULL)
+ free((caddr_t) pv_e, M_VMPVENT);
+
+} /* pmap_enter */
+
+
+
+/*
+ * Routine: pmap_change_wiring
+ *
+ * Author: Fuzzy
+ *
+ * Function: Change the wiring attributes for a map/virtual-address
+ *			pair.
+ * Parameters:
+ * pmap pointer to pmap structure
+ * v virtual address of page to be wired/unwired
+ * wired flag indicating new wired state
+ *
+ * Extern/Global:
+ *	ptes_per_vm_page
+ *
+ * Calls:
+ * PMAP_LOCK, PMAP_UNLOCK
+ * pmap_pte
+ * panic
+ *
+ * Special Assumptions:
+ * The mapping must already exist in the pmap.
+ */
+void pmap_change_wiring(
+ pmap_t map,
+ vm_offset_t v,
+ boolean_t wired)
+{
+ register pt_entry_t *pte;
+ register int i;
+ int spl;
+
+ PMAP_LOCK(map, spl);
+
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic ("pmap_change_wiring: pte missing");
+
+ if (wired && !pte->wired)
+ /*
+ * wiring mapping
+ */
+ map->stats.wired_count++;
+
+ else if (!wired && pte->wired)
+ /*
+ * unwired mapping
+ */
+ map->stats.wired_count--;
+
+ for (i = ptes_per_vm_page; i>0; i--)
+ (pte++)->wired = wired;
+
+ PMAP_UNLOCK(map, spl);
+
+} /* pmap_change_wiring() */
+
+
+
+/*
+ * Routine: PMAP_EXTRACT
+ *
+ * Author: Fuzzy
+ *
+ * Function:
+ *	Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * va virtual address
+ *
+ * Calls:
+ * PMAP_LOCK, PMAP_UNLOCK
+ * pmap_pte
+ *
+ *
+ * This routine checks the BATC mappings first. If the BATC has been used and
+ * the specified pmap is kernel_pmap, batc_entry is scanned to find
+ * the mapping.
+ * Then the routine calls pmap_pte to get a (virtual) pointer to
+ * the page table entry (PTE) associated with the given virtual
+ * address. If the page table does not exist, or if the PTE is not valid,
+ * then 0 address is returned. Otherwise, the physical page address from
+ * the PTE is returned.
+ */
+vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ register int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_extract: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
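+	/*
+	 * The BATC (block address translation cache) maps large fixed-size
+	 * blocks; on a hit the physical address is rebuilt from the entry's
+	 * physical block number plus the offset within the block.
+	 */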
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
+ return(pa);
+ }
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
+
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+
+ PMAP_UNLOCK(pmap, spl);
+
+#if 0
+ printf("pmap_extract ret %x\n", pa);
+#endif /* 0 */
+ return(pa);
+
+} /* pmap_extract() */
+
+/*
+ a version for the kernel debugger
+*/
+
+vm_offset_t pmap_extract_unlocked(pmap_t pmap, vm_offset_t va)
+{
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ register int i;
+
+ if (pmap == PMAP_NULL)
+		panic("pmap_extract_unlocked: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
+ return(pa);
+ }
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
+
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+
+ return(pa);
+
+} /* pmap_extract_unlocked() */
+
+
+/*
+ * Routine: PMAP_COPY
+ *
+ * History:
+ * '90.7.16 Fuzzy
+ *
+ * Function:
+ *	Copy the range specified by src_addr/len from the source map
+ * to the range dst_addr/len in the destination map. This routine
+ * is only advisory and need not do anything.
+ *
+ * Parameters:
+ * dst_pmap pointer to destination pmap structure
+ * src_pmap pointer to source pmap structure
+ *	dst_addr	VA in destination map
+ * len length of address space being copied
+ * src_addr VA in source map
+ *
+ * At this time, the 88200 pmap implementation does nothing in this
+ * function. Translation tables in the destination map will be allocated
+ * at VM fault time.
+ */
+void pmap_copy(
+ pmap_t dst_pmap,
+ pmap_t src_pmap,
+ vm_offset_t dst_addr,
+ vm_size_t len,
+ vm_offset_t src_addr)
+{
+#ifdef lint
+ dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
+#endif
+
+
+}/* pmap_copy() */
+
+
+/*
+ * Routine: PMAP_UPDATE
+ *
+ * History:
+ * '90.7.16 Fuzzy
+ * '90.8.27 Fuzzy Debugging message add
+ *
+ * Function:
+ * Require that all active physical maps contain no incorrect entries
+ * NOW. [This update includes forcing updates of any address map
+ *	caching]
+ *	Generally used to ensure that a thread about to run will see a
+ * semantically correct world.
+ *
+ * Parameters:
+ * none
+ *
+ * Call:
+ * cmmuflush
+ *
+ * The 88200 pmap implementation does not defer any operations.
+ * Therefore, the translation table trees are always consistent while the
+ * pmap lock is not held. Therefore, there is really no work to do in
+ * this function other than to flush the TLB.
+ */
+void pmap_update(void)
+{
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_UPD | CD_FULL)) == (CD_UPD | CD_FULL))
+ printf("(pmap_update :%x) Called \n", curproc);
+#endif
+
+}/* pmap_update() */
+
+
+
+/*
+ * Routine: PMAP_COLLECT
+ *
+ * History:
+ * '90. 7.16 Fuzzy
+ * '90. 7.26 Fuzzy VM_MIN_ADDRESS --> VM_MIN_USER_ADDRESS
+ * VM_MIN_KERNEL_ADDRESS --> VM_MAX_USER_ADDRESS
+ * '90. 7.27 Fuzzy Calls: add Macro
+ * '90. 8.22 Fuzzy add Debugging message
+ * '90. 9.11 Fuzzy sdt_va: vm_offset_t --> unsigned long
+ *
+ * Function:
+ *	Garbage collects the physical map system for pages which are
+ *	no longer used. There may well be pages which are not
+ * referenced, but others may be collected as well.
+ * Called by the pageout daemon when pages are scarce.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Calls:
+ * CHECK_PMAP_CONSISTENCY
+ * panic
+ * PMAP_LOCK, PMAP_UNLOCK
+ * PT_FREE
+ * pmap_pte
+ * pmap_remove_range
+ *
+ * The intent of this routine is to release memory pages being used
+ *	by translation tables. They can be released only if they contain no
+ * valid mappings, and their parent table entry has been invalidated.
+ *
+ *	The routine sequences through the entire user address space,
+ *	inspecting page-sized groups of page tables for wired entries. If
+ *	a full page of tables has no wired entries, any otherwise valid
+ * entries are invalidated (via pmap_remove_range). Then, the segment
+ * table entries corresponding to this group of page tables are
+ * invalidated. Finally, PT_FREE is called to return the page to the
+ * system.
+ *
+ * If all entries in a segment table are invalidated, it too can
+ * be returned to the system.
+ *
+ * [Note: depending upon compilation options, tables may be in zones
+ * or allocated through kmem_alloc. In the former case, the
+ * module deals with a single table at a time.]
+ */
+void pmap_collect(pmap_t pmap)
+{
+
+ vm_offset_t sdt_va; /* outer loop index */
+ vm_offset_t sdt_vt; /* end of segment */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ sdt_entry_t *sdtp; /* ptr to index into segment table */
+ sdt_entry_t *sdt; /* ptr to index into segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
+ pt_entry_t *gdtp; /* ptr to index into a page table */
+ boolean_t found_gdt_wired; /* flag indicating a wired page exists in */
+ /* a page table's address range */
+ int spl;
+ unsigned int i,j;
+
+
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_collect: pmap is NULL");
+ }
+ if (pmap == kernel_pmap) {
+#ifdef MACH_KERNEL
+ return;
+#else
+ panic("pmap_collect attempted on kernel pmap");
+#endif
+ }
+
+ CHECK_PMAP_CONSISTENCY ("pmap_collect");
+
+#if DBG
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ printf ("(pmap_collect :%x) pmap %x\n", curproc, pmap);
+#endif
+
+ PMAP_LOCK(pmap, spl);
+
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+ sdtp = sdttbl;
+
+ /*
+ This contortion is here instead of the natural loop
+ because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
+ */
+
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
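+	/*
+	 * The 1024 bound above appears to be the total number of page table
+	 * groups needed to cover the 4GB address space; j is bumped so the
+	 * loop includes the group containing VM_MAX_USER_ADDRESS unless that
+	 * would run past the end.
+	 */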
+
+ /* Segment table loop */
+ for ( ; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE)
+ {
+ sdt_va = VM_MIN_USER_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
+
+ gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va);
+
+ if (gdttbl == PT_ENTRY_NULL)
+ continue; /* no maps in this range */
+
+ gdttblend = gdttbl + (PDT_ENTRIES * PDT_TABLE_GROUP_SIZE);
+
+ /* scan page maps for wired pages */
+ found_gdt_wired = FALSE;
+ for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
+ if (gdtp->wired) {
+ found_gdt_wired = TRUE;
+ break;
+ }
+ }
+
+ if (found_gdt_wired)
+ continue; /* can't free this range */
+
+ /* figure out end of range. Watch for wraparound */
+
+ sdt_vt = sdt_va <= VM_MAX_USER_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
+ sdt_va+PDT_TABLE_GROUP_VA_SPACE :
+ VM_MAX_USER_ADDRESS;
+
+ /* invalidate all maps in this range */
+ pmap_remove_range (pmap, (vm_offset_t)sdt_va, (vm_offset_t)sdt_vt);
+
+ /*
+		 * we can safely deallocate the page map(s)
+ */
+ for (sdt = sdtp; sdt < (sdtp+PDT_TABLE_GROUP_SIZE); sdt++) {
+ ((sdt_entry_template_t *) sdt) -> bits = 0;
+ ((sdt_entry_template_t *) sdt+SDT_ENTRIES) -> bits = 0;
+ }
+
+ /*
+ * we have to unlock before freeing the table, since PT_FREE
+ * calls kmem_free or zfree, which will invoke another pmap routine
+ */
+ PMAP_UNLOCK(pmap, spl);
+ PT_FREE(gdttbl);
+ PMAP_LOCK(pmap, spl);
+
+ } /* Segment table Loop */
+
+ PMAP_UNLOCK(pmap, spl);
+
+#if DBG
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ printf ("(pmap_collect :%x) done \n", curproc);
+#endif
+
+ CHECK_PMAP_CONSISTENCY("pmap_collect");
+} /* pmap_collect() */
+
+
+
+/*
+ * Routine: PMAP_ACTIVATE
+ *
+ * Author: Fuzzy
+ *
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ * In a mono-processor implementation the my_cpu
+ * argument is ignored, and the PMAP_ACTIVATE macro
+ * simply sets the MMU root pointer element of the PCB
+ * to the physical address of the segment descriptor table.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * pcbp pointer to current pcb
+ * cpu CPU number
+ */
+void pmap_activate(pmap_t pmap, pcb_t pcb)
+{
+#ifdef lint
+	pmap++; pcb++;
+#endif
+ PMAP_ACTIVATE(pmap, pcb, 0);
+} /* pmap_activate() */
+
+
+
+/*
+ * Routine: PMAP_DEACTIVATE
+ *
+ * Author: Fuzzy
+ *
+ * Function:
+ * Unbinds the given physical map from the given processor,
+ *	i.e. the pmap is no longer in use on the processor.
+ * In a mono-processor the PMAP_DEACTIVATE macro is null.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * pcb pointer to pcb
+ */
+void pmap_deactivate(pmap_t pmap, pcb_t pcb)
+{
+#ifdef lint
+	pmap++; pcb++;
+#endif
+ PMAP_DEACTIVATE(pmap, pcb, 0);
+} /* pmap_deactivate() */
+
+
+
+/*
+ * Routine: PMAP_KERNEL
+ *
+ * History:
+ * '90. 7.16 Fuzzy unchanged
+ *
+ * Function:
+ *	Returns a pointer to the kernel pmap.
+ */
+pmap_t pmap_kernel(void)
+{
+ return (kernel_pmap);
+}/* pmap_kernel() */
+
+
+/*
+ * Routine: PMAP_COPY_PAGE
+ *
+ * History:
+ * '90.7.16 Fuzzy M68K --> M88K
+ * DT_PAGE --> DT_VALID
+ *
+ * Function:
+ * Copies the specified (machine independent) pages.
+ *
+ * Parameters:
+ * src PA of source page
+ * dst PA of destination page
+ *
+ * Extern/Global:
+ * phys_map_vaddr1
+ * phys_map_vaddr2
+ *
+ * Calls:
+ *	m88k_protection
+ * M88K_TRUNC_PAGE
+ * cmmu_sflush_page
+ * DO_PTES
+ * bcopy
+ *
+ * Special Assumptions:
+ *	no locking required
+ *
+ * This routine maps the physical pages at the 'phys_map' virtual
+ * addresses set up in pmap_bootstrap. It flushes the TLB to make the
+ * new mappings effective, and performs the copy.
+ */
+void pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+ vm_offset_t dstva, srcva;
+ unsigned int spl_sav;
+ int i;
+ int aprot;
+ pte_template_t template;
+ pt_entry_t *dstpte, *srcpte;
+ int my_cpu = cpu_number();
+
+ /*
+ * Map source physical address.
+ */
+ aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE));
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
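+	/*
+	 * phys_map_vaddr1/2 serve as per-cpu mapping windows: each cpu uses
+	 * its own page-sized slot, so copies running on different cpus do
+	 * not step on each other's temporary mappings.
+	 */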
+
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ dstpte = pmap_pte(kernel_pmap, dstva);
+
+ for (i=0; i < ptes_per_vm_page; i++, src += M88K_PGBYTES, dst += M88K_PGBYTES)
+ {
+ template.bits = M88K_TRUNC_PAGE(src) | aprot | DT_VALID | CACHE_GLOBAL;
+
+ /* do we need to write back dirty bits */
+ spl_sav = splblock();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ /*
+ * Map destination physical address.
+ */
+ template.bits = M88K_TRUNC_PAGE(dst) | aprot | CACHE_GLOBAL | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+ splx(spl_sav);
+
+ bcopy((void*)srcva, (void*)dstva, M88K_PGBYTES);
+ /* flush source, dest out of cache? */
+ cmmu_flush_remote_data_cache(my_cpu, src, M88K_PGBYTES);
+ cmmu_flush_remote_data_cache(my_cpu, dst, M88K_PGBYTES);
+ }
+
+} /* pmap_copy_page() */
+
+
+/*
+ * copy_to_phys
+ *
+ * Author: Fuzzy
+ * History:
+ *	10/17/90	taken out of SUN3's pmap.c, and modified for m88k
+ *
+ * Copy virtual memory to physical memory by mapping the physical
+ * memory into virtual memory and then doing a virtual to virtual
+ * copy with bcopy.
+ *
+ * Parameters:
+ * srcva VA of source page
+ * dstpa PA of destination page
+ * bytecount copy byte size
+ *
+ * Extern/Global:
+ * phys_map_vaddr2
+ *
+ * Calls:
+ *	m88k_protection
+ * M88K_TRUNC_PAGE
+ * cmmu_sflush_page
+ * DO_PTES
+ * bcopy
+ *
+ */
+void copy_to_phys(
+ register vm_offset_t srcva,
+ register vm_offset_t dstpa,
+ register int bytecount)
+{
+ register vm_offset_t dstva;
+ register pt_entry_t *dstpte;
+ register int copy_size, offset;
+ int aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ dstpte = pmap_pte(kernel_pmap, dstva);
+ copy_size = M88K_PGBYTES;
+ offset = dstpa - M88K_TRUNC_PAGE(dstpa);
+ dstpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0){
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+
+ /*
+		 * Map destination physical address.
+ */
+
+ for (i = 0; i < ptes_per_vm_page; i++)
+ {
+ template.bits = M88K_TRUNC_PAGE(dstpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+
+ dstva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcva += copy_size;
+ dstva += copy_size;
+ dstpa += M88K_PGBYTES;
+ bytecount -= copy_size;
+ offset = 0;
+ }
+ }
+}
+
+/*
+ * copy_from_phys
+ *
+ * Author: David Rudolph
+ * History:
+ *
+ * Copy physical memory to virtual memory by mapping the physical
+ * memory into virtual memory and then doing a virtual to virtual
+ * copy with bcopy.
+ *
+ * Parameters:
+ * srcpa PA of source page
+ * dstva VA of destination page
+ * bytecount copy byte size
+ *
+ * Extern/Global:
+ * phys_map_vaddr2
+ *
+ * Calls:
+ *	m88k_protection
+ * M88K_TRUNC_PAGE
+ * cmmu_sflush_page
+ * DO_PTES
+ * bcopy
+ *
+ */
+void copy_from_phys(
+ register vm_offset_t srcpa,
+ register vm_offset_t dstva,
+ register int bytecount)
+{
+ register vm_offset_t srcva;
+ register pt_entry_t *srcpte;
+ register int copy_size, offset;
+ int aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ copy_size = M88K_PGBYTES;
+ offset = srcpa - M88K_TRUNC_PAGE(srcpa);
+ srcpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0){
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+
+ /*
+		 * Map source physical address.
+ */
+
+ for (i=0; i < ptes_per_vm_page; i++)
+ {
+ template.bits = M88K_TRUNC_PAGE(srcpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ srcva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcpa += M88K_PGBYTES;
+ dstva += copy_size;
+ srcva += copy_size;
+ bytecount -= copy_size;
+ offset = 0;
+ /* cache flush source? */
+ }
+ }
+}
+
+/*
+ * Routine: PMAP_PAGEABLE
+ *
+ * History:
+ * '90.7.16 Fuzzy
+ *
+ * Function:
+ * Make the specified pages (by pmap, offset) pageable (or not) as
+ * requested. A page which is not pageable may not take a fault;
+ * therefore, its page table entry must remain valid for the duration.
+ *	This routine is merely advisory; pmap_enter will specify that
+ * these pages are to be wired down (or not) as appropriate.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * start virtual address of start of range
+ * end virtual address of end of range
+ * pageable flag indicating whether range is to be pageable.
+ *
+ *	This routine currently does nothing in the 88100 implementation.
+ */
+void pmap_pageable(
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end,
+ boolean_t pageable)
+{
+#ifdef lint
+ pmap++; start++; end++; pageable++;
+#endif
+} /* pmap_pageable() */
+
+
+
+/*
+ * Routine: PMAP_REDZONE
+ *
+ * History:
+ * '90.7.16 Fuzzy m68k --> m88K
+ * pte protection & supervisor bit
+ *
+ * Function:
+ * Give the kernel read-only access to the specified address. This
+ * is used to detect stack overflows. It is assumed that the address
+ * specified is the last possible kernel stack address. Therefore, we
+ * round up to the nearest machine dependent page.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * addr virtual address of page to which access should
+ * be restricted to read-only
+ *
+ * Calls:
+ * M88K_ROUND_PAGE
+ * PMAP_LOCK
+ * pmap_pte
+ * PDT_VALID
+ *
+ * This function calls pmap_pte to obtain a pointer to the page
+ * table entry associated with the given virtual address. If there is a
+ * page entry, and it is valid, its write protect bit will be set.
+ */
+void pmap_redzone(pmap_t pmap, vm_offset_t va)
+{
+ pt_entry_t *pte;
+ int spl, spl_sav;
+ register int i;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ va = M88K_ROUND_PAGE(va);
+ PMAP_LOCK(pmap, spl);
+
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte))
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va +=M88K_PGBYTES;
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+
+} /* pmap_redzone() */
+
+
+
+/*
+ * Routine: PMAP_CLEAR_MODIFY
+ *
+ * Author: Fuzzy
+ *
+ * History:
+ * '90.7.24 Fuzzy
+ * '90.8.21 Fuzzy Debugging message add
+ *
+ * Function:
+ * Clear the modify bits on the specified physical page.
+ *
+ * Parameters:
+ * phys physical address of page
+ *
+ * Extern/Global:
+ * pv_head_table, pv_lists
+ * pmap_modify_list
+ *
+ * Calls:
+ * PMAP_MANAGED
+ * SPLVM, SPLX
+ * PFIDX
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * simple_lock, simple_unlock
+ * pmap_pte
+ * panic
+ *
+ * For managed pages, the modify_list entry corresponding to the
+ * page's frame index will be zeroed. The PV list will be traversed.
+ *	For each pmap/va the hardware 'modified' bit in the page descriptor table
+ *	entry is inspected - and turned off if necessary. If any of the
+ *	inspected bits were found on, a TLB flush will be performed.
+ */
+void pmap_clear_modify(vm_offset_t phys)
+{
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ register vm_offset_t va;
+ register int i;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+#ifdef DBG
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
+#endif
+ return;
+ }
+
+ SPLVM(spl);
+
+clear_modify_Retry:
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (phys, pvl, "pmap_clear_modify");
+
+	/* update corresponding pmap_modify_list element */
+ pmap_modify_list[pfi] = 0;
+
+ if (pvl->pmap == PMAP_NULL) {
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
+#endif
+
+ SPLX(spl);
+ return;
+ }
+
+	/* for each listed pmap, turn off the page modified bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ goto clear_modify_Retry;
+ }
+
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_modify: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ /* clear modified bit */
+ opte.pte.modified = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ SPLX(spl);
+
+} /* pmap_clear_modify() */
+
+
+
+/*
+ * Routine: PMAP_IS_MODIFIED
+ *
+ * History:
+ * '90. 7.16 Fuzzy
+ * '90. 7.19 Fuzzy comments 'Calls'
+ * '90. 8.20 Fuzzy Added debugging message
+ * '90. 8.20 Fuzzy when panic, print virt_address
+ *
+ * Function:
+ * Return whether or not the specified physical page is modified
+ * by any physical maps. That is, whether the hardware has
+ * stored data into the page.
+ *
+ * Parameters:
+ *	phys	physical address of a page
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ * pmap_modify_list
+ *
+ * Calls:
+ * simple_lock, simple_unlock
+ * SPLVM, SPLX
+ * PMAP_MANAGED
+ * PFIDX
+ * PFIDX_TO_PVH
+ * pmap_pte
+ *
+ * If the physical address specified is not a managed page, this
+ * routine simply returns TRUE (looks like it is returning FALSE XXX).
+ *
+ * If the entry in the modify list, corresponding to the given page,
+ *	is TRUE, this routine returns TRUE. (This means at least one mapping
+ *	has been invalidated where the MMU had set the modified bit in the
+ *	page descriptor table entry (PTE).)
+ *
+ * Otherwise, this routine walks the PV list corresponding to the
+ *	given page. For each pmap/va pair, the page descriptor table entry is
+ * examined. If a modified bit is found on, the function returns TRUE
+ * immediately (doesn't need to walk remainder of list).
+ */
+boolean_t pmap_is_modified(vm_offset_t phys)
+{
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+ boolean_t modified_flag;
+
+ if (!PMAP_MANAGED(phys)) {
+#ifdef DBG
+ if (pmap_con_dbg & CD_IMOD)
+ printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
+#endif
+ return(FALSE);
+ }
+
+ SPLVM(spl);
+
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
+is_mod_Retry:
+
+ if ((boolean_t) pmap_modify_list[pfi]) {
+ /* we've already cached a modify flag for this page,
+ no use looking further... */
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) already cached a modify flag for this page\n", curproc);
+#endif
+ SPLX(spl);
+ return(TRUE);
+ }
+
+ if (pvl->pmap == PMAP_NULL) {
+ /* unmapped page - get info from page_modified array
+ maintained by pmap_remove_range/ pmap_remove_all */
+ modified_flag = (boolean_t) pmap_modify_list[pfi];
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
+#endif
+ SPLX(spl);
+ return(modified_flag);
+ }
+
+ /* for each listed pmap, check modified bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto is_mod_Retry;
+ }
+
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL) {
+ printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
+ panic("pmap_is_modified: bad pv list entry");
+ }
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->modified) {
+ simple_unlock(&pvep->pmap->lock);
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
+ printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
+#endif
+ SPLX(spl);
+ return(TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ SPLX(spl);
+ return(FALSE);
+
+} /* pmap_is_modified() */
+
+
+
+/*
+ * Routine:	PMAP_CLEAR_REFERENCE
+ *
+ * History:
+ * '90. 7.16 Fuzzy unchanged
+ * '90. 7.19 Fuzzy comment "Calls:' add
+ * '90. 8.21 Fuzzy Debugging message add
+ * '93. 3. 1 jfriedl Added call to LOCK_PVH
+ *
+ * Function:
+ * Clear the reference bits on the specified physical page.
+ *
+ * Parameters:
+ * phys physical address of page
+ *
+ * Calls:
+ * PMAP_MANAGED
+ * SPLVM, SPLX
+ * PFIDX
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * simple_lock
+ * pmap_pte
+ * panic
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ *
+ *	For managed pages, the corresponding PV list will be traversed.
+ *	For each pmap/va the hardware 'used' bit in the page table entry
+ *	is inspected - and turned off if necessary. If any of the inspected bits
+ *	were found on, a TLB flush will be performed.
+ */
+void pmap_clear_reference(vm_offset_t phys)
+{
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ register vm_offset_t va;
+ register int i;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+#ifdef DBG
+ if (pmap_con_dbg & CD_CREF) {
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
+ }
+#endif
+ return;
+ }
+
+ SPLVM(spl);
+
+clear_reference_Retry:
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
+
+
+ if (pvl->pmap == PMAP_NULL) {
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
+#endif
+ SPLX(spl);
+ return;
+ }
+
+	/* for each listed pmap, turn off the page referenced bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ goto clear_reference_Retry;
+ }
+
+ users = 0;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_reference: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ spl_sav = splblock();
+ opte.bits = invalidate_pte(pte);
+ /* clear reference bit */
+ opte.pte.pg_used = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ SPLX(spl);
+
+} /* pmap_clear_reference() */
+
+
+
+/*
+ * Routine: PMAP_IS_REFERENCED
+ *
+ * History:
+ * '90. 7.16 Fuzzy
+ * '90. 7.19 Fuzzy comment 'Calls:' add
+ *
+ * Function:
+ *	Return whether or not the specified physical page is referenced by
+ * any physical maps. That is, whether the hardware has touched the page.
+ *
+ * Parameters:
+ * phys physical address of a page
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ *
+ * Calls:
+ * PMAP_MANAGED
+ * SPLVM
+ * PFIDX
+ * PFIDX_TO_PVH
+ * CHECK_PV_LIST
+ * simple_lock
+ * pmap_pte
+ *
+ * If the physical address specified is not a managed page, this
+ * routine simply returns TRUE.
+ *
+ * Otherwise, this routine walks the PV list corresponding to the
+ *	given page. For each pmap/va pair, the page descriptor table entry is
+ * examined. If a used bit is found on, the function returns TRUE
+ * immediately (doesn't need to walk remainder of list).
+ */
+boolean_t pmap_is_referenced(vm_offset_t phys)
+{
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+
+ if (!PMAP_MANAGED(phys))
+ return(FALSE);
+
+ SPLVM(spl);
+
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
+
+is_ref_Retry:
+
+ if (pvl->pmap == PMAP_NULL) {
+ SPLX(spl);
+ return(FALSE);
+ }
+
+ /* for each listed pmap, check used bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto is_ref_Retry;
+ }
+
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL)
+ panic("pmap_is_referenced: bad pv list entry.");
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->pg_used) {
+ simple_unlock(&pvep->pmap->lock);
+ SPLX(spl);
+ return(TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ SPLX(spl);
+ return(FALSE);
+} /* pmap_is_referenced() */
+
+/*
+ * Routine: PMAP_VERIFY_FREE
+ *
+ * History:
+ *	'90. 7.17	Fuzzy	This routine was extracted from the VAX pmap.c.
+ *				It does not exist in m68k's pmap.c.
+ * vm_page_alloc calls this.
+ * Variables changed below,
+ * vm_first_phys --> pmap_phys_start
+ * vm_last_phys --> pmap_phys_end
+ *				Macros changed below,
+ * pa_index --> PFIDX
+ * pai_to_pvh --> PFI_TO_PVH
+ *
+ * Calls:
+ * SPLVM, SPLX
+ * PFIDX
+ * PFI_TO_PVH
+ *
+ * Global/Extern:
+ * pmap_initialized
+ * pmap_phys_start
+ * pmap_phys_end
+ * TRUE, FALSE
+ * PMAP_NULL
+ *
+ * This routine checks whether the given physical page is free of pmap
+ * mappings. It returns TRUE/FALSE.
+ */
+
+boolean_t pmap_verify_free(vm_offset_t phys)
+{
+ pv_entry_t pv_h;
+ int spl;
+ boolean_t result;
+
+ if (!pmap_initialized)
+ return(TRUE);
+
+ if (!PMAP_MANAGED(phys))
+ return(FALSE);
+
+ SPLVM(spl);
+
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+
+ result = (pv_h->pmap == PMAP_NULL);
+ SPLX(spl);
+
+ return(result);
+
+} /* pmap_verify_free */
+
+
+/*
+ * Routine: PMAP_VALID_PAGE
+ *
+ * History:
+ *	'90.7.18	Fuzzy	This function does not exist in the m68k pmap.
+ * vm_page_startup() routine calls this.
+ *
+ * The physical address space is dense... there are no holes.
+ * All addresses provided to vm_page_startup() are valid.
+ */
+boolean_t pmap_valid_page(vm_offset_t p)
+{
+#ifdef lint
+ p++;
+#endif
+ return(TRUE);
+} /* pmap_valid_page() */
+
+/*
+ * Routine: PMAP_PAGE_PROTECT
+ *
+ * History:
+ * '90.8.4 Fuzzy extract vax pmap.c
+ *
+ * Calls:
+ * pmap_copy_on_write
+ * pmap_remove_all
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
+{
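+	/*
+	 * Read-only style requests downgrade every mapping to read-only;
+	 * VM_PROT_ALL leaves the mappings alone; anything else (including
+	 * VM_PROT_NONE) removes the page from every pmap.
+	 */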
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_copy_on_write(phys);
+ break;
+ case VM_PROT_ALL:
+ break;
+ default:
+ pmap_remove_all(phys);
+ break;
+ }
+}
+
+#if 0
+/*
+ * Routine: PAGEMOVE
+ *
+ * History:
+ *
+ * 11/08/09 N.Sugai Initial version
+ *
+ * Function:
+ * Move pages from one kernel virtual address to another.
+ *
+ * Parameters:
+ * from kernel virtual address of source
+ *	to		kernel virtual address of destination
+ * size size in bytes
+ *
+ * Calls:
+ * PMAP_LOCK
+ * PMAP_UNLOCK
+ * LOCK_PVH
+ * UNLOCK_PVH
+ * CHECK_PV_LIST
+ * pmap_pte
+ * pmap_expand_kmap
+ * cmmu_sflush
+ *
+ * Special Assumptions:
+ * size must be a multiple of CLBYTES (?)
+ */
+void pagemove(vm_offset_t from, vm_offset_t to, int size)
+{
+ vm_offset_t pa;
+ pt_entry_t *srcpte, *dstpte;
+ int pfi;
+ pv_entry_t pvl;
+ int spl;
+ register int i;
+ register unsigned users;
+ register pte_template_t opte;
+
+ PMAP_LOCK(kernel_pmap, spl);
+
+ users = 0;
+
+ while (size > 0) {
+
+ /*
+ * check if the source addr is mapped
+ */
+ if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) {
+ printf("pagemove: source vaddr 0x%x\n", from);
+ panic("pagemove: Source addr not mapped");
+ }
+
+		/*
+		 * check that the destination addr is mapped; if there is no
+		 * page table for it yet, allocate one now.
+		 */
+ if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL)
+ if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE))
+ == PT_ENTRY_NULL)
+				panic("pagemove: Cannot allocate destination pte");
+		/*
+		 * the destination pte must not already be valid.
+		 */
+ if (dstpte->dtype == DT_VALID) {
+			printf("pagemove: destination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte));
+			panic("pagemove: Destination pte already valid");
+ }
+
+#ifdef DBG
+ if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
+ printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
+ if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
+ printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
+
+#endif /* DBG */
+
+ /*
+ * Update pv_list
+ */
+ pa = M88K_PTOB(srcpte->pfn);
+ if (PMAP_MANAGED(pa)) {
+ pfi = PFIDX(pa);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(pa, pvl, "pagemove");
+ pvl->va = (vm_offset_t)to;
+ }
+
+ /*
+ * copy pte
+ */
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+			 * Invalidate the pte temporarily to avoid having the
+			 * modified and/or reference bits written back by another cpu.
+ */
+ opte.bits = invalidate_pte(srcpte);
+ flush_atc_entry(users, from, 1);
+ ((pte_template_t *)dstpte)->bits = opte.bits;
+ from += M88K_PGBYTES;
+ to += M88K_PGBYTES;
+ srcpte++; dstpte++;
+ }
+ size -= PAGE_SIZE;
+ }
+
+ PMAP_UNLOCK(kernel_pmap, spl);
+
+} /* pagemove */
+
+#endif /* 0 */
+/*
+ * Routine: icache_flush
+ *
+ * Function:
+ * Invalidate instruction cache for all CPUs on specified
+ * physical address. Called when a page is removed from a
+ * vm_map. This is done because the Instruction CMMUs are not
+ * snooped, and if a page is subsequently used as a text page,
+ * we want the CMMUs to re-load the cache for the page.
+ *
+ * Parameters:
+ * pa physical address of the (vm) page
+ *
+ * Extern/globals:
+ * ptes_per_vm_page
+ *
+ * Calls:
+ * cachefall
+ *
+ * Called by:
+ * vm_remove_page
+ *
+ */
+void icache_flush(vm_offset_t pa)
+{
+ register int i;
+ register int cpu = 0;
+
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cmmu_flush_remote_inst_cache(cpu, pa, M88K_PGBYTES);
+ }
+
+} /* icache_flush */
+
+/*
+ * Routine: pmap_dcache_flush
+ *
+ * Function:
+ * Flush DATA cache on specified virtual address.
+ *
+ * Parameters:
+ * pmap specify pmap
+ * va virtual address of the (vm) page to be flushed
+ *
+ * Extern/globals:
+ * pmap_pte
+ * ptes_per_vm_page
+ *
+ * Calls:
+ * dcacheflush
+ *
+ */
+void pmap_dcache_flush(pmap_t pmap, vm_offset_t va)
+{
+ register vm_offset_t pa;
+ register int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_dcache_flush: pmap is NULL");
+
+ PMAP_LOCK(pmap, spl);
+
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cmmu_flush_data_cache(pa, M88K_PGBYTES);
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+
+
+} /* pmap_dcache_flush */
+
+static void cache_flush_loop(int mode, vm_offset_t pa, int size)
+{
+ register int i;
+ register int ncpus;
+ void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
+
+ switch (mode) {
+ default:
+ panic("bad cache_flush_loop mode");
+ return;
+
+ case FLUSH_CACHE: /* All caches, all CPUs */
+ ncpus = NCPUS;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
+ ncpus = NCPUS;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
+ ncpus = NCPUS;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+
+ case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+ }
+
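+	/*
+	 * The global modes iterate over every cpu; the FLUSH_LOCAL_* modes
+	 * only touch the cache of the cpu we are running on.
+	 */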
+ if (ncpus == 1) {
+ (*cfunc)(cpu_number(), pa, size);
+ }
+ else {
+ for (i=0; i<NCPUS; i++) {
+ (*cfunc)(i, pa, size);
+ }
+ }
+}
+
+/*
+ * pmap_cache_flush
+ * Internal function.
+ */
+void pmap_cache_flush(
+ pmap_t pmap,
+ vm_offset_t virt,
+ int bytes,
+ int mode)
+{
+ register vm_offset_t pa;
+ register vm_offset_t va;
+ register int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+		panic("pmap_cache_flush: NULL pmap");
+
+ /*
+ * If it is more than a couple of pages, just blow the whole cache
+ * because of the number of cycles involved.
+ */
+ if (bytes > 2*M88K_PGBYTES) {
+ cache_flush_loop(mode, 0, -1);
+ return;
+ }
+
+ PMAP_LOCK(pmap, spl);
+ for(va = virt; bytes > 0; bytes -= M88K_PGBYTES,va += M88K_PGBYTES) {
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cache_flush_loop(mode, pa, M88K_PGBYTES);
+ }
+ }
+ PMAP_UNLOCK(pmap, spl);
+} /* pmap_cache_flush */
+
+
+#ifdef JUNK
+/*
+ * Machine-level page attributes
+ *
+ * This implementation was lifted from the MIPS pmap module.
+ * We currently only use it to invalidate the I-Cache for
+ * debugger use.
+ *
+ * These are infrequently used features of the M88K CMMU,
+ * basically cache control functions. The cachability
+ * property of mappings must be remembered across paging
+ * operations, so that they can be restored on need.
+ *
+ * Obviously these attributes will be used in a sparse
+ * fashion, so we use a simple list of attribute-value
+ * pairs.
+ *
+ * Some notes on the cache management based upon my quick
+ * calculation and previous experience.
+ * We must carefully weigh the cost of cache invalidate time to
+ * cache refill time. If "cachefall()" is called for more than
+ * two pages, it is usually faster to simply invalidate the entire
+ * cache and let it refill, since the number of cycles required to
+ * perform the invalidate becomes greater than the number to refill.
+ * If we are only performing an invalidate for something like a
+ * debugger breakpoint, it becomes worthwhile to only perform a
+ * line invalidate. Remember, we must account for the amount of
+ * time required to perform the pmap lookups.
+ */
+/*
+ * pmap_attributes:
+ *
+ * Set/Get special memory attributes
+ *
+ * This is currently only used to invalidate the I-cache when a
+ * breakpoint is set by the debugger.
+ *
+ */
+int pmap_attribute(
+ pmap_t pmap,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
+{
+ register vm_offset_t start, end;
+ int ret;
+#ifdef notyet
+ pmap_attribute_t a;
+#endif
+
+ if (attribute != MATTR_CACHE)
+ return KERN_INVALID_ARGUMENT;
+
+ if (pmap == PMAP_NULL)
+ return KERN_SUCCESS;
+
+ start = trunc_page(address);
+ end = round_page(address + size);
+ ret = KERN_SUCCESS;
+
+
+ /* All we are looking for right now is an instruction cache flush.
+ */
+ switch(*value) {
+ case MATTR_VAL_CACHE_FLUSH:
+ pmap_cache_flush(pmap, start, size, FLUSH_CACHE);
+ break;
+ case MATTR_VAL_DCACHE_FLUSH:
+ pmap_cache_flush(pmap, start, size, FLUSH_DATA_CACHE);
+ break;
+ case MATTR_VAL_ICACHE_FLUSH:
+ pmap_cache_flush(pmap, start, size, FLUSH_CODE_CACHE);
+ /* ptrace_user_iflush(pmap, start, size); */
+ break;
+
+ default:
+ ret = KERN_INVALID_ARGUMENT;
+ }
+
+ return ret;
+}
+#endif /* JUNK */
+#ifdef DEBUG
+/*
+ * DEBUGGING ROUTINES - check_pv_list and check_pmap_consistency are used
+ * only for debugging. They are invoked only
+ * through the macros CHECK_PV_LIST and CHECK_PMAP_CONSISTENCY
+ * defined early in this source file.
+ */
+
+/*
+ * Routine: CHECK_PV_LIST (internal)
+ *
+ * History:
+ * '90.7.13 Fuzzy
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ * Function:
+ * Debug-mode routine to check consistency of a PV list. First, it
+ * makes sure every map thinks the physical page is the same. This
+ * should be called by all routines which touch a PV list.
+ *
+ * Parameters:
+ * phys physical address of page whose PV list is
+ * to be checked
+ * pv_h pointer to the head of the PV list
+ * who string containing caller's name to be
+ * printed if a panic arises
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ *
+ * Calls:
+ * pmap_extract
+ *
+ * Special Assumptions:
+ * No locking is required.
+ *
+ * This function walks the given PV list. For each pmap/va pair,
+ * pmap_extract is called to obtain the physical address of the page from
+ * the pmap in question. If the returned physical address does not match
+ * that for the PV list being perused, the function panics.
+ */
+
+static void check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
+{
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t pa;
+
+ if (pv_h != PFIDX_TO_PVH(PFIDX(phys))) {
+ printf("check_pv_list: incorrect pv_h supplied.\n");
+ panic(who);
+ }
+
+ if (!PAGE_ALIGNED(phys)) {
+ printf("check_pv_list: supplied phys addr not page aligned.\n");
+ panic(who);
+ }
+
+ if (pv_h->pmap == PMAP_NULL) {
+ if (pv_h->next != PV_ENTRY_NULL) {
+ printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
+ panic(who);
+ }
+ else return; /* proper empty list */
+ }
+
+ pv_e = pv_h;
+ while (pv_e != PV_ENTRY_NULL) {
+ if (!PAGE_ALIGNED(pv_e->va)) {
+ printf("check_pv_list: non-aligned VA in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+ /*
+ * We can't call pmap_extract since it requires lock.
+ */
+ if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t)0;
+ else
+ pa = M88K_PTOB(pte->pfn) | (pv_e->va & M88K_PGOFSET);
+
+ if (pa != phys) {
+ printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+
+ pv_e = pv_e->next;
+ }
+
+} /* check_pv_list() */
+
+/*
+ * Routine: CHECK_MAP (internal)
+ *
+ * History:
+ * June 13 '90 Fuzzy
+ * Rewrite level 1 --> segment
+ * level 3 --> page
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ *
+ * Function:
+ * Debug mode routine to check consistency of map.
+ * Called by check_pmap_consistency only.
+ *
+ * Parameters:
+ * map pointer to pmap structure
+ * s start of range to be checked
+ * e end of range to be checked
+ * who string containing caller's name to be
+ * printed if a panic arises
+ *
+ * Extern/Global:
+ * pv_head_array, pv lists
+ *
+ * Calls:
+ * pmap_pte
+ *
+ * Special Assumptions:
+ * No locking required.
+ *
+ * This function sequences through the given range of addresses. For
+ * each page, pmap_pte is called to obtain the page table entry. If
+ * it is valid, and the physical page it maps is managed, the PV list is
+ * searched for the corresponding pmap/va entry. If not found, the
+ * function panics. If duplicate PV list entries are found, the function
+ * panics.
+ */
+
+static void check_map(
+ pmap_t map,
+ vm_offset_t s,
+ vm_offset_t e,
+ char *who)
+{
+ vm_offset_t va,
+ old_va,
+ phys;
+ pv_entry_t pv_h,
+ pv_e,
+ saved_pv_e;
+ pt_entry_t *ptep;
+ boolean_t found;
+ int loopcnt;
+
+
+ /*
+ * for each page in the address space, check to see if there's
+ * a valid mapping. If so, make sure it is listed in the PV list.
+ */
+
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) checking map at 0x%x\n", map);
+
+ old_va = s;
+ for (va = s; va < e; va += PAGE_SIZE) {
+ /* check for overflow - happens if e=0xffffffff */
+ if (va < old_va)
+ break;
+ else
+ old_va = va;
+
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2)
+ /* don't try anything with these */
+ continue;
+
+ ptep = pmap_pte(map, va);
+
+ if (ptep == PT_ENTRY_NULL) {
+ /* no page table, skip to next segment entry */
+ va = SDT_NEXT(va)-PAGE_SIZE;
+ continue;
+ }
+
+ if (!PDT_VALID(ptep))
+ continue; /* no page mapping */
+
+ phys = M88K_PTOB(ptep->pfn); /* pick up phys addr */
+
+ if (!PMAP_MANAGED(phys))
+ continue; /* no PV list */
+
+ /* note: vm_page_startup allocates some memory for itself
+ through pmap_map before pmap_init is run. However,
+ it doesn't adjust the physical start of memory.
+ So, pmap thinks those pages are managed - but they're
+ not actually under its control. So, the following
+ conditional is a hack to avoid those addresses
+ reserved by vm_page_startup */
+ /* pmap_init also allocates some memory for itself. */
+
+ if (map == kernel_pmap &&
+ va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start))))
+ continue;
+
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ found = FALSE;
+
+ if (pv_h->pmap != PMAP_NULL) {
+
+ loopcnt = 10000; /* loop limit */
+ pv_e = pv_h;
+ while(pv_e != PV_ENTRY_NULL) {
+
+ if (loopcnt-- < 0) {
+ printf("check_map: loop in PV list at PVH 0x%x (for phys 0x%x)\n", pv_h, phys);
+ panic(who);
+ }
+
+ if (pv_e->pmap == map && pv_e->va == va) {
+ if (found) {
+ printf("check_map: Duplicate PV list entries at 0x%x and 0x%x in PV list 0x%x.\n", saved_pv_e, pv_e, pv_h);
+ printf("check_map: for pmap 0x%x, VA 0x%x,phys 0x%x.\n", map, va, phys);
+ panic(who);
+ }
+ else {
+ found = TRUE;
+ saved_pv_e = pv_e;
+ }
+ }
+ pv_e = pv_e->next;
+ }
+ }
+
+ if (!found) {
+ printf("check_map: Mapping for pmap 0x%x VA 0x%x Phys 0x%x does not appear in PV list 0x%x.\n", map, va, phys, pv_h);
+ }
+ }
+
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) done \n");
+
+} /* check_map() */
+
+/*
+ * Routine: CHECK_PMAP_CONSISTENCY (internal)
+ *
+ * History:
+ * '90. 7.16 Fuzzy
+ * '90.8.3 Fuzzy
+ * if defined TEST, 'static' undeclared.
+ * '90.8.30 Fuzzy
+ * delete "if defined TEST, 'static' undeclared."
+ *
+ * Function:
+ * Debug mode routine which walks all pmap, checking for internal
+ * consistency. We are called UNLOCKED, so we'll take the write
+ * lock.
+ *
+ * Parameters:
+ * who string containing caller's name to be
+ * printed if a panic arises
+ *
+ * Extern/Global:
+ * list of pmap structures
+ *
+ * Calls:
+ * check_map
+ * check_pv_list
+ *
+ * This function obtains the pmap write lock. Then, for each pmap
+ * structure in the pmap struct queue, it calls check_map to verify the
+ * consistency of its translation table hierarchy.
+ *
+ * Once all pmaps have been checked, check_pv_list is called to check
+ * consistency of the PV lists for each managed page.
+ *
+ * NOTE: Added by Sugai 10/29/90
+ * There are some pages that do not appear in the PV list. These pages are
+ * allocated for pv structures by kmem_alloc called in pmap_init.
+ * Though they are in the range of pmap_phys_start to pmap_phys_end,
+ * PV manipulations had not been activated when these pages were allocated.
+ *
+ */
+
+static void check_pmap_consistency(char *who)
+{
+ pmap_t p;
+ int i;
+ vm_offset_t phys;
+ pv_entry_t pv_h;
+ int spl;
+
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+ printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
+
+ if (pv_head_table == PV_ENTRY_NULL) {
+
+ printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
+ return;
+ }
+
+ SPLVM(spl);
+
+ p = kernel_pmap;
+ check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who);
+
+ /* run through all pmaps. check consistency of each one... */
+ i = PMAP_MAX;
+ for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) {
+ if (i == 0) { /* can not read pmap list */
+ printf("check_pmap_consistency: pmap strcut loop error.\n");
+ panic(who);
+ }
+ check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who);
+ }
+
+ /* run through all managed pages, check pv_list for each one */
+ for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) {
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ check_pv_list(phys, pv_h, who);
+ }
+
+ SPLX(spl);
+
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+ printf("check_pmap consistency (%s :%x): done.\n",who, curproc);
+
+} /* check_pmap_consistency() */
+#endif /* DEBUG */
+
+/*
+ * PMAP PRINT MACROS AND ROUTINES FOR DEBUGGING
+ * These routines are called only from the debugger.
+ * (No locking required.)
+ * usually found in pmap.c Fuzzy '90.7.12
+ */
+
+#define PRINT_SDT(p) \
+ printf("%08x : ", \
+ ((sdt_entry_template_t *)p)-> bits); \
+ printf("table adress=0x%x, prot=%d, dtype=%d\n", \
+ M88K_PTOB(p->table_addr), \
+ p->prot, \
+ p->dtype);
+
+#define PRINT_PDT(p) \
+ printf("%08x : ", \
+ ((pte_template_t *)p)-> bits); \
+ printf("frame num=0x%x, prot=%d, dtype=%d, wired=%d, modified=%d, pg_used=%d\n", \
+ p->pfn, \
+ p->prot, \
+ p->dtype, \
+ p->wired, \
+ p->modified, \
+ p->pg_used);
+
+/*
+ * Routine: PMAP_PRINT
+ *
+ * Author: Fuzzy '90.7.12
+ *
+ * History:
+ * '90.7.25 Fuzzy Null sdt entry skip, and skip count print.
+ *
+ * Function:
+ * Print pmap structure, including segment table.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ *
+ * Special Assumptions:
+ * No locking required.
+ *
+ * This function prints the fields of the pmap structure, then
+ * iterates through the segment translation table, printing each entry.
+ */
+void pmap_print (pmap_t pmap)
+{
+ sdt_entry_t *sdtp;
+ sdt_entry_t *sdtv;
+ int i;
+
+ printf("Pmap @ 0x%x:\n", (unsigned)pmap);
+ sdtp = pmap->sdt_paddr;
+ sdtv = pmap->sdt_vaddr;
+ printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n",
+ (unsigned)sdtp, (unsigned)sdtv,
+ pmap->ref_count);
+
+#ifdef statistics_not_yet_maintained
+ printf(" statistics: pagesize %d: free_count %d; "
+ "active_count %d; inactive_count %d; wire_count %d\n",
+ pmap->stats.pagesize,
+ pmap->stats.free_count,
+ pmap->stats.active_count,
+ pmap->stats.inactive_count,
+ pmap->stats.wire_count);
+
+ printf(" zero_fill_count %d; reactiveations %d; "
+ "pageins %d; pageouts %d; faults %d\n",
+ pmap->stats.zero_fill_count,
+ pmap->stats.reactivations,
+ pmap->stats.pageins,
+ pmap->stats.pageouts,
+ pmap->stats.faults);
+
+ printf(" cow_faults %d, lookups %d, hits %d\n",
+ pmap->stats.cow_faults,
+ pmap->stats.lookups,
+ pmap->stats.faults);
+#endif
+
+ sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */
+ sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */
+ if (sdtp == (sdt_entry_t *)0)
+ printf("Error in pmap - sdt_paddr is null.\n");
+ else {
+ int count = 0;
+ printf(" Segment table at 0x%x (0x%x):\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) {
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ count = 0;
+ printf(" (%x)phys: ", i);
+ PRINT_SDT(sdtp);
+ printf(" (%x)virt: ", i);
+ PRINT_SDT(sdtv);
+ }
+ else
+ count++;
+ }
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ }
+
+} /* pmap_print() */
+
+/*
+ * Routine: PMAP_PRINT_TRACE
+ *
+ * Function:
+ * Using virt addr, derive phys addr, printing pmap tables along the way.
+ *
+ * Parameters:
+ * pmap pointer to pmap structure
+ * va virtual address whose translation is to be traced
+ * long_format flag indicating long-form output is desired
+ *
+ * Special Assumptions:
+ * No locking required.
+ *
+ * This function chases down through the translation tree as
+ * appropriate for the given virtual address. Each table entry
+ * encountered is printed. If long_format is set, all entries of
+ * each table are printed, with special indication of the entries used in
+ * the translation.
+ */
+void pmap_print_trace (
+ pmap_t pmap,
+ vm_offset_t va,
+ boolean_t long_format)
+{
+ sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */
+ sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */
+ pt_entry_t *ptep; /* ptr to pte table of physical page addresses */
+
+ int i; /* table loop index */
+ unsigned long prev_entry; /* keep track of value of previous table entry */
+ int n_dup_entries; /* count contiguous duplicate entries */
+
+ printf("Trace of virtual address 0x%08x. Pmap @ 0x%08x.\n",
+ va, (unsigned)pmap);
+
+ /*** SDT TABLES ***/
+ /* get addrs of sdt tables */
+ sdtp = (sdt_entry_t *)pmap->sdt_vaddr;
+ sdtv = sdtp + SDT_ENTRIES;
+
+ if (sdtp == SDT_ENTRY_NULL) {
+ printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+
+ if (long_format) {
+ printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits
+ && SDTIDX(va) != i && i != SDT_ENTRIES-1) {
+ n_dup_entries++;
+ continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)sdtp)->bits;
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)phys: ", i);
+ } else {
+ printf(" (%x)phys: ", i);
+ }
+ PRINT_SDT(sdtp);
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)virt: ", i);
+ } else {
+ printf(" (%x)virt: ", i);
+ }
+ PRINT_SDT(sdtv);
+ } /* for */
+ } else {
+ /* index into both tables for given VA */
+ sdtp += SDTIDX(va);
+ sdtv += SDTIDX(va);
+ printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n",
+ SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv);
+ printf(" phys: ");
+ PRINT_SDT(sdtp);
+ printf(" virt: ");
+ PRINT_SDT(sdtv);
+ }
+
+ /*** PTE TABLES ***/
+ /* get addrs of page (pte) table (no shadow table) */
+
+ sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va);
+ #ifdef DBG
+ printf("*** DEBUG (sdtp) ");
+ PRINT_SDT(sdtp);
+ #endif
+ sdtv = sdtp + SDT_ENTRIES;
+ ptep = (pt_entry_t *)(M88K_PTOB(sdtv->table_addr));
+ if (sdtp->dtype != DT_VALID) {
+ printf(" segment table entry invlid, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+ if (long_format) {
+ printf(" page table (ptes) at 0x%x\n", (unsigned)ptep);
+ for (i = 0; i < PDT_ENTRIES; i++, ptep++) {
+ if (prev_entry == ((pte_template_t *)ptep)->bits
+ && PDTIDX(va) != i && i != PDT_ENTRIES-1) {
+ n_dup_entries++;
+ continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)ptep)->bits;
+ if (PDTIDX(va) == i) {
+ printf(" >> (%x)pte: ", i);
+ } else {
+ printf(" (%x)pte: ", i);
+ }
+ PRINT_PDT(ptep);
+ } /* for */
+ } else {
+ /* index into page table */
+ ptep += PDTIDX(va);
+ printf(" pte index 0x%x\n", PDTIDX(va));
+ printf(" pte: ");
+ PRINT_PDT(ptep);
+ }
+} /* pmap_print_trace() */
+
+/*
+ * Check whether the current transaction being looked at by dodexc()
+ * could have been the one that caused a fault. Given the virtual
+ * address, map, and transaction type, checks whether the page at that
+ * address is valid, and, for write transactions, whether it has write
+ * permission.
+ */
+boolean_t pmap_check_transaction(
+ pmap_t pmap,
+ vm_offset_t va,
+ vm_prot_t type)
+{
+ pt_entry_t *pte;
+ sdt_entry_t *sdt;
+ int spl;
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
+
+ if (!PDT_VALID(pte)) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
+
+ /*
+ * Valid pte. If the transaction was a read, there is no way it
+ * could have been a fault, so return true. For now, assume
+ * that a write transaction could have caused a fault. We need
+ * to check pte and sdt entries for write permission to really
+ * tell.
+ */
+
+ if (type == VM_PROT_READ) {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ } else {
+ sdt = SDTENT(pmap,va);
+ if (sdt->prot || pte->prot) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ } else {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ }
+ }
+}
+
+/* New functions to satisfy rpd - contributed by danner */
+
+void pmap_virtual_space(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ *startp = virtual_avail;
+ *endp = virtual_end;
+}
+
+unsigned int pmap_free_pages(void)
+{
+ return atop(avail_end - avail_next);
+}
+
+boolean_t pmap_next_page(vm_offset_t *addrp)
+{
+ if (avail_next == avail_end)
+ return FALSE;
+
+ *addrp = avail_next;
+ avail_next += PAGE_SIZE;
+ return TRUE;
+}
+
+#if 0
+#ifdef OMRON_PMAP
+/*
+ * Set BATC
+ */
+void pmap_set_batc(
+ pmap_t pmap,
+ boolean_t data,
+ int i,
+ vm_offset_t va,
+ vm_offset_t pa,
+ boolean_t super,
+ boolean_t wt,
+ boolean_t global,
+ boolean_t ci,
+ boolean_t wp,
+ boolean_t valid)
+{
+ register batc_template_t batctmp;
+
+ if (i < 0 || i > (BATC_MAX - 1)) {
+ panic("pmap_set_batc: illegal batc number\n");
+ /* bad number */
+ return;
+ }
+
+ batctmp.field.lba = va >> 19;
+ batctmp.field.pba = pa >> 19;
+ batctmp.field.sup = super;
+ batctmp.field.wt = wt;
+ batctmp.field.g = global;
+ batctmp.field.ci = ci;
+ batctmp.field.wp = wp;
+ batctmp.field.v = valid;
+
+ if (data) {
+ pmap->d_batc[i].bits = batctmp.bits;
+ } else {
+ pmap->i_batc[i].bits = batctmp.bits;
+ }
+}
+
+void use_batc(
+ task_t task,
+ boolean_t data, /* for data-cmmu ? */
+ int i, /* batc number */
+ vm_offset_t va, /* virtual address */
+ vm_offset_t pa, /* physical address */
+ boolean_t s, /* for super-mode ? */
+ boolean_t wt, /* is writethrough */
+ boolean_t g, /* is global ? */
+ boolean_t ci, /* is cache inhibited ? */
+ boolean_t wp, /* is write-protected ? */
+ boolean_t v) /* is valid ? */
+{
+ pmap_t pmap;
+ pmap = vm_map_pmap(task->map);
+ pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
+}
+
+#endif
+#endif /* 0 */
+#ifdef notyet
+/*
+ * Machine-level page attributes
+ *
+ * The only attribute that may be controlled right now is cacheability.
+ *
+ * Obviously these attributes will be used in a sparse
+ * fashion, so we use a simple sorted list of address ranges
+ * which possess the attribute.
+ */
+
+/*
+ * Destroy an attribute list.
+ */
+void pmap_destroy_ranges(pmap_range_t *ranges)
+{
+ register pmap_range_t this, next;
+
+ this = *ranges;
+ while (this != 0) {
+ next = this->next;
+ pmap_range_free(this);
+ this = next;
+ }
+ *ranges = 0;
+}
+
+/*
+ * Lookup an address in a sorted range list.
+ */
+boolean_t pmap_range_lookup(
+ pmap_range_t *ranges,
+ vm_offset_t address)
+{
+ register pmap_range_t range;
+
+ for (range = *ranges; range != 0; range = range->next) {
+ if (address < range->start)
+ return FALSE;
+ if (address < range->end)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * Add a range to a list.
+ * The pmap must be locked.
+ */
+void pmap_range_add(
+ pmap_range_t *ranges,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ register pmap_range_t range, *prev;
+
+ /* look for the start address */
+
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start < range->start)
+ break;
+ if (start <= range->end)
+ goto start_overlaps;
+ }
+
+ /* start address is not present */
+
+ if ((range == 0) || (end < range->start)) {
+ /* no overlap; allocate a new range */
+
+ range = pmap_range_alloc();
+ range->start = start;
+ range->end = end;
+ range->next = *prev;
+ *prev = range;
+ return;
+ }
+
+ /* extend existing range forward to start */
+
+ range->start = start;
+
+ start_overlaps:
+ assert((range->start <= start) && (start <= range->end));
+
+ /* delete redundant ranges */
+
+ while ((range->next != 0) && (range->next->start <= end)) {
+ pmap_range_t old;
+
+ old = range->next;
+ range->next = old->next;
+ range->end = old->end;
+ pmap_range_free(old);
+ }
+
+ /* extend existing range backward to end */
+
+ if (range->end < end)
+ range->end = end;
+}
+
+/*
+ * Remove a range from a list.
+ * The pmap must be locked.
+ */
+void pmap_range_remove(
+ pmap_range_t *ranges,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ register pmap_range_t range, *prev;
+
+ /* look for start address */
+
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start <= range->start)
+ break;
+ if (start < range->end) {
+ if (end < range->end) {
+ pmap_range_t new;
+
+ /* split this range */
+
+ new = pmap_range_alloc();
+ new->next = range->next;
+ new->start = end;
+ new->end = range->end;
+
+ range->next = new;
+ range->end = start;
+ return;
+ }
+
+ /* truncate this range */
+
+ range->end = start;
+ }
+ }
+
+ /* start address is not in the middle of a range */
+
+ while ((range != 0) && (range->end <= end)) {
+ *prev = range->next;
+ pmap_range_free(range);
+ range = *prev;
+ }
+
+ if ((range != 0) && (range->start < end))
+ range->start = end;
+}
+#endif /* notyet */
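(Editorial aside, not part of the import.) The sorted-range helpers above are easiest to see with a short usage sketch. It assumes the pmap_range_t list type and the pmap_range_alloc()/pmap_range_free() helpers referenced by the `notyet` code, and would only make sense if that code were compiled in.

    /* Illustrative only: exercises the sorted attribute-range list. */
    static void
    pmap_range_demo(void)
    {
            pmap_range_t ranges = 0;                    /* empty attribute list */

            pmap_range_add(&ranges, 0x1000, 0x3000);    /* list: [0x1000,0x3000) */
            pmap_range_add(&ranges, 0x2000, 0x5000);    /* merged: [0x1000,0x5000) */

            if (pmap_range_lookup(&ranges, 0x2800))
                    printf("0x2800 has the attribute\n");

            pmap_range_remove(&ranges, 0x2000, 0x3000); /* split: [0x1000,0x2000) and [0x3000,0x5000) */

            pmap_destroy_ranges(&ranges);               /* free the whole list */
    }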
diff --git a/sys/arch/mvme88k/m88k/process.S b/sys/arch/mvme88k/m88k/process.S
new file mode 100644
index 00000000000..9369cc63e80
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/process.S
@@ -0,0 +1,270 @@
+#ifndef ASSEMBLER /* predefined by ascpp, at least */
+#define ASSEMBLER
+#endif
+
+#include "machine/locore.h"
+#include "machine/asm.h"
+#include "assym.s"
+
+#ifndef NBPG
+#define NBPG 4096
+#endif /* NBPG */
+
+ data
+ align 4
+Lsw0:
+ string "cpu_switch\n"
+ align 4
+swchanpanic:
+ string "switch wchan\n"
+ align 4
+swsrunpanic:
+ string "switch SRUN\n"
+
+ text
+ align 8
+Lswchanpanic:
+ or.u r2, r0, hi16(swchanpanic)
+ or r2, r2, lo16(swchanpanic)
+ bsr _panic
+
+Lswsrunpanic:
+ or.u r2, r0, hi16(swsrunpanic)
+ or r2, r2, lo16(swsrunpanic)
+ bsr _panic
+/*
+ * At exit of a process, do a cpu_switch for the last time.
+ * The mapping of the pcb at p->p_addr has already been deleted,
+ * and the memory for the pcb+stack has been freed.
+ * The ipl is high enough to prevent the memory from being reallocated.
+ */
+ENTRY(switch_exit)
+ /*
+ * Change pcb to idle u. area, i.e., set r31 to top of stack
+ * and set curpcb to point to _idle_u.
+ */
+ or.u r31, r0, hi16(_idle_u)
+ or r31, r31,lo16(_idle_u)
+ or.u r10, r0, hi16(_curpcb)
+ or r10, r10,lo16(_curpcb)
+ st r31, r0, r10 /* curpcb = &idle_u */
+ addu r31, r31, UPAGES * NBPG /* now on idle_u stack */
+ or.u r10, r0, hi16(_curproc)
+ st r0, r10, lo16(_curproc) /* curproc = NULL */
+ bsr.n _cpu_switch
+ or r2, r0, r10
+
+/*
+ * When no processes are on the runq, switch
+ * idles here waiting for something to come ready.
+ */
+LABEL(idle)
+ or.u r10, r0, hi16(_curproc)
+ st r0, r10, lo16(_curproc) /* curproc = NULL */
+
+ or r2,r0,0
+ bsr _spln /*(void) spl0(); */
+ ; spin reading _whichqs until nonzero
+1:
+ or.u r10, r0, hi16(_whichqs)
+ ld r11, r10,lo16(_whichqs)
+ bcnd eq0, r11, 1b
+ bsr.n _spln
+ or r2,r0,6
+ br Lsw1
+/*
+ * cpu_switch()
+ * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
+ * XXX - how about using stack for saving spl and last proc?
+ */
+ENTRY(cpu_switch)
+ or.u r10, r0, hi16(_curpcb)
+ ld r10,r10, lo16(_curpcb)
+ st r1, r10, 0 ; save r1 in pcb
+ bsr _spl
+ or.u r10, r0, hi16(_curpcb) ; a call can clobber
+ ld r10,r10, lo16(_curpcb) ; r10 - so reload it
+ st r2, r10, 19 * 4 ; save ipl in pcb
+ or.u r11, r0, hi16(_curproc)
+ ld r11,r11, lo16(_curproc)
+ or.u r12, r0, hi16(_lastproc)
+ or r12, r12, lo16(_lastproc)
+ st r11, r12, 0 ; lastproc = curproc
+ or.u r11, r0, hi16(_curproc)
+ st r0, r11, lo16(_curproc) ; curproc = NULL
+ bsr.n _spln
+ or r2,r0,6
+Lsw1:
+ /*
+ * Find the highest-priority queue that isn't empty,
+ * then take the first proc from that queue.
+ */
+ or r6, r0, r0
+ or.u r7, r0, hi16(_whichqs)
+ ld r7, r7, lo16(_whichqs)
+Lswchk:
+ bcnd eq0, r7, idle
+ ff1 r6, r7 ; 0 <= r6 <= 31
+
+ or.u r7, r0, hi16(_qs)
+ or r7, r7, lo16(_qs)
+ mak r6, r6, 0<3>
+ lda r8, r7[r6] ; r8 = qs[ff1(whichqs)]
+ ; r8 is q, r9 is p
+ ld r9, r8, P_FORW ; p = q->p_forw
+ ld r12, r9, P_FORW ; r12 is p->p_forw
+ st r12, r8, P_FORW ; q->p_forw = p->p_forw
+ st r12, r8, 0 ; q = p->p_forw
+ ld r12, r9, P_BACK ; r12 is p->p_back
+ st r12, r8, P_BACK ; q->p_back = p->p_back
+ lda r8, r7[r6] ; reload r8 with qs[ff1(whichqs)]
+ ld r12, r8, P_FORW; q->p_forw
+ cmp r12, r12, r8 ; q == q->p_forw; anyone left on queue?
+ bb1 ne, r12, Lsw2 ; no, skip
+ ext r6, r6, 0<3>
+ add r6, r6, 1 ; turn off the bit we looked at
+ or.u r7, r0, hi16(_whichqs)
+ ld r8, r7, lo16(_whichqs)
+ and.c r8, r8, r6 ; whichqs &= ~the bit
+ st r8, r7, lo16(_whichqs) ; reset bit in whichqs
+Lsw2:
+ ld r2, r9, P_WCHAN
+ bcnd ne0, r2, Lswchanpanic
+ ld.b r2, r9, P_STAT
+ cmp r2, r2, SRUN
+ bb1 ne, r2, Lswsrunpanic
+
+ or.u r11, r0, hi16(_want_resched)
+ st r0, r11, lo16(_want_resched)
+
+ or.u r11, r0, hi16(_curproc)
+ st r9, r11,lo16(_curproc) ; curproc = p
+
+ or.u r2, r0, hi16(_lastproc)
+ ld r2, r2, lo16(_lastproc)
+
+ or.u r10, r0, hi16(_curpcb)
+ ld r10,r10, lo16(_curpcb)
+
+ cmp r2, r2, r9
+ bb1 eq, r2, Lswsameproc
+
+ /*
+ * Save state of previous process in its pcb.
+ */
+
+ ; r1 and ipl already saved above
+ st r14,r10,4
+ st r15,r10,2*4
+ st r16,r10,3*4
+ st r17,r10,4*4
+ st r18,r10,5*4
+ st r19,r10,6*4
+ st r20,r10,7*4
+ st r21,r10,8*4
+ st r22,r10,9*4
+ st r23,r10,10*4
+ st r24,r10,11*4
+ st r25,r10,12*4
+ st r26,r10,13*4
+ st r27,r10,14*4
+ st r28,r10,15*4
+ st r29,r10,16*4
+ st r30,r10,17*4 /* save frame pointer */
+ st r31,r10,18*4 /* save stack pointer */
+ /* ipl already saved */
+ ; r9 is curproc
+ or.u r10, r0, hi16(_curpcb)
+ or r10,r10, lo16(_curpcb)
+ st r0, r9, P_BACK ; p->p_back = 0
+ ld r3, r9, P_ADDR
+ st r3, r10, 0 ; curpcb = p->p_addr
+ /* see if pmap_activate needs to be called */
+ ld r2, r9, P_VMSPACE ; vmspace = p->p_vmspace
+ addu r2, r2, VM_PMAP ; pmap = &vmspace.vm_pmap
+#if 0
+ ld r5, r2, PM_STCHG ; pmap->st_changed?
+ bcnd eq0, r5, Lswnochg ; no, skip
+#endif
+ or r14, r0, r9 ; save p in r14
+ bsr _pmap_activate ; pmap_activate(pmap, pcb)
+ or r9, r0, r14 ; restore p saved in r14
+
+Lswnochg:
+ or.u r31, r0, hi16(_intstack_end)
+ or r31,r31, lo16(_intstack_end); now goto a tmp stack for NMI
+ bsr.n _load_u_area ; load_u_area(p)
+ or r2, r0, r9
+ or.u r10, r0, hi16(_curpcb)
+ ld r10, r10, lo16(_curpcb)
+ ; XXX Is this correct/necessary?
+ st r10, r14, P_ADDR ; p->p_addr = curpcb; restore p_addr
+ ; flush some data cache here
+
+ ; restore from the current context
+
+ ld r1,r10,0
+ ld r14,r10,4
+ ld r15,r10,2*4
+ ld r16,r10,3*4
+ ld r17,r10,4*4
+ ld r18,r10,5*4
+ ld r19,r10,6*4
+ ld r20,r10,7*4
+ ld r21,r10,8*4
+ ld r22,r10,9*4
+ ld r23,r10,10*4
+ ld r24,r10,11*4
+ ld r25,r10,12*4
+ ld r26,r10,13*4
+ ld r27,r10,14*4
+ ld r28,r10,15*4
+ ld r29,r10,16*4
+ ld r30,r10,17*4 /* restore frame pointer */
+ ld r31,r10,18*4 /* restore stack pointer */
+Lswsameproc:
+ ld r2, r10,19*4 /* restore interrupt mask */
+ subu r31,r31,40
+ st r1, r31,32 ; save r1 on stack
+ bsr _spln
+Lcxswdone:
+ ld r1, r31,32 ; restore r1 from stack
+ addu r31,r31,40
+ jmp.n r1
+ or r2, r0, 1 ; return 1 (for alternate returns)
+
+/*
+ * savectx(pcb)
+ * Update pcb, saving current processor state.
+ */
+ENTRY(savectx)
+ /* get the spl mask */
+ subu r31,r31,40 /* allocate stack for r1 and args */
+ st r1,r31,36 /* save return address */
+ st r2,r31,32 /* save r2 */
+ bsr _spl /* get the current interrupt mask */
+ ld r1,r31,36 /* recover return address */
+ ld r10,r31,32 /* recover r2 into r10 */
+ addu r31,r31,40 /* put stack pointer back */
+ st r1,r10,0 /* do setjmp */ /* save return address */
+ st r14,r10,4
+ st r15,r10,2*4
+ st r16,r10,3*4
+ st r17,r10,4*4
+ st r18,r10,5*4
+ st r19,r10,6*4
+ st r20,r10,7*4
+ st r21,r10,8*4
+ st r22,r10,9*4
+ st r23,r10,10*4
+ st r24,r10,11*4
+ st r25,r10,12*4
+ st r26,r10,13*4
+ st r27,r10,14*4
+ st r28,r10,15*4
+ st r29,r10,16*4
+ st r30,r10,17*4 /* save frame pointer */
+ st r31,r10,18*4 /* save stack pointer */
+ st r2, r10,19*4 /* save interrupt mask */
+ jmp.n r1
+ or r2,r0,r0
diff --git a/sys/arch/mvme88k/m88k/process_machdep.c b/sys/arch/mvme88k/m88k/process_machdep.c
new file mode 100644
index 00000000000..62a3ba89dfb
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/process_machdep.c
@@ -0,0 +1,155 @@
+/* $NetBSD: process_machdep.c,v 1.5 1994/11/20 20:54:37 deraadt Exp $ */
+
+/*
+ * Copyright (c) 1993 The Regents of the University of California.
+ * Copyright (c) 1993 Jan-Simon Pendry
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Id: procfs_i386.c,v 4.1 1993/12/17 10:47:45 jsp Rel
+ */
+
+/*
+ * This file may seem a bit stylized, but that is so that it's easier to port.
+ * Functions to be implemented here are:
+ *
+ * process_read_regs(proc, regs)
+ * Get the current user-visible register set from the process
+ * and copy it into the regs structure (<machine/reg.h>).
+ * The process is stopped at the time read_regs is called.
+ *
+ * process_write_regs(proc, regs)
+ * Update the current register set from the passed in regs
+ * structure. Take care to avoid clobbering special CPU
+ * registers or privileged bits in the PSL.
+ * The process is stopped at the time write_regs is called.
+ *
+ * process_sstep(proc)
+ * Arrange for the process to trap after executing a single instruction.
+ *
+ * process_set_pc(proc)
+ * Set the process's program counter.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/vnode.h>
+#include <machine/psl.h>
+#include <machine/reg.h>
+#if 0
+#include <machine/frame.h>
+#endif
+#include <sys/ptrace.h>
+
+int
+process_read_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+#if 0
+ /* NOTE: struct reg == struct trapframe */
+ bcopy(p->p_md.md_tf, (caddr_t)regs, sizeof(struct reg));
+#endif
+ return (0);
+}
+
+int
+process_write_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+#if 0
+ int psr = p->p_md.md_tf->tf_psr & ~PSR_ICC;
+ bcopy((caddr_t)regs, p->p_md.md_tf, sizeof(struct reg));
+ p->p_md.md_tf->tf_psr = psr | (regs->r_psr & PSR_ICC);
+#endif
+ return (0);
+}
+
+int
+process_sstep(p, sstep)
+ struct proc *p;
+{
+#if 0
+ if (sstep)
+ return EINVAL;
+#endif
+ return (0);
+}
+
+int
+process_set_pc(p, addr)
+ struct proc *p;
+ caddr_t addr;
+{
+#if 0
+ p->p_md.md_tf->tf_pc = (u_int)addr;
+ p->p_md.md_tf->tf_npc = (u_int)addr + 4;
+#endif
+ return (0);
+}
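(Editorial aside, not part of the import.) Since the body above is stubbed out under #if 0, the following is one hedged sketch of what process_set_pc might eventually look like for this port. It assumes p->p_md.md_tf points at the struct m88100_saved_state trap frame (as trap.c in this import does) and that repointing sxip/snip/sfip one word apart is the right way to redirect the pipeline; neither is confirmed by the stub itself.

    /* Illustrative sketch only; field names are assumptions borrowed from trap.c. */
    int
    process_set_pc(p, addr)
    	struct proc *p;
    	caddr_t addr;
    {
    	/* assumed: md_tf holds the saved exception frame, as in trap.c */
    	struct m88100_saved_state *tf = p->p_md.md_tf;

    	tf->sxip = (unsigned)addr;	/* executing instruction pointer */
    	tf->snip = (unsigned)addr + 4;	/* next instruction pointer */
    	tf->sfip = (unsigned)addr + 8;	/* fetch instruction pointer */
    	return (0);
    }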
+
+int
+process_read_fpregs(p, regs)
+struct proc *p;
+struct fpreg *regs;
+{
+#if 0
+ extern struct fpstate initfpstate;
+ struct fpstate *statep = &initfpstate;
+
+ /* NOTE: struct fpreg == struct fpstate */
+ if (p->p_md.md_fpstate)
+ statep = p->p_md.md_fpstate;
+ bcopy(statep, regs, sizeof(struct fpreg));
+#endif
+ return 0;
+}
+
+int
+process_write_fpregs(p, regs)
+struct proc *p;
+struct fpreg *regs;
+{
+#if 0
+ if (p->p_md.md_fpstate == NULL)
+ return EINVAL;
+
+ bcopy(regs, p->p_md.md_fpstate, sizeof(struct fpreg));
+#endif
+ return 0;
+}
diff --git a/sys/arch/mvme88k/m88k/swapgeneric.c b/sys/arch/mvme88k/m88k/swapgeneric.c
new file mode 100644
index 00000000000..b0db1cf7830
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/swapgeneric.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)swapgeneric.c 7.5 (Berkeley) 5/7/91
+ * $Id: swapgeneric.c,v 1.1 1995/10/18 10:54:27 deraadt Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <sys/fcntl.h> /* XXXX and all that uses it */
+#include <sys/proc.h> /* XXXX and all that uses it */
+#include <sys/disk.h>
+
+#include "sd.h"
+#include "cd.h"
+
+/*
+ * Only boot on ufs. (XXX?)
+ */
+int ffs_mountroot();
+int (*mountroot)() = ffs_mountroot;
+
+/*
+ * Generic configuration; all in one
+ */
+dev_t rootdev = NODEV;
+dev_t dumpdev = NODEV;
+
+struct swdevt swdevt[] = {
+ { NODEV, 1, 0 },
+ { NODEV, 0, 0 },
+};
+
+#if NSD > 0
+extern struct cfdriver sdcd;
+#endif
+#if NCD > 0
+extern struct cfdriver cdcd;
+#endif
+
+struct genericconf {
+ struct cfdriver *gc_driver;
+ dev_t gc_root;
+};
+
+/*
+ * the system will assign rootdev to the first partition 'a'
+ * found with FS_BSDFFS fstype. so these should be ordered
+ * in preference of boot. however it does walk units backwards
+ * to remain compatible with the old amiga method of picking
+ * the last root found.
+ */
+struct genericconf genericconf[] = {
+#if NSD > 0
+ {&sdcd, makedev(4, 0)},
+#endif
+#if NCD > 0
+ {&cdcd, makedev(6, 0)},
+#endif
+ { 0 },
+};
+
+struct genericconf *
+getgenconf(bp)
+ char *bp;
+{
+ char *cp;
+ struct genericconf *gc;
+
+ for (;;) {
+ printf("root device> ");
+ gets(bp);
+ for (gc = genericconf; gc->gc_driver; gc++)
+ if (gc->gc_driver->cd_name[0] == bp[0] &&
+ gc->gc_driver->cd_name[1] == bp[1])
+ break;
+ if (gc->gc_driver == NULL) {
+ printf("use one of:");
+ for (gc = genericconf; gc->gc_driver; gc++)
+ printf(" %s%%d", gc->gc_driver->cd_name);
+ printf("\n");
+ continue;
+ }
+ cp = bp + 2;
+ if (*cp >= '0' && *cp <= '9')
+ break;
+ printf("bad/missing unit number\n");
+ }
+ return(gc);
+}
+
+setconf()
+{
+ struct dkdevice *dkp;
+ struct partition *pp;
+ struct genericconf *gc;
+ struct bdevsw *bdp;
+ int unit, swaponroot;
+ char name[128];
+ char *cp;
+
+ swaponroot = 0;
+
+ if (rootdev != NODEV)
+ goto justdoswap;
+
+ unit = 0;
+ if (boothowto & RB_ASKNAME) {
+ gc = getgenconf(name);
+ cp = name + 2;
+ while (*cp >= '0' && *cp <= '9')
+ unit = 10 * unit + *cp++ - '0';
+ if (*cp == '*')
+ swaponroot = 1;
+ unit &= 0x7;
+ goto found;
+ }
+ for (gc = genericconf; gc->gc_driver; gc++) {
+ for (unit = gc->gc_driver->cd_ndevs - 1; unit >= 0; unit--) {
+ if (gc->gc_driver->cd_devs[unit] == NULL)
+ continue;
+ /*
+ * this is a hack these drivers should use
+ * dk_dev and not another instance directly above.
+ */
+ dkp = (struct dkdevice *)
+ ((struct device *)gc->gc_driver->cd_devs[unit] + 1);
+ if (dkp->dk_driver == NULL ||
+ dkp->dk_driver->d_strategy == NULL)
+ continue;
+ for (bdp = bdevsw; bdp < (bdevsw + nblkdev); bdp++)
+ if (bdp->d_strategy ==
+ dkp->dk_driver->d_strategy)
+ break;
+ if (bdp->d_open(MAKEDISKDEV(major(gc->gc_root),
+ unit, 0), FREAD | FNONBLOCK, 0, curproc))
+ continue;
+ bdp->d_close(MAKEDISKDEV(major(gc->gc_root), unit,
+ 0), FREAD | FNONBLOCK, 0, curproc);
+ pp = &dkp->dk_label.d_partitions[0];
+ if (pp->p_size == 0 || pp->p_fstype != FS_BSDFFS)
+ continue;
+ goto found;
+ }
+ }
+ printf("no suitable root\n");
+ asm("or r9,r0,0x0063");
+ asm("tb0 0,r0,0x1f0");
+ /*NOTREACHED*/
+found:
+
+ gc->gc_root = MAKEDISKDEV(major(gc->gc_root), unit, 0);
+ rootdev = gc->gc_root;
+
+justdoswap:
+ swdevt[0].sw_dev = MAKEDISKDEV(major(rootdev),
+ DISKUNIT(rootdev), 1);
+ /*
+ swdevt[0].sw_dev = dumpdev = MAKEDISKDEV(major(rootdev),
+ DISKUNIT(rootdev), 1);
+ */
+ /* swap size and dumplo set during autoconfigure */
+ if (swaponroot)
+ rootdev = swdevt[0].sw_dev;
+}
+
+gets(cp)
+ char *cp;
+{
+ register char *lp;
+ register c;
+
+ lp = cp;
+ for (;;) {
+ cnputc(c = cngetc());
+ switch (c) {
+ case '\n':
+ case '\r':
+ *lp = 0;
+ return;
+ case '\b':
+ case '\177':
+ if (lp > cp) {
+ lp--;
+ cnputc(' ');
+ cnputc('\b');
+ }
+ continue;
+ case '#':
+ lp--;
+ if (lp < cp)
+ lp = cp;
+ continue;
+ case '@':
+ case 'u'&037:
+ lp = cp;
+ cnputc('\n');
+ continue;
+ default:
+ *lp++ = c;
+ }
+ }
+}
diff --git a/sys/arch/mvme88k/m88k/syscall.stub b/sys/arch/mvme88k/m88k/syscall.stub
new file mode 100644
index 00000000000..4a1055556d1
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/syscall.stub
@@ -0,0 +1,29 @@
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- xip
+ * br err <- nip
+ * jmp r1 <- fip
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+ * So, when we take syscall trap, sxip/snip/sfip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to skip nip.
+ * nip = fip, fip += 4
+ * (doesn't matter what fip + 4 will be but we will never
+ * execute this since jmp r1 at nip will change the execution flow.)
+ * 2. If the system call returned an errno > 0, plug the value
+ * in r2, and leave nip and fip unchanged. This will have us
+ * executing "br err" on return to user space.
+ * 3. If the system call code returned ERESTART or EJUSTRETURN,
+ * we need to re-execute the trap instruction. Back up the
+ * pipeline.
+ * fip = nip, nip = xip
+ */
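(Editorial aside, not part of the import.) The three cases described above are what syscall() in trap.c implements. Condensed into C, with tf being the struct m88100_saved_state of the trapping process, the adjustment looks like the sketch below; the helper name is illustrative and the r2/epsr handling is omitted.

    /* Sketch of the pipeline fix-up described above; mirrors the logic
     * at the end of syscall() in trap.c. */
    void
    syscall_return_fixup(struct m88100_saved_state *tf, int error)
    {
    	if (error == 0) {		/* case 1: success, skip the "br err" */
    		tf->snip = tf->sfip & ~3;
    		tf->sfip = tf->snip + 4;
    	} else if (error > 0) {		/* case 2: real errno, fall into "br err" */
    		tf->snip &= ~3;
    		tf->sfip &= ~3;
    	} else {			/* case 3: ERESTART/EJUSTRETURN, back up */
    		tf->sfip = tf->snip & ~3;
    		tf->snip = tf->sxip & ~3;
    	}
    }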
diff --git a/sys/arch/mvme88k/m88k/timerreg.h b/sys/arch/mvme88k/m88k/timerreg.h
new file mode 100644
index 00000000000..3bafa844c02
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/timerreg.h
@@ -0,0 +1,8 @@
+struct ticktimer {
+ u_int ttcmpreg; /* Timer compare register */
+ u_int ttcounter; /* Timer counter */
+ u_int tticr; /* Timer control register */
+};
+
+struct timers {
+};
diff --git a/sys/arch/mvme88k/m88k/trap.c b/sys/arch/mvme88k/m88k/trap.c
new file mode 100644
index 00000000000..ebdf5288c81
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/trap.c
@@ -0,0 +1,608 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h> /* kernel_map */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/syscall.h>
+#include <sys/ktrace.h>
+#include <machine/cpu.h> /* DMT_VALID, etc. */
+#include <machine/m88100.h> /* DMT_VALID, etc. */
+#include <machine/trap.h>
+#include <machine/psl.h> /* FIP_E, etc. */
+
+#include <sys/systm.h>
+
+#if (DDB)
+#include <machine/db_machdep.h>
+#endif /* DDB */
+
+int stop_on_user_memory_error = 0;
+
+#define TRAPTRACE
+#if defined(TRAPTRACE)
+unsigned traptrace = 0;
+#endif
+
+#if DDB
+#define DEBUG_MSG db_printf
+#else
+#define DEBUG_MSG printf
+#endif /* DDB */
+
+#ifdef JEFF_DEBUG
+# undef DEBUG_MSG
+# define DEBUG_MSG raw_printf
+#endif
+
+#define USERMODE(PSR) (((struct psr*)&(PSR))->psr_mode == 0)
+#define SYSTEMMODE(PSR) (((struct psr*)&(PSR))->psr_mode != 0)
+
+/* XXX MAJOR CLEANUP REQUIRED TO PORT TO BSD */
+
+char *trap_type[] = {
+ "Reset",
+ "Interrupt Exception",
+ "Instruction Access",
+ "Data Access Exception",
+ "Misaligned Access",
+ "Unimplemented Opcode",
+ "Privileg Violation",
+ "Bounds Check Violation",
+ "Illegal Integer Divide",
+ "Integer Overflow",
+ "Error Exception",
+};
+
+int trap_types = sizeof trap_type / sizeof trap_type[0];
+
+static inline void
+userret(struct proc *p, struct m88100_saved_state *frame, u_quad_t oticks)
+{
+ int sig;
+
+ /* take pending signals */
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ p->p_priority = p->p_usrpri;
+
+ if (want_ast) {
+ want_ast = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ }
+
+ if (want_resched) {
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ (void) splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ (void) spl0();
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL)
+ addupc_task(p, frame->sxip & ~3,
+ (int)(p->p_sticks - oticks));
+
+ curpriority = p->p_priority;
+}
+
+void
+panictrap(int type, struct m88100_saved_state *frame)
+{
+ static int panicing = 0;
+ if (panicing++ == 0) {
+ printf("trap type %d, v = %x, frame %x\n", type, frame->sxip & ~3, frame);
+ regdump(frame);
+ }
+ if ((u_int)type < trap_types)
+ panic(trap_type[type]);
+ panic("trap");
+ /*NOTREACHED*/
+}
+
+/*ARGSUSED*/
+void
+trap(unsigned type, struct m88100_saved_state *frame)
+{
+ struct proc *p;
+ u_quad_t sticks = 0;
+ vm_map_t map;
+ vm_offset_t va;
+ vm_prot_t ftype;
+ unsigned nss, fault_addr;
+ struct vmspace *vm;
+ int result;
+ int sig = 0;
+
+ extern vm_map_t kernel_map;
+ extern int fubail(), subail();
+
+ cnt.v_trap++;
+ if ((p = curproc) == NULL)
+ p = &proc0;
+
+ if (USERMODE(frame->epsr)) {
+ sticks = p->p_sticks;
+ type += T_USER;
+ p->p_md.md_tf = frame; /* for ptrace/signals */
+ }
+
+ switch(type)
+ {
+ default:
+ panictrap(frame->vector, frame);
+ /*NOTREACHED*/
+
+#if defined(DDB)
+ case T_KDB_BREAK:
+ /*FALLTHRU*/
+ case T_KDB_BREAK+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt(); /* turn interrupts on */
+ ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
+ db_disable_interrupt(); /* shut them back off */
+ db_splx(s);
+ return;
+ }
+ case T_KDB_ENTRY:
+ /*FALLTHRU*/
+ case T_KDB_ENTRY+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt(); /* turn interrupts on */
+ ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
+ db_disable_interrupt(); /* shut them back off */
+ db_splx(s);
+ return;
+ }
+
+#if 0
+ case T_ILLFLT:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt(); /* turn interrupts on */
+ ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
+ "error fault", (db_regs_t*)frame);
+ db_disable_interrupt(); /* shut them back off */
+ db_splx(s);
+ return;
+ }
+#endif /* 0 */
+#endif /* DDB */
+
+ case T_MISALGNFLT:
+ DEBUG_MSG("kernel misalgined "
+ "access exception @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_INSTFLT:
+ /* kernel mode instruction access fault */
+ /* XXX I think this should be illegal, but not sure. Will leave
+ * it the way it is for now. Should never, never happen for a non-paged
+ * kernel
+ */
+ /*FALLTHRU*/
+ case T_DATAFLT:
+ /* kernel mode data fault */
+ /*
+ * if the faulting address is in user space, handle it in
+ * the context of the user process. Else, use kernel map.
+ */
+
+ if (type == T_DATAFLT) {
+ fault_addr = frame->dma0;
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR))
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+ } else {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* if instruction fault or data fault on a kernel address... */
+ if ((type == T_INSTFLT) || (frame->dmt0 & DMT_DAS))
+ map = kernel_map;
+
+ /*
+ * We don't want to call vm_fault() if it is fuwintr() or
+ * suwintr(). These routines are for copying from interrupt
+ * context and vm_fault() can potentially sleep.
+ */
+
+ if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
+ p->p_addr->u_pcb.pcb_onfault == (int)subail)
+ goto outtahere;
+
+ result = vm_fault(map, va, ftype, FALSE);
+
+ if (result == KERN_SUCCESS) {
+ /*
+ * We could resolve the fault. Call data_access_emulation
+ * to drain the data unit pipeline and reset dmt0 so that
+ * trap won't get called again. For inst faults, back up
+ * the pipeline.
+ */
+ if (type == T_DATAFLT) {
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ } else {
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ return;
+ }
+
+ /* XXX Is this right? */
+ if (type == T_DATAFLT && (frame->dmt0 & DMT_DAS) == 0)
+ goto user_fault;
+
+ /*
+ * if the fault is still not resolved ...
+ */
+ if (!p->p_addr->u_pcb.pcb_onfault)
+ panictrap(frame->vector, frame);
+
+ outtahere:
+ frame->snip = ((unsigned)p->p_addr->u_pcb.pcb_onfault ) | FIP_V;
+ frame->sfip = ((unsigned)p->p_addr->u_pcb.pcb_onfault + 4) | FIP_V;
+ frame->sxip = 0;
+ frame->dmt0 = 0; /* XXX what about other trans. in data unit */
+ return;
+
+ case T_INSTFLT+T_USER:
+ /* User mode instruction access fault */
+ /*FALLTHRU*/
+ case T_DATAFLT+T_USER:
+ user_fault:
+ sig = SIGILL;
+ if (type == T_INSTFLT+T_USER)
+ fault_addr = frame->sxip & XIP_ADDR;
+ else
+ fault_addr = frame->dma0;
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR))
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ else
+ ftype = VM_PROT_READ;
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ result = vm_fault(map, va, ftype, FALSE);
+
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (result == KERN_SUCCESS) {
+ nss = clrnd(USRSTACK - va);/* XXX check this */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (result == KERN_PROTECTION_FAILURE)
+ result = KERN_INVALID_ADDRESS;
+ }
+
+ if (result == KERN_SUCCESS) {
+ if (type == T_DATAFLT+T_USER) {
+ /*
+ * We could resolve the fault. Call
+ * data_access_emulation to drain the data unit
+ * pipeline and reset dmt0 so that trap won't
+ * get called again.
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ } else {
+ /* back up SXIP, SNIP clearing the Error bit */
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ } else {
+ sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
+ }
+
+ break;
+
+ case T_MISALGNFLT+T_USER:
+ sig = SIGBUS;
+ break;
+
+ case T_PRIVINFLT+T_USER:
+ case T_ILLFLT+T_USER:
+ sig = SIGILL;
+ break;
+
+ case T_BNDFLT+T_USER:
+ case T_ZERODIV+T_USER:
+ case T_OVFFLT+T_USER:
+ sig = SIGBUS;
+ break;
+
+ case T_FPEPFLT+T_USER:
+ case T_FPEIFLT+T_USER:
+ sig = SIGFPE;
+ break;
+
+ case T_ASTFLT+T_USER:
+ want_ast = 0;
+ (void) spl0();
+ if (ssir & SIR_NET) {
+ siroff(SIR_NET);
+ cnt.v_soft++;
+ netintr();
+ }
+ if (ssir & SIR_CLOCK) {
+ siroff(SIR_CLOCK);
+ cnt.v_soft++;
+ /* XXXX softclock(&frame.f_stackadj); */
+ softclock();
+ }
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ break;
+
+ case T_SIGTRAP+T_USER:
+ break;
+
+ case T_STEPBPT+T_USER:
+ /*
+ * This trap is used by the kernel to support single-step
+ * debugging (although any user could generate this trap
+ * which should probably be handled differently). When a
+ * process is continued by a debugger with the PT_STEP
+ * function of ptrace (single step), the kernel inserts
+ * one or two breakpoints in the user process so that only
+ * one instruction (or two in the case of a delayed branch)
+ * is executed. When this breakpoint is hit, we get the
+ * T_STEPBPT trap.
+ */
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = frame->sxip; /* set up next NIP */
+ break;
+
+ case T_USERBPT+T_USER:
+ /*
+ * This trap is meant to be used by debuggers to implement
+ * breakpoint debugging. When we get this trap, we just
+ * return a signal which gets caught by the debugger.
+ */
+
+ frame->sfip = frame->snip; /* set up the next FIP */
+ frame->snip = frame->sxip; /* set up the next NIP */
+ break;
+
+ }
+
+ /*
+ * If trap from supervisor mode, just return
+ */
+ if (SYSTEMMODE(frame->epsr))
+ return;
+
+ if (sig) {
+ trapsignal(p, sig, frame->vector);
+ /*
+ * don't want multiple faults - we are going to
+ * deliver signal.
+ */
+ frame->dmt0 = 0;
+ }
+
+ userret(p, frame, sticks);
+}
+
+void error_fault(struct m88100_saved_state *frame)
+{
+ DEBUG_MSG("\n[ERROR FAULT (Bad News[tm]) frame 0x%08x]\n", frame);
+#if DDB
+ gimmeabreak();
+ DEBUG_MSG("[you really can't restart after an error fault.]\n");
+ gimmeabreak();
+#endif /* DDB */
+}
+
+syscall(u_int code, struct m88100_saved_state *tf)
+{
+ register int i, nsys, *ap, nap;
+ register struct sysent *callp;
+ register struct proc *p;
+ int error, new;
+ struct args {
+ int i[8];
+ } args;
+ int rval[2];
+ u_quad_t sticks;
+ extern struct pcb *curpcb;
+
+ cnt.v_syscall++;
+
+ p = curproc; /* must be set before p->p_emul is dereferenced */
+ callp = p->p_emul->e_sysent;
+ nsys = p->p_emul->e_nsysent;
+
+#ifdef DIAGNOSTIC
+ if (USERMODE(tf->epsr) == 0)
+ panic("syscall");
+ if (curpcb != &p->p_addr->u_pcb)
+ panic("syscall curpcb/ppcb");
+ if (tf != (struct trapframe *)((caddr_t)curpcb))
+ panic("syscall trapframe");
+#endif
+
+ sticks = p->p_sticks;
+ p->p_md.md_tf = tf;
+
+ /*
+ * For 88k, all the arguments are passed in the registers (r2-r12)
+ * For syscall (and __syscall), r2 (and r3) has the actual code.
+ * __syscall takes a quad syscall number, so that other
+ * arguments are at their natural alignments.
+ */
+ ap = &tf->r[2];
+ nap = 6;
+
+ switch (code) {
+ case SYS_syscall:
+ code = *ap++;
+ nap--;
+ break;
+ case SYS___syscall:
+ if (callp != sysent)
+ break;
+ code = ap[_QUAD_LOWWORD];
+ ap += 2;
+ nap -= 2;
+ break;
+ }
+
+ /* Callp currently points to syscall, which returns ENOSYS. */
+
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else {
+ callp += code;
+ i = callp->sy_narg;
+ if (i > 8)
+ panic("syscall nargs");
+ /*
+ * just copy them; syscall stub made sure all the
+ * args are moved from user stack to registers.
+ */
+ bcopy((caddr_t)ap, (caddr_t)args.i, i * 4);
+ }
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
+#endif
+ rval[0] = 0;
+ rval[1] = 0; /* doesn't seem to be used anywhere */
+ error = (*callp->sy_call)(p, &args, rval);
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- xip
+ * br err <- nip
+ * jmp r1 <- fip
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+ * So, when we take syscall trap, sxip/snip/sfip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to skip nip.
+ * nip = fip, fip += 4
+ * (doesn't matter what fip + 4 will be but we will never
+ * execute this since jmp r1 at nip will change the execution flow.)
+ * 2. If the system call returned an errno > 0, plug the value
+ * in r2, and leave nip and fip unchanged. This will have us
+ * executing "br err" on return to user space.
+ * 3. If the system call code returned ERESTART or EJUSTRETURN,
+ * we need to re-execute the trap instruction. Back up the
+ * pipeline.
+ * fip = nip, nip = xip
+ */
+
+ if (error == 0) {
+ /*
+ * If fork succeeded and we are the child, our stack
+ * has moved and the pointer tf is no longer valid,
+ * and p is wrong. Compute the new trapframe pointer.
+ * (The trap frame invariably resides at the
+ * tippity-top of the u. area.)
+ */
+ p = curproc;
+ tf = USER_REGS(p);
+ tf->r[2] = 0;
+ tf->epsr &= ~PSR_C;
+ tf->snip = tf->sfip & ~3;
+ tf->sfip = tf->snip + 4;
+ } else if (error > 0 /*error != ERESTART && error != EJUSTRETURN*/) {
+bad:
+ tf->r[2] = error;
+ tf->epsr |= PSR_C; /* fail */
+ tf->snip = tf->snip & ~3;
+ tf->sfip = tf->sfip & ~3;
+ } else {
+ /* if (error == ERESTART || error == EJUSTRETURN)
+ back up the pipe line */
+ tf->sfip = tf->snip & ~3;
+ tf->snip = tf->sxip & ~3;
+ }
+ userret(p, tf, sticks);
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, code, error, rval[0]);
+#endif
+}
+
+#if MACH_PCSAMPLE > 0
+#include "mach_pcsample.h"
+/*
+ * return saved state for interrupted user thread
+ */
+unsigned interrupted_pc(p)
+proc *p;
+{
+ struct m88100_saved_state *frame = &p->pcb->user_state;
+ unsigned sxip = frame->sxip;
+ unsigned PC = sxip & ~3; /* clear lower bits which are flags... */
+ return PC;
+}
+#endif /* MACH_PCSAMPLE > 0*/
diff --git a/sys/arch/mvme88k/m88k/vm_machdep.c b/sys/arch/mvme88k/m88k/vm_machdep.c
new file mode 100644
index 00000000000..f7bc68de766
--- /dev/null
+++ b/sys/arch/mvme88k/m88k/vm_machdep.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 1993 Adam Glass
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
+ * from: @(#)vm_machdep.c 7.10 (Berkeley) 5/7/91
+ * vm_machdep.c,v 1.3 1993/07/07 07:09:32 cgd Exp
+ * $Id: vm_machdep.c,v 1.1 1995/10/18 10:54:27 deraadt Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/user.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <machine/cpu.h>
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the kernel stack and pcb, making the child
+ * ready to run, and marking it so that it can return differently
+ * than the parent. Returns 1 in the child process, 0 in the parent.
+ * We currently double-map the user area so that the stack is at the same
+ * address in each process; in the future we will probably relocate
+ * the frame pointers on the stack after copying.
+ */
+cpu_fork(struct proc *p1, struct proc *p2)
+{
+ register struct user *up = p2->p_addr;
+ int off, ssz;
+ caddr_t sp;
+ extern caddr_t getsp();
+ extern char kstack[];
+
+ p2->p_md.md_tf = p1->p_md.md_tf;
+
+ /*
+ * Copy pcb and stack from proc p1 to p2.
+ * We do this as cheaply as possible, copying only the active
+ * part of the stack. The stack and pcb need to agree;
+ * this is tricky, as the final pcb is constructed by savectx,
+ * but its frame isn't yet on the stack when the stack is copied.
+ * cpu_switch compensates for this when the child eventually runs.
+ * This should be done differently, with a single call
+ * that copies and updates the pcb+stack,
+ * replacing the bcopy and savectx.
+ */
+ p2->p_addr->u_pcb = p1->p_addr->u_pcb;
+ sp = getsp();
+ ssz = (unsigned int)UADDR + UPAGES * NBPG - (unsigned int)sp;
+ off = (unsigned int)sp - (unsigned int)UADDR;
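+ /*
+ * off is the offset of the current stack pointer within the u. area,
+ * and ssz is the number of bytes from sp up to the top of the u.
+ * area; only the active part of the kernel stack is copied below.
+ */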
+#if 0
+ bcopy((caddr_t)(UADDR + off), (caddr_t)((unsigned int)p2->p_addr + off),
+ ssz);
+#endif /* 0 */
+ /* copy from UADDR to p2 */
+ memcpy((caddr_t)((unsigned int)p2->p_addr + off),
+ (caddr_t)(UADDR + off), ssz);
+ save_u_area(p2, p2->p_addr);
+ PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);
+
+ /*
+ * Arrange for a non-local goto when the new process
+ * is started, to resume here, returning nonzero from setjmp.
+ */
+ if (savectx(up, 1)) {
+ /*
+ * Return 1 in child.
+ */
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * cpu_exit is called as the last action during exit.
+ * We release the address space and machine-dependent resources,
+ * including the memory for the user structure and kernel stack.
+ * Once finished, we call switch_exit, which switches to a temporary
+ * pcb and stack and never returns. We block memory allocation
+ * until switch_exit has made things safe again.
+ */
+volatile void
+cpu_exit(struct proc *p)
+{
+ extern volatile void switch_exit();
+ vmspace_free(p->p_vmspace);
+
+ (void) splimp();
+ kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
+ switch_exit(p);
+ /* NOTREACHED */
+}
+
+int
+cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred)
+{
+
+ return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
+ (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
+ p));
+}
+
+/*
+ * Finish a swapin operation.
+ * We need to update the cached PTEs for the user area in the
+ * machine-dependent part of the proc structure.
+ */
+
+void
+cpu_swapin(struct proc *p)
+{
+ save_u_area(p, (vm_offset_t)p->p_addr);
+}
+
+extern vm_map_t phys_map;
+
+/*
+ * Map an IO request into kernel virtual address space. Requests fall into
+ * one of five categories:
+ *
+ * B_PHYS|B_UAREA: User u-area swap.
+ * Address is relative to start of u-area (p_addr).
+ * B_PHYS|B_PAGET: User page table swap.
+ * Address is a kernel VA in usrpt (Usrptmap).
+ * B_PHYS|B_DIRTY: Dirty page push.
+ * Address is a VA in proc2's address space.
+ * B_PHYS|B_PGIN: Kernel pagein of user pages.
+ * Address is VA in user's address space.
+ * B_PHYS: User "raw" IO request.
+ * Address is VA in user's address space.
+ *
+ * All requests are (re)mapped into kernel VA space via the useriomap
+ * (a name with only slightly more meaning than "kernelmap")
+ *
+ * XXX we allocate KVA space by using kmem_alloc_wait which we know
+ * allocates space without backing physical memory. This implementation
+ * is a total crock, the multiple mappings of these physical pages should
+ * be reflected in the higher-level VM structures to avoid problems.
+ */
+void
+vmapbuf(struct buf *bp)
+{
+ register int npf;
+ register caddr_t addr;
+ register long flags = bp->b_flags;
+ struct proc *p;
+ int off;
+ vm_offset_t kva;
+ register vm_offset_t pa;
+
+ if ((flags & B_PHYS) == 0)
+ panic("vmapbuf");
+ addr = bp->b_saveaddr = bp->b_data;
+ off = (int)addr & PGOFSET;
+ p = bp->b_proc;
+ npf = btoc(round_page(bp->b_bcount + off));
+
+ /*
+ * Why phys_map? kernelmap should be OK - after all, we are
+ * mapping a user va to a kernel va, or remapping some kernel
+ * va to another kernel va. XXX -nivas
+ */
+
+ kva = kmem_alloc_wait(phys_map, ctob(npf));
+ bp->b_data = (caddr_t) (kva + off);
+ while (npf--) {
+ pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
+ (vm_offset_t)addr);
+ if (pa == 0)
+ panic("vmapbuf: null page frame");
+ pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
+ VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ addr += PAGE_SIZE;
+ kva += PAGE_SIZE;
+ }
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_data.
+ */
+void
+vunmapbuf(struct buf *bp)
+{
+ register caddr_t addr;
+ register int npf;
+ vm_offset_t kva;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vunmapbuf");
+ addr = bp->b_data;
+ npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
+ kva = (vm_offset_t)((int)addr & ~PGOFSET);
+ kmem_free_wakeup(phys_map, kva, ctob(npf));
+ bp->b_data = bp->b_saveaddr;
+ bp->b_saveaddr = NULL;
+}
+
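+/*
+ * Illustrative sketch only (hypothetical caller, not part of this
+ * interface): the expected pairing of vmapbuf()/vunmapbuf() around a
+ * raw transfer, using the same struct buf fields vmapbuf() consumes
+ * above.
+ */
+#if 0
+static void
+example_raw_io(struct buf *bp)
+{
+ vmapbuf(bp); /* b_data now points into phys_map KVA */
+ /* ... perform the transfer using bp->b_data and bp->b_bcount ... */
+ vunmapbuf(bp); /* unmap and restore b_data from b_saveaddr */
+}
+#endif /* 0 */
+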
+caddr_t
+obio_vm_alloc(int npages)
+{
+ vm_size_t size;
+ vm_offset_t addr;
+ int result;
+
+ if (npages == 0)
+ return NULL;
+ size = npages * NBPG;
+ addr = vm_map_min(phys_map);
+ result = vm_map_find(phys_map, NULL, (vm_offset_t)0, &addr, size, TRUE);
+ if (result != KERN_SUCCESS)
+ return NULL;
+ vm_map_lock(phys_map);
+ vm_map_delete(phys_map, addr, addr+size);
+ vm_map_unlock(phys_map);
+ return (caddr_t) addr;
+}
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of CLSIZE.
+ */
+void
+pagemove(caddr_t from, caddr_t to, int size)
+{
+ register vm_offset_t pa;
+
+#ifdef DEBUG
+ if (size & CLOFSET)
+ panic("pagemove");
+#endif
+ while (size > 0) {
+ pa = pmap_extract(kernel_pmap, (vm_offset_t)from);
+#ifdef DEBUG
+ if (pa == 0)
+ panic("pagemove 2");
+ if (pmap_extract(kernel_pmap, (vm_offset_t)to) != 0)
+ panic("pagemove 3");
+#endif
+ pmap_remove(kernel_pmap,
+ (vm_offset_t)from, (vm_offset_t)from + NBPG);
+ pmap_enter(kernel_pmap,
+ (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
+ from += NBPG;
+ to += NBPG;
+ size -= NBPG;
+ }
+}
diff --git a/sys/arch/mvme88k/stand/Makefile b/sys/arch/mvme88k/stand/Makefile
new file mode 100644
index 00000000000..39788d542fb
--- /dev/null
+++ b/sys/arch/mvme88k/stand/Makefile
@@ -0,0 +1,4 @@
+SUBDIRS = libbug kerncrt boot bugexec
+SUBDIR = libbug kerncrt boot bugexec
+
+.include <bsd.prog.mk>
diff --git a/sys/arch/mvme88k/stand/boot/Makefile b/sys/arch/mvme88k/stand/boot/Makefile
new file mode 100644
index 00000000000..df343d81f70
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/Makefile
@@ -0,0 +1,30 @@
+all: boot boot.out
+CFLAGS+=-fwritable-strings -I${.CURDIR}/../include
+CFLAGS+=-I${.CURDIR}/../.. -I${.CURDIR}/../../machine
+CFLAGS+=-I/usr/src/sys
+LDFLAGS+= -L ${.CURDIR}/../libbug -L/usr/local/lib
+BOOT=FC0000
+#BOOT=1000000
+
+LIBBUG!= cd $(.CURDIR)/../libbug; \
+ printf "xxx:\n\techo \$${.OBJDIR}/libbug.a\n" | ${MAKE} -r -s -f - xxx
+
+LDADD+=${LIBBUG} #/usr/local/lib/libgcc.a
+SRCS+=bugcrt.c bugio.c main.c
+
+.PATH: ${.CURDIR}/../bugcrt ${.CURDIR}/../libbug ${.CURDIR}/../../../../lib/libc_sa ${.CURDIR}/${MACHINE_ARCH}
+
+boot: bugcrt.o main.o bcopy.o memset.o printf.o ${LIBBUG}
+# ld -o {.TARGET} -x -n -Ttext ${BOOT} bugcrt.o bugio.o main.o bcopy.o memset.o printf.o /usr/local/lib/libgcc.a
+ ld -o ${.TARGET} -x -N -Ttext ${BOOT} ${.ALLSRC} ${LDADD}
+
+boot.out:
+ ${.CURDIR}/wrtvid ${.OBJDIR}/boot && mv ${.OBJDIR}/boot.? ${.CURDIR}
+
+#main.o: main.c
+# ${CC} ${CFLAGS} -c -O ${.ALLSRC}
+# ${LD} -x -r ${.TARGET}
+# ${LD} -x ${.TARGET}
+# mv a.out ${.TARGET}
+
+.include <bsd.prog.mk>
diff --git a/sys/arch/mvme88k/stand/boot/boot.1 b/sys/arch/mvme88k/stand/boot/boot.1
new file mode 100644
index 00000000000..03aa7928309
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/boot.1
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/boot.2 b/sys/arch/mvme88k/stand/boot/boot.2
new file mode 100644
index 00000000000..f1ebc35b445
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/boot.2
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/foo b/sys/arch/mvme88k/stand/boot/foo
new file mode 100644
index 00000000000..53aac89a35e
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/foo
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/foo.1 b/sys/arch/mvme88k/stand/boot/foo.1
new file mode 100644
index 00000000000..53aac89a35e
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/foo.1
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/foo.2 b/sys/arch/mvme88k/stand/boot/foo.2
new file mode 100644
index 00000000000..dcef04fabfb
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/foo.2
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/main.c b/sys/arch/mvme88k/stand/boot/main.c
new file mode 100644
index 00000000000..0759c9f2644
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/main.c
@@ -0,0 +1,264 @@
+#include <sys/param.h>
+#include <sys/reboot.h>
+#include "bug.h"
+#include "bugio.h"
+#include "machine/exec.h"
+
+int readblk __P((int, char *));
+int loados __P((void));
+void putchar __P((char));
+void _main __P((void));
+void tapefileseek __P((int));
+
+char Clun, Dlun;
+
+#define DEV_BSIZE 512
+#define KERNEL_LOAD_ADDR 0x10000
+#if !defined(BUG_BLKSIZE)
+#define BUG_BLKSIZE 256
+#endif /* BUG_BLKSIZE */
+#define sec2blk(x) ((x) * (DEV_BSIZE/BUG_BLKSIZE))
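+/* with DEV_BSIZE 512 and BUG_BLKSIZE 256, sec2blk(n) is simply 2 * n */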
+
+struct kernel {
+ void *entry;
+ void *symtab;
+ void *esym;
+ int bflags;
+ int bdev;
+ char *kname;
+ void *smini;
+ void *emini;
+ unsigned int end_loaded;
+} kernel;
+
+int howto = 0;
+int bootdev = 0;
+int *miniroot;
+
+void
+putchar(char c)
+{
+ bugoutchr(c);
+}
+
+main(struct bugenv *env)
+{
+ printf("Clun %x Dlun %x\n", env->clun, env->dlun);
+ Clun = (char)env->clun;
+ Dlun = (char)env->dlun;
+ loados();
+ return;
+}
+
+
+loados(void)
+{
+ int i, size;
+ register char *loadaddr = (char *)KERNEL_LOAD_ADDR; /* load addr 64k*/
+ struct exec *hdr;
+ int (*fptr)();
+ int *esym;
+ int cnt, strtablen, ret;
+ char *addr;
+
+ howto |= RB_SINGLE|RB_KDB;
+
+ tapefileseek(2); /* seek to file 2 - the OS */
+ if (readblk(1, loadaddr) == -1) {
+ printf("Unable to read blk 0\n");
+ return 1;
+ }
+ hdr = (struct exec *)loadaddr;
+
+ /* We only deal with ZMAGIC files */
+ if ((int)hdr->a_entry != (int)(loadaddr + sizeof(struct exec))) {
+ printf("a_entry != loadaddr + exec size\n");
+ }
+ size = hdr->a_text + hdr->a_data;
+ size -= DEV_BSIZE; /* account for the block already read */
+
+ printf("Loading [%x+%x", hdr->a_text, hdr->a_data);
+ if (readblk(size / DEV_BSIZE, loadaddr + DEV_BSIZE) == -1) {
+ printf("Error reading the OS\n");
+ return 1;
+ }
+
+ /* zero out BSS */
+
+ printf("+%x]", hdr->a_bss);
+#if DEBUG
+ printf("zero'd out %x (%x)\n", loadaddr + hdr->a_text + hdr->a_data,
+ hdr->a_bss);
+#endif
+ memset(loadaddr + hdr->a_text + hdr->a_data, 0, hdr->a_bss);
+
+ addr = loadaddr + hdr->a_text + hdr->a_data + hdr->a_bss;
+
+ if (hdr->a_syms != 0 /* && !(kernel.bflags & RB_NOSYM)*/) {
+ /*
+ * DDB expects the following layout:
+ * no. of syms
+ * symbols
+ * size of strtab
+ * entries of strtab
+ * esym->...
+ * Since the size of strtab is stored as part of strtab
+ * itself, we only need to prepend the size of symtab to
+ * satisfy ddb.
+ * esym is expected to point past the last byte of the
+ * string table, rounded up to an int.
+ */
+ bcopy(&hdr->a_syms, addr, sizeof(hdr->a_syms));
+ addr += 4; /* account for a_syms copied above */
+ printf (" + [ %x",hdr->a_syms);
+
+ cnt = (hdr->a_syms + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+
+ ret = readblk(cnt / DEV_BSIZE, addr);
+ if (ret != 0) {
+ printf("unable to load kernel\n");
+ return 1;
+ }
+
+ esym = (void *) ((int)addr + hdr->a_syms);
+
+ if ((int)addr + cnt <= (int)esym) {
+ printf("missed loading count of symbols\n\r");
+ return 1;
+ }
+
+ addr += cnt;
+
+ strtablen = *esym;
+#if 0
+ printf("start load %x end load %x %x\n", addr,
+ len, addr +len);
+ printf("esym %x *esym %x\n",esym, len);
+#endif
+ /*
+ * If symbol table size is not a sector multiple, we
+ * already read part of the string table. Look at the
+ * part already read, and figure out the string table
+ * size. Also, adjust the size yet to read.
+ */
+ if (hdr->a_syms != cnt) {
+ /* already read part of the string table */
+ strtablen -= (cnt - hdr->a_syms);
+ }
+
+ if (strtablen > 0) {
+ printf(" + %x",*esym);
+
+ cnt = (strtablen + DEV_BSIZE -1) & ~(DEV_BSIZE - 1);
+
+ ret = readblk(cnt / DEV_BSIZE, addr);
+ if (ret != 0) {
+ printf("unable to load kernel\n");
+ return 1;
+ }
+ addr += strtablen;
+ printf(" ]\n");
+ } else {
+ printf("+ %x ]\n", *esym);
+ }
+ esym = (int *)(((int)esym) + *esym);
+ esym = (int *)(((int)esym + 4 - 1) & ~3);
+
+ kernel.symtab = (void *)hdr->a_syms;
+ kernel.esym = esym;
+ } else {
+ kernel.symtab = 0;
+ kernel.esym = 0;
+ }
+
+ kernel.end_loaded = (unsigned int)addr;
+ miniroot = (int *)esym;
+ miniroot = (int *)(((int)miniroot + 0x1000 - 1) & ~0xFFF);
+ tapefileseek(3); /* seek to file 3 - miniroot */
+ if (readblk(1000, miniroot) != 0) {
+ printf("miniroot not loaded\n");
+ addr = (char *)miniroot;
+ } else {
+ addr = (char *)((int)miniroot + 1000 * DEV_BSIZE);
+ }
+ printf("esym %x miniroot @ %x (ends @ %x)\n", esym, miniroot, addr);
+#if 0
+ {
+ char *symaddr = (char *)0x01F00000;
+ int i;
+
+ tapefileseek(4); /* seek to file 4 - syms */
+ readblk(1, symaddr);
+ i = *symaddr;
+ i = (i * 0x1C + 4 + DEV_BSIZE) & ~(DEV_BSIZE - 1);
+ printf("loading %d symbols (%d sectors)\n",
+ *symaddr, (i + 1) * DEV_BSIZE);
+ readblk(i / DEV_BSIZE, symaddr + DEV_BSIZE);
+ readblk(100, 0x01F00000);
+ }
+#endif
+
+ fptr = (int (*)())hdr->a_entry;
+ /*
+ * Args are passed as
+ * r2 howto
+ * r3 end addr
+ * r4 (Clun << 8) | (Dlun & 0xFF)
+ * r5 esym
+ * r6 miniroot
+ */
+ bootdev = ((Clun << 8) & 0xFF00 | Dlun & 0xFF) & 0xFFFF;
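+ /* e.g. (illustrative values) Clun 0x00 and Dlun 0x40 yield bootdev 0x0040 */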
+#if 0
+ asm volatile ("or r2, r0, %0\n\tor r3, r0, %1\n\tor r4, r0, %2\n\tor r5, r0, %3\n\tor r6, r0, %4\n\tor r7, r0, %5"
+ : /* no outputs */
+ : "r" (howto), "r" (addr), "r" (Clun), "r" (Dlun), "r" (esym), "r" (miniroot)
+ : "r2", "r3", "r4", "r5", "r6", "r7");
+#endif /* 0 */
+ (*fptr)(howto, addr, bootdev, esym, miniroot);
+ return 0;
+}
+
+int
+readblk(int n, char *addr)
+{
+ struct bugdisk_io io;
+
+ io.clun = Clun;
+ io.dlun = Dlun;
+ io.status = 0;
+ io.addr = (void *)addr;
+ io.fileno = 0; /* for tape reads, start io at current pos */
+ io.nblks = sec2blk(n);
+ io.flag = IGNOREFILENO;
+ io.am = 0;
+ bugdskrd(&io);
+ if (io.status)
+ return -1;
+ return 0;
+}
+
+void
+_main(void)
+{
+ return;
+}
+
+void
+tapefileseek(int i)
+{
+ struct bugdisk_io io;
+ void *addr = (void *)KERNEL_LOAD_ADDR; /* some number - don't care */
+
+ io.clun = Clun;
+ io.dlun = Dlun;
+ io.status = 0;
+ io.addr = addr;
+ io.fileno = i; /* for tape reads, this is the file no. */
+ io.nblks = 0;
+ io.flag = 0; /* we want to turn off IFN and EOF bits */
+ io.am = 0;
+ bugdskrd(&io);
+}
+
+__main()
+{
+}
diff --git a/sys/arch/mvme88k/stand/boot/wrtvid b/sys/arch/mvme88k/stand/boot/wrtvid
new file mode 100644
index 00000000000..647a59d253f
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/wrtvid
Binary files differ
diff --git a/sys/arch/mvme88k/stand/boot/wrtvid.c b/sys/arch/mvme88k/stand/boot/wrtvid.c
new file mode 100644
index 00000000000..6161ccaa85a
--- /dev/null
+++ b/sys/arch/mvme88k/stand/boot/wrtvid.c
@@ -0,0 +1,108 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "vid.h"
+
+#define sec2blk(x) ((x) * 2)
+
+main(int argc, char **argv)
+{
+ struct vid *pvid;
+ struct cfg *pcfg;
+ struct stat stat;
+ int exe_file;
+ int tape_vid;
+ int tape_exe;
+ unsigned int exe_addr;
+ unsigned short exe_addr_u;
+ unsigned short exe_addr_l;
+ char *filename;
+ char fileext[256];
+
+ if (argc < 2){
+ filename = "a.out";
+ } else {
+ filename = argv[1];
+ }
+ exe_file = open(filename, O_RDONLY,0444);
+ if (exe_file == -1)
+ {
+ printf("file %s does not exist\n",filename);
+ exit(2);
+ }
+ sprintf (fileext,"%s%s",filename,".1");
+ tape_vid = open(fileext, O_WRONLY|O_CREAT|O_TRUNC,0644);
+ sprintf (fileext,"%s%s",filename,".2");
+ tape_exe = open(fileext, O_WRONLY|O_CREAT|O_TRUNC,0644);
+
+ pvid = (struct vid *) malloc(sizeof (struct vid));
+
+ memset(pvid,0,sizeof(struct vid));
+
+ strcpy(pvid->vid_id, "NBSD");
+
+ fstat (exe_file,&stat);
+ /* OS length: strip the 0x20-byte a.out header, round up to 512-byte */
+ /* sectors, then convert to the BUG's 256-byte blocks via sec2blk() */
+
+ pvid->vid_oss = 1;
+ pvid->vid_osl = (short)sec2blk((stat.st_size - 0x20 + 511) / 512);
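+ /* e.g. (hypothetical size) a 262,176-byte a.out: (262176 - 0x20 + 511)/512 */
+ /* gives 512 sectors, and sec2blk(512) = 1024 256-byte blocks for vid_osl */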
+
+ lseek(exe_file,0x14,SEEK_SET);
+ read(exe_file,&exe_addr,4);
+ {
+ union {
+ struct {
+ short osa_u;
+ short osa_l;
+ } osa_u_l;
+ int osa;
+ } u;
+ u.osa = exe_addr;
+ pvid->vid_osa_u = u.osa_u_l.osa_u;
+ pvid->vid_osa_l = u.osa_u_l.osa_l;
+ }
+ pvid->vid_cas = 1;
+ pvid->vid_cal = 1;
+ /* vid_mot is not null terminated; copy 7 chars and patch in the */
+ /* trailing 'A' so we do not write past the end of the structure */
+ strcpy(pvid->vid_mot,"MOTOROL");
+ pvid->vid_mot[7] = 'A';
+
+ write(tape_vid,pvid,sizeof(struct vid));
+
+ free(pvid);
+
+ pcfg = (struct cfg *) malloc (sizeof(struct cfg));
+
+ memset(pcfg,0,sizeof(struct cfg));
+
+ pcfg->cfg_rec = 0x100;
+ pcfg->cfg_psm = 0x200;
+
+ write(tape_vid,pcfg,sizeof(struct cfg));
+
+ free(pcfg);
+
+ copy_exe(exe_file,tape_exe);
+ close (exe_file);
+ close (tape_vid);
+ close (tape_exe);
+}
+
+#define BUF_SIZ 512
+copy_exe(exe_file,tape_exe)
+{
+ char *buf;
+ int cnt = 0;
+
+ buf = (char *)malloc (BUF_SIZ);
+
+ lseek (exe_file,0x20,SEEK_SET);
+ while (BUF_SIZ == (cnt = read(exe_file, buf , BUF_SIZ))) {
+ write (tape_exe,buf,cnt);
+ }
+ memset (&buf[cnt],0,BUF_SIZ-cnt);
+ write (tape_exe,buf,BUF_SIZ);
+}
diff --git a/sys/arch/mvme88k/stand/bugcrt/Makefile b/sys/arch/mvme88k/stand/bugcrt/Makefile
new file mode 100644
index 00000000000..821ec7508d4
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugcrt/Makefile
@@ -0,0 +1,10 @@
+OBJ=bugcrt.o
+CFLAGS+=-I${.CURDIR}/../include
+CFLAGS+=-I${.CURDIR}/../..
+CFLAGS+=-I/usr/src/sys
+CFLAGS+=-fwritable-strings
+
+SRCS=bugcrt.c
+all: bugcrt.o
+
+.include <bsd.prog.mk>
diff --git a/sys/arch/mvme88k/stand/bugcrt/bugcrt.c b/sys/arch/mvme88k/stand/bugcrt/bugcrt.c
new file mode 100644
index 00000000000..5353f981db9
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugcrt/bugcrt.c
@@ -0,0 +1,40 @@
+#include "bug.h"
+
+asm (" text");
+/*asm ("_stack: word _stack0xFC0000; stack");*/
+asm ("stack: word stack");
+asm (" word _start");
+asm (" align 8");
+
+struct bugenv bugenv;
+
+start()
+{
+ register int dlun asm("r2");
+ register int clun asm("r3");
+ register int ipl asm("r4");
+ register int (*entryptr)() asm("r6");
+ register int *cfg asm("r7");
+ register char *strstr asm("r8");
+ register char *endstr asm("r9");
+ int i;
+ char *str;
+
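+ /*
+ * The register bindings above (r2-r9) capture the values the BUG
+ * firmware leaves in those registers when it transfers control to
+ * the booted image, before compiled code can clobber them.
+ */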
+asm ("; enable SFU1");
+asm (" ldcr r10,cr1");
+asm (" xor r10,r10,0x8");
+asm (" stcr r10,cr1");
+
+ bugenv.clun = clun;
+ bugenv.dlun = dlun;
+ bugenv.ipl = ipl;
+ bugenv.entry= entryptr;
+
+ for (str = strstr, i = 0; str < endstr; str++, i++) {
+ bugenv.bootargs[i] = *str;
+ }
+ bugenv.bootargs[i] = 0;
+
+ main(&bugenv);
+ bugreturn();
+}
diff --git a/sys/arch/mvme88k/stand/bugexec/Makefile b/sys/arch/mvme88k/stand/bugexec/Makefile
new file mode 100644
index 00000000000..3ed0ff02de6
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugexec/Makefile
@@ -0,0 +1,26 @@
+all: hello
+SRCS= hello.c
+OBJS= hello.o
+
+CFLAGS+=-I${.CURDIR}/include -I${.CURDIR}/${MACHINE_ARCH}
+CFLAGS+=-I${.CURDIR}/../include -I${.CURDIR}/../.. -I/usr/src/sys
+CFLAGS+=-fwritable-strings
+
+LIBBUG!= cd $(.CURDIR)/../libbug; \
+ printf "xxx:\n\techo \$${.OBJDIR}/libbug.a\n" | ${MAKE} -r -s -f - xxx
+
+BUGCRT!= cd $(.CURDIR)/../bugcrt; \
+ printf "xxx:\n\techo \$${.OBJDIR}/bugcrt.o\n" | ${MAKE} -r -s -f - xxx
+
+KERNCRT!= cd $(.CURDIR)/../kerncrt; \
+ printf "xxx:\n\techo \$${.OBJDIR}/kerncrt.o\n" | ${MAKE} -r -s -f - xxx
+
+LDADD+=${LIBBUG} /usr/local/lib/libgcc.a
+
+hello: $(OBJS) ${LIBBUG}
+ ${LD} -x -Ttext 10020 ${KERNCRT} $(OBJS) ${LDADD} -o ${.TARGET}
+clean:
+ rm -f a.out *.core
+ rm -f hello.o hello.bug hello.bug.1 hello.bug.2
+
+.include <bsd.prog.mk>
diff --git a/sys/arch/mvme88k/stand/bugexec/hello b/sys/arch/mvme88k/stand/bugexec/hello
new file mode 100644
index 00000000000..8670fd2b147
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugexec/hello
Binary files differ
diff --git a/sys/arch/mvme88k/stand/bugexec/hello.c b/sys/arch/mvme88k/stand/bugexec/hello.c
new file mode 100644
index 00000000000..08ab75d2a27
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugexec/hello.c
@@ -0,0 +1,54 @@
+#include "bug.h"
+#include "bugio.h"
+
+void putchar __P((char));
+int bcd2int __P((unsigned int));
+
+void
+putchar(char c)
+{
+ bugoutchr(c);
+}
+
+main(struct bugenv *env)
+{
+ struct bugrtc rtc;
+ struct bugbrdid brdid;
+
+ bugrtcrd(&rtc);
+ printf("From RTC:\n");
+ printf("Year %d\tMonth %d\tDay %d\tDay of Week %d\n",
+ bcd2int(rtc.Y), bcd2int(rtc.M), bcd2int(rtc.D), bcd2int(rtc.d));
+ printf("Hour %d\tMin %d\tSec %d\tCal %d\n",
+ bcd2int(rtc.H), bcd2int(rtc.m), bcd2int(rtc.s), bcd2int(rtc.c));
+ printf("From BRDID:\n");
+ bugbrdid(&brdid);
+/* printf("Eye catcher %c%c%c%c\n", brdid.eye[0], brdid.eye[1],
+ brdid.eye[2], brdid.eye[3]); */
+ printf("Board no %d (%d) \tsuffix %c%c\n", bcd2int(brdid.brdno),
+ brdid.brdno, brdid.brdsuf[0], brdid.brdsuf[1]);
+/* printf("Clun %x\tdlun %x\n", brdid.clun, brdid.dlun); */
+ return 0;
+}
+
+ipow(int base, int i)
+{
+ int cnt = 1;
+ while (i--) {
+ cnt *= base;
+ }
+ return cnt;
+}
+
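+/*
+ * bcd2int() below converts a packed-BCD value one nibble at a time;
+ * for example (illustrative value) bcd2int(0x95) accumulates
+ * 5*1 + 9*10 = 95.
+ */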
+int
+bcd2int(unsigned int i)
+{
+ unsigned val = 0;
+ int cnt = 0;
+ while (i) {
+ val += (i&0xf) * ipow(10,cnt);
+ cnt++;
+ i >>= 4;
+ }
+ return val;
+}
diff --git a/sys/arch/mvme88k/stand/bugexec/wrtos.c b/sys/arch/mvme88k/stand/bugexec/wrtos.c
new file mode 100644
index 00000000000..1e01b697399
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugexec/wrtos.c
@@ -0,0 +1,64 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "vid.h"
+
+#define sec2blk(x) ((x) * 2)
+#define BUF_SIZ 512
+
+main(int argc, char **argv)
+{
+ struct vid *pvid;
+ struct cfg *pcfg;
+ struct stat stat;
+ int exe_file;
+ int tape_vid;
+ int tape_exe;
+ char *filename;
+ char fileext[256];
+ char hdrbuf[BUF_SIZ];
+
+ if (argc < 2){
+ filename = "a.out";
+ } else {
+ filename = argv[1];
+ }
+ exe_file = open(filename, O_RDONLY,0444);
+ if (exe_file == -1)
+ {
+ printf("file %s does not exist\n",filename);
+ exit(2);
+ }
+ sprintf (fileext,"%s%s",filename,".1");
+ tape_vid = open(fileext, O_WRONLY|O_CREAT|O_TRUNC,0644);
+ sprintf (fileext,"%s%s",filename,".2");
+ tape_exe = open(fileext, O_WRONLY|O_CREAT|O_TRUNC,0644);
+
+ lseek(exe_file,0,SEEK_SET);
+ memset (hdrbuf,0,BUF_SIZ);
+ read(exe_file,hdrbuf, 0x20); /* read the header */
+
+ write(tape_vid,hdrbuf,BUF_SIZ);
+
+ copy_exe(exe_file,tape_exe);
+ close (exe_file);
+ close (tape_vid);
+ close (tape_exe);
+}
+
+copy_exe(exe_file,tape_exe)
+{
+ char *buf;
+ int cnt = 0;
+
+ buf = (char *)malloc (BUF_SIZ);
+
+ lseek (exe_file,0x20,SEEK_SET);
+ while (BUF_SIZ == (cnt = read(exe_file, buf , BUF_SIZ))) {
+ write (tape_exe,buf,cnt);
+ }
+ memset (&buf[cnt],0,BUF_SIZ-cnt);
+ write (tape_exe,buf,BUF_SIZ);
+}
diff --git a/sys/arch/mvme88k/stand/bugexec/xyz b/sys/arch/mvme88k/stand/bugexec/xyz
new file mode 100644
index 00000000000..dbdee90a4e1
--- /dev/null
+++ b/sys/arch/mvme88k/stand/bugexec/xyz
Binary files differ
diff --git a/sys/arch/mvme88k/stand/include/bug.h b/sys/arch/mvme88k/stand/include/bug.h
new file mode 100644
index 00000000000..93fe1e6ccd5
--- /dev/null
+++ b/sys/arch/mvme88k/stand/include/bug.h
@@ -0,0 +1,8 @@
+struct bugenv {
+ int clun;
+ int dlun;
+ int ipl;
+ int (*entry)();
+ char bootargs[256];
+};
+
diff --git a/sys/arch/mvme88k/stand/include/bugio.h b/sys/arch/mvme88k/stand/include/bugio.h
new file mode 100644
index 00000000000..74bb77bd336
--- /dev/null
+++ b/sys/arch/mvme88k/stand/include/bugio.h
@@ -0,0 +1,62 @@
+#include "sys/cdefs.h"
+
+struct bugdisk_io {
+ char clun;
+ char dlun;
+ short status;
+ void *addr;
+ int blkno;
+#define fileno blkno
+ short nblks;
+ char flag;
+#define FILEMARKFLAG 0x80
+#define IGNOREFILENO 0x02
+#define ENDOFFILE 0x01
+ char am;
+};
+
+/* values are in BCD {upper nibble+lower nibble} */
+
+struct bugrtc {
+ unsigned char Y;
+ unsigned char M;
+ unsigned char D;
+ unsigned char d;
+ unsigned char H;
+ unsigned char m;
+ unsigned char s;
+ unsigned char c;
+};
+
+/* Board ID - lots of info */
+
+struct bugbrdid {
+ unsigned char eye[4];
+ char rev;
+ char month;
+ char day;
+ char year;
+ short packetsize;
+ short dummy;
+ short brdno;
+ unsigned char brdsuf[2];
+ char options[3];
+ char family:4;
+ char cpu:4;
+ short clun;
+ short dlun;
+ short type;
+ short dev;
+ int option;
+};
+
+char buginchr __P((void));
+int buginstat __P((void));
+int bugoutchr __P((unsigned char));
+int bugoutstr __P((char *, char *));
+int bugpcrlf __P((void));
+int bugdskrd __P((struct bugdisk_io *));
+int bugdskwr __P((struct bugdisk_io *));
+int bugrtcrd __P((struct bugrtc *));
+int bugreturn __P((void));
+int bugbrdid __P((struct bugbrdid *));
diff --git a/sys/arch/mvme88k/stand/kerncrt/Makefile b/sys/arch/mvme88k/stand/kerncrt/Makefile
new file mode 100644
index 00000000000..9645605f8aa
--- /dev/null
+++ b/sys/arch/mvme88k/stand/kerncrt/Makefile
@@ -0,0 +1,9 @@
+OBJ=kerncrt.o
+CFLAGS+=-I${.CURDIR}/../include
+CFLAGS+=-I${.CURDIR}/../..
+CFLAGS+=-I/usr/src/sys
+
+SRCS=kerncrt.c
+all: kerncrt.o
+
+.include <bsd.prog.mk>
diff --git a/sys/arch/mvme88k/stand/kerncrt/kerncrt.c b/sys/arch/mvme88k/stand/kerncrt/kerncrt.c
new file mode 100644
index 00000000000..a5f04d2398a
--- /dev/null
+++ b/sys/arch/mvme88k/stand/kerncrt/kerncrt.c
@@ -0,0 +1,11 @@
+#include "bug.h"
+start(struct bugenv *bugarea)
+{
+ main(bugarea);
+ bugreturn();
+}
+
+__main()
+{
+ return;
+}
diff --git a/sys/arch/mvme88k/stand/libbug/Makefile b/sys/arch/mvme88k/stand/libbug/Makefile
new file mode 100644
index 00000000000..065f4b015a1
--- /dev/null
+++ b/sys/arch/mvme88k/stand/libbug/Makefile
@@ -0,0 +1,28 @@
+LIB=bug
+
+CFLAGS+=-I${.CURDIR}/../include
+CFLAGS+=-I${.CURDIR}/../../include
+CFLAGS+=-I${.CURDIR}/../..
+CFLAGS+=-I/usr/src/sys
+CFLAGS+=-fwritable-strings
+
+SRCS+=bugio.c
+#SRCS+=bugcrt.c bugio.c main.c
+
+.if (${MACHINE_ARCH} == "m68k")
+SRCS+=mvme147.c bcopy.c memset.c
+.endif
+.if (${MACHINE_ARCH} == "m88k")
+SRCS+=bcopy.c memset.c printf.c
+.endif
+
+.PATH: ${.CURDIR}/../../../../lib/libc_sa ${.CURDIR}/${MACHINE_ARCH}
+
+all: bugio.o
+
+#bugio.o: bugio.c
+# ${CC} ${CFLAGS} -c -O ${.ALLSRC}
+# ${LD} -x -r ${.TARGET}
+# mv a.out ${.TARGET}
+
+.include <bsd.lib.mk>
diff --git a/sys/arch/mvme88k/stand/libbug/bugio.c b/sys/arch/mvme88k/stand/libbug/bugio.c
new file mode 100644
index 00000000000..6406dac3e13
--- /dev/null
+++ b/sys/arch/mvme88k/stand/libbug/bugio.c
@@ -0,0 +1,101 @@
+#include "bugio.h"
+
+#define INCHR "0x0000"
+#define INSTAT "0x0001"
+#define INLN "0x0002"
+#define READSTR "0x0003"
+#define READLN "0x0004"
+#define DSKRD "0x0010"
+#define DSKWR "0x0011"
+#define DSKCFIG "0x0012"
+#define OUTCHR "0x0020"
+#define PCRLF "0x0026"
+#define TMDISP "0x0042"
+#define DELAY "0x0043"
+#define RTC_DSP "0x0052"
+#define RTC_RD "0x0053"
+#define RETURN "0x0063"
+#define BRD_ID "0x0070"
+#define BUGTRAP "0x01F0"
+
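+/*
+ * Pattern used by all of the wrappers below: load the BUG function
+ * code into r9, trap into the ROM with "tb0 0,r0,0x1F0", and read
+ * any result back from r2.
+ */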
+char
+buginchr(void)
+{
+ register int cc asm("r2");
+ asm("or r9,r0," INCHR);
+ asm("tb0 0,r0,0x1F0");
+ /*asm("or %0,r0,r2" : "=r" (cc) : );*/
+ return ((char)cc & 0xFF);
+}
+
+/* return 1 if not empty else 0 */
+
+buginstat(void)
+{
+ int ret;
+ asm("or r9,r0," INSTAT);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return (ret & 0x40 ? 1 : 0);
+}
+
+bugoutchr(unsigned char c)
+{
+ unsigned char cc;
+
+ if ((cc = c) == '\n') {
+ bugpcrlf();
+ return;
+ }
+ asm("or r2,r0,%0" : : "r" (cc));
+ asm("or r9,r0," OUTCHR);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugpcrlf(void)
+{
+ asm("or r9,r0," PCRLF);
+ asm("tb0 0,r0,0x1F0");
+}
+/* return 0 on success */
+
+bugdskrd(struct bugdisk_io *arg)
+{
+ int ret;
+ asm("or r9,r0, " DSKRD);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return ((ret&0x4) == 0x4 ? 1 : 0);
+}
+
+/* return 0 on success */
+
+bugdskwr(struct bugdisk_io *arg)
+{
+ int ret;
+ asm("or r9,r0, " DSKWR);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ret) : );
+ return ((ret&0x4) == 0x4 ? 1 : 0);
+}
+
+bugrtcrd(struct bugrtc *rtc)
+{
+ asm("or r9,r0, " RTC_RD);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugreturn(void)
+{
+ asm("or r9,r0, " RETURN);
+ asm("tb0 0,r0,0x1F0");
+}
+
+bugbrdid(struct bugbrdid *id)
+{
+ struct bugbrdid *ptr;
+ asm("or r9,r0, " BRD_ID);
+ asm("tb0 0,r0,0x1F0");
+ asm("or %0,r0,r2" : "=r" (ptr) : );
+ bcopy(ptr, id, sizeof(struct bugbrdid));
+}