Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/mips64/conf/files.mips64 | 31
-rw-r--r--  sys/arch/mips64/include/ansi.h | 87
-rw-r--r--  sys/arch/mips64/include/archtype.h | 88
-rw-r--r--  sys/arch/mips64/include/asm.h | 292
-rw-r--r--  sys/arch/mips64/include/cdefs.h | 14
-rw-r--r--  sys/arch/mips64/include/cpu.h | 525
-rw-r--r--  sys/arch/mips64/include/cpustate.h | 146
-rw-r--r--  sys/arch/mips64/include/db_machdep.h | 84
-rw-r--r--  sys/arch/mips64/include/disklabel.h | 87
-rw-r--r--  sys/arch/mips64/include/dlfcn.h | 61
-rw-r--r--  sys/arch/mips64/include/ecoff_machdep.h | 98
-rw-r--r--  sys/arch/mips64/include/endian.h | 50
-rw-r--r--  sys/arch/mips64/include/exception.h | 79
-rw-r--r--  sys/arch/mips64/include/float.h | 80
-rw-r--r--  sys/arch/mips64/include/frame.h | 204
-rw-r--r--  sys/arch/mips64/include/ieee.h | 136
-rw-r--r--  sys/arch/mips64/include/ieeefp.h | 25
-rw-r--r--  sys/arch/mips64/include/internal_types.h | 8
-rw-r--r--  sys/arch/mips64/include/kcore.h | 45
-rw-r--r--  sys/arch/mips64/include/kdbparam.h | 79
-rw-r--r--  sys/arch/mips64/include/limits.h | 98
-rw-r--r--  sys/arch/mips64/include/link.h | 74
-rw-r--r--  sys/arch/mips64/include/memconf.h | 50
-rw-r--r--  sys/arch/mips64/include/mips_opcode.h | 297
-rw-r--r--  sys/arch/mips64/include/param.h | 177
-rw-r--r--  sys/arch/mips64/include/pcb.h | 69
-rw-r--r--  sys/arch/mips64/include/pio.h | 125
-rw-r--r--  sys/arch/mips64/include/pmap.h | 129
-rw-r--r--  sys/arch/mips64/include/proc.h | 70
-rw-r--r--  sys/arch/mips64/include/profile.h | 88
-rw-r--r--  sys/arch/mips64/include/ptrace.h | 50
-rw-r--r--  sys/arch/mips64/include/reg.h | 61
-rw-r--r--  sys/arch/mips64/include/regdef.h | 77
-rw-r--r--  sys/arch/mips64/include/regnum.h | 124
-rw-r--r--  sys/arch/mips64/include/reloc.h | 38
-rw-r--r--  sys/arch/mips64/include/setjmp.h | 12
-rw-r--r--  sys/arch/mips64/include/signal.h | 87
-rw-r--r--  sys/arch/mips64/include/spinlock.h | 10
-rw-r--r--  sys/arch/mips64/include/stdarg.h | 251
-rw-r--r--  sys/arch/mips64/include/trap.h | 127
-rw-r--r--  sys/arch/mips64/include/types.h | 96
-rw-r--r--  sys/arch/mips64/include/varargs.h | 54
-rw-r--r--  sys/arch/mips64/include/vmparam.h | 142
-rw-r--r--  sys/arch/mips64/mips64/busdma.c | 642
-rw-r--r--  sys/arch/mips64/mips64/cache_r5k.S | 1009
-rw-r--r--  sys/arch/mips64/mips64/clock.c | 421
-rw-r--r--  sys/arch/mips64/mips64/context.S | 434
-rw-r--r--  sys/arch/mips64/mips64/cp0access.S | 205
-rw-r--r--  sys/arch/mips64/mips64/cpu.c | 241
-rw-r--r--  sys/arch/mips64/mips64/cpu_ecoff.c | 95
-rw-r--r--  sys/arch/mips64/mips64/db_disasm.c | 414
-rw-r--r--  sys/arch/mips64/mips64/db_machdep.c | 656
-rw-r--r--  sys/arch/mips64/mips64/disksubr.c | 553
-rw-r--r--  sys/arch/mips64/mips64/exception.S | 681
-rw-r--r--  sys/arch/mips64/mips64/fp.S | 3612
-rw-r--r--  sys/arch/mips64/mips64/interrupt.c | 607
-rw-r--r--  sys/arch/mips64/mips64/lcore_access.S | 581
-rw-r--r--  sys/arch/mips64/mips64/lcore_ddb.S | 184
-rw-r--r--  sys/arch/mips64/mips64/lcore_float.S | 519
-rw-r--r--  sys/arch/mips64/mips64/mainbus.c | 192
-rw-r--r--  sys/arch/mips64/mips64/mem.c | 234
-rw-r--r--  sys/arch/mips64/mips64/pmap.c | 1683
-rw-r--r--  sys/arch/mips64/mips64/process_machdep.c | 118
-rw-r--r--  sys/arch/mips64/mips64/sendsig.c | 282
-rw-r--r--  sys/arch/mips64/mips64/sys_machdep.c | 129
-rw-r--r--  sys/arch/mips64/mips64/tlbhandler.S | 647
-rw-r--r--  sys/arch/mips64/mips64/trap.c | 1379
-rw-r--r--  sys/arch/mips64/mips64/vm_machdep.c | 296
68 files changed, 20339 insertions, 0 deletions
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
new file mode 100644
index 00000000000..c67836f6251
--- /dev/null
+++ b/sys/arch/mips64/conf/files.mips64
@@ -0,0 +1,31 @@
+# $OpenBSD: files.mips64,v 1.1 2004/08/06 20:56:01 pefo Exp $
+
+file arch/mips64/mips64/cpu_ecoff.c
+file arch/mips64/mips64/disksubr.c
+file arch/mips64/mips64/mem.c
+file arch/mips64/mips64/process_machdep.c
+file arch/mips64/mips64/sys_machdep.c
+file arch/mips64/mips64/vm_machdep.c
+
+file arch/mips64/mips64/clock.c
+file arch/mips64/mips64/cpu.c
+file arch/mips64/mips64/busdma.c
+file arch/mips64/mips64/interrupt.c
+file arch/mips64/mips64/mainbus.c
+file arch/mips64/mips64/pmap.c
+file arch/mips64/mips64/sendsig.c
+file arch/mips64/mips64/trap.c
+
+file arch/mips64/mips64/cache_r5k.S
+file arch/mips64/mips64/context.S
+file arch/mips64/mips64/cp0access.S
+file arch/mips64/mips64/exception.S
+file arch/mips64/mips64/fp.S
+file arch/mips64/mips64/lcore_access.S
+file arch/mips64/mips64/lcore_float.S
+file arch/mips64/mips64/tlbhandler.S
+
+file arch/mips64/mips64/db_disasm.c ddb
+file arch/mips64/mips64/db_machdep.c ddb
+file arch/mips64/mips64/lcore_ddb.S ddb|debug
+
diff --git a/sys/arch/mips64/include/ansi.h b/sys/arch/mips64/include/ansi.h
new file mode 100644
index 00000000000..c80b1bc0650
--- /dev/null
+++ b/sys/arch/mips64/include/ansi.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: ansi.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ */
+
+#ifndef _MIPS_ANSI_H_
+#define _MIPS_ANSI_H_
+
+/*
+ * Types which are fundamental to the implementation and may appear in
+ * more than one standard header are defined here. Standard headers
+ * then use:
+ * #ifdef _BSD_SIZE_T_
+ * typedef _BSD_SIZE_T_ size_t;
+ * #undef _BSD_SIZE_T_
+ * #endif
+ */
+#define _BSD_CLOCK_T_ int /* clock() */
+#define _BSD_PTRDIFF_T_ long /* ptr1 - ptr2 */
+#if defined(_LP64)
+#define _BSD_SIZE_T_ unsigned long /* sizeof() */
+#else
+#define _BSD_SIZE_T_ unsigned int /* sizeof() */
+#endif
+#define _BSD_SSIZE_T_ long /* byte count or error */
+#define _BSD_TIME_T_ int /* time() */
+#define _BSD_VA_LIST_ char * /* va_list */
+#define _BSD_CLOCKID_T_ int
+#define _BSD_TIMER_T_ int
+
+/*
+ * Runes (wchar_t) is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you
+ * lose a bit of ANSI conformance, but your programs will still work.
+ *
+ * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t
+ * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains
+ * defined for ctype.h.
+ */
+#define _BSD_WCHAR_T_ int /* wchar_t */
+#define _BSD_WINT_T_ int /* wint_t */
+#define _BSD_RUNE_T_ int /* rune_t */
+
+/*
+ * We describe off_t here so its declaration can be visible to
+ * stdio without pulling in all of <sys/types.h>, thus appeasing ANSI.
+ */
+#define _BSD_OFF_T_ long long /* file offset */
+
+#endif /* !_MIPS_ANSI_H_ */
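
The comment at the top of ansi.h describes the guard-macro protocol the _BSD_*_T_ definitions follow. A minimal sketch of a consuming standard header (illustration only, not part of this commit; the real OpenBSD headers differ in detail):

/* Sketch of a consuming header, e.g. a simplified <stddef.h>. */
#include <machine/ansi.h>

#ifdef _BSD_PTRDIFF_T_
typedef _BSD_PTRDIFF_T_ ptrdiff_t;
#undef _BSD_PTRDIFF_T_
#endif

#ifdef _BSD_SIZE_T_
typedef _BSD_SIZE_T_ size_t;
#undef _BSD_SIZE_T_
#endif

/*
 * Whichever standard header is included first emits the typedef and
 * removes the guard, so a later header testing the same macro does not
 * redefine the type.
 */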
diff --git a/sys/arch/mips64/include/archtype.h b/sys/arch/mips64/include/archtype.h
new file mode 100644
index 00000000000..1d87f2cd6a9
--- /dev/null
+++ b/sys/arch/mips64/include/archtype.h
@@ -0,0 +1,88 @@
+/* $OpenBSD: archtype.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+/*
+ * Copyright (c) 1997-2003 Opsycon AB, Sweden (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS_ARCHTYPE_H_
+#define _MIPS_ARCHTYPE_H_
+/*
+ * Define architectural identities for the different Mips machines.
+ */
+#define ARC_CLASS 0x0000 /* Arch class ARC */
+#define ACER_PICA_61 0x0001 /* Acer Labs Pica 61 */
+#define MAGNUM 0x0002 /* Mips MAGNUM R4000 */
+#define DESKSTATION_RPC44 0x0003 /* Deskstation xxx */
+#define DESKSTATION_TYNE 0x0004 /* Deskstation xxx */
+#define NKK_AQUARIUS 0x0005 /* NKK R4{67}00 PC */
+#define NEC_R94 0x0006 /* NEC Magnum class */
+#define SNI_RM200 0x0007 /* Siemens Nixdorf RM200 */
+
+#define SGI_CLASS 0x0010 /* Silicon Graphics Class */
+#define SGI_CRIMSON 0x0011 /* Crimson */
+#define SGI_ONYX 0x0012 /* Onyx (!S model Challenge) */
+#define SGI_INDIGO 0x0013 /* Indigo */
+#define SGI_POWER 0x0014 /* POWER Challenge, POWER Onyx */
+#define SGI_INDY 0x0015 /* Indy, Indigo2, Challenge S */
+#define SGI_POWER10 0x0016 /* POWER Challenge R10k */
+#define SGI_POWERI 0x0017 /* POWER Indigo2 */
+#define SGI_O2 0x0018 /* O2/Moosehead */
+#define SGI_OCTANE 0x0019 /* Octane */
+
+#define ALGOR_CLASS 0x0020 /* Algorithmics Class */
+#define ALGOR_P4032 0x0021 /* ALGORITHMICS P-4032 */
+#define ALGOR_P5064 0x0022 /* ALGORITHMICS P-5064 */
+
+#define GALILEO_CLASS 0x0030 /* Galileo PCI based Class */
+#define GALILEO_G9 0x0031 /* Galileo GT-64011 Eval board */
+#define GALILEO_EV64240 0x0032 /* Galileo EV64240 Eval board */
+#define GALILEO_EV64340 0x0033 /* Galileo EV64340 Eval board */
+
+#define MOMENTUM_CLASS 0x0040 /* Momentum Inc Class */
+#define MOMENTUM_CP7000 0x0041 /* Momentum Ocelot */
+#define MOMENTUM_CP7000G 0x0042 /* Momentum Ocelot-G */
+#define MOMENTUM_JAGUAR 0x0043 /* Momentum Jaguar ATX */
+
+#define WG_CLASS 0x0050 /* Willowglen class */
+#define WG4308 0x0052 /* Willowglen 4308 LMD */
+#define WG4309 0x0053 /* Willowglen 4309 LMD */
+#define WG4409 0x0054 /* Willowglen 4409 LMD */
+#define WG8138 0x0055 /* Willowglen 8138 523x VME card */
+#define WG8168 0x0056 /* Willowglen 8168 5231 VME card */
+#define WG6000 0x0057 /* Willowglen CPU-6000 */
+#define WG7000 0x0058 /* Willowglen CPU-7000 */
+#define WG8200 0x0059 /* Willowglen CPU-8200 */
+#define WG8232 0x005a /* Willowglen CPU-8232 */
+
+#define MISC_CLASS 0x00F0 /* Misc machines... */
+#define LAGUNA 0x00F1 /* Heurikon Laguna VME board */
+
+#define ARCHCLASS(n) ((n) & 0xf0)
+
+#endif /* !_MIPS_ARCHTYPE_H_ */
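
Because every identifier within a class shares its high nibble, ARCHCLASS() lets code test for a whole board family with a single comparison. A small illustration (not part of this commit):

#include <machine/archtype.h>

/* Any SGI_* value (SGI_O2, SGI_OCTANE, ...) masks back to SGI_CLASS. */
int
is_sgi_system(int system_type)
{
	return ARCHCLASS(system_type) == SGI_CLASS;
}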
diff --git a/sys/arch/mips64/include/asm.h b/sys/arch/mips64/include/asm.h
new file mode 100644
index 00000000000..8816f1227c9
--- /dev/null
+++ b/sys/arch/mips64/include/asm.h
@@ -0,0 +1,292 @@
+/* $OpenBSD: asm.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2002 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _MIPS64_ASM_H
+#define _MIPS64_ASM_H
+
+#include <machine/regdef.h>
+
+#define _MIPS_ISA_MIPS1 1 /* R2000/R3000 */
+#define _MIPS_ISA_MIPS2 2 /* R4000/R6000 */
+#define _MIPS_ISA_MIPS3 3 /* R4000 */
+#define _MIPS_ISA_MIPS4 4 /* TFP (R1x000) */
+
+#if !defined(ABICALLS) && !defined(_NO_ABICALLS)
+#define ABICALLS .abicalls
+#endif
+
+#if defined(ABICALLS) && !defined(_KERNEL)
+ ABICALLS
+#endif
+
+#define _C_LABEL(x) x /* XXX Obsolete but keep for a while */
+
+#if !defined(__MIPSEL__) && !defined(__MIPSEB__)
+#error "__MIPSEL__ or __MIPSEB__ must be defined"
+#endif
+/*
+ * Define how to access unaligned data word
+ */
+#if defined(__MIPSEL__)
+#define LWLO lwl
+#define LWHI lwr
+#define SWLO swl
+#define SWHI swr
+#define LDLO ldl
+#define LDHI ldr
+#define SDLO sdl
+#define SDHI sdr
+#endif
+#if defined(__MIPSEB__)
+#define LWLO lwr
+#define LWHI lwl
+#define SWLO swr
+#define SWHI swl
+#define LDLO ldr
+#define LDHI ldl
+#define SDLO sdr
+#define SDHI sdl
+#endif
+
+/*
+ * Define programming environment for ABI.
+ */
+#if defined(ABICALLS) && !defined(_KERNEL) && !defined(_STANDALONE)
+
+#ifndef _MIPS_SIM
+#define _MIPS_SIM 1
+#define _ABIO32 1
+#endif
+#ifndef _MIPS_ISA
+#define _MIPS_ISA 2
+#define _MIPS_ISA_MIPS2 2
+#endif
+
+#if (_MIPS_SIM == _ABIO32) || (_MIPS_SIM == _ABI32)
+#define NARGSAVE 4
+
+#define SETUP_GP \
+ .set noreorder; \
+ .cpload t9; \
+ .set reorder;
+
+#define SAVE_GP(x) \
+ .cprestore x
+
+#endif
+
+#if (_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32)
+#define NARGSAVE 0
+
+#define SETUP_GP
+#define SAVE_GP(x)
+#endif
+
+#else /* defined(ABICALLS) && !defined(_KERNEL) */
+
+#define NARGSAVE 4
+#define SETUP_GP
+#define SAVE_GP(x)
+
+#endif
+
+#define ALIGNSZ 16 /* Stack layout alignment */
+
+#define FRAMESZ(sz) (((sz) + (ALIGNSZ-1)) & ~(ALIGNSZ-1))
+
+/*
+ * Basic register operations based on selected ISA
+ */
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2)
+#define REGSZ 4 /* 32 bit mode register size */
+#define LOGREGSZ 2 /* log rsize */
+#define REG_S sw
+#define REG_L lw
+#define CF_SZ 24 /* Call frame size */
+#define CF_ARGSZ 16 /* Call frame arg size */
+#define CF_RA_OFFS 20 /* Call ra save offset */
+#define _MIPS_SZPTR 32
+#endif
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4)
+#define REGSZ 8 /* 64 bit mode register size */
+#define LOGREGSZ 3 /* log rsize */
+#define REG_S sd
+#define REG_L ld
+#define CF_SZ 48 /* Call frame size (multiple of ALIGNSZ) */
+#define CF_ARGSZ 32 /* Call frame arg size */
+#define CF_RA_OFFS 40 /* Call ra save offset */
+#define _MIPS_SZPTR 64
+#endif
+
+#if (_MIPS_SZPTR == 32)
+#define PTR_L lw
+#define PTR_S sw
+#define PTR_SUB sub
+#define PTR_ADD add
+#define PTR_SUBU subu
+#define PTR_ADDU addu
+#define LI li
+#define LA la
+#define PTR_SLL sll
+#define PTR_SRL srl
+#define PTR_VAL .word
+#endif
+
+#if (_MIPS_SZPTR == 64)
+#define PTR_L ld
+#define PTR_S sd
+#define PTR_ADD dadd
+#define PTR_SUB dsub
+#define PTR_SUBU dsubu
+#define PTR_ADDU daddu
+#define LI dli
+#define LA dla
+#define PTR_SLL dsll
+#define PTR_SRL dsrl
+#define PTR_VAL .dword
+#endif
+
+/*
+ * Define -pg profile entry code.
+ */
+#if defined(XGPROF) || defined(XPROF)
+#define MCOUNT \
+ subu sp, sp, 32; \
+ SAVE_GP(16); \
+ sw ra, 28(sp); \
+ sw gp, 24(sp); \
+ .set noat; \
+ .set noreorder; \
+ move AT, ra; \
+ jal _mcount; \
+ subu sp, sp, 8; \
+ lw ra, 28(sp); \
+ addu sp, sp, 32; \
+ .set reorder; \
+ .set at;
+#else
+#define MCOUNT
+#endif
+
+/*
+ * LEAF(x)
+ *
+ * Declare a leaf routine.
+ */
+#define LEAF(x) \
+ .align 3; \
+ .globl x; \
+ .ent x, 0; \
+x: ; \
+ .frame sp, 0, ra; \
+ SETUP_GP \
+ MCOUNT
+
+#define ALEAF(x) \
+ .globl x; \
+x:
+
+/*
+ * NLEAF(x)
+ *
+ * Declare a non-profiled leaf routine.
+ */
+#define NLEAF(x) \
+ .align 3; \
+ .globl x; \
+ .ent x, 0; \
+x: ; \
+ .frame sp, 0, ra; \
+ SETUP_GP
+
+/*
+ * NON_LEAF(x)
+ *
+ * Declare a non-leaf routine (a routine that makes other C calls).
+ */
+#define NON_LEAF(x, fsize, retpc) \
+ .align 3; \
+ .globl x; \
+ .ent x, 0; \
+x: ; \
+ .frame sp, fsize, retpc; \
+ SETUP_GP \
+ MCOUNT
+
+/*
+ * NNON_LEAF(x)
+ *
+ * Declare a non-profiled non-leaf routine
+ * (a routine that makes other C calls).
+ */
+#define NNON_LEAF(x, fsize, retpc) \
+ .align 3; \
+ .globl x; \
+ .ent x, 0; \
+x: ; \
+ .frame sp, fsize, retpc \
+ SETUP_GP
+
+/*
+ * END(x)
+ *
+ * Mark end of a procedure.
+ */
+#define END(x) \
+ .end x
+
+/*
+ * Macros to panic and printf from assembly language.
+ */
+#define PANIC(msg) \
+ la a0, 9f; \
+ jal panic; \
+ nop ; \
+ MSG(msg)
+
+#define PRINTF(msg) \
+ la a0, 9f; \
+ jal printf; \
+ nop ; \
+ MSG(msg)
+
+#define MSG(msg) \
+ .rdata; \
+9: .asciiz msg; \
+ .text
+
+#define ASMSTR(str) \
+ .asciiz str; \
+ .align 3
+
+#endif /* !_MIPS64_ASM_H */
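
FRAMESZ() rounds a requested stack-frame size up to the next ALIGNSZ (16-byte) boundary before it is used in a .frame directive. The same arithmetic, expressed in C purely for illustration:

#include <assert.h>

/* Same round-up-to-16 computation as FRAMESZ() in asm.h. */
static unsigned int
framesz(unsigned int sz)
{
	return (sz + 15) & ~15u;
}

int
main(void)
{
	assert(framesz(48) == 48);	/* already aligned: unchanged */
	assert(framesz(52) == 64);	/* e.g. CF_SZ (48) + one 4-byte local */
	return 0;
}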
diff --git a/sys/arch/mips64/include/cdefs.h b/sys/arch/mips64/include/cdefs.h
new file mode 100644
index 00000000000..6c61428cdcc
--- /dev/null
+++ b/sys/arch/mips64/include/cdefs.h
@@ -0,0 +1,14 @@
+/* $OpenBSD: cdefs.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+#ifndef _MIPS_CDEFS_H_
+#define _MIPS_CDEFS_H_
+
+#define _C_LABEL(x) _STRING(x)
+
+#define __weak_alias(alias,sym) \
+ __asm__(".weak " __STRING(alias) " ; " __STRING(alias) " = " __STRING(sym))
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning." __STRING(sym) " ; .ascii \"" msg "\" ; .text")
+#define __indr_references(sym,msg) /* nothing */
+
+#endif /* !_MIPS_CDEFS_H_ */
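
__weak_alias() emits an assembler directive that exports `alias` as a weak symbol bound to `sym`, which lets applications interpose their own definition. A usage sketch with hypothetical names (not part of this commit):

#include <sys/cdefs.h>

/* Internal implementation under a reserved name... */
int
_frobnicate(int x)		/* hypothetical function, for illustration */
{
	return x + 1;
}

/* ...exported to applications as an overridable weak symbol. */
__weak_alias(frobnicate, _frobnicate);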
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
new file mode 100644
index 00000000000..c3172d4639a
--- /dev/null
+++ b/sys/arch/mips64/include/cpu.h
@@ -0,0 +1,525 @@
+/* $OpenBSD: cpu.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell and Rick Macklem.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: @(#)cpu.h 8.4 (Berkeley) 1/4/94
+ */
+
+#ifndef _MIPS_CPU_H_
+#define _MIPS_CPU_H_
+
+#include <machine/psl.h>
+
+#if defined(_LP64)
+#define KSEG0_BASE 0xffffffff80000000
+#define KSEG1_BASE 0xffffffffa0000000
+#define KSSEG_BASE 0xffffffffc0000000
+#define KSEG3_BASE 0xffffffffe0000000
+#else
+#define KSEG0_BASE 0x80000000
+#define KSEG1_BASE 0xa0000000
+#define KSSEG_BASE 0xc0000000
+#define KSEG3_BASE 0xe0000000
+#endif
+#define KSEG_SIZE 0x20000000
+
+#define KSEG0_TO_PHYS(x) ((u_long)(x) & 0x1fffffff)
+#define KSEG1_TO_PHYS(x) ((u_long)(x) & 0x1fffffff)
+#define PHYS_TO_KSEG0(x) ((u_long)(x) | KSEG0_BASE)
+#define PHYS_TO_KSEG1(x) ((u_long)(x) | KSEG1_BASE)
+#define PHYS_TO_KSEG3(x) ((u_long)(x) | KSEG3_BASE)
+
+#ifdef _KERNEL
+
+/*
+ * Status register.
+ */
+#define SR_XX 0x80000000
+#define SR_COP_USABILITY 0x30000000 /* CP0 and CP1 only */
+#define SR_COP_0_BIT 0x10000000
+#define SR_COP_1_BIT 0x20000000
+#define SR_RP 0x08000000
+#define SR_FR_32 0x04000000
+#define SR_RE 0x02000000
+#define SR_BOOT_EXC_VEC 0x00400000
+#define SR_TLB_SHUTDOWN 0x00200000
+#define SR_SOFT_RESET 0x00100000
+#define SR_DIAG_CH 0x00040000
+#define SR_DIAG_CE 0x00020000
+#define SR_DIAG_DE 0x00010000
+#define SR_KX 0x00000080
+#define SR_SX 0x00000040
+#define SR_UX 0x00000020
+#define SR_KSU_MASK 0x00000018
+#define SR_KSU_USER 0x00000010
+#define SR_KSU_SUPER 0x00000008
+#define SR_KSU_KERNEL 0x00000000
+#define SR_ERL 0x00000004
+#define SR_EXL 0x00000002
+#define SR_INT_ENAB 0x00000001
+
+#define SR_INT_MASK 0x0000ff00
+#define SOFT_INT_MASK_0 0x00000100
+#define SOFT_INT_MASK_1 0x00000200
+#define SR_INT_MASK_0 0x00000400
+#define SR_INT_MASK_1 0x00000800
+#define SR_INT_MASK_2 0x00001000
+#define SR_INT_MASK_3 0x00002000
+#define SR_INT_MASK_4 0x00004000
+#define SR_INT_MASK_5 0x00008000
+/*
+ * Interrupt control register in RM7000. Expansion of interrupts.
+ */
+#define IC_INT_MASK 0x00003f00 /* Two msb reserved */
+#define IC_INT_MASK_6 0x00000100
+#define IC_INT_MASK_7 0x00000200
+#define IC_INT_MASK_8 0x00000400
+#define IC_INT_MASK_9 0x00000800
+#define IC_INT_TIMR 0x00001000 /* 12 Timer */
+#define IC_INT_PERF 0x00002000 /* 13 Performance counter */
+#define IC_INT_TE 0x00000080 /* Timer on INT11 */
+
+#define ALL_INT_MASK ((IC_INT_MASK << 8) | SR_INT_MASK)
+#define SOFT_INT_MASK (SOFT_INT_MASK_0 | SOFT_INT_MASK_1)
+#define HW_INT_MASK (ALL_INT_MASK & ~SOFT_INT_MASK)
+
+
+/*
+ * The bits in the cause register.
+ *
+ * CR_BR_DELAY Exception happened in branch delay slot.
+ * CR_COP_ERR Coprocessor error.
+ * CR_IP Interrupt pending bits defined below.
+ * CR_EXC_CODE The exception type (see exception codes below).
+ */
+#define CR_BR_DELAY 0x80000000
+#define CR_COP_ERR 0x30000000
+#define CR_EXC_CODE 0x0000007c
+#define CR_EXC_CODE_SHIFT 2
+#define CR_IPEND 0x003fff00
+#define CR_INT_SOFT0 0x00000100
+#define CR_INT_SOFT1 0x00000200
+#define CR_INT_0 0x00000400
+#define CR_INT_1 0x00000800
+#define CR_INT_2 0x00001000
+#define CR_INT_3 0x00002000
+#define CR_INT_4 0x00004000
+#define CR_INT_5 0x00008000
+/* Following on RM7000 */
+#define CR_INT_6 0x00010000
+#define CR_INT_7 0x00020000
+#define CR_INT_8 0x00040000
+#define CR_INT_9 0x00080000
+#define CR_INT_HARD 0x000ffc00
+#define CR_INT_TIMR 0x00100000 /* 12 Timer */
+#define CR_INT_PERF 0x00200000 /* 13 Performance counter */
+
+/*
+ * The bits in the context register.
+ */
+#define CNTXT_PTE_BASE 0xff800000
+#define CNTXT_BAD_VPN2 0x007ffff0
+
+/*
+ * Location of exception vectors.
+ */
+#define RESET_EXC_VEC 0xffffffffbfc00000
+#define TLB_MISS_EXC_VEC 0xffffffff80000000
+#define XTLB_MISS_EXC_VEC 0xffffffff80000080
+#define CACHE_ERR_EXC_VEC 0xffffffff80000100
+#define GEN_EXC_VEC 0xffffffff80000180
+
+/*
+ * Coprocessor 0 registers:
+ */
+#define COP_0_TLB_INDEX $0
+#define COP_0_TLB_RANDOM $1
+#define COP_0_TLB_LO0 $2
+#define COP_0_TLB_LO1 $3
+#define COP_0_TLB_CONTEXT $4
+#define COP_0_TLB_PG_MASK $5
+#define COP_0_TLB_WIRED $6
+#define COP_0_BAD_VADDR $8
+#define COP_0_COUNT $9
+#define COP_0_TLB_HI $10
+#define COP_0_COMPARE $11
+#define COP_0_STATUS_REG $12
+#define COP_0_CAUSE_REG $13
+#define COP_0_EXC_PC $14
+#define COP_0_PRID $15
+#define COP_0_CONFIG $16
+#define COP_0_LLADDR $17
+#define COP_0_WATCH_LO $18
+#define COP_0_WATCH_HI $19
+#define COP_0_TLB_XCONTEXT $20
+#define COP_0_ECC $26
+#define COP_0_CACHE_ERR $27
+#define COP_0_TAG_LO $28
+#define COP_0_TAG_HI $29
+#define COP_0_ERROR_PC $30
+
+/*
+ * RM7000 specific
+ */
+#define COP_0_WATCH_1 $18
+#define COP_0_WATCH_2 $19
+#define COP_0_WATCH_M $24
+#define COP_0_PC_COUNT $25
+#define COP_0_PC_CTRL $22
+
+#define COP_0_ICR $20 /* Use cfc0/ctc0 to access */
+
+/*
+ * Values for the code field in a break instruction.
+ */
+#define BREAK_INSTR 0x0000000d
+#define BREAK_VAL_MASK 0x03ff0000
+#define BREAK_VAL_SHIFT 16
+#define BREAK_KDB_VAL 512
+#define BREAK_SSTEP_VAL 513
+#define BREAK_BRKPT_VAL 514
+#define BREAK_SOVER_VAL 515
+#define BREAK_DDB_VAL 516
+#define BREAK_KDB (BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
+#define BREAK_SSTEP (BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
+#define BREAK_BRKPT (BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
+#define BREAK_SOVER (BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
+#define BREAK_DDB (BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
+
+/*
+ * Minimum and maximum cache sizes.
+ */
+#define MIN_CACHE_SIZE (16 * 1024)
+#define MAX_CACHE_SIZE (256 * 1024)
+
+/*
+ * The floating point version and status registers.
+ */
+#define FPC_ID $0
+#define FPC_CSR $31
+
+/*
+ * The floating point coprocessor status register bits.
+ */
+#define FPC_ROUNDING_BITS 0x00000003
+#define FPC_ROUND_RN 0x00000000
+#define FPC_ROUND_RZ 0x00000001
+#define FPC_ROUND_RP 0x00000002
+#define FPC_ROUND_RM 0x00000003
+#define FPC_STICKY_BITS 0x0000007c
+#define FPC_STICKY_INEXACT 0x00000004
+#define FPC_STICKY_UNDERFLOW 0x00000008
+#define FPC_STICKY_OVERFLOW 0x00000010
+#define FPC_STICKY_DIV0 0x00000020
+#define FPC_STICKY_INVALID 0x00000040
+#define FPC_ENABLE_BITS 0x00000f80
+#define FPC_ENABLE_INEXACT 0x00000080
+#define FPC_ENABLE_UNDERFLOW 0x00000100
+#define FPC_ENABLE_OVERFLOW 0x00000200
+#define FPC_ENABLE_DIV0 0x00000400
+#define FPC_ENABLE_INVALID 0x00000800
+#define FPC_EXCEPTION_BITS 0x0003f000
+#define FPC_EXCEPTION_INEXACT 0x00001000
+#define FPC_EXCEPTION_UNDERFLOW 0x00002000
+#define FPC_EXCEPTION_OVERFLOW 0x00004000
+#define FPC_EXCEPTION_DIV0 0x00008000
+#define FPC_EXCEPTION_INVALID 0x00010000
+#define FPC_EXCEPTION_UNIMPL 0x00020000
+#define FPC_COND_BIT 0x00800000
+#define FPC_FLUSH_BIT 0x01000000
+#define FPC_MBZ_BITS 0xfe7c0000
+
+/*
+ * Constants to determine if have a floating point instruction.
+ */
+#define OPCODE_SHIFT 26
+#define OPCODE_C1 0x11
+
+/*
+ * The low part of the TLB entry.
+ */
+#define VMTLB_PF_NUM 0x3fffffc0
+#define VMTLB_ATTR_MASK 0x00000038
+#define VMTLB_MOD_BIT 0x00000004
+#define VMTLB_VALID_BIT 0x00000002
+#define VMTLB_GLOBAL_BIT 0x00000001
+
+#define VMTLB_PHYS_PAGE_SHIFT 6
+
+/*
+ * The high part of the TLB entry.
+ */
+#define VMTLB_VIRT_PAGE_NUM 0xffffe000
+#define VMTLB_PID 0x000000ff
+#define VMTLB_PID_SHIFT 0
+#define VMTLB_VIRT_PAGE_SHIFT 12
+
+/*
+ * The number of process id entries.
+ */
+#define VMNUM_PIDS 256
+
+/*
+ * TLB probe return codes.
+ */
+#define VMTLB_NOT_FOUND 0
+#define VMTLB_FOUND 1
+#define VMTLB_FOUND_WITH_PATCH 2
+#define VMTLB_PROBE_ERROR 3
+
+/*
+ * Exported definitions unique to mips cpu support.
+ */
+
+/*
+ * definitions of cpu-dependent requirements
+ * referenced in generic code
+ */
+#define COPY_SIGCODE /* copy sigcode above user stack in exec */
+
+#define cpu_wait(p) /* nothing */
+#define cpu_swapout(p) panic("cpu_swapout: can't get here");
+
+#ifndef _LOCORE
+#include <machine/frame.h>
+/*
+ * Arguments to hardclock and gatherstats encapsulate the previous
+ * machine state in an opaque clockframe.
+ */
+#define clockframe trap_frame /* Use normal trap frame */
+
+#define CLKF_USERMODE(framep) ((framep)->sr & SR_KSU_USER)
+#define CLKF_BASEPRI(framep) ((framep)->cpl == 0)
+#define CLKF_PC(framep) ((framep)->pc)
+#define CLKF_INTR(framep) (0)
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+#define need_resched(info) { want_resched = 1; aston(); }
+
+/*
+ * Give a profiling tick to the current process when the user profiling
+ * buffer pages are invalid. On the PICA, request an ast to send us
+ * through trap, marking the proc as needing a profiling tick.
+ */
+#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+#define signotify(p) aston()
+
+#define aston() (astpending = 1)
+
+int want_resched; /* resched() was called */
+
+/*
+ * CPU identification, from PRID register.
+ */
+union cpuprid {
+ int cpuprid;
+ struct {
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int pad1:16; /* reserved */
+ u_int cp_imp:8; /* implementation identifier */
+ u_int cp_majrev:4; /* major revision identifier */
+ u_int cp_minrev:4; /* minor revision identifier */
+#else
+ u_int cp_minrev:4; /* minor revision identifier */
+ u_int cp_majrev:4; /* major revision identifier */
+ u_int cp_imp:8; /* implementation identifier */
+ u_int pad1:16; /* reserved */
+#endif
+ } cpu;
+};
+#endif /* !_LOCORE */
+#endif /* _KERNEL */
+
+/*
+ * CTL_MACHDEP definitions.
+ */
+#define CPU_ALLOWAPERTURE 1 /* allow mmap of /dev/xf86 */
+#define CPU_MAXID 2 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "allowaperture", CTLTYPE_INT }, \
+}
+
+/*
+ * MIPS CPU types (cp_imp).
+ */
+#define MIPS_R2000 0x01 /* MIPS R2000 CPU ISA I */
+#define MIPS_R3000 0x02 /* MIPS R3000 CPU ISA I */
+#define MIPS_R6000 0x03 /* MIPS R6000 CPU ISA II */
+#define MIPS_R4000 0x04 /* MIPS R4000/4400 CPU ISA III */
+#define MIPS_R3LSI 0x05 /* LSI Logic R3000 derivate ISA I */
+#define MIPS_R6000A 0x06 /* MIPS R6000A CPU ISA II */
+#define MIPS_R3IDT 0x07 /* IDT R3000 derivate ISA I */
+#define MIPS_R10000 0x09 /* MIPS R10000/T5 CPU ISA IV */
+#define MIPS_R4200 0x0a /* MIPS R4200 CPU (ICE) ISA III */
+#define MIPS_R4300 0x0b /* NEC VR4300 CPU ISA III */
+#define MIPS_R4100 0x0c /* NEC VR41xx CPU MIPS-16 ISA III */
+#define MIPS_R8000 0x10 /* MIPS R8000 Blackbird/TFP ISA IV */
+#define MIPS_R4600 0x20 /* PMCS R4600 Orion ISA III */
+#define MIPS_R4700 0x21 /* PMCS R4700 Orion ISA III */
+#define MIPS_R3TOSH 0x22 /* Toshiba R3000 based CPU ISA I */
+#define MIPS_R5000 0x23 /* MIPS R5000 CPU ISA IV */
+#define MIPS_RM7000 0x27 /* PMCS RM7000 CPU ISA IV */
+#define MIPS_RM52X0 0x28 /* PMCS RM52X0 CPU ISA IV */
+#define MIPS_RM9000 0x34 /* PMCS RM9000 CPU ISA IV */
+#define MIPS_VR5400 0x54 /* NEC Vr5400 CPU ISA IV+ */
+
+/*
+ * MIPS FPU types
+ */
+#define MIPS_SOFT 0x00 /* Software emulation ISA I */
+#define MIPS_R2360 0x01 /* MIPS R2360 FPC ISA I */
+#define MIPS_R2010 0x02 /* MIPS R2010 FPC ISA I */
+#define MIPS_R3010 0x03 /* MIPS R3010 FPC ISA I */
+#define MIPS_R6010 0x04 /* MIPS R6010 FPC ISA II */
+#define MIPS_R4010 0x05 /* MIPS R4000/R4400 FPC ISA II */
+#define MIPS_R31LSI 0x06 /* LSI Logic derivate ISA I */
+#define MIPS_R10010 0x09 /* MIPS R10000/T5 FPU ISA IV */
+#define MIPS_R4210 0x0a /* MIPS R4200 FPC (ICE) ISA III */
+#define MIPS_UNKF1 0x0b /* unannounced product cpu ISA III */
+#define MIPS_R8000 0x10 /* MIPS R8000 Blackbird/TFP ISA IV */
+#define MIPS_R4600 0x20 /* PMCS R4600 Orion ISA III */
+#define MIPS_R3SONY 0x21 /* Sony R3000 based FPU ISA I */
+#define MIPS_R3TOSH 0x22 /* Toshiba R3000 based FPU ISA I */
+#define MIPS_R5010 0x23 /* MIPS R5000 based FPU ISA IV */
+#define MIPS_RM7000 0x27 /* PMCS RM7000 FPU ISA IV */
+#define MIPS_RM5230 0x28 /* PMCS RM52X0 based FPU ISA IV */
+#define MIPS_RM52XX 0x28 /* PMCS RM52X0 based FPU ISA IV */
+#define MIPS_RM9000 0x34 /* PMCS RM9000 based FPU ISA IV */
+#define MIPS_VR5400 0x54 /* NEC Vr5400 FPU ISA IV+ */
+
+#if defined(_KERNEL) && !defined(_LOCORE)
+union cpuprid cpu_id;
+union cpuprid fpu_id;
+
+u_int CpuPrimaryInstCacheSize;
+u_int CpuPrimaryInstCacheLSize;
+u_int CpuPrimaryInstSetSize;
+u_int CpuPrimaryDataCacheSize;
+u_int CpuPrimaryDataCacheLSize;
+u_int CpuPrimaryDataSetSize;
+u_int CpuCacheAliasMask;
+u_int CpuSecondaryCacheSize;
+u_int CpuTertiaryCacheSize;
+u_int CpuNWayCache;
+u_int CpuCacheType; /* R4K, R5K, RM7K */
+u_int CpuConfigRegister;
+u_int CpuStatusRegister;
+u_int CpuExternalCacheOn; /* R5K, RM7K */
+u_int CpuOnboardCacheOn; /* RM7K */
+
+struct tlb;
+struct user;
+
+void tlb_set_wired(int);
+void tlb_set_pid(int);
+u_int cp0_get_count(void);
+void cp0_set_compare(u_int);
+
+/*
+ * Temporary defines until the software-selected cache functions are fixed.
+ */
+#define Mips_ConfigCache Mips5k_ConfigCache
+#define Mips_SyncCache Mips5k_SyncCache
+#define Mips_InvalidateICache Mips5k_InvalidateICache
+#define Mips_InvalidateICachePage Mips5k_InvalidateICachePage
+#define Mips_SyncDCachePage Mips5k_SyncDCachePage
+#define Mips_HitSyncDCache Mips5k_HitSyncDCache
+#define Mips_IOSyncDCache Mips5k_IOSyncDCache
+#define Mips_HitInvalidateDCache Mips5k_HitInvalidateDCache
+
+int Mips5k_ConfigCache(void);
+void Mips5k_SyncCache(void);
+void Mips5k_InvalidateICache(vaddr_t, int);
+void Mips5k_InvalidateICachePage(vaddr_t);
+void Mips5k_SyncDCachePage(vaddr_t);
+void Mips5k_HitSyncDCache(vaddr_t, int);
+void Mips5k_IOSyncDCache(vaddr_t, int, int);
+void Mips5k_HitInvalidateDCache(vaddr_t, int);
+
+void tlb_flush(int);
+void tlb_flush_addr(vaddr_t);
+void tlb_write_indexed(int, struct tlb *);
+int tlb_update(vaddr_t, unsigned);
+void tlb_read(int, struct tlb *);
+
+void wbflush(void);
+void savectx(struct user *, int);
+int copykstack(struct user *);
+void switch_exit(struct proc *);
+void MipsSaveCurFPState(struct proc *);
+void MipsSaveCurFPState16(struct proc *);
+
+extern u_int32_t cpu_counter_interval; /* Number of counter ticks/tick */
+extern u_int32_t cpu_counter_last; /* Last compare value loaded */
+
+/*
+ * Enable realtime clock (always enabled).
+ */
+#define enablertclock()
+
+/*
+ * Low level access routines to CPU registers
+ */
+
+void setsoftintr0(void);
+void clearsoftintr0(void);
+void setsoftintr1(void);
+void clearsoftintr1(void);
+u_int32_t enableintr(void);
+u_int32_t disableintr(void);
+u_int32_t updateimask(intrmask_t);
+void setsr(u_int32_t);
+
+#endif /* _KERNEL */
+#endif /* !_MIPS_CPU_H_ */
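
The cpuprid union maps directly onto the coprocessor 0 PRID register, and its cp_imp field selects among the MIPS_* CPU type constants above. A sketch of how kernel code might decode it (illustration only; the real cpu attach code does more):

#include <machine/cpu.h>

static const char *
cpu_model_name(union cpuprid id)
{
	switch (id.cpu.cp_imp) {
	case MIPS_R4000:
		return "R4000/R4400";
	case MIPS_R5000:
		return "R5000";
	case MIPS_RM7000:
		return "RM7000";
	default:
		return "unknown implementation";
	}
}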
diff --git a/sys/arch/mips64/include/cpustate.h b/sys/arch/mips64/include/cpustate.h
new file mode 100644
index 00000000000..a94c7269ce4
--- /dev/null
+++ b/sys/arch/mips64/include/cpustate.h
@@ -0,0 +1,146 @@
+/* $OpenBSD: cpustate.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#define KERN_REG_SIZE (NUMSAVEREGS * REGSZ)
+#define KERN_EXC_FRAME_SIZE (CF_SZ + KERN_REG_SIZE + 16)
+
+#define SAVE_REG(reg, offs, base, bo) \
+ REG_S reg, bo + (REGSZ * offs) (base)
+
+#define RESTORE_REG(reg, offs, base, bo) \
+ REG_L reg, bo + (REGSZ * offs) (base)
+
+/*
+ * This macro saves the 'scratch' cpu state on stack.
+ * Macros are generic so no 'special' instructions!
+ * a0 will have a pointer to the 'frame' on return.
+ * a1 will have saved STATUS_REG on return.
+ * a3 will have the exception pc on 'return'.
+ * No traps, no interrupts if frame = k1 or k0!
+ */
+#define SAVE_CPU(frame, bo) \
+ SAVE_REG(AT, AST, frame, bo) ;\
+ SAVE_REG(v0, V0, frame, bo) ;\
+ SAVE_REG(v1, V1, frame, bo) ;\
+ SAVE_REG(a0, A0, frame, bo) ;\
+ SAVE_REG(a1, A1, frame, bo) ;\
+ SAVE_REG(a2, A2, frame, bo) ;\
+ SAVE_REG(a3, A3, frame, bo) ;\
+ SAVE_REG(t0, T0, frame, bo) ;\
+ SAVE_REG(t1, T1, frame, bo) ;\
+ SAVE_REG(t2, T2, frame, bo) ;\
+ SAVE_REG(t3, T3, frame, bo) ;\
+ SAVE_REG(t4, T4, frame, bo) ;\
+ SAVE_REG(t5, T5, frame, bo) ;\
+ SAVE_REG(t6, T6, frame, bo) ;\
+ SAVE_REG(t7, T7, frame, bo) ;\
+ SAVE_REG(t8, T8, frame, bo) ;\
+ SAVE_REG(t9, T9, frame, bo) ;\
+ SAVE_REG(gp, GP, frame, bo) ;\
+ SAVE_REG(ra, RA, frame, bo) ;\
+ mflo v0 ;\
+ mfhi v1 ;\
+ mfc0 a0, COP_0_CAUSE_REG ;\
+ mfc0 a1, COP_0_STATUS_REG ;\
+ mfc0 a2, COP_0_BAD_VADDR ;\
+ mfc0 a3, COP_0_EXC_PC ;\
+ SAVE_REG(v0, MULLO, frame, bo) ;\
+ SAVE_REG(v1, MULHI, frame, bo) ;\
+ SAVE_REG(a0, CAUSE, frame, bo) ;\
+ SAVE_REG(a1, SR, frame, bo) ;\
+ SAVE_REG(a2, BADVADDR, frame, bo) ;\
+ SAVE_REG(a3, PC, frame, bo) ;\
+ SAVE_REG(sp, SP, frame, bo) ;\
+ addu a0, frame, bo ;\
+ lw a2, cpl ;\
+ SAVE_REG(a2, CPL, frame, bo)
+
+/*
+ * Save 'callee save' registers in frame to aid DDB.
+ */
+#define SAVE_CPU_SREG(frame, bo) \
+ SAVE_REG(s0, S0, frame, bo) ;\
+ SAVE_REG(s1, S1, frame, bo) ;\
+ SAVE_REG(s2, S2, frame, bo) ;\
+ SAVE_REG(s3, S3, frame, bo) ;\
+ SAVE_REG(s4, S4, frame, bo) ;\
+ SAVE_REG(s5, S5, frame, bo) ;\
+ SAVE_REG(s6, S6, frame, bo) ;\
+ SAVE_REG(s7, S7, frame, bo) ;\
+ SAVE_REG(s8, S8, frame, bo)
+
+/*
+ * Restore cpu state. When called a0 = EXC_PC.
+ */
+#define RESTORE_CPU(frame, bo) \
+ RESTORE_REG(t1, SR, frame, bo) ;\
+ RESTORE_REG(t2, MULLO, frame, bo) ;\
+ RESTORE_REG(t3, MULHI, frame, bo) ;\
+ mtc0 t1, COP_0_STATUS_REG ;\
+ mtlo t2 ;\
+ mthi t3 ;\
+ dmtc0 a0, COP_0_EXC_PC ;\
+ RESTORE_REG(AT, AST, frame, bo) ;\
+ RESTORE_REG(v0, V0, frame, bo) ;\
+ RESTORE_REG(v1, V1, frame, bo) ;\
+ RESTORE_REG(a0, A0, frame, bo) ;\
+ RESTORE_REG(a1, A1, frame, bo) ;\
+ RESTORE_REG(a2, A2, frame, bo) ;\
+ RESTORE_REG(a3, A3, frame, bo) ;\
+ RESTORE_REG(t0, T0, frame, bo) ;\
+ RESTORE_REG(t1, T1, frame, bo) ;\
+ RESTORE_REG(t2, T2, frame, bo) ;\
+ RESTORE_REG(t3, T3, frame, bo) ;\
+ RESTORE_REG(t4, T4, frame, bo) ;\
+ RESTORE_REG(t5, T5, frame, bo) ;\
+ RESTORE_REG(t6, T6, frame, bo) ;\
+ RESTORE_REG(t7, T7, frame, bo) ;\
+ RESTORE_REG(t8, T8, frame, bo) ;\
+ RESTORE_REG(t9, T9, frame, bo) ;\
+ RESTORE_REG(gp, GP, frame, bo) ;\
+ RESTORE_REG(ra, RA, frame, bo)
+
+/*
+ * Restore 'callee save' registers
+ */
+#define RESTORE_CPU_SREG(frame, bo) \
+ RESTORE_REG(s0, S0, frame, bo) ;\
+ RESTORE_REG(s1, S1, frame, bo) ;\
+ RESTORE_REG(s2, S2, frame, bo) ;\
+ RESTORE_REG(s3, S3, frame, bo) ;\
+ RESTORE_REG(s4, S4, frame, bo) ;\
+ RESTORE_REG(s5, S5, frame, bo) ;\
+ RESTORE_REG(s6, S6, frame, bo) ;\
+ RESTORE_REG(s7, S7, frame, bo) ;\
+ RESTORE_REG(s8, S8, frame, bo)
+
diff --git a/sys/arch/mips64/include/db_machdep.h b/sys/arch/mips64/include/db_machdep.h
new file mode 100644
index 00000000000..ba008d3ab20
--- /dev/null
+++ b/sys/arch/mips64/include/db_machdep.h
@@ -0,0 +1,84 @@
+/* $OpenBSD: db_machdep.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS_DB_MACHDEP_H_
+#define _MIPS_DB_MACHDEP_H_
+
+#include <machine/frame.h>
+#include <machine/trap.h>
+#include <uvm/uvm_param.h>
+
+#define MID_MACHINE 0 /* XXX booo... */
+#define DB_MACHINE_COMMANDS /* We have machine specific commands */
+#define DB_ELF_SYMBOLS /* Elf style symbol table support */
+#define DB_NO_AOUT /* For crying out loud! */
+#define DB_ELFSIZE 32 /* This is the size of symtab stuff */
+
+typedef struct trap_frame db_regs_t;
+db_regs_t ddb_regs;
+
+typedef long db_expr_t;
+typedef vaddr_t db_addr_t;
+
+#define SOFTWARE_SSTEP /* Need software single step */
+#define SOFTWARE_SSTEP_EMUL /* next_instr_address() emulates 100% */
+db_addr_t next_instr_address __P((db_addr_t, boolean_t));
+#define BKPT_SIZE (4)
+#define BKPT_SET(ins) (BREAK_DDB)
+#define DB_VALID_BREAKPOINT(addr) (((addr) & 3) == 0)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BREAK)
+#define IS_WATCHPOINT_TRAP(type, code) (0) /* XXX mips3 watchpoint */
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->pc)
+#define DDB_REGS (&ddb_regs)
+
+/*
+ * Test of instructions to see class.
+ */
+#define IT_CALL 0x01
+#define IT_BRANCH 0x02
+#define IT_LOAD 0x03
+#define IT_STORE 0x04
+
+#define inst_branch(i) (db_inst_type(i) == IT_BRANCH)
+#define inst_trap_return(i) ((i) & 0)
+#define inst_call(i) (db_inst_type(i) == IT_CALL)
+#define inst_return(i) ((i) == 0x03e00008)
+#define inst_load(i) (db_inst_type(i) == IT_LOAD)
+#define inst_store(i) (db_inst_type(i) == IT_STORE)
+
+int db_inst_type __P((int));
+void db_machine_init __P((void));
+
+#endif /* !_MIPS_DB_MACHDEP_H_ */
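
ddb plants a breakpoint by overwriting the word at a 4-byte-aligned address with the BREAK_DDB instruction that BKPT_SET() produces; the resulting T_BREAK trap is then recognized through IS_BREAKPOINT_TRAP(). A simplified kernel-context sketch (illustration only):

#include <machine/cpu.h>
#include <machine/db_machdep.h>

int
plant_breakpoint(db_addr_t addr)
{
	int *ip = (int *)addr;

	if (!DB_VALID_BREAKPOINT(addr))
		return -1;		/* not word aligned */
	*ip = BKPT_SET(*ip);		/* expands to BREAK_DDB */
	/*
	 * A real implementation would also write back the D-cache and
	 * invalidate the I-cache line covering addr before continuing.
	 */
	return 0;
}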
diff --git a/sys/arch/mips64/include/disklabel.h b/sys/arch/mips64/include/disklabel.h
new file mode 100644
index 00000000000..8a02db20cfc
--- /dev/null
+++ b/sys/arch/mips64/include/disklabel.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: disklabel.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+/* $NetBSD: disklabel.h,v 1.3 1996/03/09 20:52:54 ghudson Exp $ */
+
+/*
+ * Copyright (c) 1994 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MIPS_DISKLABEL_H_
+#define _MIPS_DISKLABEL_H_
+
+#define LABELSECTOR 1 /* sector containing label */
+#define LABELOFFSET 0 /* offset of label in sector */
+#define MAXPARTITIONS 16 /* number of partitions */
+#define RAW_PART 2 /* raw partition: ie. rsd0c */
+
+/* DOS partition table -- located in boot block */
+#define DOSBBSECTOR 0 /* DOS boot block relative sector # */
+#define DOSPARTOFF 446
+#define NDOSPART 4
+
+struct dos_partition {
+ u_int8_t dp_flag; /* bootstrap flags */
+ u_int8_t dp_shd; /* starting head */
+ u_int8_t dp_ssect; /* starting sector */
+ u_int8_t dp_scyl; /* starting cylinder */
+ u_int8_t dp_typ; /* partition type (see below) */
+ u_int8_t dp_ehd; /* end head */
+ u_int8_t dp_esect; /* end sector */
+ u_int8_t dp_ecyl; /* end cylinder */
+ u_int32_t dp_start; /* absolute starting sector number */
+ u_int32_t dp_size; /* partition size in sectors */
+};
+
+/* Known DOS partition types. */
+#define DOSPTYP_UNUSED 0x00 /* Unused partition */
+#define DOSPTYP_FAT12 0x01 /* 12-bit FAT */
+#define DOSPTYP_FAT16S 0x04 /* 16-bit FAT, less than 32M */
+#define DOSPTYP_EXTEND 0x05 /* Extended; contains sub-partitions */
+#define DOSPTYP_FAT16B 0x06 /* 16-bit FAT, more than 32M */
+#define DOSPTYP_FAT32 0x0b /* 32-bit FAT */
+#define DOSPTYP_FAT32L 0x0c /* 32-bit FAT, LBA-mapped */
+#define DOSPTYP_FAT16L 0x0e /* 16-bit FAT, LBA-mapped */
+#define DOSPTYP_ONTRACK 0x54
+#define DOSPTYP_LINUX 0x83 /* That other thing */
+#define DOSPTYP_FREEBSD 0xa5 /* FreeBSD partition type */
+#define DOSPTYP_OPENBSD 0xa6 /* OpenBSD partition type */
+#define DOSPTYP_NETBSD 0xa9 /* NetBSD partition type */
+
+#include <sys/dkbad.h>
+struct cpu_disklabel {
+ struct dos_partition dosparts[NDOSPART];
+ struct dkbad bad;
+};
+
+#define DKBAD(x) ((x)->bad)
+
+/* Isolate the relevant bits to get sector and cylinder. */
+#define DPSECT(s) ((s) & 0x3f)
+#define DPCYL(c, s) ((c) + (((s) & 0xc0) << 2))
+
+#endif /* !_MIPS_DISKLABEL_H_ */
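
In an MBR entry the sector byte packs the sector number into bits 0-5 and the two high bits of the 10-bit cylinder number into bits 6-7; DPSECT()/DPCYL() undo that packing. A short illustration (not part of this commit):

#include <machine/disklabel.h>

/* Recover the ending C/H/S values of a DOS partition entry. */
void
dos_partition_end(const struct dos_partition *dp,
    int *cyl, int *head, int *sect)
{
	*sect = DPSECT(dp->dp_esect);			/* low 6 bits */
	*cyl = DPCYL(dp->dp_ecyl, dp->dp_esect);	/* 8 bits + 2 high bits */
	*head = dp->dp_ehd;
}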
diff --git a/sys/arch/mips64/include/dlfcn.h b/sys/arch/mips64/include/dlfcn.h
new file mode 100644
index 00000000000..f242fcdc055
--- /dev/null
+++ b/sys/arch/mips64/include/dlfcn.h
@@ -0,0 +1,61 @@
+/* $OpenBSD: dlfcn.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom, Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS_DLFCN_H_
+#define _MIPS_DLFCN_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern void *dlopen __P((const char *, int));
+extern int dlclose __P((void *));
+extern void *dlsym __P((void *, const char *));
+extern int dlctl __P((void *, int, void *));
+extern const char *dlerror __P((void));
+__END_DECLS
+
+/*
+ * dlopen() modes.
+ */
+#define DL_LAZY 1 /* Resolve when called */
+#define DL_NOW 2 /* Resolve immediately */
+
+/*
+ * dlctl() commands.
+ */
+
+#define DL_DUMP_MAP 99
+
+
+#endif /* !_MIPS_DLFCN_H_ */
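
Typical userland use of the interface declared above (a sketch; "libfoo.so" and "foo" are placeholder names):

#include <dlfcn.h>

int
call_foo(void)
{
	void *handle;
	int (*foo)(void);

	handle = dlopen("libfoo.so", DL_LAZY);	/* resolve symbols when called */
	if (handle == NULL)
		return -1;
	foo = (int (*)(void))dlsym(handle, "foo");
	if (foo == NULL) {
		dlclose(handle);
		return -1;
	}
	(*foo)();
	return dlclose(handle);
}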
diff --git a/sys/arch/mips64/include/ecoff_machdep.h b/sys/arch/mips64/include/ecoff_machdep.h
new file mode 100644
index 00000000000..1de7eeadc6d
--- /dev/null
+++ b/sys/arch/mips64/include/ecoff_machdep.h
@@ -0,0 +1,98 @@
+/* $OpenBSD: ecoff_machdep.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+/* $NetBSD: ecoff.h,v 1.4 1995/06/16 02:07:33 mellon Exp $ */
+
+/*
+ * Copyright (c) 1994 Adam Glass
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Adam Glass.
+ * 4. The name of the Author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Adam Glass ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Adam Glass BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MIPS_ECOFF_H_
+#define _MIPS_ECOFF_H_
+
+#define ECOFF_LDPGSZ 4096
+
+#define ECOFF_PAD
+
+#define ECOFF_MACHDEP \
+ u_long ea_gprmask; \
+ u_long ea_cprmask[4]; \
+ u_long ea_gp_value
+
+#define ECOFF_MAGIC_MIPSEL 0x0162
+#define ECOFF_BADMAG(ex) ((ex)->f.f_magic != ECOFF_MAGIC_MIPSEL)
+
+#define ECOFF_SEGMENT_ALIGNMENT(ep) ((ep)->a.vstamp < 23 ? 8 : 16)
+
+struct ecoff_symhdr {
+ int16_t sh_magic;
+ int16_t sh_vstamp;
+ int32_t sh_linemax;
+ int32_t sh_densenummax;
+ int32_t sh_procmax;
+ int32_t sh_lsymmax;
+ int32_t sh_optsymmax;
+ int32_t sh_auxxymmax;
+ int32_t sh_lstrmax;
+ int32_t sh_estrmax;
+ int32_t sh_fdmax;
+ int32_t sh_rfdmax;
+ int32_t sh_esymmax;
+ long sh_linesize;
+ long sh_lineoff;
+ long sh_densenumoff;
+ long sh_procoff;
+ long sh_lsymoff;
+ long sh_optsymoff;
+ long sh_auxsymoff;
+ long sh_lstroff;
+ long sh_estroff;
+ long sh_fdoff;
+ long sh_rfdoff;
+ long sh_esymoff;
+};
+/* Some day they will make up their minds.... */
+#define esymMax sh_esymmax
+#define cbExtOffset sh_esymoff
+#define cbSsExtOffset sh_estroff
+
+struct ecoff_extsym {
+ long es_value;
+ int es_strindex;
+ unsigned es_type:6;
+ unsigned es_class:5;
+ unsigned :1;
+ unsigned es_symauxindex:20;
+ unsigned es_jmptbl:1;
+ unsigned es_cmain:1;
+ unsigned es_weakext:1;
+ unsigned :29;
+ int es_indexfld;
+};
+
+#endif /* !_MIPS_ECOFF_H_ */
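
ECOFF_BADMAG() and ECOFF_SEGMENT_ALIGNMENT() operate on the exec header (struct ecoff_exechdr from <sys/exec_ecoff.h>, whose `f` file-header and `a` a.out-header members the macros dereference). A sketch of the check an exec loader performs (illustration only):

#include <sys/exec_ecoff.h>

/* Returns the required segment alignment, or 0 for a non-ECOFF image. */
int
ecoff_check(const struct ecoff_exechdr *ep)
{
	if (ECOFF_BADMAG(ep))
		return 0;	/* magic is not ECOFF_MAGIC_MIPSEL */
	return ECOFF_SEGMENT_ALIGNMENT(ep);	/* 8 for vstamp < 23, else 16 */
}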
diff --git a/sys/arch/mips64/include/endian.h b/sys/arch/mips64/include/endian.h
new file mode 100644
index 00000000000..ddd520dcb9b
--- /dev/null
+++ b/sys/arch/mips64/include/endian.h
@@ -0,0 +1,50 @@
+/* $OpenBSD: endian.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2002 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS64_ENDIAN_H_
+#define _MIPS64_ENDIAN_H_
+
+#if defined(__MIPSEL__)
+#define BYTE_ORDER 1234 /* Little endian */
+#endif
+#if defined(__MIPSEB__)
+#define BYTE_ORDER 4321 /* Big endian */
+#endif
+
+#if !defined(BYTE_ORDER) && !defined(lint)
+#error "__MIPSEL__ or __MIPSEB__ must be defined to define BYTE_ORDER!!!"
+#endif
+
+#include <sys/endian.h>
+
+#endif /* _MIPS64_ENDIAN_H_ */
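
BYTE_ORDER here comes straight from the compiler-provided __MIPSEL__/__MIPSEB__ macros, with the generic helpers then pulled in from <sys/endian.h>. A minimal sketch of how code typically branches on it; le16_to_host is a hypothetical helper, not part of this diff:

#include <sys/types.h>
#include <machine/endian.h>

/* Hypothetical helper: convert a 16-bit little-endian device value to host order. */
static u_int16_t
le16_to_host(u_int16_t v)
{
#if BYTE_ORDER == BIG_ENDIAN
    return (u_int16_t)((v << 8) | (v >> 8));    /* swap on __MIPSEB__ builds */
#else
    return v;                                   /* already native on __MIPSEL__ */
#endif
}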
diff --git a/sys/arch/mips64/include/exception.h b/sys/arch/mips64/include/exception.h
new file mode 100644
index 00000000000..fdfd2c05ab7
--- /dev/null
+++ b/sys/arch/mips64/include/exception.h
@@ -0,0 +1,79 @@
+/* $OpenBSD: exception.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Definitions for exception processing.
+ */
+
+#ifndef _MIPS_EXCEPTION_H_
+#define _MIPS_EXCEPTION_H_
+
+/*
+ * Exception codes.
+ */
+
+#define EX_INT 0 /* Interrupt */
+#define EX_MOD 1 /* TLB Modification */
+#define EX_TLBL 2 /* TLB exception, load or i-fetch */
+#define EX_TLBS 3 /* TLB exception, store */
+#define EX_ADEL 4 /* Address error exception, load or i-fetch */
+#define EX_ADES 5 /* Address error exception, store */
+#define EX_IBE 6 /* Bus error exception, i-fetch */
+#define EX_DBE 7 /* Bus error exception, data reference */
+#define EX_SYS 8 /* Syscall exception */
+#define EX_BP 9 /* Breakpoint exception */
+#define EX_RI 10 /* Reserved instruction exception */
+#define EX_CPU 11 /* Coprocessor unusable exception */
+#define EX_OV 12 /* Arithmetic overflow exception */
+#define EX_TR 13 /* Trap exception */
+#define EX_VCEI 14 /* Virtual coherency exception instruction */
+#define EX_FPE 15 /* Floating point exception */
+#define EX_WATCH 23 /* Reference to WatchHi/WatchLo address */
+#define EX_VCED 31 /* Virtual coherency exception data */
+
+#define EX_U 32 /* Exception from user mode (SW flag) */
+
+#if defined(DDB) || defined(DEBUG)
+#define EX_SIZE 10
+struct ex_debug {
+ u_int ex_status;
+ u_int ex_cause;
+ u_int ex_badaddr;
+ u_int ex_pc;
+ u_int ex_ra;
+ u_int ex_sp;
+ u_int ex_code;
+} ex_debug[EX_SIZE], *exp = ex_debug;
+
+#endif
+#endif /* !_MIPS_EXCEPTION_H_ */
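
The EX_* values track the Cause register exception codes, with EX_U kept as a software-only flag for traps taken from user mode. A hedged sketch of the kind of lookup a trap handler or ddb might use when printing one of these codes; exception_name and its table are illustrative, not part of the diff:

/* Illustrative only: map an EX_* code to a printable name. */
static const char *
exception_name(int code)
{
    static const char *names[] = {
        "int", "mod", "tlbl", "tlbs", "adel", "ades", "ibe", "dbe",
        "sys", "bp", "ri", "cpu", "ov", "tr", "vcei", "fpe"
    };

    code &= ~EX_U;              /* strip the user-mode flag */
    if (code >= 0 && code < 16)
        return names[code];
    if (code == EX_WATCH)
        return "watch";
    if (code == EX_VCED)
        return "vced";
    return "unknown";
}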
diff --git a/sys/arch/mips64/include/float.h b/sys/arch/mips64/include/float.h
new file mode 100644
index 00000000000..d054887c6ad
--- /dev/null
+++ b/sys/arch/mips64/include/float.h
@@ -0,0 +1,80 @@
+/* $OpenBSD: float.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)float.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_FLOAT_H_
+#define _MIPS_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int __flt_rounds __P((void));
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS __flt_rounds()
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP -125 /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP -37 /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP -1021
+#define DBL_MIN 2.225073858507201E-308
+#define DBL_MIN_10_EXP -307
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.797693134862316E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+
+#endif /* !_MIPS_FLOAT_H_ */
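
The comments give the C89 formulas each constant satisfies (FLT_EPSILON is b**(1-p), FLT_MIN is b**(emin-1), and so on). A small host-side sanity check, purely illustrative, that the single-precision values are mutually consistent:

#include <assert.h>
#include <float.h>
#include <math.h>

int
main(void)
{
    /* FLT_EPSILON == 2^(1 - FLT_MANT_DIG) for radix 2 */
    assert(FLT_EPSILON == ldexpf(1.0f, 1 - FLT_MANT_DIG));
    /* FLT_MIN == 2^(FLT_MIN_EXP - 1) */
    assert(FLT_MIN == ldexpf(1.0f, FLT_MIN_EXP - 1));
    return 0;
}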
diff --git a/sys/arch/mips64/include/frame.h b/sys/arch/mips64/include/frame.h
new file mode 100644
index 00000000000..4176328349a
--- /dev/null
+++ b/sys/arch/mips64/include/frame.h
@@ -0,0 +1,204 @@
+/* $OpenBSD: frame.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#ifndef _MIPS64_FRAME_H_
+#define _MIPS64_FRAME_H_
+
+/*
+ * The layout of this *must* match regnum.h or bad things
+ * will happen. libc setjmp/longjmp depend on this as well.
+ */
+struct trap_frame {
+ register_t zero;
+ register_t ast;
+ register_t v0;
+ register_t v1;
+ register_t a0;
+ register_t a1;
+ register_t a2;
+ register_t a3;
+ register_t t0;
+ register_t t1;
+ register_t t2;
+ register_t t3;
+#define a4 t0
+#define a5 t1
+#define a6 t2
+#define a7 t3
+ register_t t4;
+ register_t t5;
+ register_t t6;
+ register_t t7;
+ register_t s0;
+ register_t s1;
+ register_t s2;
+ register_t s3;
+ register_t s4;
+ register_t s5;
+ register_t s6;
+ register_t s7;
+ register_t t8;
+ register_t t9;
+ register_t k0;
+ register_t k1;
+ register_t gp;
+ register_t sp;
+ register_t s8;
+ register_t ra;
+ register_t sr;
+ register_t mullo;
+ register_t mulhi;
+ register_t badvaddr;
+ register_t cause;
+ register_t pc;
+ register_t ic;
+ register_t cpl;
+
+/* From here on, only saved for user processes. */
+
+ f_register_t f0;
+ f_register_t f1;
+ f_register_t f2;
+ f_register_t f3;
+ f_register_t f4;
+ f_register_t f5;
+ f_register_t f6;
+ f_register_t f7;
+ f_register_t f8;
+ f_register_t f9;
+ f_register_t f10;
+ f_register_t f11;
+ f_register_t f12;
+ f_register_t f13;
+ f_register_t f14;
+ f_register_t f15;
+ f_register_t f16;
+ f_register_t f17;
+ f_register_t f18;
+ f_register_t f19;
+ f_register_t f20;
+ f_register_t f21;
+ f_register_t f22;
+ f_register_t f23;
+ f_register_t f24;
+ f_register_t f25;
+ f_register_t f26;
+ f_register_t f27;
+ f_register_t f28;
+ f_register_t f29;
+ f_register_t f30;
+ f_register_t f31;
+ register_t fsr;
+};
+
+#if 0
+struct trap_frame32 {
+ int32_t zero;
+ int32_t ast;
+ int32_t v0;
+ int32_t v1;
+ int32_t a0;
+ int32_t a1;
+ int32_t a2;
+ int32_t a3;
+ int32_t t0;
+ int32_t t1;
+ int32_t t2;
+ int32_t t3;
+ int32_t t4;
+ int32_t t5;
+ int32_t t6;
+ int32_t t7;
+ int32_t s0;
+ int32_t s1;
+ int32_t s2;
+ int32_t s3;
+ int32_t s4;
+ int32_t s5;
+ int32_t s6;
+ int32_t s7;
+ int32_t t8;
+ int32_t t9;
+ int32_t k0;
+ int32_t k1;
+ int32_t gp;
+ int32_t sp;
+ int32_t s8;
+ int32_t ra;
+ int32_t sr;
+ int32_t mullo;
+ int32_t mulhi;
+ int32_t badvaddr;
+ int32_t cause;
+ int32_t pc;
+ int32_t ic;
+ int32_t cpl;
+
+/* From here on, only saved for user processes. */
+
+ f_register_t f0;
+ f_register_t f1;
+ f_register_t f2;
+ f_register_t f3;
+ f_register_t f4;
+ f_register_t f5;
+ f_register_t f6;
+ f_register_t f7;
+ f_register_t f8;
+ f_register_t f9;
+ f_register_t f10;
+ f_register_t f11;
+ f_register_t f12;
+ f_register_t f13;
+ f_register_t f14;
+ f_register_t f15;
+ f_register_t f16;
+ f_register_t f17;
+ f_register_t f18;
+ f_register_t f19;
+ f_register_t f20;
+ f_register_t f21;
+ f_register_t f22;
+ f_register_t f23;
+ f_register_t f24;
+ f_register_t f25;
+ f_register_t f26;
+ f_register_t f27;
+ f_register_t f28;
+ f_register_t f29;
+ f_register_t f30;
+ f_register_t f31;
+ register_t fsr;
+};
+#endif
+
+#endif /* !_MIPS64_FRAME_H_ */
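
Since the comment warns that trap_frame must stay in sync with regnum.h, a compile-time check is one way to enforce that. The V0/PC index values below are assumptions standing in for the real regnum.h definitions, which are not part of this hunk:

#include <sys/types.h>
#include <stddef.h>
#include <machine/frame.h>

/* Assumed register indices; verify against regnum.h before relying on them. */
#define FRAME_V0    2
#define FRAME_PC    37

/* Each trap_frame slot is one register_t wide, so index * sizeof(register_t)
 * must equal the field offset. A negative array size breaks the build. */
typedef char v0_slot_check[offsetof(struct trap_frame, v0) ==
    FRAME_V0 * sizeof(register_t) ? 1 : -1];
typedef char pc_slot_check[offsetof(struct trap_frame, pc) ==
    FRAME_PC * sizeof(register_t) ? 1 : -1];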
diff --git a/sys/arch/mips64/include/ieee.h b/sys/arch/mips64/include/ieee.h
new file mode 100644
index 00000000000..0af7989d7e4
--- /dev/null
+++ b/sys/arch/mips64/include/ieee.h
@@ -0,0 +1,136 @@
+/* $OpenBSD: ieee.h,v 1.1 2004/08/06 20:56:01 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point. It does *not* define (yet?) any of the rounding
+ * mode bits, exceptions, and so forth.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * Note that 1.0 x 2^k == 0.1 x 2^(k+1) and that denorms are represented
+ * as fractions that look like 0.fffff x 2^(-exp_bias+1).  This means that
+ * the number 0.10000 x 2^-126, for instance, is the same as the normalized
+ * float 1.0 x 2^-127.  Thus, to represent 2^-128, we need one leading zero
+ * in the fraction; to represent 2^-129, we need two, and so on.  This
+ * implies that the smallest denormalized number is 2^(-exp_bias-fracbits+1)
+ * for whichever format we are talking about: for single precision, for
+ * instance, we get .00000000000000000000001 x 2^-126, or 1.0 x 2^-149, and
+ * -149 == -127 - 23 + 1.
+ */
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+#define DBL_EXPBITS 11
+#define DBL_FRACBITS 52
+
+#define EXT_EXPBITS 15
+#define EXT_FRACBITS 112
+
+struct ieee_single {
+ u_int sng_sign:1;
+ u_int sng_exp:8;
+ u_int sng_frac:23;
+};
+
+struct ieee_double {
+ u_int dbl_sign:1;
+ u_int dbl_exp:11;
+ u_int dbl_frach:20;
+ u_int dbl_fracl;
+};
+
+struct ieee_ext {
+ u_int ext_sign:1;
+ u_int ext_exp:15;
+ u_int ext_frach:16;
+ u_int ext_frachm;
+ u_int ext_fraclm;
+ u_int ext_fracl;
+};
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255
+#define DBL_EXP_INFNAN 2047
+#define EXT_EXP_INFNAN 32767
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#define EXT_QUIETNAN (1 << 15)
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
+#define EXT_EXP_BIAS 16383
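
As an illustration of the constants above, a single-precision value can be decomposed with plain shifts and masks built from SNG_EXPBITS/SNG_FRACBITS and SNG_EXP_BIAS, which avoids depending on the bitfield layout of struct ieee_single. This sketch assumes the header is in scope (e.g. as <machine/ieee.h> in-tree):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
    float f = 1.5f;                 /* 1.1 in binary */
    uint32_t bits, frac, exp, sign;

    memcpy(&bits, &f, sizeof bits);
    frac = bits & ((1U << SNG_FRACBITS) - 1);
    exp = (bits >> SNG_FRACBITS) & ((1U << SNG_EXPBITS) - 1);
    sign = bits >> (SNG_FRACBITS + SNG_EXPBITS);

    /* Prints: sign 0, unbiased exp 0, frac 0x400000 (top fraction bit). */
    printf("sign %u, unbiased exp %d, frac %#x\n",
        sign, (int)exp - SNG_EXP_BIAS, frac);
    return 0;
}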
diff --git a/sys/arch/mips64/include/ieeefp.h b/sys/arch/mips64/include/ieeefp.h
new file mode 100644
index 00000000000..5828b51e39e
--- /dev/null
+++ b/sys/arch/mips64/include/ieeefp.h
@@ -0,0 +1,25 @@
+/* $OpenBSD: ieeefp.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Written by J.T. Conklin, Apr 11, 1995
+ * Public domain.
+ */
+
+#ifndef _MIPS_IEEEFP_H_
+#define _MIPS_IEEEFP_H_
+
+typedef int fp_except;
+#define FP_X_IMP 0x01 /* imprecise (loss of precision) */
+#define FP_X_UFL 0x02 /* underflow exception */
+#define FP_X_OFL 0x04 /* overflow exception */
+#define FP_X_DZ 0x08 /* divide-by-zero exception */
+#define FP_X_INV 0x10 /* invalid operation exception */
+
+typedef enum {
+ FP_RN=0, /* round to nearest representable number */
+ FP_RZ=1, /* round to zero (truncate) */
+ FP_RP=2, /* round toward positive infinity */
+ FP_RM=3 /* round toward negative infinity */
+} fp_rnd;
+
+#endif /* !_MIPS_IEEEFP_H_ */
diff --git a/sys/arch/mips64/include/internal_types.h b/sys/arch/mips64/include/internal_types.h
new file mode 100644
index 00000000000..529079fc997
--- /dev/null
+++ b/sys/arch/mips64/include/internal_types.h
@@ -0,0 +1,8 @@
+/* $OpenBSD: internal_types.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+/* Public domain */
+#ifndef _MIPS64_INTERNAL_TYPES_H_
+#define _MIPS64_INTERNAL_TYPES_H_
+
+/* Machine special type definitions */
+
+#endif
diff --git a/sys/arch/mips64/include/kcore.h b/sys/arch/mips64/include/kcore.h
new file mode 100644
index 00000000000..b7449467a60
--- /dev/null
+++ b/sys/arch/mips64/include/kcore.h
@@ -0,0 +1,45 @@
+/* $OpenBSD: kcore.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+/* $NetBSD: kcore.h,v 1.1 1996/03/10 21:55:18 leo Exp $ */
+
+/*
+ * Copyright (c) 1996 Leo Weppelman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Leo Weppelman.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MIPS_KCORE_H_
+#define _MIPS_KCORE_H_
+
+#define NPHYS_RAM_SEGS 8
+
+typedef struct cpu_kcore_hdr {
+ paddr_t kernel_pa; /* Phys. address of kernel VA 0 */
+ int mmutype;
+ phys_ram_seg_t ram_segs[NPHYS_RAM_SEGS];
+} cpu_kcore_hdr_t;
+
+#endif /* !_MIPS_KCORE_H_ */
diff --git a/sys/arch/mips64/include/kdbparam.h b/sys/arch/mips64/include/kdbparam.h
new file mode 100644
index 00000000000..9caf0f8b922
--- /dev/null
+++ b/sys/arch/mips64/include/kdbparam.h
@@ -0,0 +1,79 @@
+/* $OpenBSD: kdbparam.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kdbparam.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_KDBPARAM_H_
+#define _MIPS_KDBPARAM_H_
+
+/*
+ * Machine dependent definitions for kdb.
+ */
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define kdbshorten(w) ((w) & 0xFFFF)
+#define kdbbyte(w) ((w) & 0xFF)
+#define kdbitol(a,b) ((long)(((b) << 16) | ((a) & 0xFFFF)))
+#define kdbbtol(a) ((long)(a))
+#endif
+
+#define LPRMODE "%R"
+#define OFFMODE "+%R"
+
+#define SETBP(ins) BREAK_BRKPT
+
+/* return the program counter value modified if we are in a delay slot */
+#define kdbgetpc(pcb) (kdbvar[kdbvarchk('t')] < 0 ? \
+ (pcb).pcb_regs[34] + 4 : (pcb).pcb_regs[34])
+#define kdbishiddenreg(p) ((p) >= &kdbreglist[33])
+#define kdbisbreak(type) (((type) & CR_EXC_CODE) == 0x24)
+
+/* check for address wrap around */
+#define kdbaddrwrap(addr,newaddr) (((addr)^(newaddr)) >> 31)
+
+/* declare machine dependent routines defined in kadb.c */
+void kdbprinttrap __P((unsigned, unsigned));
+void kdbsetsstep __P((void));
+void kdbclrsstep __P((void));
+void kdbreadc __P((char *));
+void kdbwrite __P((char *, int));
+void kdbprintins __P((int, long));
+void kdbstacktrace __P((int));
+char *kdbmalloc __P((int));
+
+#endif /* !_MIPS_KDBPARAM_H_ */
diff --git a/sys/arch/mips64/include/limits.h b/sys/arch/mips64/include/limits.h
new file mode 100644
index 00000000000..ea69e591b68
--- /dev/null
+++ b/sys/arch/mips64/include/limits.h
@@ -0,0 +1,98 @@
+/* $OpenBSD: limits.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ */
+
+#ifndef _MIPS_LIMITS_H_
+#define _MIPS_LIMITS_H_
+
+#define CHAR_BIT 8 /* number of bits in a char */
+#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values
+ * are written as hex so that GCC will be quiet about large integer constants.
+ */
+#define SCHAR_MAX 0x7f /* max value for a signed char */
+#define SCHAR_MIN (-0x7f-1) /* min value for a signed char */
+
+#define UCHAR_MAX 0xffU /* max value for an unsigned char */
+#define CHAR_MAX 0x7f /* max value for a char */
+#define CHAR_MIN (-0x7f-1) /* min value for a char */
+
+#define USHRT_MAX 0xffffU /* max value for an unsigned short */
+#define SHRT_MAX 0x7fff /* max value for a short */
+#define SHRT_MIN (-0x7fff-1) /* min value for a short */
+
+#define UINT_MAX 0xffffffffU /* max value for an unsigned int */
+#define INT_MAX 0x7fffffff /* max value for an int */
+#define INT_MIN (-0x7fffffff-1) /* min value for an int */
+
+#if !defined(_ANSI_SOURCE)
+#define SIZE_MAX ULONG_MAX /* max value for a size_t */
+#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE) && !defined(_XOPEN_SOURCE)
+#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */
+
+#define UID_MAX UINT_MAX /* max value for a uid_t */
+#define GID_MAX UINT_MAX /* max value for a gid_t */
+
+/* GCC requires that quad constants be written as expressions. */
+#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */
+ /* max value for a quad_t */
+#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1))
+#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */
+
+#endif /* !_POSIX_SOURCE && !_XOPEN_SOURCE */
+#endif /* !_ANSI_SOURCE */
+
+#if (!defined(_ANSI_SOURCE)&&!defined(_POSIX_SOURCE)) || defined(_XOPEN_SOURCE)
+#define DBL_DIG 15
+#define DBL_MAX 1.797693134862316E+308
+#define DBL_MIN 2.225073858507201E-308
+
+#define FLT_DIG 6
+#define FLT_MAX 3.40282347E+38F
+#define FLT_MIN 1.17549435E-38F
+#endif
+
+#endif /* !_MIPS_LIMITS_H_ */
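
The long comment explains why the limits are written the way they are: they must survive #if arithmetic in the preprocessor and keep the type the standard requires, hence (-0x7fffffff-1) rather than a literal and quad limits built from expressions. A tiny illustration, assuming the header is reachable as <machine/limits.h>:

#include <machine/limits.h>

/* Usable directly by the preprocessor... */
#if INT_MAX != 0x7fffffff
#error "unexpected INT_MAX"
#endif

/* ...and correctly valued as compile-time constants. */
typedef char int_min_check[INT_MIN == -INT_MAX - 1 ? 1 : -1];
typedef char uchar_check[UCHAR_MAX == 255 ? 1 : -1];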
diff --git a/sys/arch/mips64/include/link.h b/sys/arch/mips64/include/link.h
new file mode 100644
index 00000000000..6526a506204
--- /dev/null
+++ b/sys/arch/mips64/include/link.h
@@ -0,0 +1,74 @@
+/* $OpenBSD: link.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1996 Per Fogelstrom
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed under OpenBSD by
+ * Per Fogelstrom.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS_LINK_H_
+#define _MIPS_LINK_H_
+
+#include <elf_abi.h>
+#include <machine/elf_abi.h>
+
+/*
+ * Debug rendezvous struct. A pointer to this is set up at the
+ * location in the target pointed to by the DT_MIPS_RLD_MAP tag,
+ * if that tag is defined.
+ */
+
+struct r_debug {
+ int r_version; /* Protocol version. */
+ struct link_map *r_map; /* Head of list of loaded objects. */
+
+ Elf32_Addr r_brk;
+ enum {
+ RT_CONSISTENT, /* Mapping change is complete. */
+ RT_ADD, /* Adding a new object. */
+ RT_DELETE, /* Removing an object mapping. */
+ } r_state;
+
+ Elf32_Addr r_ldbase; /* Base address the linker is loaded at. */
+};
+
+
+/*
+ * Shared object map data used by the debugger.
+ */
+
+struct link_map {
+ Elf32_Addr l_addr; /* Base address shared object is loaded at. */
+ Elf32_Addr l_offs; /* Offset from link address */
+ char *l_name; /* Absolute file name object was found in. */
+ Elf32_Dyn *l_ld; /* Dynamic section of the shared object. */
+ struct link_map *l_next, *l_prev; /* Chain of loaded objects. */
+};
+
+#endif /* !_MIPS_LINK_H_ */
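
r_debug and link_map make up the debugger rendezvous: the runtime linker publishes r_debug (via the DT_MIPS_RLD_MAP mechanism noted above) and keeps r_map pointing at the chain of loaded objects. A hedged sketch of walking that chain once an r_debug pointer has already been obtained; how it is located is outside the snippet, and the <link.h> include path is an assumption:

#include <stdio.h>
#include <link.h>

/* Illustrative only: list every object on an already-located chain. */
static void
dump_link_maps(const struct r_debug *rd)
{
    const struct link_map *lm;

    for (lm = rd->r_map; lm != NULL; lm = lm->l_next)
        printf("%#lx %s\n", (unsigned long)lm->l_addr,
            lm->l_name != NULL ? lm->l_name : "?");
}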
diff --git a/sys/arch/mips64/include/memconf.h b/sys/arch/mips64/include/memconf.h
new file mode 100644
index 00000000000..16a4201516b
--- /dev/null
+++ b/sys/arch/mips64/include/memconf.h
@@ -0,0 +1,50 @@
+/* $OpenBSD: memconf.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1996 Per Fogelstrom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Per Fogelstrom.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Memory config list used by pmap_bootstrap.
+ */
+
+#ifndef _MIPS_MEMCONF_H_
+#define _MIPS_MEMCONF_H_
+
+struct mem_descriptor {
+ paddr_t mem_start;
+ psize_t mem_size;
+};
+
+#ifdef _KERNEL
+#define MAXMEMSEGS 16
+extern struct mem_descriptor mem_layout[];
+#endif
+
+#endif /* !_MIPS_MEMCONF_H_ */
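
mem_layout[] is the simple per-segment physical memory list that pmap_bootstrap walks; a zero-sized entry marks an unused slot. A hedged sketch of summing it up, purely illustrative and assuming the kernel headers are in scope:

#include <sys/types.h>
#include <machine/memconf.h>

/* Illustrative only: total physical memory described by mem_layout[]. */
static psize_t
total_physmem(void)
{
    psize_t total = 0;
    int i;

    for (i = 0; i < MAXMEMSEGS; i++)
        total += mem_layout[i].mem_size;    /* unused slots contribute 0 */
    return total;
}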
diff --git a/sys/arch/mips64/include/mips_opcode.h b/sys/arch/mips64/include/mips_opcode.h
new file mode 100644
index 00000000000..92658089fa2
--- /dev/null
+++ b/sys/arch/mips64/include/mips_opcode.h
@@ -0,0 +1,297 @@
+/* $OpenBSD: mips_opcode.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)mips_opcode.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_MIPS_OPCODE_H_
+#define _MIPS_MIPS_OPCODE_H_
+
+/*
+ * Define the instruction formats and opcode values for the
+ * MIPS instruction set.
+ */
+
+/*
+ * Define the instruction formats.
+ */
+typedef union {
+ unsigned word;
+
+#if BYTE_ORDER == BIG_ENDIAN
+ struct {
+ unsigned op: 6;
+ unsigned rs: 5;
+ unsigned rt: 5;
+ unsigned imm: 16;
+ } IType;
+
+ struct {
+ unsigned op: 6;
+ unsigned target: 26;
+ } JType;
+
+ struct {
+ unsigned op: 6;
+ unsigned rs: 5;
+ unsigned rt: 5;
+ unsigned rd: 5;
+ unsigned shamt: 5;
+ unsigned func: 6;
+ } RType;
+
+ struct {
+ unsigned op: 6; /* always '0x11' */
+ unsigned : 1; /* always '1' */
+ unsigned fmt: 4;
+ unsigned ft: 5;
+ unsigned fs: 5;
+ unsigned fd: 5;
+ unsigned func: 6;
+ } FRType;
+#endif
+#if BYTE_ORDER == LITTLE_ENDIAN
+ struct {
+ unsigned imm: 16;
+ unsigned rt: 5;
+ unsigned rs: 5;
+ unsigned op: 6;
+ } IType;
+
+ struct {
+ unsigned target: 26;
+ unsigned op: 6;
+ } JType;
+
+ struct {
+ unsigned func: 6;
+ unsigned shamt: 5;
+ unsigned rd: 5;
+ unsigned rt: 5;
+ unsigned rs: 5;
+ unsigned op: 6;
+ } RType;
+
+ struct {
+ unsigned func: 6;
+ unsigned fd: 5;
+ unsigned fs: 5;
+ unsigned ft: 5;
+ unsigned fmt: 4;
+ unsigned : 1; /* always '1' */
+ unsigned op: 6; /* always '0x11' */
+ } FRType;
+#endif
+} InstFmt;
+
+/*
+ * Values for the 'op' field.
+ */
+#define OP_SPECIAL 000
+#define OP_BCOND 001
+#define OP_J 002
+#define OP_JAL 003
+#define OP_BEQ 004
+#define OP_BNE 005
+#define OP_BLEZ 006
+#define OP_BGTZ 007
+
+#define OP_ADDI 010
+#define OP_ADDIU 011
+#define OP_SLTI 012
+#define OP_SLTIU 013
+#define OP_ANDI 014
+#define OP_ORI 015
+#define OP_XORI 016
+#define OP_LUI 017
+
+#define OP_COP0 020
+#define OP_COP1 021
+#define OP_COP2 022
+#define OP_COP3 023
+#define OP_BEQL 024
+#define OP_BNEL 025
+#define OP_BLEZL 026
+#define OP_BGTZL 027
+
+#define OP_DADDI 030
+#define OP_DADDIU 031
+#define OP_LDL 032
+#define OP_LDR 033
+
+#define OP_LB 040
+#define OP_LH 041
+#define OP_LWL 042
+#define OP_LW 043
+#define OP_LBU 044
+#define OP_LHU 045
+#define OP_LWR 046
+#define OP_LWU 047
+
+#define OP_SB 050
+#define OP_SH 051
+#define OP_SWL 052
+#define OP_SW 053
+#define OP_SDL 054
+#define OP_SDR 055
+#define OP_SWR 056
+#define OP_CACHE 057
+
+#define OP_LL 060
+#define OP_LWC1 061
+#define OP_LWC2 062
+#define OP_LWC3 063
+#define OP_LLD 064
+#define OP_LD 067
+
+#define OP_SC 070
+#define OP_SWC1 071
+#define OP_SWC2 072
+#define OP_SWC3 073
+#define OP_SCD 074
+#define OP_SD 077
+
+/*
+ * Values for the 'func' field when 'op' == OP_SPECIAL.
+ */
+#define OP_SLL 000
+#define OP_SRL 002
+#define OP_SRA 003
+#define OP_SLLV 004
+#define OP_SRLV 006
+#define OP_SRAV 007
+
+#define OP_JR 010
+#define OP_JALR 011
+#define OP_SYSCALL 014
+#define OP_BREAK 015
+#define OP_SYNC 017
+
+#define OP_MFHI 020
+#define OP_MTHI 021
+#define OP_MFLO 022
+#define OP_MTLO 023
+#define OP_DSLLV 024
+#define OP_DSRLV 026
+#define OP_DSRAV 027
+
+#define OP_MULT 030
+#define OP_MULTU 031
+#define OP_DIV 032
+#define OP_DIVU 033
+#define OP_DMULT 034
+#define OP_DMULTU 035
+#define OP_DDIV 036
+#define OP_DDIVU 037
+
+
+#define OP_ADD 040
+#define OP_ADDU 041
+#define OP_SUB 042
+#define OP_SUBU 043
+#define OP_AND 044
+#define OP_OR 045
+#define OP_XOR 046
+#define OP_NOR 047
+
+#define OP_SLT 052
+#define OP_SLTU 053
+#define OP_DADD 054
+#define OP_DADDU 055
+#define OP_DSUB 056
+#define OP_DSUBU 057
+
+#define OP_TGE 060
+#define OP_TGEU 061
+#define OP_TLT 062
+#define OP_TLTU 063
+#define OP_TEQ 064
+#define OP_TNE 066
+
+#define OP_DSLL 070
+#define OP_DSRL 072
+#define OP_DSRA 073
+#define OP_DSLL32 074
+#define OP_DSRL32 076
+#define OP_DSRA32 077
+
+/*
+ * Values for the 'func' field when 'op' == OP_BCOND.
+ */
+#define OP_BLTZ 000
+#define OP_BGEZ 001
+#define OP_BLTZL 002
+#define OP_BGEZL 003
+
+#define OP_TGEI 010
+#define OP_TGEIU 011
+#define OP_TLTI 012
+#define OP_TLTIU 013
+#define OP_TEQI 014
+#define OP_TNEI 016
+
+#define OP_BLTZAL 020
+#define OP_BGEZAL 021
+#define OP_BLTZALL 022
+#define OP_BGEZALL 023
+
+/*
+ * Values for the 'rs' field when 'op' == OP_COPz.
+ */
+#define OP_MF 000
+#define OP_DMF 001
+#define OP_MT 004
+#define OP_DMT 005
+#define OP_BCx 010
+#define OP_BCy 014
+#define OP_CF 002
+#define OP_CT 006
+
+/*
+ * Values for the 'rt' field when 'op' == OP_COPz.
+ */
+#define COPz_BC_TF_MASK 0x01
+#define COPz_BC_TRUE 0x01
+#define COPz_BC_FALSE 0x00
+#define COPz_BCL_TF_MASK 0x02
+#define COPz_BCL_TRUE 0x02
+#define COPz_BCL_FALSE 0x00
+
+#endif /* !_MIPS_MIPS_OPCODE_H_ */
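
InstFmt overlays one 32-bit word with the I/J/R/FR field layouts, declaring the bitfields in opposite order for big- and little-endian builds so that, for example, IType.op always names bits 31..26. A minimal decoder sketch using it; only a handful of the opcodes defined above are shown:

#include <sys/types.h>
#include <machine/endian.h>
#include <machine/mips_opcode.h>

/* Illustrative only: roughly classify a raw instruction word. */
static const char *
classify(unsigned int insn)
{
    InstFmt i;

    i.word = insn;
    switch (i.JType.op) {
    case OP_J:
    case OP_JAL:
        return "jump";
    case OP_BEQ:
    case OP_BNE:
        return "branch";
    case OP_SPECIAL:
        return i.RType.func == OP_SYSCALL ? "syscall" : "special";
    case OP_LW:
    case OP_SW:
        return "load/store";
    default:
        return "other";
    }
}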
diff --git a/sys/arch/mips64/include/param.h b/sys/arch/mips64/include/param.h
new file mode 100644
index 00000000000..4af7c8c2944
--- /dev/null
+++ b/sys/arch/mips64/include/param.h
@@ -0,0 +1,177 @@
+/* $OpenBSD: param.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: machparam.h 1.11 89/08/14
+ * from: @(#)param.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_PARAM_H_
+#define _MIPS_PARAM_H_
+
+#ifdef _KERNEL
+#ifdef _LOCORE
+#include <machine/psl.h>
+#else
+#include <machine/cpu.h>
+#endif
+#endif
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value for all
+ * data types (int, long, ...). The result is u_long and must be cast to
+ * any desired pointer type.
+ */
+#define ALIGNBYTES 7
+#define ALIGN(p) (((u_long)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#define ALIGNED_POINTER(p, t) ((((u_long)(p)) & (sizeof (t) - 1)) == 0)
+
+#define NBPG 4096 /* bytes/page */
+#define PGOFSET (NBPG-1) /* byte offset into page */
+#define PGSHIFT 12 /* LOG2(NBPG) */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+#define NPTEPG (NBPG/4)
+
+#define NBSEG 0x400000 /* bytes/segment */
+#define SEGOFSET (NBSEG-1) /* byte offset into segment */
+#define SEGSHIFT 22 /* LOG2(NBSEG) */
+
+#if (_MIPS_SZPTR == 64)
+#define KERNBASE 0xffffffff80000000L /* start of kernel virtual */
+#else
+#define KERNBASE 0x80000000 /* start of kernel virtual */
+#endif
+#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
+
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define DEV_BSIZE (1 << DEV_BSHIFT)
+#define BLKDEV_IOSIZE 2048
+/* XXX MAXPHYS temporarily changed to 32K until the SCSI driver is fixed. */
+#define MAXPHYS (32 * 1024) /* max raw I/O transfer size */
+
+#define SSIZE 1 /* initial stack size/NBPG */
+#define SINCR 1 /* increment of stack/NBPG */
+
+#if (_MIPS_SZPTR == 64)
+#define UPAGES 4 /* pages of u-area */
+#else
+#define UPAGES 2 /* pages of u-area */
+#endif
+
+#if 0
+#define UVPN (UADDR>>PGSHIFT)/* virtual page number of u */
+#define KERNELSTACK (UADDR+UPAGES*NBPG) /* top of kernel stack */
+#define UADDR 0xffffc000 /* address of u */
+#define UADDR 0xffffffffffffa000 /* address of u */
+#endif
+
+#define USPACE (UPAGES*NBPG) /* size of u-area in bytes */
+#define USPACE_ALIGN (2*NBPG)
+
+#define PMAP_NEW
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than PAGE_SIZE (the software page size), and,
+ * on machines that exchange pages of input or output buffers with mbuf
+ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
+ * of the hardware page size.
+ */
+#define MSIZE 128 /* size of an mbuf */
+#define MCLSHIFT 11
+#define MCLBYTES (1 << MCLSHIFT) /* enough for whole Ethernet packet */
+#define MCLOFSET (MCLBYTES - 1)
+#ifndef NMBCLUSTERS
+#ifdef GATEWAY
+#define NMBCLUSTERS 2048 /* map size, max cluster allocation */
+#else
+#define NMBCLUSTERS 1024 /* map size, max cluster allocation */
+#endif
+#endif
+
+#define MSGBUFSIZE 8192
+
+/* Default malloc arena size */
+#define NKMEMPAGES_MIN_DEFAULT ((8 * 1024 * 1024) >> PAGE_SHIFT)
+#define NKMEMPAGES_MAX_DEFAULT ((64 * 1024 * 1024) >> PAGE_SHIFT)
+
+/* pages ("clicks") (4096 bytes) to disk blocks */
+#define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT))
+#define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT))
+
+/* pages to bytes */
+#define ctob(x) ((x) << PGSHIFT)
+#define btoc(x) (((x) + PGOFSET) >> PGSHIFT)
+
+/* bytes to disk blocks */
+#define btodb(x) ((x) >> DEV_BSHIFT)
+#define dbtob(x) ((x) << DEV_BSHIFT)
+
+/*
+ * Map a ``block device block'' to a file system block.
+ * This should be device dependent, and should use the bsize
+ * field from the disk label.
+ * For now though just use DEV_BSIZE.
+ */
+#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE))
+
+/*
+ * Conversion macros
+ */
+#define mips_round_page(x) ((((u_long)(x)) + NBPG - 1) & ~(NBPG-1))
+#define mips_trunc_page(x) ((u_long)(x) & ~(NBPG-1))
+#define mips_btop(x) ((u_long)(x) >> PGSHIFT)
+#define mips_ptob(x) ((u_long)(x) << PGSHIFT)
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+
+#define DELAY(n) delay(n)
+void delay __P((int));
+void nanodelay __P((int));
+#endif
+
+#else /* !_KERNEL */
+#define DELAY(n) { int N = (n); while (--N > 0); }
+#endif /* !_KERNEL */
+
+#endif /* !_MIPS_PARAM_H_ */
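
ALIGN, btoc/ctob and btodb/dbtob are plain mask-and-shift arithmetic over the 4096-byte page and 512-byte disk block sizes defined above. A few hand-checked spot tests, assuming the header is in scope:

#include <sys/types.h>
#include <machine/param.h>

/* A negative array size breaks the build if a macro misbehaves. */
typedef char align_check[ALIGN(0x1001) == 0x1008 ? 1 : -1]; /* next 8-byte boundary */
typedef char btoc_check[btoc(4097) == 2 ? 1 : -1];          /* 4097 bytes -> 2 pages */
typedef char ctob_check[ctob(2) == 0x2000 ? 1 : -1];        /* 2 pages -> 8192 bytes */
typedef char btodb_check[btodb(8192) == 16 ? 1 : -1];       /* bytes -> 512-byte blocks */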
diff --git a/sys/arch/mips64/include/pcb.h b/sys/arch/mips64/include/pcb.h
new file mode 100644
index 00000000000..ded6a50bc40
--- /dev/null
+++ b/sys/arch/mips64/include/pcb.h
@@ -0,0 +1,69 @@
+/* $OpenBSD: pcb.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: pcb.h 1.13 89/04/23
+ * from: @(#)pcb.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_PCB_H_
+#define _MIPS_PCB_H_
+
+#include <machine/frame.h>
+
+/*
+ * MIPS process control block. This is first in the U-area.
+ */
+struct pcb
+{
+ struct trap_frame pcb_regs; /* saved CPU and registers */
+ label_t pcb_context; /* kernel context for resume */
+ int pcb_onfault; /* for copyin/copyout faults */
+ int pcb_kernel;
+ void *pcb_segtab; /* copy of pmap pm_segtab */
+};
+
+/*
+ * The pcb is augmented with machine-dependent additional data for
+ * core dumps. For the MIPS, there is nothing to add.
+ */
+struct md_coredump {
+ long md_pad[8];
+};
+
+#endif /* !_MIPS_PCB_H_ */
diff --git a/sys/arch/mips64/include/pio.h b/sys/arch/mips64/include/pio.h
new file mode 100644
index 00000000000..9ac0f842b4e
--- /dev/null
+++ b/sys/arch/mips64/include/pio.h
@@ -0,0 +1,125 @@
+/* $OpenBSD: pio.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS_PIO_H_
+#define _MIPS_PIO_H_
+
+/*
+ * I/O macros.
+ */
+
+#define outb(a,v) (*(volatile unsigned char*)(a) = (v))
+#define out8(a,v) (*(volatile unsigned char*)(a) = (v))
+#define outw(a,v) (*(volatile unsigned short*)(a) = (v))
+#define out16(a,v) outw(a,v)
+#define outl(a,v) (*(volatile unsigned int*)(a) = (v))
+#define out32(a,v) outl(a,v)
+#define inb(a) (*(volatile unsigned char*)(a))
+#define in8(a) (*(volatile unsigned char*)(a))
+#define inw(a) (*(volatile unsigned short*)(a))
+#define in16(a) inw(a)
+#define inl(a) (*(volatile unsigned int*)(a))
+#define in32(a) inl(a)
+
+#define out8rb(a,v) (*(volatile unsigned char*)(a) = (v))
+#define out16rb(a,v) (__out16rb((volatile u_int16_t *)(a), v))
+#define out32rb(a,v) (__out32rb((volatile u_int32_t *)(a), v))
+#define in8rb(a) (*(volatile unsigned char*)(a))
+#define in16rb(a) (__in16rb((volatile u_int16_t *)(a)))
+#define in32rb(a) (__in32rb((volatile u_int32_t *)(a)))
+
+#define _swap_(x) \
+ (((x) >> 24) | ((x) << 24) | \
+ (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8))
+
+static __inline void __out32rb(volatile u_int32_t *, u_int32_t);
+static __inline void __out16rb(volatile u_int16_t *, u_int16_t);
+static __inline u_int32_t __in32rb(volatile u_int32_t *);
+static __inline u_int16_t __in16rb(volatile u_int16_t *);
+
+static __inline void
+__out32rb(a,v)
+ volatile u_int32_t *a;
+ u_int32_t v;
+{
+ u_int32_t _v_ = v;
+
+ _v_ = _swap_(_v_);
+ out32(a, _v_);
+}
+
+static __inline void
+__out16rb(a,v)
+ volatile u_int16_t *a;
+ u_int16_t v;
+{
+ u_int16_t _v_;
+
+ _v_ = ((v >> 8) & 0xff) | (v << 8);
+ out16(a, _v_);
+}
+
+static __inline u_int32_t
+__in32rb(a)
+ volatile u_int32_t *a;
+{
+ u_int32_t _v_;
+
+ _v_ = in32(a);
+ _v_ = _swap_(_v_);
+ return _v_;
+}
+
+static __inline u_int16_t
+__in16rb(a)
+ volatile u_int16_t *a;
+{
+ u_int16_t _v_;
+
+ _v_ = in16(a);
+ _v_ = ((_v_ >> 8) & 0xff) | (_v_ << 8);
+ return _v_;
+}
+
+void insb(u_int8_t *, u_int8_t *,int);
+void insw(u_int16_t *, u_int16_t *,int);
+void insl(u_int32_t *, u_int32_t *,int);
+void outsb(u_int8_t *, const u_int8_t *,int);
+void outsw(u_int16_t *, const u_int16_t *,int);
+void outsl(u_int32_t *, const u_int32_t *,int);
+
+/* Helper functions to access the bus 64 bits at a time in LP32 mode */
+u_int64_t lp32_read8(u_int64_t *);
+void lp32_write8(u_int64_t *, u_int64_t);
+
+#endif /* !_MIPS_PIO_H_ */
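
The plain in*/out* macros are direct volatile accesses, while the *rb ("reverse byte") variants swap on the way to and from the bus so that a device wired with the opposite byte order can be driven with ordinary-looking reads and writes; _swap_ is a straightforward 32-bit byte reversal. A tiny compile-time check of the swap, assuming the header's macros are in scope:

#include <sys/types.h>

/* _swap_ (from <machine/pio.h>) reverses the four bytes of a 32-bit word. */
typedef char swap_check[_swap_(0x11223344U) == 0x44332211U ? 1 : -1];

So out32rb(a, v) stores v with its bytes reversed at a, and in32rb(a) undoes the reversal on the way back.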
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
new file mode 100644
index 00000000000..d25cac2dbdc
--- /dev/null
+++ b/sys/arch/mips64/include/pmap.h
@@ -0,0 +1,129 @@
+/* $OpenBSD: pmap.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_PMAP_H_
+#define _MIPS_PMAP_H_
+
+/*
+ * The user address space is 2Gb (0x0 - 0x80000000).
+ * User programs are laid out in memory as follows:
+ * address
+ * USRTEXT 0x00400000
+ * USRDATA 0x10000000
+ * USRSTACK 0x7FFFFFFF
+ *
+ * The user address space is mapped using a two level structure where
+ * virtual address bits 30..22 are used to index into a segment table which
+ * points to a page worth of PTEs (a 4096 byte page can hold 1024 PTEs).
+ * Bits 21..12 are then used to index a PTE which describes a page within
+ * a segment.
+ *
+ * The wired entries in the TLB will contain the following:
+ * 0-1 (UPAGES) for curproc user struct and kernel stack.
+ *
+ * Note: The kernel doesn't use the same data structures as user programs.
+ * All the PTE entries are stored in a single array in Sysmap which is
+ * dynamically allocated at boot time.
+ */
+
+#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET)
+#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
+#define pmap_segmap(m, v) ((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
+
+#define PMAP_SEGTABSIZE 512
+
+union pt_entry;
+
+struct segtab {
+ union pt_entry *seg_tab[PMAP_SEGTABSIZE];
+};
+
+/*
+ * Machine dependent pmap structure.
+ */
+typedef struct pmap {
+ int pm_count; /* pmap reference count */
+ simple_lock_data_t pm_lock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ int pm_tlbpid; /* address space tag */
+ u_int pm_tlbgen; /* TLB PID generation number */
+ struct segtab *pm_segtab; /* pointers to pages of PTEs */
+} *pmap_t;
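To make the layout comment above concrete, here is an illustrative two-level walk. It is a sketch, not part of this header: SEGSHIFT and SEGOFSET are defined elsewhere (presumably param.h), and the 12-bit shift and 1024-entry mask simply restate the 4 KB page and 1024-PTE figures quoted in the comment.

/* Illustrative only: bits 30..22 select the segment, bits 21..12 the PTE.
 * Assumes pm_segtab has been allocated; union pt_entry is only forward-
 * declared here, so its definition must be in scope to use the result. */
static union pt_entry *
example_va_to_ptepage(pmap_t pmap, vaddr_t va, u_int *pte_idx)
{
	union pt_entry *ptepage;

	ptepage = pmap_segmap(pmap, va);	/* first level: segment table */
	*pte_idx = (va >> 12) & 1023;		/* second level: PTE index */
	return (ptepage);			/* NULL if segment unpopulated */
}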
+
+/*
+ * Defines for pmap_attributes[phys_mach_page].
+ */
+#define PMAP_ATTR_MOD 0x01 /* page has been modified */
+#define PMAP_ATTR_REF 0x02 /* page has been referenced */
+
+#ifdef _KERNEL
+extern char *pmap_attributes; /* reference and modify bits */
+extern struct pmap kernel_pmap_store;
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+#define pmap_kernel() (&kernel_pmap_store)
+
+#define PMAP_STEAL_MEMORY /* Enable 'stealing' during boot */
+
+#define PMAP_PREFER(pa, va) pmap_prefer(pa, va)
+
+#define pmap_update(x) /* nothing */
+
+void pmap_prefer(vaddr_t, vaddr_t *);
+
+void pmap_bootstrap(void);
+int pmap_is_page_ro(pmap_t, vaddr_t, int);
+int pmap_alloc_tlbpid(struct proc *);
+void pmap_remove_pv(pmap_t, vaddr_t, vaddr_t);
+int pmap_is_pa_mapped(vaddr_t);
+vaddr_t pmap_pa_to_va(paddr_t);
+void pmap_page_cache(vaddr_t, int);
+
+#define pmap_proc_iflush(p,va,len) /* nothing yet (handled in trap now) */
+
+void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cache);
+
+paddr_t vtophys(void *);
+
+#endif /* _KERNEL */
+
+#endif /* !_MIPS_PMAP_H_ */
diff --git a/sys/arch/mips64/include/proc.h b/sys/arch/mips64/include/proc.h
new file mode 100644
index 00000000000..8b2e9a7705e
--- /dev/null
+++ b/sys/arch/mips64/include/proc.h
@@ -0,0 +1,70 @@
+/* $OpenBSD: proc.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_PROC_H_
+#define _MIPS_PROC_H_
+
+/*
+ * Machine-dependent part of the proc structure.
+ */
+struct mdproc {
+ struct trap_frame *md_regs; /* registers on current frame */
+ int md_flags; /* machine-dependent flags */
+ long md_ss_addr; /* single step address for ptrace */
+ int md_ss_instr; /* single step instruction for ptrace */
+/* The following is RM7000 dependent, but kept in for compatibility */
+ int md_pc_ctrl; /* performance counter control */
+ int md_pc_count; /* performance counter */
+ int md_pc_spill; /* performance counter spill */
+ quad_t md_watch_1;
+ quad_t md_watch_2;
+ int md_watch_m;
+};
+
+/* md_flags */
+#define MDP_FPUSED 0x00000001 /* floating point coprocessor used */
+#define MDP_O32 0x00000002 /* Uses 32 bit syscall interface */
+#define MDP_64 0x00000004 /* Uses new 64 bit syscall interface */
+#define MDP_PERF 0x00010000 /* Performance counter used */
+#define MDP_WATCH1 0x00020000 /* Watch register 1 used */
+#define MDP_WATCH2 0x00040000 /* Watch register 2 used */
+#define MDP_FORKSAVE 0x0000ffff /* Flags to save when forking */
+
+#endif /* !_MIPS_PROC_H_ */
diff --git a/sys/arch/mips64/include/profile.h b/sys/arch/mips64/include/profile.h
new file mode 100644
index 00000000000..c1e52c0eab3
--- /dev/null
+++ b/sys/arch/mips64/include/profile.h
@@ -0,0 +1,88 @@
+/* $OpenBSD: profile.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)profile.h 8.1 (Berkeley) 6/10/93
+ */
+#ifndef _MIPS_PROFILE_H_
+#define _MIPS_PROFILE_H_
+
+#define _MCOUNT_DECL static void ___mcount
+
+/*XXX The cprestore instruction is a "dummy" to shut up as(1). */
+
+#define MCOUNT \
+ __asm(".globl _mcount;" \
+ ".type _mcount,@function;" \
+ "_mcount:;" \
+ ".set noreorder;" \
+ ".set noat;" \
+ ".cpload $25;" \
+ ".cprestore 4;" \
+ "sw $4,8($29);" \
+ "sw $5,12($29);" \
+ "sw $6,16($29);" \
+ "sw $7,20($29);" \
+ "sw $1,0($29);" \
+ "sw $31,4($29);" \
+ "move $5,$31;" \
+ "jal ___mcount;" \
+ "move $4,$1;" \
+ "lw $4,8($29);" \
+ "lw $5,12($29);" \
+ "lw $6,16($29);" \
+ "lw $7,20($29);" \
+ "lw $31,4($29);" \
+ "lw $1,0($29);" \
+ "addu $29,$29,8;" \
+ "j $31;" \
+ "move $31,$1;" \
+ ".set reorder;" \
+ ".set at");
+
+#ifdef _KERNEL
+/*
+ * The following two macros do splhigh and splx respectively.
+ * They have to be defined this way because these are real
+ * functions on the MIPS, and we do not want to invoke mcount
+ * recursively.
+ */
+#define MCOUNT_ENTER s = _splhigh()
+
+#define MCOUNT_EXIT _splx(s)
+#endif /* _KERNEL */
+
+#endif /* !_MIPS_PROFILE_H_ */
diff --git a/sys/arch/mips64/include/ptrace.h b/sys/arch/mips64/include/ptrace.h
new file mode 100644
index 00000000000..c2e99e83a2f
--- /dev/null
+++ b/sys/arch/mips64/include/ptrace.h
@@ -0,0 +1,50 @@
+/* $OpenBSD: ptrace.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)ptrace.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_PTRACE_H_
+#define _MIPS_PTRACE_H_
+
+/*
+ * Machine dependent trace commands.
+ *
+ */
+
+#define PT_GETREGS (PT_FIRSTMACH+0)
+#define PT_SETREGS (PT_FIRSTMACH+1)
+#define PT_STEP (PT_FIRSTMACH+2)
+
+#endif /* !_MIPS_PTRACE_H_ */
diff --git a/sys/arch/mips64/include/reg.h b/sys/arch/mips64/include/reg.h
new file mode 100644
index 00000000000..be2dcbf5fe6
--- /dev/null
+++ b/sys/arch/mips64/include/reg.h
@@ -0,0 +1,61 @@
+/* $OpenBSD: reg.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: reg.h 1.1 90/07/09
+ * @(#)reg.h 8.2 (Berkeley) 1/11/94
+ */
+
+#ifndef _MIPS_REG_H_
+#define _MIPS_REG_H_
+/*
+ * Location of the users' stored
+ * registers relative to ZERO.
+ * Usage is p->p_regs[XX].
+ *
+ * This must be visible to assembly code.
+ */
+#include <machine/regnum.h>
+
+/*
+ * Register set accessible via /proc/$pid/reg
+ */
+struct reg {
+ register_t r_regs[NREGS]; /* numbered as above */
+};
+#endif /* !_MIPS_REG_H_ */
diff --git a/sys/arch/mips64/include/regdef.h b/sys/arch/mips64/include/regdef.h
new file mode 100644
index 00000000000..eec78c9b745
--- /dev/null
+++ b/sys/arch/mips64/include/regdef.h
@@ -0,0 +1,77 @@
+/* $OpenBSD: regdef.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell. This file is derived from the MIPS RISC
+ * Architecture book by Gerry Kane.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)regdef.h 8.1 (Berkeley) 6/10/93
+ */
+#ifndef _MIPS_REGDEF_H_
+#define _MIPS_REGDEF_H_
+
+#define zero $0 /* always zero */
+#define AT $at /* assembler temp */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* temp registers (not saved across subroutine calls) */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+#define s0 $16 /* saved across subroutine calls (callee saved) */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* two more temp registers */
+#define t9 $25
+#define k0 $26 /* kernel temporary */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define s8 $30 /* one more callee saved */
+#define ra $31 /* return address */
+
+#endif /* !_MIPS_REGDEF_H_ */
diff --git a/sys/arch/mips64/include/regnum.h b/sys/arch/mips64/include/regnum.h
new file mode 100644
index 00000000000..296016dc01b
--- /dev/null
+++ b/sys/arch/mips64/include/regnum.h
@@ -0,0 +1,124 @@
+/* $OpenBSD: regnum.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2002 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MIPS64_REGNUM_H_
+#define _MIPS64_REGNUM_H_
+
+/*
+ * Location of the saved registers relative to ZERO.
+ * Usage is p->p_regs[XX].
+ */
+#define ZERO 0
+#define AST 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+#define A2 6
+#define A3 7
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#define T4 12
+#define T5 13
+#define T6 14
+#define T7 15
+#define S0 16
+#define S1 17
+#define S2 18
+#define S3 19
+#define S4 20
+#define S5 21
+#define S6 22
+#define S7 23
+#define T8 24
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define S8 30
+#define RA 31
+#define SR 32
+#define PS SR /* alias for SR */
+#define MULLO 33
+#define MULHI 34
+#define BADVADDR 35
+#define CAUSE 36
+#define PC 37
+#define IC 38
+#define CPL 39
+
+#define NUMSAVEREGS 40 /* Number of registers saved in trap */
+
+#define FPBASE NUMSAVEREGS
+#define F0 (FPBASE+0)
+#define F1 (FPBASE+1)
+#define F2 (FPBASE+2)
+#define F3 (FPBASE+3)
+#define F4 (FPBASE+4)
+#define F5 (FPBASE+5)
+#define F6 (FPBASE+6)
+#define F7 (FPBASE+7)
+#define F8 (FPBASE+8)
+#define F9 (FPBASE+9)
+#define F10 (FPBASE+10)
+#define F11 (FPBASE+11)
+#define F12 (FPBASE+12)
+#define F13 (FPBASE+13)
+#define F14 (FPBASE+14)
+#define F15 (FPBASE+15)
+#define F16 (FPBASE+16)
+#define F17 (FPBASE+17)
+#define F18 (FPBASE+18)
+#define F19 (FPBASE+19)
+#define F20 (FPBASE+20)
+#define F21 (FPBASE+21)
+#define F22 (FPBASE+22)
+#define F23 (FPBASE+23)
+#define F24 (FPBASE+24)
+#define F25 (FPBASE+25)
+#define F26 (FPBASE+26)
+#define F27 (FPBASE+27)
+#define F28 (FPBASE+28)
+#define F29 (FPBASE+29)
+#define F30 (FPBASE+30)
+#define F31 (FPBASE+31)
+#define FSR (FPBASE+32)
+
+#define NUMFPREGS 33
+
+#define NREGS (NUMSAVEREGS + NUMFPREGS)
+
+#endif /* !_MIPS64_REGNUM_H_ */
diff --git a/sys/arch/mips64/include/reloc.h b/sys/arch/mips64/include/reloc.h
new file mode 100644
index 00000000000..5e080d9b16b
--- /dev/null
+++ b/sys/arch/mips64/include/reloc.h
@@ -0,0 +1,38 @@
+/* $OpenBSD: reloc.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)reloc.h 8.1 (Berkeley) 6/10/93
+ * from: Header: reloc.h,v 1.6 92/06/20 09:59:37 torek Exp
+ */
+
diff --git a/sys/arch/mips64/include/setjmp.h b/sys/arch/mips64/include/setjmp.h
new file mode 100644
index 00000000000..aec83de6e9a
--- /dev/null
+++ b/sys/arch/mips64/include/setjmp.h
@@ -0,0 +1,12 @@
+/* $OpenBSD: setjmp.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * mips/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#ifndef _MIPS_SETJMP_H_
+#define _MIPS_SETJMP_H_
+
+#define _JBLEN 83 /* size, in longs, of a jmp_buf */
+
+#endif /* !_MIPS_SETJMP_H_ */
diff --git a/sys/arch/mips64/include/signal.h b/sys/arch/mips64/include/signal.h
new file mode 100644
index 00000000000..6af5d134bb6
--- /dev/null
+++ b/sys/arch/mips64/include/signal.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: signal.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_SIGNAL_H_
+#define _MIPS_SIGNAL_H_
+
+#if !defined(__LANGUAGE_ASSEMBLY)
+#include <sys/types.h>
+
+/*
+ * Machine-dependent signal definitions
+ */
+typedef int sig_atomic_t;
+
+#ifndef _ANSI_SOURCE
+
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ */
+struct sigcontext {
+ long sc_onstack; /* sigstack state to restore */
+ long sc_mask; /* signal mask to restore */
+ register_t sc_pc; /* pc at time of signal */
+ register_t sc_regs[32]; /* processor regs 0 to 31 */
+ register_t mullo; /* mullo and mulhi registers... */
+ register_t mulhi; /* mullo and mulhi registers... */
+ f_register_t sc_fpregs[33]; /* fp regs 0 to 31 and csr */
+ long sc_fpused; /* fp has been used */
+ long sc_fpc_eir; /* floating point exception instruction reg */
+ long xxx[8]; /* XXX reserved */
+};
+#endif /* !_ANSI_SOURCE */
+
+#else /* __LANGUAGE_ASSEMBLY */
+#define SC_ONSTACK (0 * REGSZ)
+#define SC_MASK (1 * REGSZ)
+#define SC_PC (2 * REGSZ)
+#define SC_REGS (3 * REGSZ)
+#define SC_MULLO (35 * REGSZ)
+#define SC_MULHI (36 * REGSZ)
+#define SC_FPREGS (37 * REGSZ)
+#define SC_FPUSED (70 * REGSZ)
+#define SC_FPC_EIR (71 * REGSZ)
+#endif /* __LANGUAGE_ASSEMBLY */
+
+#endif /* !_MIPS_SIGNAL_H_ */
diff --git a/sys/arch/mips64/include/spinlock.h b/sys/arch/mips64/include/spinlock.h
new file mode 100644
index 00000000000..3715ad9389c
--- /dev/null
+++ b/sys/arch/mips64/include/spinlock.h
@@ -0,0 +1,10 @@
+/* $OpenBSD: spinlock.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+#ifndef _MIPS_SPINLOCK_H_
+#define _MIPS_SPINLOCK_H_
+
+#define _SPINLOCK_UNLOCKED (0)
+#define _SPINLOCK_LOCKED (1)
+typedef int _spinlock_lock_t;
+
+#endif /* !_MIPS_SPINLOCK_H_ */
diff --git a/sys/arch/mips64/include/stdarg.h b/sys/arch/mips64/include/stdarg.h
new file mode 100644
index 00000000000..39b4109ac6c
--- /dev/null
+++ b/sys/arch/mips64/include/stdarg.h
@@ -0,0 +1,251 @@
+/* $OpenBSD: stdarg.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdarg.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_STDARG_H_
+#define _MIPS_STDARG_H_
+
+#include <machine/ansi.h>
+
+typedef _BSD_VA_LIST_ va_list;
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+#if defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+
+typedef struct {
+ /* Pointer to FP regs. */
+ char *__fp_regs;
+ /* Number of FP regs remaining. */
+ int __fp_left;
+ /* Pointer to GP regs followed by stack parameters. */
+ char *__gp_regs;
+} __gnuc_va_list;
+
+#else /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+
+typedef char * __gnuc_va_list;
+
+#endif /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#endif /* not __GNUC_VA_LIST */
+
+/* If this is for internal libc use, don't define anything but __gnuc_va_list */
+
+#ifndef _VA_MIPS_H_ENUM
+#define _VA_MIPS_H_ENUM
+enum {
+ __no_type_class = -1,
+ __void_type_class,
+ __integer_type_class,
+ __char_type_class,
+ __enumeral_type_class,
+ __boolean_type_class,
+ __pointer_type_class,
+ __reference_type_class,
+ __offset_type_class,
+ __real_type_class,
+ __complex_type_class,
+ __function_type_class,
+ __method_type_class,
+ __record_type_class,
+ __union_type_class,
+ __array_type_class,
+ __string_type_class,
+ __set_type_class,
+ __file_type_class,
+ __lang_type_class
+};
+#endif
+
+#define __va_ellipsis ...
+
+#ifdef __mips64
+#define __va_rounded_size(__TYPE) \
+ (((sizeof (__TYPE) + 8 - 1) / 8) * 8)
+#else
+#define __va_rounded_size(__TYPE) \
+ (((sizeof (__TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int))
+#endif
+
+#ifdef __mips64
+#define __va_reg_size 8
+#else
+#define __va_reg_size 4
+#endif
+
+#if defined (__mips_eabi)
+#if ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+#ifdef __mips64
+#define va_start(__AP, __LASTARG) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : 0)), \
+ __AP.__fp_left = 8 - __builtin_args_info (3), \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * __va_reg_size)
+#else /* ! defined (__mips64) */
+#define va_start(__AP, __LASTARG) \
+ (__AP.__gp_regs = ((char *) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) < 8 \
+ ? (8 - __builtin_args_info (2)) * __va_reg_size \
+ : 0)), \
+ __AP.__fp_left = (8 - __builtin_args_info (3)) / 2, \
+ __AP.__fp_regs = __AP.__gp_regs - __AP.__fp_left * 8, \
+ __AP.__fp_regs = (char *) ((int) __AP.__fp_regs & -8))
+#endif /* ! defined (__mips64) */
+#else /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float) ) */
+#define va_start(__AP, __LASTARG) \
+ (__AP = ((__gnuc_va_list) __builtin_next_arg (__LASTARG) \
+ - (__builtin_args_info (2) >= 8 ? 0 \
+ : (8 - __builtin_args_info (2)) * __va_reg_size)))
+#endif /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float) ) */
+#else /* ! defined (__mips_eabi) */
+#define va_start(__AP, __LASTARG) \
+ (__AP = (__gnuc_va_list) __builtin_next_arg (__LASTARG))
+#endif /* ! (defined (__mips_eabi) && ! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+
+#ifndef va_end
+void va_end (__gnuc_va_list); /* Defined in libgcc.a */
+#endif
+#define va_end(__AP) ((void)0)
+
+#if defined (__mips_eabi)
+
+#if ! defined (__mips_soft_float) && ! defined (__mips_single_float)
+#ifdef __mips64
+#define __va_next_addr(__AP, __type) \
+ ((__builtin_classify_type (*(__type *) 0) == __real_type_class \
+ && __AP.__fp_left > 0) \
+ ? (--__AP.__fp_left, (__AP.__fp_regs += 8) - 8) \
+ : (__AP.__gp_regs += __va_reg_size) - __va_reg_size)
+#else
+#define __va_next_addr(__AP, __type) \
+ ((__builtin_classify_type (*(__type *) 0) == __real_type_class \
+ && __AP.__fp_left > 0) \
+ ? (--__AP.__fp_left, (__AP.__fp_regs += 8) - 8) \
+ : (((__builtin_classify_type (* (__type *) 0) < __record_type_class \
+ && __alignof__ (__type) > 4) \
+ ? __AP.__gp_regs = (char *) (((int) __AP.__gp_regs + 8 - 1) & -8) \
+ : (char *) 0), \
+ (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? (__AP.__gp_regs += __va_reg_size) - __va_reg_size \
+ : ((__AP.__gp_regs += __va_rounded_size (__type)) \
+ - __va_rounded_size (__type)))))
+#endif
+#else /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+#ifdef __mips64
+#define __va_next_addr(__AP, __type) \
+ ((__AP += __va_reg_size) - __va_reg_size)
+#else
+#define __va_next_addr(__AP, __type) \
+ (((__builtin_classify_type (* (__type *) 0) < __record_type_class \
+ && __alignof__ (__type) > 4) \
+ ? __AP = (char *) (((__PTRDIFF_TYPE__) __AP + 8 - 1) & -8) \
+ : (char *) 0), \
+ (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? (__AP += __va_reg_size) - __va_reg_size \
+ : ((__AP += __va_rounded_size (__type)) \
+ - __va_rounded_size (__type))))
+#endif
+#endif /* ! (! defined (__mips_soft_float) && ! defined (__mips_single_float)) */
+
+#ifdef __MIPSEB__
+#define va_arg(__AP, __type) \
+ ((__va_rounded_size (__type) <= __va_reg_size) \
+ ? *(__type *) (void *) (__va_next_addr (__AP, __type) \
+ + __va_reg_size \
+ - sizeof (__type)) \
+ : (__builtin_classify_type (*(__type *) 0) >= __record_type_class \
+ ? **(__type **) (void *) (__va_next_addr (__AP, __type) \
+ + __va_reg_size \
+ - sizeof (char *)) \
+ : *(__type *) (void *) __va_next_addr (__AP, __type)))
+#else
+#define va_arg(__AP, __type) \
+ ((__va_rounded_size (__type) <= __va_reg_size) \
+ ? *(__type *) (void *) __va_next_addr (__AP, __type) \
+ : (__builtin_classify_type (* (__type *) 0) >= __record_type_class \
+ ? **(__type **) (void *) __va_next_addr (__AP, __type) \
+ : *(__type *) (void *) __va_next_addr (__AP, __type)))
+#endif
+
+#else /* ! defined (__mips_eabi) */
+
+/* We cast to void * and then to TYPE * because this avoids
+ a warning about increasing the alignment requirement. */
+/* The __mips64 cases are reversed from the 32 bit cases, because the standard
+ 32 bit calling convention left-aligns all parameters smaller than a word,
+ whereas the __mips64 calling convention does not (and hence they are
+ right aligned). */
+#ifdef __mips64
+#ifdef __MIPSEB__
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) \
+ ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))))[-1]
+#else
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#endif
+
+#else /* not __mips64 */
+
+#ifdef __MIPSEB__
+/* For big-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__AP = (char *) ((__alignof__ (__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size (__type))), \
+ *(__type *) (void *) (__AP - __va_rounded_size (__type)))
+#else
+/* For little-endian machines. */
+#define va_arg(__AP, __type) \
+ ((__type *) (void *) (__AP = (char *) ((__alignof__(__type) > 4 \
+ ? ((__PTRDIFF_TYPE__)__AP + 8 - 1) & -8 \
+ : ((__PTRDIFF_TYPE__)__AP + 4 - 1) & -4) \
+ + __va_rounded_size(__type))))[-1]
+#endif
+#endif
+#endif /* ! defined (__mips_eabi) */
+
+/* Copy __gnuc_va_list into another variable of this type. */
+#define __va_copy(dest, src) (dest) = (src)
+#define va_copy __va_copy
+
+#endif /* !_MIPS_STDARG_H_ */
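All of the ABI-specific machinery above exists so that ordinary variadic C code works unchanged across the MIPS calling conventions; a minimal, hypothetical usage sketch of the macros this header provides:

#include <machine/stdarg.h>

/* Hypothetical helper: add up `count' int arguments. */
static int
sum_ints(int count, ...)
{
	va_list ap;
	int i, total = 0;

	va_start(ap, count);
	for (i = 0; i < count; i++)
		total += va_arg(ap, int);
	va_end(ap);
	return (total);
}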
diff --git a/sys/arch/mips64/include/trap.h b/sys/arch/mips64/include/trap.h
new file mode 100644
index 00000000000..d752a78c156
--- /dev/null
+++ b/sys/arch/mips64/include/trap.h
@@ -0,0 +1,127 @@
+/* $OpenBSD: trap.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: trap.h 1.1 90/07/09
+ * from: @(#)trap.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _MIPS_TRAP_H_
+#define _MIPS_TRAP_H_
+
+/*
+ * Trap codes, also known to trap.c where they index the name strings.
+ * Used for indexing, so modify with care.
+ */
+
+#define T_INT 0 /* Interrupt pending */
+#define T_TLB_MOD 1 /* TLB modified fault */
+#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS 3 /* TLB miss on a store */
+#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST 5 /* Address error on a store */
+#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
+#define T_SYSCALL 8 /* System call */
+#define T_BREAK 9 /* Breakpoint */
+#define T_RES_INST 10 /* Reserved instruction exception */
+#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
+#define T_OVFLOW 12 /* Arithmetic overflow */
+#define T_TRAP 13 /* Trap instruction */
+#define T_VCEI 14 /* Virtual coherency instruction */
+#define T_FPE 15 /* Floating point exception */
+#define T_IWATCH 16 /* Inst. Watch address reference */
+#define T_DWATCH 23 /* Data Watch address reference */
+#define T_VCED 31 /* Virtual coherency data */
+
+#define T_USER 0x20 /* user-mode flag or'ed with type */
+
+/*
+ * Defines for the trap handler catching faults on kernel memory accesses.
+ */
+#define KT_BADERR 1 /* Bad address */
+#define KT_COPYERR 2 /* User space copy error */
+#define KT_KCOPYERR 3 /* Kernel space copy error */
+#define KT_FSWBERR 4 /* Access error */
+#define KT_FSWINTRBERR 5 /* Access error, non sleep */
+#define KT_DDBERR 6 /* DDB access error */
+
+
+#ifndef _LOCORE
+
+#if defined(DDB) || defined(DEBUG)
+
+struct trapdebug { /* trap history buffer for debugging */
+ u_int status;
+ u_int cause;
+ u_long vadr;
+ u_long pc;
+ u_long ra;
+ u_long sp;
+ u_int code;
+ u_int cpl;
+};
+
+#define trapdebug_enter(x, cd) { \
+ u_int32_t __s = disableintr(); \
+ trp->status = x->sr; \
+ trp->cause = x->cause; \
+ trp->vadr = x->badvaddr; \
+ trp->pc = x->pc; \
+ trp->sp = x->sp; \
+ trp->ra = x->ra; \
+ trp->cpl = x->cpl; \
+ trp->code = cd; \
+ if (++trp == &trapdebug[TRAPSIZE]) \
+ trp = trapdebug; \
+ setsr(__s); \
+ }
+
+#define TRAPSIZE 10 /* Trap log buffer length */
+extern struct trapdebug trapdebug[TRAPSIZE], *trp;
+
+void trapDump __P((char *msg));
+
+#else
+
+#define trapdebug_enter(x, cd)
+
+#endif
+#endif /* _LOCORE */
+
+#endif /* !_MIPS_TRAP_H_ */
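When DDB or DEBUG is defined, trapdebug_enter() records each trap into the trapdebug[] ring and trp points at the slot that will be overwritten next. The trapDump() declared above is the real consumer; the sketch below is not part of the source and only illustrates how such a ring is walked oldest-first:

/* Illustrative only; requires DDB or DEBUG so the ring exists. */
void
example_dump_traps(void)
{
	struct trapdebug *t = trp;
	int i;

	for (i = 0; i < TRAPSIZE; i++) {
		printf("pc 0x%lx ra 0x%lx cause 0x%x code %u\n",
		    t->pc, t->ra, t->cause, t->code);
		if (++t == &trapdebug[TRAPSIZE])
			t = trapdebug;
	}
}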
diff --git a/sys/arch/mips64/include/types.h b/sys/arch/mips64/include/types.h
new file mode 100644
index 00000000000..947e3a75374
--- /dev/null
+++ b/sys/arch/mips64/include/types.h
@@ -0,0 +1,96 @@
+/* $OpenBSD: types.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.3 (Berkeley) 1/5/94
+ */
+
+#ifndef _MIPS_TYPES_H_
+#define _MIPS_TYPES_H_
+
+/*
+ * We need to handle the various ISA levels for sizes.
+ */
+#define _MIPS_ISA_MIPS1 1 /* R2000/R3000 */
+#define _MIPS_ISA_MIPS2 2 /* R4000/R6000 */
+#define _MIPS_ISA_MIPS3 3 /* R4000 */
+#define _MIPS_ISA_MIPS4 4 /* TFP (R1x000) */
+
+#include <sys/cdefs.h>
+
+typedef unsigned long vaddr_t;
+typedef unsigned long paddr_t;
+typedef unsigned long vsize_t;
+typedef unsigned long psize_t;
+
+/*
+ * Basic integral types. Omit the typedef if
+ * not possible for a machine/compiler combination.
+ */
+#define __BIT_TYPES_DEFINED__
+typedef __signed char int8_t;
+typedef unsigned char u_int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef unsigned short uint16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+typedef unsigned int uint32_t;
+/* LONGLONG */
+typedef long long int64_t;
+/* LONGLONG */
+typedef unsigned long long u_int64_t;
+/* LONGLONG */
+typedef unsigned long long uint64_t;
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4)
+typedef int64_t register_t;
+typedef int64_t f_register_t;
+#else
+typedef int32_t register_t;
+typedef int32_t f_register_t;
+#endif
+
+#define __SWAP_BROKEN
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
+typedef struct label_t {
+ register_t val[14];
+} label_t;
+#endif
+
+#endif /* !_MIPS_TYPES_H_ */
diff --git a/sys/arch/mips64/include/varargs.h b/sys/arch/mips64/include/varargs.h
new file mode 100644
index 00000000000..8ed0325b252
--- /dev/null
+++ b/sys/arch/mips64/include/varargs.h
@@ -0,0 +1,54 @@
+/* $OpenBSD: varargs.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)varargs.h 8.2 (Berkeley) 3/22/94
+ */
+
+#ifndef _MIPS_VARARGS_H_
+#define _MIPS_VARARGS_H_
+
+#include <machine/stdarg.h>
+
+#define va_dcl int va_alist; ...
+
+#undef va_start
+#define va_start(ap) \
+ ap = (char *)&va_alist
+
+#endif /* !_MIPS_VARARGS_H_ */
diff --git a/sys/arch/mips64/include/vmparam.h b/sys/arch/mips64/include/vmparam.h
new file mode 100644
index 00000000000..581716a9c50
--- /dev/null
+++ b/sys/arch/mips64/include/vmparam.h
@@ -0,0 +1,142 @@
+/* $OpenBSD: vmparam.h,v 1.1 2004/08/06 20:56:02 pefo Exp $ */
+/* $NetBSD: vmparam.h,v 1.5 1994/10/26 21:10:10 cgd Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: vmparam.h 1.16 91/01/18
+ * @(#)vmparam.h 8.2 (Berkeley) 4/22/94
+ */
+
+#ifndef _MIPS_VMPARAM_H_
+#define _MIPS_VMPARAM_H_
+
+/*
+ * Machine dependent constants for MIPS processors.
+ */
+/*
+ * USRTEXT is the start of the user text/data space, while USRSTACK
+ * is the top (end) of the user stack.
+ */
+#define USRTEXT 0x0000000000400000L
+#define USRSTACK 0x0000000080000000L /* Start of user stack */
+
+/*
+ * Virtual memory related constants, all in bytes
+ */
+#ifndef MAXTSIZ
+#define MAXTSIZ (64*1024*1024) /* max text size */
+#endif
+#ifndef DFLDSIZ
+#define DFLDSIZ (64*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (1*1024*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (2*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (32*1024*1024) /* max stack size */
+#endif
+
+/*
+ * PTEs for mapping user space into the kernel for physio operations.
+ * 16 PTEs are enough to cover 8 disks * MAXBSIZE.
+ */
+#ifndef USRIOSIZE
+#define USRIOSIZE 32
+#endif
+
+/*
+ * PTEs for system V style shared memory.
+ * This is basically slop for kmempt which we actually allocate (malloc) from.
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 8192 /* 8mb */
+#endif
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+#define VM_PHYSSEG_MAX 8 /* Max number of physical memory segments */
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD
+
+
+/* user/kernel map constants */
+#if (_MIPS_SZPTR == 64)
+#define VM_MIN_ADDRESS ((vaddr_t)0x0000000000000000L)
+#define VM_MAXUSER_ADDRESS ((vaddr_t)0x0000000080000000L)
+#define VM_MAX_ADDRESS ((vaddr_t)0x0000000080000000L)
+#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0xffffffffc0000000L)
+#else
+#define VM_MIN_ADDRESS ((vaddr_t)0x00000000)
+#define VM_MAXUSER_ADDRESS ((vaddr_t)0x80000000)
+#define VM_MAX_ADDRESS ((vaddr_t)0x80000000)
+#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0xc0000000)
+#endif
+
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+/* Kernel page table size is variable. */
+vaddr_t virtual_end;
+#define VM_MAX_KERNEL_ADDRESS virtual_end
+
+/* virtual sizes (bytes) for various kernel submaps */
+#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
+#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
+
+/*
+ * pmap-specific data stored in the vm_physmem[] array.
+ */
+#define __HAVE_PMAP_PHYSSEG
+struct pmap_physseg {
+ struct pv_entry *pvent; /* pv list of this seg */
+ char *attrs;
+};
+
+#endif /* !_MIPS_VMPARAM_H_ */
diff --git a/sys/arch/mips64/mips64/busdma.c b/sys/arch/mips64/mips64/busdma.c
new file mode 100644
index 00000000000..eb740703484
--- /dev/null
+++ b/sys/arch/mips64/mips64/busdma.c
@@ -0,0 +1,642 @@
+/* $OpenBSD: busdma.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/user.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/autoconf.h>
+
+#include <machine/bus.h>
+
+#include <pmonmips/localbus/localbus.h>
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
+ bus_dma_tag_t t;
+ bus_size_t size;
+ int nsegments;
+ bus_size_t maxsegsz;
+ bus_size_t boundary;
+ int flags;
+ bus_dmamap_t *dmamp;
+{
+ struct machine_bus_dmamap *map;
+ void *mapstore;
+ size_t mapsize;
+
+ /*
+ * Allocate and initialize the DMA map. The end of the map
+ * is a variable-sized array of segments, so we allocate enough
+ * room for them in one shot.
+ *
+ * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+ * of ALLOCNOW notifies others that we've reserved these resources,
+ * and they are not to be freed.
+ *
+ * The bus_dmamap_t includes one bus_dma_segment_t, hence
+ * the (nsegments - 1).
+ */
+ mapsize = sizeof(struct machine_bus_dmamap) +
+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
+ if ((mapstore = malloc(mapsize, M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ bzero(mapstore, mapsize);
+ map = (struct machine_bus_dmamap *)mapstore;
+ map->_dm_size = size;
+ map->_dm_segcnt = nsegments;
+ map->_dm_maxsegsz = maxsegsz;
+ map->_dm_boundary = boundary;
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+
+ *dmamp = map;
+ return (0);
+}
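+
+/*
+ * Worked example of the allocation above (editorial sketch): for a map
+ * created with nsegments = 4, the single malloc() covers
+ *	sizeof(struct machine_bus_dmamap) + 3 * sizeof(bus_dma_segment_t)
+ * so dm_segs[0..3] are usable without a second allocation.
+ */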
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Common function for loading a DMA map with a linear buffer. May
+ * be called by bus-specific DMA map load functions.
+ */
+int
+_dmamap_load(t, map, buf, buflen, p, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ void *buf;
+ bus_size_t buflen;
+ struct proc *p;
+ int flags;
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ caddr_t vaddr = buf;
+ int first, seg;
+ pmap_t pmap;
+ bus_size_t saved_buflen;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_nsegs = 0;
+ map->dm_mapsize = 0;
+
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ if (p != NULL)
+ pmap = p->p_vmspace->vm_map.pmap;
+ else
+ pmap = pmap_kernel();
+
+ lastaddr = ~0; /* XXX gcc */
+ bmask = ~(map->_dm_boundary - 1);
+
+ saved_buflen = buflen;
+ for (first = 1, seg = 0; buflen > 0; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ pmap_extract(pmap, (vaddr_t)vaddr, (paddr_t *)&curaddr);
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary > 0) {
+ baddr = (curaddr + map->_dm_boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ if (first) {
+ map->dm_segs[seg].ds_addr = curaddr + t->dma_offs;
+ map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg].ds_vaddr = (vaddr_t)vaddr;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (map->dm_segs[seg].ds_len + sgsize) <=
+ map->_dm_maxsegsz &&
+ (map->_dm_boundary == 0 ||
+ ((map->dm_segs[seg].ds_addr - t->dma_offs) & bmask) ==
+ (curaddr & bmask)))
+ map->dm_segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= map->_dm_segcnt)
+ break;
+ map->dm_segs[seg].ds_addr = curaddr + t->dma_offs;
+ map->dm_segs[seg].ds_len = sgsize;
+ map->dm_segs[seg].ds_vaddr = (vaddr_t)vaddr;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0)
+ return (EFBIG); /* XXX better return value here? */
+
+ map->dm_nsegs = seg + 1;
+ map->dm_mapsize = saved_buflen;
+ return (0);
+}
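+
+/*
+ * Worked example for the loop above (editorial sketch): loading a 10000
+ * byte buffer that starts 0x800 bytes into a 4KB page produces chunks of
+ * 2048, 4096 and 3856 bytes. If the three pages happen to be physically
+ * contiguous they collapse into a single 10000 byte dm_segs[] entry;
+ * otherwise each discontiguity starts a new segment, up to _dm_segcnt.
+ */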
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+_dmamap_load_mbuf(t, map, m, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct mbuf *m;
+ int flags;
+{
+ int i;
+ size_t len;
+
+ map->dm_nsegs = 0;
+ map->dm_mapsize = 0;
+
+ i = 0;
+ len = 0;
+ while (m) {
+ vaddr_t vaddr = mtod(m, vaddr_t);
+ long buflen = (long)m->m_len;
+
+ len += buflen;
+ while (buflen > 0 && i < map->_dm_segcnt) {
+ paddr_t pa;
+ long incr;
+
+ incr = min(buflen, NBPG);
+ buflen -= incr;
+ (void) pmap_extract(pmap_kernel(), vaddr, &pa);
+ pa += t->dma_offs;
+
+ if (i > 0 && pa == (map->dm_segs[i-1].ds_addr + map->dm_segs[i-1].ds_len)
+ && ((map->dm_segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
+ /* Hey, waddyaknow, they're contiguous */
+ map->dm_segs[i-1].ds_len += incr;
+ continue;
+ }
+ map->dm_segs[i].ds_addr = pa;
+ map->dm_segs[i].ds_vaddr = vaddr;
+ map->dm_segs[i].ds_len = incr;
+ i++;
+ vaddr += incr;
+ }
+ m = m->m_next;
+ if (m && i >= map->_dm_segcnt) {
+ /* Exceeded the size of our dmamap */
+ return E2BIG;
+ }
+ }
+ map->dm_nsegs = i;
+ map->dm_mapsize = len;
+ return (0);
+}
+
+/*
+ * Like _dmamap_load(), but for uios.
+ */
+int
+_dmamap_load_uio(t, map, uio, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct uio *uio;
+ int flags;
+{
+ return (EOPNOTSUPP);
+}
+
+/*
+ * Like _dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+_dmamap_load_raw(t, map, segs, nsegs, size, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ bus_size_t size;
+ int flags;
+{
+ if (nsegs > map->_dm_segcnt || size > map->_dm_size)
+ return (EINVAL);
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary) {
+ bus_addr_t bmask = ~(map->_dm_boundary - 1);
+ int i;
+
+ for (i = 0; i < nsegs; i++) {
+ if (segs[i].ds_len > map->_dm_maxsegsz)
+ return (EINVAL);
+ if ((segs[i].ds_addr & bmask) !=
+ ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
+ return (EINVAL);
+ }
+ }
+
+ bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
+ map->dm_nsegs = nsegs;
+ map->dm_mapsize = size;
+ return (0);
+}
+
+/*
+ * Common function for unloading a DMA map. May be called by
+ * bus-specific DMA map unload functions.
+ */
+void
+_dmamap_unload(t, map)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+{
+
+ /*
+ * No resources to free; just mark the mappings as
+ * invalid.
+ */
+ map->dm_nsegs = 0;
+ map->dm_mapsize = 0;
+}
+
+/*
+ * Common function for DMA map synchronization. May be called
+ * by bus-specific DMA map synchronization functions.
+ */
+void
+_dmamap_sync(t, map, addr, size, op)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_addr_t addr;
+ bus_size_t size;
+ bus_dmasync_op_t op;
+{
+#define SYNC_R 0
+#define SYNC_W 1
+#define SYNC_X 2
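+	/*
+	 * These values are presumably what the port's Mips_IOSyncDCache()
+	 * hook (e.g. Mips5k_IOSyncDCache in cache_r5k.S) expects as its
+	 * "rw" argument: 0 = read, 1 = write, 2 = write-read.
+	 */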
+ int nsegs;
+ int curseg;
+
+ nsegs = map->dm_nsegs;
+ curseg = 0;
+
+#ifdef DEBUG_BUSDMASYNC
+ printf("dmasync %p:%p:%p:", map, addr, size);
+ if (op & BUS_DMASYNC_PREWRITE) printf("PRW ");
+ if (op & BUS_DMASYNC_PREREAD) printf("PRR ");
+ if (op & BUS_DMASYNC_POSTWRITE) printf("POW ");
+ if (op & BUS_DMASYNC_POSTREAD) printf("POR ");
+ printf("\n");
+#endif
+
+ while (size && nsegs) {
+ bus_addr_t vaddr;
+ bus_size_t ssize;
+
+ ssize = map->dm_segs[curseg].ds_len;
+ vaddr = map->dm_segs[curseg].ds_vaddr;
+
+ if (addr > 0) {
+ if (addr > ssize) {
+ addr -= ssize;
+ ssize = 0;
+ } else {
+ vaddr += addr;
+ ssize -= addr;
+ }
+ }
+ if (ssize > size) {
+ ssize = size;
+ }
+
+ if (ssize) {
+// #define DEBUG_BUSDMASYNC_FRAG
+#ifdef DEBUG_BUSDMASYNC_FRAG
+ printf(" syncing %p:%p ", vaddr, ssize);
+ if (op & BUS_DMASYNC_PREWRITE) printf("PRW ");
+ if (op & BUS_DMASYNC_PREREAD) printf("PRR ");
+ if (op & BUS_DMASYNC_POSTWRITE) printf("POW ");
+ if (op & BUS_DMASYNC_POSTREAD) printf("POR ");
+ printf("\n");
+#endif
+ /*
+ * If only PREWRITE is requested, writeback and
+ * invalidate. PREWRITE with PREREAD writebacks
+ * and invalidates *all* cache levels.
+ * Otherwise, just invalidate.
+ * POSTREAD and POSTWRITE are no-ops since
+ * we are not bouncing data.
+ */
+ if (op & BUS_DMASYNC_PREWRITE) {
+ if (op & BUS_DMASYNC_PREREAD)
+ Mips_IOSyncDCache(vaddr, ssize, SYNC_X);
+ else
+ Mips_IOSyncDCache(vaddr, ssize, SYNC_W);
+ } else if (op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD)) {
+ Mips_IOSyncDCache(vaddr, ssize, SYNC_R);
+ }
+ size -= ssize;
+ }
+ curseg++;
+ nsegs--;
+ }
+
+ if (size != 0) {
+ panic("_dmamap_sync: ran off map!");
+ }
+}
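+
+/*
+ * Editorial usage sketch, not part of the original file: the typical
+ * driver-side sequence that ends up in the _dmamap_*() hooks above,
+ * presumably via the bus_dmamap_*() wrappers from <machine/bus.h>.
+ * The tag, buffer and length are hypothetical, a single physically
+ * contiguous segment is assumed, and the hypothetical BUSDMA_EXAMPLE
+ * guard keeps the sketch from ever being compiled in.
+ */
+#ifdef BUSDMA_EXAMPLE
+static int
+busdma_example_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
+{
+	bus_dmamap_t map;
+	int error;
+
+	/* Create a map big enough for the transfer, then load the buffer. */
+	error = bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
+	if (error)
+		return (error);
+	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
+	if (error) {
+		bus_dmamap_destroy(dmat, map);
+		return (error);
+	}
+
+	/* Write back CPU caches before the device reads the buffer. */
+	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
+
+	/* ... hand map->dm_segs[0].ds_addr to the device and wait ... */
+
+	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(dmat, map);
+	bus_dmamap_destroy(dmat, map);
+	return (0);
+}
+#endif /* BUSDMA_EXAMPLE */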
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+int
+_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
+ bus_dma_tag_t t;
+ bus_size_t size, alignment, boundary;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ int *rsegs;
+ int flags;
+{
+ return (_dmamem_alloc_range(t, size, alignment, boundary,
+ segs, nsegs, rsegs, flags, 0, 0xf0000000));
+}
+
+/*
+ * Common function for freeing DMA-safe memory. May be called by
+ * bus-specific DMA memory free functions.
+ */
+void
+_dmamem_free(t, segs, nsegs)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+{
+ vm_page_t m;
+ bus_addr_t addr;
+ struct pglist mlist;
+ int curseg;
+
+ /*
+ * Build a list of pages to free back to the VM system.
+ */
+ TAILQ_INIT(&mlist);
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += PAGE_SIZE) {
+ m = PHYS_TO_VM_PAGE(addr - t->dma_offs);
+ TAILQ_INSERT_TAIL(&mlist, m, pageq);
+ }
+ }
+
+ uvm_pglistfree(&mlist);
+}
+
+/*
+ * Common function for mapping DMA-safe memory. May be called by
+ * bus-specific DMA memory map functions.
+ */
+int
+_dmamem_map(t, segs, nsegs, size, kvap, flags)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ size_t size;
+ caddr_t *kvap;
+ int flags;
+{
+ vaddr_t va;
+ bus_addr_t addr;
+ int curseg;
+
+ size = round_page(size);
+ va = uvm_km_valloc(kmem_map, size);
+ if (va == 0)
+ return (ENOMEM);
+
+ *kvap = (caddr_t)va;
+
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += NBPG, va += NBPG, size -= NBPG) {
+ if (size == 0)
+ panic("_dmamem_map: size botch");
+ pmap_enter(pmap_kernel(), va, addr - t->dma_offs,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ segs[curseg].ds_vaddr = va;
+#if 0
+ if (flags & BUS_DMAMEM_NOSYNC)
+ pmap_changebit(addr, PG_N, ~0);
+ else
+ pmap_changebit(addr, 0, ~PG_N);
+#endif
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_dmamem_unmap(t, kva, size)
+ bus_dma_tag_t t;
+ caddr_t kva;
+ size_t size;
+{
+
+#ifdef DIAGNOSTIC
+ if ((u_long)kva & PGOFSET)
+ panic("_dmamem_unmap");
+#endif
+
+ size = round_page(size);
+ uvm_km_free(kmem_map, (vaddr_t)kva, size);
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory. May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+paddr_t
+_dmamem_mmap(t, segs, nsegs, off, prot, flags)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ off_t off;
+ int prot, flags;
+{
+ int i;
+
+ for (i = 0; i < nsegs; i++) {
+#ifdef DIAGNOSTIC
+ if (off & PGOFSET)
+ panic("_dmamem_mmap: offset unaligned");
+ if (segs[i].ds_addr & PGOFSET)
+ panic("_dmamem_mmap: segment unaligned");
+ if (segs[i].ds_len & PGOFSET)
+ panic("_dmamem_mmap: segment size not multiple"
+ " of page size");
+#endif
+ if (off >= segs[i].ds_len) {
+ off -= segs[i].ds_len;
+ continue;
+ }
+
+ return (mips_btop((caddr_t)segs[i].ds_addr + off - t->dma_offs));
+ }
+
+ /* Page not found. */
+ return (-1);
+}
+
+/**********************************************************************
+ * DMA utility functions
+ **********************************************************************/
+
+/*
+ * Allocate physical memory from the given physical address range.
+ * Called by DMA-safe memory allocation methods.
+ */
+int
+_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
+ flags, low, high)
+ bus_dma_tag_t t;
+ bus_size_t size, alignment, boundary;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ int *rsegs;
+ int flags;
+ vaddr_t low;
+ vaddr_t high;
+{
+ vaddr_t curaddr, lastaddr;
+ vm_page_t m;
+ struct pglist mlist;
+ int curseg, error;
+
+ /* Always round the size. */
+ size = round_page(size);
+
+ /*
+ * Allocate pages from the VM system.
+ */
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(size, low, high,
+ alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+ if (error)
+ return (error);
+
+ /*
+ * Compute the location, size, and number of segments actually
+ * returned by the VM code.
+ */
+ m = mlist.tqh_first;
+ curseg = 0;
+ lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
+ segs[curseg].ds_addr += t->dma_offs;
+ segs[curseg].ds_len = PAGE_SIZE;
+ m = m->pageq.tqe_next;
+
+ for (; m != NULL; m = m->pageq.tqe_next) {
+ curaddr = VM_PAGE_TO_PHYS(m);
+#ifdef DIAGNOSTIC
+ if (curaddr < low || curaddr >= high) {
+ printf("vm_page_alloc_memory returned non-sensical"
+ " address 0x%lx\n", curaddr);
+ panic("_dmamem_alloc_range");
+ }
+#endif
+ if (curaddr == (lastaddr + PAGE_SIZE))
+ segs[curseg].ds_len += PAGE_SIZE;
+ else {
+ curseg++;
+ segs[curseg].ds_addr = curaddr + t->dma_offs;
+ segs[curseg].ds_len = PAGE_SIZE;
+ }
+ lastaddr = curaddr;
+ }
+
+ *rsegs = curseg + 1;
+
+ return (0);
+}
+
diff --git a/sys/arch/mips64/mips64/cache_r5k.S b/sys/arch/mips64/mips64/cache_r5k.S
new file mode 100644
index 00000000000..ace6c7c7e23
--- /dev/null
+++ b/sys/arch/mips64/mips64/cache_r5k.S
@@ -0,0 +1,1009 @@
+/* $OpenBSD: cache_r5k.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998-2004 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * NOTE!
+ *
+ * This code does not support caches with a linesize other than 32 bytes.
+ * Neither will it support R4000 or R4400 secondary caches. These
+ * configurations will need another set of cache functions.
+ *
+ * Processors supported:
+ * R4600/R4700
+ * R5000
+ * RM52xx
+ * RM7xxx
+ * RM9xxx
+ */
+
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/pte.h>
+
+#include "assym.h"
+
+ .set mips3
+
+/*
+ * Skip the .h file. No one else needs to know!
+ */
+
+#define IndexInvalidate_I 0x00
+#define IndexWBInvalidate_D 0x01
+#define IndexFlashInvalidate_T 0x02
+#define IndexWBInvalidate_S 0x03
+
+#define IndexLoadTag_I 0x04
+#define IndexLoadTag_D 0x05
+#define IndexLoadTag_T 0x06
+#define IndexLoadTag_S 0x07
+
+#define IndexStoreTag_I 0x08
+#define IndexStoreTag_D 0x09
+#define IndexStoreTag_T 0x0a
+#define IndexStoreTag_S 0x0b
+
+#define CreateDirtyExclusive 0x09
+
+#define HitInvalidate_I 0x10
+#define HitInvalidate_D 0x11
+#define HitInvalidate_S 0x13
+
+#define Fill_I 0x14
+#define HitWBInvalidate_D 0x15
+#define InvalidatePage_T 0x16
+#define HitWBInvalidate_S 0x17
+
+#define HitWB_I 0x18
+#define HitWB_D 0x19
+#define HitWB_S 0x1b
+
+#define InvalidateSecondaryPage 0x17 /* Only RM527[0-1] */
+
+
+
+/*
+ * R5000 config register bits.
+ */
+#define CF_5_SC (1 << 17) /* Secondary cache not present */
+#define CF_5_SE (1 << 12) /* Secondary cache enable */
+#define CF_5_SS (3 << 20) /* Secondary cache size */
+#define CF_5_SS_AL 20 /* Shift to align */
+
+/*
+ * RM52xx config register bits. (like R5000)
+ */
+#define CF_52_SE (1 << 12) /* Secondary cache enable */
+#define CF_52_SC (1 << 17) /* Secondary cache not present */
+#define CF_52_SS (3 << 20) /* Secondary cache size */
+#define CF_52_SS_AL 20 /* Shift to align */
+
+/*
+ * RM7000 config register bits.
+ */
+#define CF_7_SE (1 << 3) /* Secondary cache enable */
+#define CF_7_SC (1 << 31) /* Secondary cache not present */
+#define CF_7_TE (1 << 12) /* Tertiary cache enable */
+#define CF_7_TC (1 << 17) /* Tertiary cache not present */
+#define CF_7_TS (3 << 20) /* Tertiary cache size */
+#define CF_7_TS_AL 20 /* Shift to align */
+
+/*
+ * Define cache type definition bits. NOTE! the 3 lsb may NOT change!
+ */
+#define CTYPE_DIR 0x0001 /* Cache is direct mapped */
+#define CTYPE_2WAY 0x0002 /* Cache is TWO way */
+#define CTYPE_4WAY 0x0004 /* Cache is FOUR way */
+#define CTYPE_WAYMASK 0x0007
+
+#define CTYPE_HAS_IL2 0x0100 /* Internal L2 Cache present */
+#define CTYPE_HAS_XL2 0x0200 /* External L2 Cache present */
+#define CTYPE_HAS_XL3 0x0400 /* External L3 Cache present */
+
+/*
+ * Due to a flaw in RM7000 1.x processors a pipeline 'drain' is
+ * required after some mtc0 instructions.
+ * Ten nops in sequence do the trick.
+ */
+#define NOP10 nop;nop;nop;nop;nop;\
+ nop;nop;nop;nop;nop /* Two cycles for dual issue machine */
+
+ .set noreorder # Noreorder is default style!
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_ConfigCache --
+ *
+ * Size and configure the caches.
+ * NOTE: should only be called from mips_init().
+ *
+ * Results:
+ * Returns the value of the cpu configuration register.
+ *
+ * Side effects:
+ * The size of the data cache is stored into CpuPrimaryDataCacheSize.
+ * The size of instruction cache is stored into CpuPrimaryInstCacheSize.
+ * Alignment mask for cache aliasing test is stored in CpuCacheAliasMask.
+ * CpuSecondaryCacheSize is set to the size of the secondary cache.
+ * CpuTertiaryCacheSize is set to the size of the tertiary cache.
+ * CpuNWayCache is set to 1 for direct mapped caches, 2 for two way
+ * caches and 4 for four way caches. This primarily indicates the
+ * primary cache associativity.
+ * cpu_id is set for later decision testing.
+ *
+ * Allocation:
+ * t4, t5, t6 are used to hold the I and D set sizes and the alias mask.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_ConfigCache)
+ .set noreorder
+ mfc0 v1, COP_0_PRID # read processor ID register
+ mfc0 v0, COP_0_CONFIG # Get configuration register
+ sw v1, cpu_id # save PRID register
+
+ srl t1, v0, 9 # Get I cache size.
+ and t1, 3
+ li t2, 4096
+ sllv t4, t2, t1 # t4 = Initial I set size.
+
+ and t2, v0, 0x20
+ srl t2, t2, 1 # Get I cache line size.
+ addu t2, t2, 16
+ sw t2, CpuPrimaryInstCacheLSize
+
+ srl t1, v0, 6 # Get D cache size.
+ and t1, 3
+ li t2, 4096 # Fixed page size.
+ sllv t5, t2, t1
+
+ and t2, v0, 0x10
+ addu t2, t2, 16 # Get D cache line size.
+ sw t2, CpuPrimaryDataCacheLSize
+
+ li t2, CTYPE_2WAY # Assume two way cache
+ li t6, 0 # Secondary size 0.
+ li t7, 0 # Tertiary size 0.
+
+ and v1, 0xff00 # Recognize CPU's with
+ li t1, (MIPS_R4600 << 8) # N way L1 caches only.
+ beq v1, t1, ConfResult # R4K 2 way, no L2 control
+ li t1, (MIPS_R4700 << 8)
+ beq v1, t1, ConfResult # R4K 2 way, No L2 control
+ li t1, (MIPS_R5000 << 8)
+ beq v1, t1, Conf5K # R5K 2 way, check L2
+ li t1, (MIPS_RM52XX << 8)
+ beq v1, t1, Conf52K # R52K 2 way, check L2
+ li t1, (MIPS_RM7000 << 8)
+ beq v1, t1, Conf7K
+ li t1, (MIPS_RM9000 << 8)
+ beq v1, t1, Conf7K
+ nop
+ # R4000PC/R4400PC or unknown.
+ li t2, CTYPE_DIR # default direct mapped cache
+ b ConfResult
+ nop
+
+#---- R5K ------------------------------
+Conf5K: # R5000 type, check for L2 cache
+ and t1, v0, CF_5_SC
+ bnez t1, ConfResult # not enabled
+ li t6, 0 # set size to 0.
+
+ li t3, CF_5_SS
+ and t1, t3, v0
+ beq t1, t3, ConfResult # No L2 cache
+ srl t1, CF_5_SS_AL
+
+ or t2, CTYPE_HAS_XL2 # External L2 present.
+ li t1, CF_5_SE # Set SE in conf
+ or v0, t1 # Update config register
+ li t6, 512*1024 # 512k per 'click'.
+ sll t6, t1
+
+ mtc0 v0, COP_0_CONFIG # Enable L2 cache
+ la t0, KSEG0_BASE
+ addu t1, t0, t6
+1:
+ cache InvalidateSecondaryPage, 0(t0)
+ addu t0, 128*32
+ bne t0, t1, 1b
+ nop
+
+ b ConfResult
+ nop
+
+
+#---- R52K ------------------------------
+Conf52K: # R5200 type, check for L2 cache
+ and t1, v0, CF_52_SC
+ bnez t1, ConfResult # not present
+ li t6, 0 # set size to 0.
+
+ li t3, CF_52_SS
+ and t1, t3, v0
+ beq t1, t3, ConfResult # No L2 cache
+ srl t1, CF_52_SS_AL
+
+ or t2, CTYPE_HAS_XL2 # External L2 present.
+ li t1, CF_5_SE # Set SE in conf
+ or v0, t1 # Update config register
+ li t6, 512*1024 # 512k per 'click'.
+ lw t3, CpuExternalCacheOn # Check if disabled
+ bnez t3, ConfResult # No use it.
+ sll t6, t1
+
+ and t2, ~CTYPE_HAS_XL2
+ li t1, ~CF_52_SE # Clear SE in conf
+ and v0, t1 # Update config register
+ b ConfResult
+ li t6, 0 # L2 cache disabled
+
+
+#---- RM7K -----------------------------
+Conf7K: # RM7000, check for L2 and L3 cache
+ li t2, CTYPE_4WAY # 4-way cache
+ and t1, v0, CF_7_TC
+ bnez t1, Conf7KL2 # No L3 cache if set
+ li t7, 0 # Set size = 0
+
+ lw t7, CpuTertiaryCacheSize
+ and t2, ~CTYPE_HAS_XL3
+ beqz t7, Conf7KL2 # No L3 cache present
+ nop
+
+ or t2, CTYPE_HAS_XL3
+ lw t3, CpuExternalCacheOn # Check if disabled
+ bnez t3, Conf7KL2 # No, use it
+ nop
+
+ and v0, ~CF_7_TE # Clear TE in conf
+ mtc0 v0, COP_0_CONFIG # establish any new config
+ NOP10
+ li t7, 0 # L3 cache disabled
+
+Conf7KL2:
+ and t1, v0, CF_7_SC # check for L2 cache
+ bnez t1, ConfResult
+ li t6, 0 # No L2?
+
+ or t2, CTYPE_HAS_IL2 # L2 is on chip
+ lw t3, CpuOnboardCacheOn # Check if disabled
+ bnez t3, ConfResult # No, use it
+ li t6, 256*1024 # size = 256k
+
+/* Sync on chip L2 */
+
+ li a0, 0x80000000
+ li a1, 0x80040000
+10:
+ cache IndexWBInvalidate_S, 0(a0)
+ addu a0, 32
+ bne a0, a1, 10b
+ nop
+
+ and t2, ~CTYPE_HAS_IL2
+ li t1, ~CF_7_SE # Clear SE in conf
+ and v0, t1
+ mtc0 v0, COP_0_CONFIG # establish any new config
+ NOP10
+ b ConfResult
+ li t6, 0 # L2 cache disabled
+
+/*
+ * Get here with t2 = Cache type, t4 = L1 I size, t5 = L1 D size.
+ * t6 = secondary size, t7 = tertiary size.
+ */
+ConfResult:
+ sw v0, CpuConfigRegister
+ mfc0 t3, COP_0_STATUS_REG
+ sw t2, CpuCacheType # Save cache attributes
+ sw t3, CpuStatusRegister
+ and t2, CTYPE_WAYMASK # isolate number of sets.
+ sw t2, CpuNWayCache
+ srl t2, 1 # get div shift for set size.
+
+ sw t6, CpuSecondaryCacheSize
+ sw t7, CpuTertiaryCacheSize
+
+ addu t1, t4, -1 # Use icache for alias mask
+ srl t1, t2 # Some cpus have different
+ and t1, ~(NBPG - 1) # i and d cache sizes...
+ sw t1, CpuCacheAliasMask
+
+ sw t4, CpuPrimaryInstCacheSize # store cache size.
+ srl t4, t2 # calculate set size.
+ sw t4, CpuPrimaryInstSetSize
+
+ sw t5, CpuPrimaryDataCacheSize # store cache size.
+ srl t5, t2 # calculate set size.
+ sw t5, CpuPrimaryDataSetSize
+
+ and v0, 0xfffffff8
+ or v0, 0x00000003 # set cachable writeback kseg0
+ mtc0 v0, COP_0_CONFIG # establish any new config
+ NOP10
+ j ra
+ nop
+END(Mips5k_ConfigCache)
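+
+/*
+ * Worked example of the decode above (editorial sketch): for a config
+ * register value where ((cfg >> 9) & 3) == 2, ((cfg >> 6) & 3) == 2 and
+ * bits 0x20/0x10 are set, the code computes
+ *	I-cache size = 4096 << 2 = 16KB, line size = 16 + 16 = 32 bytes
+ *	D-cache size = 4096 << 2 = 16KB, line size = 16 + 16 = 32 bytes
+ * and, for a two way cache, a per-way set size of 16KB / 2 = 8KB.
+ */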
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_SyncCache --
+ *
+ * Sync ALL caches.
+ * No need to look at number of sets since we are cleaning out
+ * the entire cache and thus will address all sets anyway.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The contents of ALL caches are Invalidated or Synched.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_SyncCache)
+ .set noreorder
+ lw t1, CpuPrimaryInstCacheSize
+ lw t2, CpuPrimaryDataCacheSize
+
+/*
+ * Sync the instruction cache.
+ */
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+
+ LA t0, KSEG0_BASE
+ PTR_ADDU t1, t0, t1 # Compute end address
+ PTR_SUBU t1, 128
+
+1:
+ cache IndexInvalidate_I, 0(t0)
+ cache IndexInvalidate_I, 32(t0)
+ cache IndexInvalidate_I, 64(t0)
+ cache IndexInvalidate_I, 96(t0)
+
+ bne t0, t1, 1b
+ PTR_ADDU t0, t0, 128
+
+/*
+ * Sync the data cache. Do L1 first. Index operations only act on
+ * the selected cache, which is how they differ from Hit operations.
+ */
+
+ LA t0, KSEG0_BASE
+ PTR_ADDU t1, t0, t2 # End address
+ PTR_SUBU t1, t1, 128
+1:
+ cache IndexWBInvalidate_D, 0(t0)
+ cache IndexWBInvalidate_D, 32(t0)
+ cache IndexWBInvalidate_D, 64(t0)
+ cache IndexWBInvalidate_D, 96(t0)
+
+ bne t0, t1, 1b
+ PTR_ADDU t0, t0, 128
+
+/* Do on chip L2 if present */
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_IL2
+ beqz t0, 20f
+ nop
+
+3:
+ LA t3, KSEG0_BASE
+ lw t4, CpuSecondaryCacheSize
+10:
+ cache IndexWBInvalidate_S, 0(t3)
+ PTR_SUBU t4, 32 # Fixed cache line size.
+ bgtz t4, 10b
+ PTR_ADDU t3, 32
+
+/* Do off chip L2 if present */
+20:
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_XL2
+ beqz t0, 30f
+ nop
+
+ mtc0 zero, COP_0_TAG_LO
+ LA t3, KSEG0_BASE
+ lw t4, CpuSecondaryCacheSize
+21:
+ cache InvalidateSecondaryPage, 0(t3)
+ PTR_SUBU t4, 4096 # Fixed cache page size.
+ bgtz t4, 21b
+ PTR_ADDU t3, 4096
+
+/* Do off chip L3 if present */
+30:
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_XL3
+ beqz t0, 99f
+ nop
+
+ mtc0 zero, COP_0_TAG_LO
+ LA t3, KSEG0_BASE
+ lw t4, CpuTertiaryCacheSize
+31:
+ cache InvalidatePage_T, 0(t3)
+ PTR_SUBU t4, 4096 # Fixed cache page size.
+ bgtz t4, 31b
+ PTR_ADDU t3, 4096
+
+99:
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ nop
+END(Mips5k_SyncCache)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_InvalidateICachePage --
+ *
+ * void Mips5k_InvalidateICachePage(addr)
+ * vaddr_t addr;
+ *
+ * Invalidate the L1 instruction cache page given by addr.
+ *
+ * Results:
+ * Void.
+ *
+ * Side effects:
+ * The contents of the L1 instruction cache page are flushed.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_InvalidateICachePage)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+ lw v0, CpuNWayCache # Cache properties
+ lw t0, CpuPrimaryInstSetSize # Set size
+ and a0, ~PAGE_MASK # Page align start address
+ PTR_ADDU a1, a0, PAGE_SIZE-128 # End address.
+ addiu v0, -2 # <0 1way, 0 = two, >0 four
+1:
+ cache HitInvalidate_I, 0(a0)
+ cache HitInvalidate_I, 32(a0)
+ cache HitInvalidate_I, 64(a0)
+ cache HitInvalidate_I, 96(a0)
+
+ bne a0, a1, 1b
+ PTR_ADDU a0, 128
+
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ move v0, zero # suiword depends on this!!
+END(Mips5k_InvalidateICachePage)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_InvalidateICache --
+ *
+ * void Mips5k_SyncICache(addr, len)
+ * vaddr_t addr, len;
+ *
+ * Invalidate the L1 instruction cache for at least the range
+ * addr to addr + len - 1.
+ * The address is reduced to a KSEG0 index to avoid TLB faults.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The contents of the L1 instruction cache are flushed.
+ * Must not touch v0.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_InvalidateICache)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+ lw v0, CpuNWayCache # Cache properties
+ lw t0, CpuPrimaryInstSetSize # Set size
+ and a0, 0x00ffffff # Reduce addr to cache index
+ PTR_ADDU a1, 31 # Round up size
+ PTR_ADDU a1, a0 # Add extra from address
+ and a0, -32 # Align start address
+ PTR_SUBU a1, a1, a0
+ PTR_ADDU a0, KSEG0_BASE # a0 now new KSEG0 address
+ srl a1, a1, 5 # Number of unrolled loops
+ addiu v0, -2 # <0 1way, 0 = two, >0 four
+1:
+ bltz v0, 3f
+ addu a1, -1
+
+2:
+ PTR_ADDU t1, t0, a0 # Nway cache, flush set B.
+ cache IndexInvalidate_I, 0(t1)
+ beqz v0, 3f # Is two way do set A
+ PTR_ADDU t1, t0 # else step to set C.
+
+ cache IndexInvalidate_I, 0(t1)
+
+ PTR_ADDU t1, t0 # step to set D
+ cache IndexInvalidate_I, 0(t1)
+
+3:
+ cache IndexInvalidate_I, 0(a0) # do set (A if NWay)
+
+ bne a1, zero, 1b
+ PTR_ADDU a0, 32
+
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ move v0, zero # suiword depends on this!!
+END(Mips5k_InvalidateICache)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_SyncDCachePage --
+ *
+ * void Mips5k_SyncDCachePage(addr)
+ * vaddr_t addr;
+ *
+ * Sync the L1 data cache page for address addr.
+ * The address is reduced to a KSEG0 index to avoid TLB faults.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The contents of the cache are written back to primary memory
+ * and the cache lines are invalidated.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_SyncDCachePage)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+ lw a2, CpuPrimaryDataSetSize
+ lw v0, CpuNWayCache
+ dsll a0, 34
+ dsrl a0, 34
+ PTR_ADDU a0, KSEG0_BASE # a0 now new KSEG0 address
+ and a0, ~PAGE_MASK # Page align start address
+ PTR_ADDU a1, a0, PAGE_SIZE-128
+ addiu v0, -2 # <0 1way, 0 = two, >0 four
+
+1:
+ bltz v0, 3f
+ PTR_ADDU t1, a0, a2 # flush set B.
+ cache IndexWBInvalidate_D, 0(t1)
+ cache IndexWBInvalidate_D, 32(t1)
+ cache IndexWBInvalidate_D, 64(t1)
+ cache IndexWBInvalidate_D, 96(t1)
+ beqz v0, 3f # Two way, do set A,
+ PTR_ADDU t1, a2
+
+ cache IndexWBInvalidate_D, 0(t1) # do set C
+ cache IndexWBInvalidate_D, 32(t1)
+ cache IndexWBInvalidate_D, 64(t1)
+ cache IndexWBInvalidate_D, 96(t1)
+
+ PTR_ADDU t1, a2 # do set D
+ cache IndexWBInvalidate_D, 0(t1)
+ cache IndexWBInvalidate_D, 32(t1)
+ cache IndexWBInvalidate_D, 64(t1)
+ cache IndexWBInvalidate_D, 96(t1)
+
+3:
+ cache IndexWBInvalidate_D, 0(a0) # do set A
+ cache IndexWBInvalidate_D, 32(a0)
+ cache IndexWBInvalidate_D, 64(a0)
+ cache IndexWBInvalidate_D, 96(a0)
+
+ bne a1, a0, 1b
+ PTR_ADDU a0, 128
+
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+#endif
+ j ra
+ nop
+END(Mips5k_SyncDCachePage)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_HitSyncDCache --
+ *
+ * void Mips5k_HitSyncDCache(addr, len)
+ * vaddr_t addr, len;
+ *
+ * Sync data cache for range of addr to addr + len - 1.
+ * The address can be any valid virtual address as long
+ * as no TLB invalid traps occur. Only lines with matching
+ * addr are flushed.
+ *
+ * Note: Use the CpuNWayCache flag to select 16 or 32 byte linesize.
+ * All Nway cpu's now available have a fixed 32byte linesize.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The contents of the L1 cache are written back to primary memory
+ * and the cache lines are invalidated.
+ *
+ * IMPORTANT NOTE:
+ * Since orphaned L1 cache entries will not be synched it is
+ * mandatory to pass over the L1 cache once after the L2 is done.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_HitSyncDCache)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+
+ beq a1, zero, 3f # size is zero!
+ PTR_ADDU a1, 31 # Round up
+ PTR_ADDU a1, a1, a0 # Add extra from address
+ and a0, a0, -32 # align address
+ PTR_SUBU a1, a1, a0
+ srl a1, a1, 5 # Compute number of cache lines
+
+1:
+ PTR_ADDU a1, -1
+ cache HitWBInvalidate_D, 0(a0)
+ bne a1, zero, 1b
+ PTR_ADDU a0, 32
+
+3:
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ nop
+END(Mips5k_HitSyncDCache)
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_HitSyncSCache --
+ *
+ * void Mips5k_HitSyncSCache(addr, len)
+ * vaddr_t addr, len;
+ *
+ * Sync secondary cache for range of addr to addr + len - 1.
+ * The address can be any valid virtual address as long
+ * as no TLB invalid traps occur. Only lines with matching
+ * addr are flushed.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The contents of the L2 cache are written back to primary memory
+ * and the cache lines are invalidated.
+ *
+ * IMPORTANT NOTE:
+ * Since orphaned L1 cache entries will not be synched it is
+ * mandatory to pass over the L1 cache once after the L2 is done.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_HitSyncSCache)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+
+ beq a1, zero, 3f # size is zero!
+ PTR_ADDU a1, a1, a0 # Add in extra from align
+ and a0, a0, -32 # Align address
+ PTR_SUBU a1, a1, a0
+1:
+ PTR_ADDU a1, -32
+
+ cache HitWBInvalidate_S, 0(a0)
+ cache HitWBInvalidate_D, 0(a0) # Kill any orphans...
+
+ bgtz a1, 1b
+ PTR_ADDU a0, 32
+
+3:
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ nop
+END(Mips5k_HitSyncSCache)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_HitInvalidateDCache --
+ *
+ * void Mips5k_HitInvalidateDCache(addr, len)
+ * vaddr_t addr, len;
+ *
+ * Invalidate data cache for range of addr to addr + len - 1.
+ * The address can be any valid address as long as no TLB misses occur.
+ * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
+ * Only lines with matching addresses are invalidated.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The L1 cache line is invalidated.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_HitInvalidateDCache)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+
+ beq a1, zero, 3f # size is zero!
+ PTR_ADDU a1, a1, a0 # Add in extra from align
+ and a0, a0, -32 # Align address
+ PTR_SUBU a1, a1, a0
+
+1:
+ PTR_ADDU a1, -32
+
+ cache HitInvalidate_D, 0(a0)
+
+ bgtz a1, 1b
+ PTR_ADDU a0, 32
+
+3:
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ nop
+END(Mips5k_HitInvalidateDCache)
+
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_HitInvalidateSCache --
+ *
+ * void Mips5k_HitInvalidateSCache(addr, len)
+ * vaddr_t addr, len;
+ *
+ * Invalidate secondary cache for range of addr to addr + len - 1.
+ * The address can be any valid address as long as no TLB misses occur.
+ * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
+ * Only lines with matching addresses are invalidated.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * The L2 cache line is invalidated.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(Mips5k_HitInvalidateSCache)
+#ifdef CPUR4600
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ li v0, SR_DIAG_DE
+ mtc0 v0, COP_0_STATUS_REG # Disable interrupts
+#endif
+
+ beq a1, zero, 3f # size is zero!
+ PTR_ADDU a1, a1, a0 # Add in extra from align
+ and a0, a0, -32 # Align address
+ PTR_SUBU a1, a1, a0
+1:
+ PTR_ADDU a1, -32
+
+ cache HitInvalidate_S, 0(a0)
+ cache HitInvalidate_D, 0(a0) # Orphans in L1
+
+ bgtz a1, 1b
+ PTR_ADDU a0, 32
+
+3:
+#ifdef CPUR4600
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register.
+ NOP10
+#endif
+ j ra
+ nop
+END(Mips5k_HitInvalidateSCache)
+
+/*----------------------------------------------------------------------------
+ *
+ * Mips5k_IOSyncDCache --
+ *
+ * void Mips5k_IOSyncDCache(addr, len, rw)
+ * vaddr_t addr;
+ * int len, rw;
+ *
+ * Invalidate or flush data cache for range of addr to addr + len - 1.
+ * The address can be any valid address as long as no TLB misses occur.
+ * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
+ *
+ * If an external cache exists, we invalidate pages which are in the
+ * given range ONLY if the transfer direction is READ.
+ * The assumption here is a 'write through' external cache, which is
+ * true for all currently supported processors.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * If rw == 0 (read), L1 and on-chip L2 caches are invalidated or
+ * flushed if the area does not match the alignment
+ * requirements. Writethrough L2 and L3 cache are
+ * invalidated for the address range.
+ * If rw == 1 (write), L1 and on-chip L2 caches are written back
+ * to memory and invalidated. Writethrough L2 and L3 caches
+ * are left alone.
+ * If rw == 2 (write-read), L1 and on-chip L2 caches are written back
+ * to memory and invalidated. Writethrough L2 and L3 caches
+ * are invalidated.
+ *
+ *----------------------------------------------------------------------------
+ */
+NON_LEAF(Mips5k_IOSyncDCache, FRAMESZ(CF_SZ), ra)
+
+ PTR_SUBU sp, FRAMESZ(CF_SZ)
+ PTR_S ra, CF_RA_OFFS(sp)
+ REG_S a0, FRAMESZ(CF_SZ)(sp) # save args
+ beqz a2, SyncRD # Sync PREREAD
+ REG_S a1, FRAMESZ(CF_SZ)+REGSZ(sp)
+ addiu a2, 1
+ bnez a2, SyncRDWB # Sync PREWRITE+PREREAD
+ nop
+
+ lw t0, CpuCacheType # Sync PREWRITE
+ and t0, CTYPE_HAS_IL2 # Have internal L2?
+ beqzl t0, Mips5k_HitSyncDCache # No flush L1.
+ PTR_ADDU sp, FRAMESZ(CF_SZ)
+
+ b Mips5k_HitSyncSCache # Do internal L2 cache
+ nop # L1 done in parallel
+
+SyncRD:
+ and t0, a0, 31 # check if invalidate possible
+ bnez t0, SyncRDWB # both address and size must
+ and t0, a1, 31 # be aligned at the cache size
+ bnez t0, SyncRDWB
+ nop
+
+/*
+ * Sync for aligned read, no writeback required.
+ */
+ lw t0, CpuCacheType # Aligned, do invalidate
+ and t0, CTYPE_HAS_IL2 # Have internal L2?
+ bnez t0, SyncRDL2
+ nop
+
+ jal Mips5k_HitInvalidateDCache # External L2 or no L2. Do L1.
+ nop
+
+ b SyncRDXL2
+ PTR_L ra, CF_RA_OFFS(sp) # External L2 if present
+
+SyncRDL2:
+ jal Mips5k_HitInvalidateSCache # Internal L2 cache
+ nop # L1 done in parallel
+
+ b SyncRDL3
+ PTR_L ra, CF_RA_OFFS(sp) # L3 invalidate if present
+
+/*
+ * Sync for unaligned read or write-read.
+ */
+SyncRDWB:
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_IL2 # Have internal L2?
+ bnez t0, SyncRDWBL2 # Yes, do L2
+ nop
+
+ jal Mips5k_HitSyncDCache
+ nop
+
+ b SyncRDXL2
+ PTR_L ra, CF_RA_OFFS(sp) # External L2 if present
+
+SyncRDWBL2:
+ jal Mips5k_HitSyncSCache # Internal L2 cache
+ nop # L1 done in parallel
+
+ b SyncRDL3
+ PTR_L ra, CF_RA_OFFS(sp) # L3 invalidate if present
+
+SyncRDXL2:
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_XL2 # Have external L2?
+ beqz t0, SyncRDL3 # Nope.
+ REG_L a0, FRAMESZ(CF_SZ)(sp)
+ REG_L a1, FRAMESZ(CF_SZ)+REGSZ(sp)
+ and a2, a0, 4095 # align on page size
+ PTR_SUBU a0, a2
+ PTR_ADDU a1, a2
+50:
+ blez a1, SyncDone
+ PTR_SUBU a1, 4096 # Fixed cache page size.
+
+ cache InvalidateSecondaryPage, 0(a0)
+ b 50b
+ PTR_ADDU a0, 4096
+
+SyncRDL3:
+ lw t0, CpuCacheType
+ and t0, CTYPE_HAS_XL3 # Have L3?
+ beqz t0, SyncDone # Nope.
+ REG_L a0, FRAMESZ(CF_SZ)(sp)
+ REG_L a1, FRAMESZ(CF_SZ)+REGSZ(sp)
+ and a2, a0, 4095 # align on page size
+ PTR_SUBU a0, a2
+ PTR_ADDU a1, a2
+40:
+ blez a1, SyncDone
+ PTR_SUBU a1, 4096 # Fixed cache page size.
+
+ cache InvalidatePage_T, 0(a0)
+ b 40b
+ PTR_ADDU a0, 4096
+
+SyncDone:
+ j ra
+ PTR_ADDU sp, FRAMESZ(CF_SZ)
+END(Mips5k_IOSyncDCache)
diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c
new file mode 100644
index 00000000000..e7c9ed03dea
--- /dev/null
+++ b/sys/arch/mips64/mips64/clock.c
@@ -0,0 +1,421 @@
+/* $OpenBSD: clock.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+
+#include <machine/autoconf.h>
+#include <machine/cpu.h>
+#include <mips64/dev/clockvar.h>
+#include <mips64/archtype.h>
+
+int clock_started = 0;
+
+/* Definition of the driver for autoconfig. */
+int clockmatch(struct device *, void *, void *);
+void clockattach(struct device *, struct device *, void *);
+intrmask_t clock_int5_dummy(intrmask_t, struct trap_frame *);
+intrmask_t clock_int5(intrmask_t, struct trap_frame *);
+void clock_int5_init(struct clock_softc *);
+
+struct cfdriver clock_cd = {
+ NULL, "clock", DV_DULL, NULL, 0
+};
+
+struct cfattach clock_ca = {
+ sizeof(struct clock_softc), clockmatch, clockattach
+};
+
+void md_clk_attach(struct device *, struct device *, void *);
+
+u_int32_t cpu_counter_last;
+u_int32_t cpu_counter_interval;
+u_int32_t pendingticks;
+
+#define SECDAY (24*SECHOUR) /* seconds per day */
+#define SECYR (365*SECDAY) /* seconds per common year */
+#define SECMIN (60) /* seconds per minute */
+#define SECHOUR (60*SECMIN) /* seconds per hour */
+
+#define YEARDAYS(year) (((((year) + 1900) % 4) == 0 && \
+ ((((year) + 1900) % 100) != 0 || \
+ (((year) + 1900) % 400) == 0)) ? 366 : 365)
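+
+/*
+ * Worked example (editorial sketch): RTC years are offsets from 1900, so
+ * YEARDAYS(100) evaluates the rules for the year 2000, which is divisible
+ * by 400 and therefore a 366-day leap year, while YEARDAYS(0) covers 1900
+ * (divisible by 100 but not 400) and yields 365.
+ */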
+
+int
+clockmatch(struct device *parent, void *cfdata, void *aux)
+{
+ struct confargs *ca = aux;
+ struct cfdata *cf = cfdata;
+
+ /* Make sure that we're looking for a clock. */
+ if (strcmp(ca->ca_name, clock_cd.cd_name) != 0)
+ return (0);
+
+ if (cf->cf_unit >= 1)
+ return 0;
+ return 10; /* Try to get clock early */
+}
+
+void
+clockattach(struct device *parent, struct device *self, void *aux)
+{
+ struct clock_softc *sc;
+
+ md_clk_attach(parent, self, aux);
+ sc = (struct clock_softc *)self;
+
+ switch (sys_config.system_type) {
+ case ALGOR_P4032:
+ case ALGOR_P5064:
+ case MOMENTUM_CP7000:
+ case MOMENTUM_CP7000G:
+ case MOMENTUM_JAGUAR:
+ case GALILEO_EV64240:
+ case SGI_INDY:
+ case SGI_O2:
+ printf(" clock on int5 using counter");
+ set_intr(INTPRI_CLOCK, CR_INT_5, clock_int5);
+ break;
+
+ case GALILEO_G9:
+ case WG4308:
+ case WG4309:
+ case WG6000:
+ case WG7000:
+ case WG8168:
+ break;
+
+ default:
+ panic("clockattach: it didn't get here. really.");
+ clock_int5(0,(struct trap_frame *)NULL);
+ }
+
+ printf("\n");
+}
+
+/*
+ * Clock interrupt code for machines using the on-chip CPU
+ * counter register. This register counts at half the pipeline
+ * frequency, so the frequency must be known and the options
+ * register wired to allow its use.
+ *
+ * The code is enabled by setting 'cpu_counter_interval'.
+ */
+void
+clock_int5_init(struct clock_softc *sc)
+{
+ int s;
+
+ s = splclock();
+ cpu_counter_interval = sys_config.cpu.clock / (hz * 2);
+ cpu_counter_last = cp0_get_count() + cpu_counter_interval * 4;
+ cp0_set_compare(cpu_counter_last);
+ splx(s);
+}
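+
+/*
+ * Worked example (editorial sketch): with a hypothetical 200MHz pipeline
+ * clock and hz = 100, the counter advances at 100MHz and
+ *	cpu_counter_interval = 200000000 / (100 * 2) = 1000000
+ * counter ticks per clock interrupt, i.e. one interrupt every 10ms.
+ */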
+
+/*
+ * Dummy count register interrupt handler used on some targets.
+ * Just resets the compare register and acknowledge the interrupt.
+ */
+intrmask_t
+clock_int5_dummy( intrmask_t mask, struct trap_frame *tf)
+{
+ cp0_set_compare(0); /* Shut up counter int's for a while */
+ return CR_INT_5; /* Clock is always on 5 */
+}
+
+/*
+ * Interrupt handler for targets using the internal count register
+ * as interval clock. Normally the system is run with the clock
+ * interrupt always enabled. Masking is done here, and if the clock
+ * cannot be run the tick is just counted and handled later when
+ * the clock is unmasked again.
+ */
+intrmask_t
+clock_int5( intrmask_t mask, struct trap_frame *tf)
+{
+ u_int32_t clkdiff;
+
+ /*
+ * If the clock is started, count the tick; else just arm for a new interval.
+ */
+ if (clock_started && cpu_counter_interval != 0) {
+ clkdiff = cp0_get_count() - cpu_counter_last;
+ while (clkdiff >= cpu_counter_interval) {
+ cpu_counter_last += cpu_counter_interval;
+ clkdiff = cp0_get_count() - cpu_counter_last;
+ pendingticks++;
+ }
+ cpu_counter_last += cpu_counter_interval;
+ pendingticks++;
+ } else {
+ cpu_counter_last = cpu_counter_interval + cp0_get_count();
+ }
+
+ cp0_set_compare(cpu_counter_last);
+
+ if ((tf->cpl & SPL_CLOCKMASK) == 0) {
+ while (pendingticks) {
+ hardclock(tf);
+ pendingticks--;
+ }
+ }
+
+ return CR_INT_5; /* Clock is always on 5 */
+}
+
+/*
+ * Wait "n" microseconds.
+ */
+void
+delay(int n)
+{
+ int dly;
+ int p, c;
+
+ p = cp0_get_count();
+ dly = (sys_config.cpu.clock / 1000000) * n / 2;
+ while (dly > 0) {
+ c = cp0_get_count();
+ dly -= c - p;
+ p = c;
+ }
+}
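+
+/*
+ * Worked example (editorial sketch): with a hypothetical 200MHz pipeline
+ * clock, delay(10) computes dly = (200000000 / 1000000) * 10 / 2 = 1000
+ * counter ticks, which the 100MHz count register burns off in 10us.
+ */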
+
+/*
+ * Wait "n" nanoseconds.
+ */
+void
+nanodelay(int n)
+{
+ int dly;
+ int p, c;
+
+ p = cp0_get_count();
+ dly = ((sys_config.cpu.clock * n) / 1000000000) / 2;
+ while (dly > 0) {
+ c = cp0_get_count();
+ dly -= c - p;
+ p = c;
+ }
+}
+
+/*
+ * Return the best possible estimate of the time in the timeval
+ * to which tvp points. Unfortunately, we can't read the hardware registers.
+ * We guarantee that the time will be greater than the value obtained by a
+ * previous call.
+ */
+void
+microtime(struct timeval *tvp)
+{
+ int s = splclock();
+ static struct timeval lasttime;
+
+ *tvp = time;
+#ifdef notdef
+ tvp->tv_usec += clkread();
+ while (tvp->tv_usec >= 1000000) {
+ tvp->tv_sec++;
+ tvp->tv_usec -= 1000000;
+ }
+#endif
+ if (tvp->tv_sec == lasttime.tv_sec &&
+ tvp->tv_usec <= lasttime.tv_usec &&
+ (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
+ tvp->tv_sec++;
+ tvp->tv_usec -= 1000000;
+ }
+ lasttime = *tvp;
+ splx(s);
+}
+
+/*
+ * Mips machine independent clock routines.
+ */
+
+/*
+ * Start the real-time and statistics clocks. stathz and profhz are taken
+ * from the attached clock; they are left 0 if no separate timers exist.
+ */
+void
+cpu_initclocks()
+{
+ extern int tickadj;
+ struct clock_softc *sc = (struct clock_softc *)clock_cd.cd_devs[0];
+
+ hz = sc->sc_clock.clk_hz;
+ stathz = sc->sc_clock.clk_stathz;
+ profhz = sc->sc_clock.clk_profhz;
+
+ printf("Starting clocks %d/%d/%d hz\n", hz, stathz, profhz);
+
+ /* Start the clock. */
+ if (sc->sc_clock.clk_init != NULL)
+ (*sc->sc_clock.clk_init)(sc);
+
+ tick = 1000000 / hz; /* number of micro-seconds between interrupts */
+ tickadj = 240000 / (60 * hz); /* can adjust 240ms in 60s */
+
+ clock_started++;
+}
+
+/*
+ * We assume newhz is either stathz or profhz, and that neither will
+ * change after being set up above. Could recalculate intervals here
+ * but that would be a drag.
+ */
+void
+setstatclockrate(newhz)
+ int newhz;
+{
+}
+
+/*
+ * This code is defunct after 2099. Will Unix still be here then??
+ */
+static short dayyr[12] = {
+ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
+};
+
+/*
+ * Initialize the time of day register, based on the time base which
+ * is, e.g. from a filesystem.
+ */
+void
+inittodr(time_t base)
+{
+ struct tod_time c;
+ struct clock_softc *sc = (struct clock_softc *)clock_cd.cd_devs[0];
+ int days, yr;
+
+ if (base < 15*SECYR) {
+ printf("WARNING: preposterous time in file system");
+ /* read the system clock anyway */
+ base = 17*SECYR + 186*SECDAY + SECDAY/2;
+ }
+
+ /*
+ * Read the RTC chip registers. NOTE: read routines are responsible
+ * for sanity checking the clock. Dates after 19991231 should be
+ * returned as year >= 100.
+ */
+ if (sc->sc_clock.clk_get) {
+ (*sc->sc_clock.clk_get)(sc, base, &c);
+ } else {
+ printf("WARNING: No TOD clock, beliving file system.\n");
+ goto bad;
+ }
+
+ days = 0;
+ for (yr = 70; yr < c.year; yr++) {
+ days += YEARDAYS(yr);
+ }
+
+ days += dayyr[c.mon - 1] + c.day - 1;
+ if (YEARDAYS(c.year) == 366 && c.mon > 2) {
+ days++;
+ }
+
+ /* now have days since Jan 1, 1970; the rest is easy... */
+ time.tv_sec = days * SECDAY + c.hour * 3600 + c.min * 60 + c.sec;
+ sc->sc_initted = 1;
+
+ /*
+ * See if we gained/lost time.
+ */
+ if (base < time.tv_sec - 5*SECYR) {
+ printf("WARNING: file system time much less than clock time\n");
+ } else if (base > time.tv_sec + 5*SECYR) {
+ printf("WARNING: clock time much less than file system time\n");
+ printf("WARNING: using file system time\n");
+ } else {
+ return;
+ }
+
+bad:
+ time.tv_sec = base;
+ sc->sc_initted = 1;
+ printf("WARNING: CHECK AND RESET THE DATE!\n");
+}
+
+/*
+ * Reset the TOD clock. This is done when the system is halted or
+ * when the time is reset by the stime system call.
+ */
+void
+resettodr()
+{
+ struct tod_time c;
+ struct clock_softc *sc = (struct clock_softc *)clock_cd.cd_devs[0];
+ register int t, t2;
+
+ /*
+ * Don't reset clock if time has not been set!
+ */
+ if (!sc->sc_initted) {
+ return;
+ }
+
+ /* Compute the day of week; 1 is Sunday. */
+ t2 = time.tv_sec / SECDAY;
+ c.dow = (t2 + 5) % 7; /* 1/1/1970 was a Thursday */
+
+ /* compute the year */
+ t2 = time.tv_sec / SECDAY;
+ c.year = 69;
+ while (t2 >= 0) { /* whittle off years */
+ t = t2;
+ c.year++;
+ t2 -= YEARDAYS(c.year);
+ }
+
+ /* t = month + day; separate */
+ t2 = YEARDAYS(c.year);
+ for (c.mon = 1; c.mon < 12; c.mon++) {
+ if (t < dayyr[c.mon] + (t2 == 366 && c.mon > 1))
+ break;
+ }
+
+ c.day = t - dayyr[c.mon - 1] + 1;
+ if (t2 == 366 && c.mon > 2) {
+ c.day--;
+ }
+
+ t = time.tv_sec % SECDAY;
+ c.hour = t / 3600;
+ t %= 3600;
+ c.min = t / 60;
+ c.sec = t % 60;
+
+ if (sc->sc_clock.clk_set) {
+ (*sc->sc_clock.clk_set)(sc, &c);
+ }
+}
diff --git a/sys/arch/mips64/mips64/context.S b/sys/arch/mips64/mips64/context.S
new file mode 100644
index 00000000000..dd84d71fbfb
--- /dev/null
+++ b/sys/arch/mips64/mips64/context.S
@@ -0,0 +1,434 @@
+/* $OpenBSD: context.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+#include <machine/pte.h>
+
+#include "assym.h"
+
+ .set mips3
+
+ .set noreorder # Noreorder is default style!
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*
+ * Save registers and state used by reboot to take snapshot.
+ */
+LEAF(savectx)
+ REG_S s0, U_PCB_CONTEXT+0*REGSZ(a0)
+ REG_S s1, U_PCB_CONTEXT+1*REGSZ(a0)
+ REG_S s2, U_PCB_CONTEXT+2*REGSZ(a0)
+ REG_S s3, U_PCB_CONTEXT+3*REGSZ(a0)
+ mfc0 v0, COP_0_STATUS_REG
+ REG_S s4, U_PCB_CONTEXT+4*REGSZ(a0)
+ REG_S s5, U_PCB_CONTEXT+5*REGSZ(a0)
+ REG_S s6, U_PCB_CONTEXT+6*REGSZ(a0)
+ REG_S s7, U_PCB_CONTEXT+7*REGSZ(a0)
+ REG_S sp, U_PCB_CONTEXT+8*REGSZ(a0)
+ REG_S s8, U_PCB_CONTEXT+9*REGSZ(a0)
+ PTR_S ra, U_PCB_CONTEXT+10*REGSZ(a0)
+ REG_S v0, U_PCB_CONTEXT+11*REGSZ(a0)
+ cfc0 t1, COP_0_ICR
+ lw t0, cpl
+ sw t1, U_PCB_CONTEXT+12*REGSZ(a0) # save status register
+ sw t0, U_PCB_CONTEXT+13*REGSZ(a0)
+ j ra
+ move v0, zero
+END(savectx)
+
+/*
+ * The following primitives manipulate the run queues. _whichqs tells which
+ * of the 32 queues _qs have processes in them. Setrunqueue puts processes
+ * into queues, Remrq removes them from queues. The running process is on
+ * no queue, other processes are on a queue related to p->p_priority, divided
+ * by 4 actually to shrink the 0-127 range of priorities into the 32 available
+ * queues.
+ */
+/*
+ * setrunqueue(p)
+ * proc *p;
+ *
+ * Call should be made at splclock(), and p->p_stat should be SRUN.
+ */
+NON_LEAF(setrunqueue, FRAMESZ(CF_SZ), ra)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_L t0, P_BACK(a0) ## firewall: p->p_back must be 0
+ bne t0, zero, 1f ##
+ lbu t0, P_PRIORITY(a0) # put on p->p_priority / 4 queue
+ li t1, 1 # compute corresponding bit
+ srl t0, t0, 2 # compute index into 'whichqs'
+ sll t1, t1, t0
+ lw t2, whichqs # set corresponding bit
+ sll t0, t0, LOGREGSZ+1 # compute index into 'qs'
+ or t2, t2, t1
+ sw t2, whichqs
+ LA t1, qs
+ PTR_ADDU t0, t0, t1 # t0 = qp = &qs[pri >> 2]
+ PTR_L t1, P_BACK(t0) # t1 = qp->ph_rlink
+ PTR_S t0, P_FORW(a0) # p->p_forw = qp
+ PTR_S t1, P_BACK(a0) # p->p_back = qp->ph_rlink
+ PTR_S a0, P_FORW(t1) # p->p_back->p_forw = p;
+ j ra
+ PTR_S a0, P_BACK(t0) # qp->ph_rlink = p
+
+1:
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ PTR_S ra, CF_RA_OFFS(sp)
+ PANIC("setrunqueue")
+ jr ra
+ nop
+END(setrunqueue)
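+
+/*
+ * Worked example (editorial sketch): for a process with p_priority = 50,
+ * the queue index is 50 >> 2 = 12, so bit (1 << 12) is set in whichqs and
+ * the process is linked onto qs[12]; the LOGREGSZ+1 shift above scales the
+ * index by the size of one queue head (two pointers).
+ */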
+
+/*
+ * Remrq(p)
+ *
+ * Call should be made at splclock().
+ */
+NON_LEAF(remrunqueue, FRAMESZ(CF_SZ), ra)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ lbu t0, P_PRIORITY(a0) # get from p->p_priority / 4 queue
+ li t1, 1 # compute corresponding bit
+ srl t0, t0, 2 # compute index into 'whichqs'
+ lw t2, whichqs # check corresponding bit
+ sll t1, t1, t0
+ and v0, t2, t1
+ beqz v0, 2f # oops! queue is empty!
+ PTR_L v0, P_BACK(a0) # v0 = p->p_back
+
+ PTR_L v1, P_FORW(a0) # v1 = p->p_forw
+ PTR_SLL t0, t0, LOGREGSZ+1 # compute index into 'qs'
+ PTR_S v1, P_FORW(v0) # p->p_back->p_forw = p->p_forw;
+ PTR_S v0, P_BACK(v1) # p->p_forw->p_back = p->p_back
+ LA v0, qs
+ PTR_ADDU t0, t0, v0 # t0 = qp = &qs[pri >> 2]
+ PTR_L v0, P_FORW(t0) # check if queue empty
+ bne v0, t0, 1f # No. qp->ph_link != qp
+ xor t2, t2, t1 # clear corresponding bit in 'whichqs'
+ sw t2, whichqs
+1:
+ j ra
+ PTR_S zero, P_BACK(a0) # for firewall checking
+
+2:
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ PTR_S ra, CF_RA_OFFS(sp)
+ PANIC("remrunqueue empty")
+ jr ra
+ nop
+END(remrunqueue)
+
+/*
+ * Idle: this is where we spend time when there is nothing to do.
+ */
+LEAF(idle)
+ PTR_S zero, curproc # set curproc NULL for stats
+ sw zero, cpl # lower to spl0
+
+ mfc0 a0, COP_0_STATUS_REG
+ li a1, SR_INT_ENAB
+ or a0, a0, a1
+ mtc0 a0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+#ifdef IMASK_EXTERNAL
+ jal hw_setintrmask
+ xor a0, a0
+#endif
+ jal updateimask # Make sure SR imask is updated
+ xor a0, a0
+
+ li t1,1
+#if defined(TGT_CP7000) || defined(TGT_CP7000G)
+ PTR_L t2, misc_h # if non zero, do Ocelot LEDs.
+ beqz t2, 1f
+ li t0, 0x40
+ sb t0, 0x0d(t2)
+#endif
+1:
+ beq t1, zero, 2f # check if stuck in idle!
+ addu t1, t1, 1
+ lw t0, whichqs # look for non-empty queue
+ beq t0, zero, 1b
+ nop
+#if defined(TGT_CP7000) || defined(TGT_CP7000G)
+ beqz t2, sw1
+ li t0, 0x40
+ sb t0, 0x0c(t2)
+#endif
+ b sw1 # Hey, time to do some work!
+ nop
+2:
+ break BREAK_SOVER_VAL # interrupt stuck!?
+ b 1b
+ nop
+ jr ra # DDB trace
+ nop
+ .globl e_idle
+e_idle:
+END(idle)
+
+/*
+ * switch_exit(p)
+ *
+ * At exit of a process, do a cpu_switch for the last time.
+ * All interrupts should be blocked at this point.
+ */
+LEAF(switch_exit)
+ mfc0 v0, COP_0_STATUS_REG
+ li v1, ~SR_INT_ENAB
+ and v0, v0, v1
+ mtc0 v0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ LA sp, idle_stack - FRAMESZ(CF_SZ)
+ jal exit2
+ nop
+
+ PTR_S zero, curproc
+END(switch_exit)
+ /* FALL THROUGH TO cpu switch! */
+/*
+ * cpu_switch()
+ * Find the highest priority process and resume it.
+ */
+NON_LEAF(cpu_switch, FRAMESZ(CF_SZ), ra)
+ PTR_L t3, curprocpaddr
+ PTR_S sp, U_PCB_CONTEXT+8*REGSZ(t3) # save old sp
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ PTR_S ra, CF_RA_OFFS(sp)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ lw t0, cpl
+ sw t0, U_PCB_CONTEXT+13*REGSZ(t3)
+# lw t2, cnt+V_SWTCH # for statistics
+ REG_S s0, U_PCB_CONTEXT+0*REGSZ(t3) # do a 'savectx()'
+ REG_S s1, U_PCB_CONTEXT+1*REGSZ(t3)
+ REG_S s2, U_PCB_CONTEXT+2*REGSZ(t3)
+ REG_S s3, U_PCB_CONTEXT+3*REGSZ(t3)
+ REG_S s4, U_PCB_CONTEXT+4*REGSZ(t3)
+ REG_S s5, U_PCB_CONTEXT+5*REGSZ(t3)
+ REG_S s6, U_PCB_CONTEXT+6*REGSZ(t3)
+ REG_S s7, U_PCB_CONTEXT+7*REGSZ(t3)
+ REG_S s8, U_PCB_CONTEXT+9*REGSZ(t3)
+ PTR_S ra, U_PCB_CONTEXT+10*REGSZ(t3)
+ mfc0 t0, COP_0_STATUS_REG
+ cfc0 t1, COP_0_ICR
+ REG_S t0, U_PCB_CONTEXT+11*REGSZ(t3)
+ REG_S t1, U_PCB_CONTEXT+12*REGSZ(t3)
+
+ lw t1, whichqs # look for non-empty queue
+# addu t2, t2, 1
+# sw t2, cnt+V_SWTCH
+ beq t1, zero, idle # if none, idle
+ nop
+sw1:
+ mfc0 v0, COP_0_STATUS_REG
+ li v1, ~SR_INT_ENAB
+ and v0, v0, v1
+ mtc0 v0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ lw t0, whichqs # look for non-empty queue
+ li t2, -1 # t2 = lowest bit set
+ beq t0, zero, idle # if none, idle
+ move t3, t0 # t3 = saved whichqs
+1:
+ addu t2, t2, 1
+ and t1, t0, 1 # bit set?
+ beq t1, zero, 1b
+ srl t0, t0, 1 # try next bit
+/*
+ * Remove process from queue.
+ */
+ PTR_SLL t0, t2, LOGREGSZ+1
+ LA t1, qs
+ PTR_ADDU t0, t0, t1 # t0 = qp = &qs[highbit]
+ PTR_L a0, P_FORW(t0) # a0 = p = highest pri process
+ PTR_L v0, P_FORW(a0) # v0 = p->p_forw
+ beq t0, a0, 4f # make sure something in queue
+ PTR_S v0, P_FORW(t0) # qp->ph_link = p->p_forw;
+ PTR_S t0, P_BACK(v0) # p->p_forw->p_back = qp
+ bne v0, t0, 2f # queue still not empty
+ PTR_S zero, P_BACK(a0) ## for firewall checking
+ li v1, 1 # compute bit in 'whichqs'
+ sll v1, v1, t2
+ xor t3, t3, v1 # clear bit in 'whichqs'
+ sw t3, whichqs
+2:
+/*
+ * Switch to new context.
+ */
+ sw zero, want_resched
+ jal pmap_activate # v0 = TLB PID
+ move s0, a0 # BDSLOT: save p
+
+/*
+ * We need to wire the process kernel stack mapping so there
+ * will be no tlb misses in exception handlers. This is done
+ * by invalidating any tlb entries mapping the U-area and
+ * putting valid mappings in tlb entries 0 and 1.
+ */
+
+ PTR_L t3, P_ADDR(s0) # get uarea pointer.
+ PTR_S s0, curproc # set curproc
+ PTR_S t3, curprocpaddr
+
+ li t1, SONPROC
+ sb t1, P_STAT(s0) # set to onproc.
+
+ or v0, t3
+ dmtc0 v0, COP_0_TLB_HI # init high entry (tlbid)
+ LA t1, (VM_MIN_KERNEL_ADDRESS)
+ PTR_SUBU t2, t3, t1
+ bltz t2, ctx3 # not mapped.
+ PTR_L t1, Sysmap
+ tlbp
+ PTR_SRL t2, PGSHIFT+1
+ PTR_SLL t2, 3
+ PTR_ADDU t1, t2 # t1 now points at ptes.
+ mfc0 t0, COP_0_TLB_INDEX
+ nop
+ bltz t0, ctx1 # not in tlb
+ LA t2, KSEG0_BASE # NOTE: if > 1 ins, does not matter
+
+ dmtc0 t2, COP_0_TLB_HI # invalidate it.
+ dmtc0 zero, COP_0_TLB_LO0
+ dmtc0 zero, COP_0_TLB_LO1
+ nop
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+ nop
+
+ctx1:
+ mtc0 zero, COP_0_TLB_INDEX
+ dmtc0 v0, COP_0_TLB_HI
+ lw t4, 0(t1)
+ lw t5, 4(t1)
+ dsll t4, t4, 34
+ dsrl t4, t4, 34
+ dsll t5, t5, 34
+ dsrl t5, t5, 34
+ dmtc0 t4, COP_0_TLB_LO0
+ dmtc0 t5, COP_0_TLB_LO1
+ nop
+ PTR_ADDU v0, 2*NBPG
+ nop
+ nop
+ tlbwi
+
+#if (UPAGES != 2)
+ dmtc0 v0, COP_0_TLB_HI # init high entry (tlbid)
+ lw t4, 8(t1)
+ lw t5, 12(t1)
+ dsll t4, t4, 34
+ dsrl t4, t4, 34
+ tlbp
+ nop
+ dsll t5, t5, 34
+ dsrl t5, t5, 34
+ mfc0 t0, COP_0_TLB_INDEX
+ nop
+ bltz t0, ctx2 # not in tlb
+ li t2, 1
+
+ dmtc0 t2, COP_0_TLB_HI # invalidate it.
+ dmtc0 zero, COP_0_TLB_LO0
+ dmtc0 zero, COP_0_TLB_LO1
+ nop
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+ nop
+
+ctx2:
+ mtc0 t2, COP_0_TLB_INDEX
+ dmtc0 v0, COP_0_TLB_HI
+ dmtc0 t4, COP_0_TLB_LO0
+ dmtc0 t5, COP_0_TLB_LO1
+ nop
+ nop
+ nop
+ nop
+ tlbwi
+#endif
+ nop
+ nop
+ nop
+ nop
+
+ctx3:
+
+/*
+ * Restore registers and return.
+ */
+ lw a0, U_PCB_CONTEXT+13*REGSZ(t3)
+ REG_L s0, U_PCB_CONTEXT+0*REGSZ(t3)
+ REG_L s1, U_PCB_CONTEXT+1*REGSZ(t3)
+ REG_L s2, U_PCB_CONTEXT+2*REGSZ(t3)
+ REG_L s3, U_PCB_CONTEXT+3*REGSZ(t3)
+ REG_L s4, U_PCB_CONTEXT+4*REGSZ(t3)
+ REG_L s5, U_PCB_CONTEXT+5*REGSZ(t3)
+ REG_L s6, U_PCB_CONTEXT+6*REGSZ(t3)
+ REG_L s7, U_PCB_CONTEXT+7*REGSZ(t3)
+ REG_L sp, U_PCB_CONTEXT+8*REGSZ(t3)
+ REG_L s8, U_PCB_CONTEXT+9*REGSZ(t3)
+ sw a0, cpl
+#ifdef IMASK_EXTERNAL
+ jal hw_setintrmask
+ nop
+#endif
+ PTR_L ra, U_PCB_CONTEXT+10*REGSZ(t3)
+ lw v0, U_PCB_CONTEXT+11*REGSZ(t3)
+ lw v1, U_PCB_CONTEXT+12*REGSZ(t3)
+#ifndef IMASK_EXTERNAL
+ ctc0 v1, COP_0_ICR # XXX RM7000
+#endif
+ mtc0 v0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ j ra
+ nop
+4:
+ PANIC("cpu_switch") # nothing in queue
+END(cpu_switch)
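The stack-wiring sequence above locates the page table entries covering the U-area by turning the kernel virtual address into a page-pair index into Sysmap (each TLB entry maps an even/odd pair of pages, i.e. two consecutive 32-bit PTEs). A hedged C sketch of that index computation; the Sysmap element type is an assumption read off the shifts in the assembly:

	static unsigned int *
	kstack_ptes(unsigned long uarea_va)
	{
		extern unsigned int *Sysmap;			/* assumed: array of 32-bit PTEs */
		unsigned long off = uarea_va - VM_MIN_KERNEL_ADDRESS;
		unsigned long pair = off >> (PGSHIFT + 1);	/* page-pair index */

		/* each pair selects two consecutive PTEs, 8 bytes apart */
		return (unsigned int *)((char *)Sysmap + (pair << 3));
	}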
diff --git a/sys/arch/mips64/mips64/cp0access.S b/sys/arch/mips64/mips64/cp0access.S
new file mode 100644
index 00000000000..5e2d226e342
--- /dev/null
+++ b/sys/arch/mips64/mips64/cp0access.S
@@ -0,0 +1,205 @@
+/* $OpenBSD: cp0access.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Low level code to manage processor specific registers.
+ */
+
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/pte.h>
+
+#include "assym.h"
+
+ .set noreorder # Noreorder is default style!
+
+/*
+ * Set/clear software interrupt.
+ */
+
+LEAF(setsoftintr0)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ or v0, v0, SOFT_INT_MASK_0 # set soft clock interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(setsoftintr0)
+
+LEAF(clearsoftintr0)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ and v0, v0, ~SOFT_INT_MASK_0 # clear soft clock interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(clearsoftintr0)
+
+LEAF(setsoftintr1)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ or v0, v0, SOFT_INT_MASK_1 # set soft net interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(setsoftintr1)
+
+LEAF(clearsoftintr1)
+ mfc0 v0, COP_0_CAUSE_REG # read cause register
+ nop
+ and v0, v0, ~SOFT_INT_MASK_1 # clear soft net interrupt
+ mtc0 v0, COP_0_CAUSE_REG # save it
+ j ra
+ nop
+END(clearsoftintr1)
+
+/*
+ * Set/change interrupt priority routines.
+ * These routines return the previous state.
+ */
+
+LEAF(enableintr)
+ mfc0 v0, COP_0_STATUS_REG # read status register
+ nop
+ or v1, v0, SR_INT_ENAB
+ mtc0 v1, COP_0_STATUS_REG # enable all interrupts
+ j ra
+ nop
+END(enableintr)
+
+LEAF(disableintr)
+ mfc0 v0, COP_0_STATUS_REG # read status register
+ nop
+ and v1, v0, ~SR_INT_ENAB
+ mtc0 v1, COP_0_STATUS_REG # disable all interrupts
+ nop # Propagate new status
+ nop
+ nop
+ nop # so int's are off before
+ nop # return to caller.
+ nop # total 10 nops to handle
+ nop # RM7000 1.x bug.
+ nop
+ nop
+ nop
+ j ra
+ nop
+END(disableintr)
+
+LEAF(updateimask)
+ lw t0, idle_mask
+ not a0, a0 # 1 means masked so invert.
+ and a0, t0 # never upgrade to higher than max
+#ifndef IMASK_EXTERNAL
+ cfc0 v0, COP_0_ICR
+ li v1, ~IC_INT_MASK
+ and v1, v0
+ and v0, a0, IC_INT_MASK
+ or v1, v0
+ ctc0 v1, COP_0_ICR
+#endif
+ mfc0 v0, COP_0_STATUS_REG
+ li v1, ~SR_INT_MASK
+ and v1, v0
+ sll v0, a0, 8
+ and v0, SR_INT_MASK
+ or v1, v0
+ mtc0 v1, COP_0_STATUS_REG
+ nop # total 10 nops to handle
+ nop # RM7000 1.x bug.
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ jr ra
+ move v0, v1
+END(updateimask)
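updateimask above folds the requested mask (a 1 bit means "masked") into both the RM7000 interrupt control register and the IM field of the status register. A minimal C sketch of the status-register half; the SR_INT_MASK value used here is an assumption (the usual MIPS IM0-IM7 field at bits 8-15):

	#define SKETCH_SR_INT_MASK	0x0000ff00	/* assumed IM0-IM7 field */

	static unsigned int
	merge_imask(unsigned int sr, unsigned int idle_mask, unsigned int arg)
	{
		unsigned int enabled = ~arg & idle_mask;	/* invert: arg has 1 = masked */

		sr &= ~SKETCH_SR_INT_MASK;			/* drop the old IM bits */
		sr |= (enabled << 8) & SKETCH_SR_INT_MASK;	/* install the new ones */
		return sr;					/* value handed to mtc0 */
	}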
+
+LEAF(setsr)
+ mtc0 a0, COP_0_STATUS_REG
+ nop # total 10 nops to handle
+ nop # RM7000 1.x bug.
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ jr ra
+ move v0, a0
+END(setsr)
+
+LEAF(cp0_get_count)
+ mfc0 v0, COP_0_COUNT
+ j ra
+ nop
+END(cp0_get_count)
+
+LEAF(cp0_set_compare)
+ mtc0 a0, COP_0_COMPARE
+ j ra
+ nop
+END(cp0_set_compare)
+
+LEAF(cp0_getperfcount)
+ mfc0 v0, COP_0_PC_COUNT
+ nop; nop
+ j ra
+ nop
+END(cp0_getperfcount)
+
+LEAF(cp0_setperfcount)
+ mtc0 a0, COP_0_PC_COUNT
+ nop; nop
+ j ra
+ nop
+END(cp0_setperfcount)
+
+LEAF(cp0_setperfctrl)
+ mtc0 a0, COP_0_PC_CTRL
+ nop; nop
+ j ra
+ nop
+END(cp0_setperfctrl)
diff --git a/sys/arch/mips64/mips64/cpu.c b/sys/arch/mips64/mips64/cpu.c
new file mode 100644
index 00000000000..cb7ebf5f4a1
--- /dev/null
+++ b/sys/arch/mips64/mips64/cpu.c
@@ -0,0 +1,241 @@
+/* $OpenBSD: cpu.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1997-2003 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/device.h>
+
+#include <machine/pte.h>
+#include <machine/cpu.h>
+#include <machine/autoconf.h>
+
+
+/* Definition of the driver for autoconfig. */
+static int cpumatch(struct device *, void *, void *);
+static void cpuattach(struct device *, struct device *, void *);
+
+int cpu_is_rm7k = 0;
+
+struct cfattach cpu_ca = {
+ sizeof(struct device), cpumatch, cpuattach
+};
+struct cfdriver cpu_cd = {
+ NULL, "cpu", DV_DULL, NULL, 0
+};
+
+static int
+cpumatch(parent, cfdata, aux)
+ struct device *parent;
+ void *cfdata;
+ void *aux;
+{
+ struct confargs *ca = aux;
+
+ /* make sure that we're looking for a CPU. */
+ if (strcmp(ca->ca_name, cpu_cd.cd_name) != 0)
+ return (0);
+
+ return (20); /* Make CPU probe first */
+}
+
+static void
+cpuattach(parent, dev, aux)
+ struct device *parent;
+ struct device *dev;
+ void *aux;
+{
+
+ printf(": ");
+
+ switch(sys_config.cpu.type) {
+
+ case MIPS_R4000:
+ if(CpuPrimaryInstCacheSize == 16384)
+ printf("MIPS R4400 CPU");
+ else
+ printf("MIPS R4000 CPU");
+ break;
+ case MIPS_R5000:
+ printf("MIPS R5000 CPU");
+ break;
+ case MIPS_R10000:
+ printf("MIPS R10000 CPU");
+ break;
+ case MIPS_R4200:
+ printf("NEC VR4200 CPU (ICE)");
+ break;
+ case MIPS_R4300:
+ printf("NEC VR4300 CPU");
+ break;
+ case MIPS_R4100:
+ printf("NEC VR41xx CPU");
+ break;
+ case MIPS_R4600:
+ printf("QED R4600 Orion CPU");
+ break;
+ case MIPS_R4700:
+ printf("QED R4700 Orion CPU");
+ break;
+ case MIPS_RM52XX:
+ printf("PMC-Sierra RM52X0 CPU");
+ break;
+ case MIPS_RM7000:
+ if(sys_config.cpu.vers_maj < 2) {
+ printf("PMC-Sierra RM7000 CPU");
+ }
+ else {
+ printf("PMC-Sierra RM7000A CPU");
+ }
+ cpu_is_rm7k++;
+ break;
+ case MIPS_RM9000:
+ printf("PMC-Sierra RM9000 CPU");
+ break;
+ default:
+ printf("Unknown CPU type (0x%x)",sys_config.cpu.type);
+ break;
+ }
+ printf(" Rev. %d.%d with ", sys_config.cpu.vers_maj, sys_config.cpu.vers_min);
+
+
+ switch(fpu_id.cpu.cp_imp) {
+
+ case MIPS_SOFT:
+ printf("Software emulation float");
+ break;
+ case MIPS_R4010:
+ printf("MIPS R4010 FPC");
+ break;
+ case MIPS_R10010:
+ printf("MIPS R10000 FPU");
+ break;
+ case MIPS_R4210:
+ printf("NEC VR4200 FPC (ICE)");
+ break;
+ case MIPS_R4600:
+ printf("QED R4600 Orion FPC");
+ break;
+ case MIPS_R4700:
+ printf("QED R4700 Orion FPC");
+ break;
+ case MIPS_R5000:
+ printf("MIPS R5000 based FPC");
+ break;
+ case MIPS_RM52XX:
+ printf("PMC-Sierra RM52X0 FPC");
+ break;
+ case MIPS_RM7000:
+ printf("PMC-Sierra RM7000 FPC");
+ break;
+ case MIPS_RM9000:
+ printf("PMC-Sierra RM9000 FPC");
+ break;
+ case MIPS_UNKF1:
+ default:
+ printf("Unknown FPU type (0x%x)", fpu_id.cpu.cp_imp);
+ break;
+ }
+ printf(" Rev. %d.%d", fpu_id.cpu.cp_majrev, fpu_id.cpu.cp_minrev);
+ printf("\n");
+
+ printf(" CPU clock %dMhz\n",sys_config.cpu.clock/1000000);
+ printf(" L1 Cache: I size %dkb(%d line),",
+ CpuPrimaryInstCacheSize / 1024,
+ CpuPrimaryInstCacheLSize);
+ printf(" D size %dkb(%d line), ",
+ CpuPrimaryDataCacheSize / 1024,
+ CpuPrimaryDataCacheLSize);
+ switch(CpuNWayCache) {
+ case 2:
+ printf("two way.\n");
+ break;
+ case 4:
+ printf("four way.\n");
+ break;
+ default:
+ printf("direct mapped.\n");
+ break;
+ }
+ if(CpuSecondaryCacheSize != 0) {
+ switch(fpu_id.cpu.cp_imp) {
+ case MIPS_RM7000:
+ printf(" L2 Cache: Size %dkb, four way\n",
+ CpuSecondaryCacheSize / 1024);
+ break;
+
+ default:
+ printf(" L2 Cache: Size %dkb, direct mapped\n",
+ CpuSecondaryCacheSize / 1024);
+ break;
+ }
+
+ }
+ if(CpuTertiaryCacheSize != 0) {
+ printf(" L3 Cache: Size %dkb, direct mapped\n",
+ CpuTertiaryCacheSize / 1024);
+ }
+
+#ifdef DEBUG
+ printf("\tSetsize %d:%d\n", CpuPrimaryInstSetSize, CpuPrimaryDataSetSize);
+ printf("\tAlias mask 0x%x\n", CpuCacheAliasMask);
+ printf("\tConfig Register %x\n",CpuConfigRegister);
+ printf("\tCache type %x\n", CpuCacheType);
+ if(fpu_id.cpu.cp_imp == MIPS_RM7000) {
+ u_int tmp;
+ tmp = CpuConfigRegister;
+ printf("\t\t\t");
+ printf("K0 = %1d ",0x7 & tmp);
+ printf("SE = %1d ",0x1 & (tmp>>3));
+ printf("DB = %1d ",0x1 & (tmp>>4));
+ printf("IB = %1d\n",0x1 & (tmp>>5));
+ printf("\t\t\t");
+ printf("DC = %1d ",0x7 & (tmp>>6));
+ printf("IC = %1d ",0x7 & (tmp>>9));
+ printf("TE = %1d ",0x1 & (tmp>>12));
+ printf("EB = %1d\n",0x1 & (tmp>>13));
+ printf("\t\t\t");
+ printf("EM = %1d ",0x1 & (tmp>>14));
+ printf("BE = %1d ",0x1 & (tmp>>15));
+ printf("TC = %1d ",0x1 & (tmp>>17));
+ printf("EW = %1d\n",0x3 & (tmp>>18));
+ printf("\t\t\t");
+ printf("TS = %1d ",0x3 & (tmp>>20));
+ printf("EP = %1d ",0xf & (tmp>>24));
+ printf("EC = %1d ",0x7 & (tmp>>28));
+ printf("SC = %1d\n",0x1 & (tmp>>31));
+ }
+ printf("\tStatus Register %x\n",CpuStatusRegister);
+#endif
+}
diff --git a/sys/arch/mips64/mips64/cpu_ecoff.c b/sys/arch/mips64/mips64/cpu_ecoff.c
new file mode 100644
index 00000000000..b9731837fca
--- /dev/null
+++ b/sys/arch/mips64/mips64/cpu_ecoff.c
@@ -0,0 +1,95 @@
+/* $OpenBSD: cpu_ecoff.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by Ralph
+ * Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)machdep.c 8.3 (Berkeley) 1/12/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/vnode.h>
+#include <sys/exec.h>
+#include <sys/resourcevar.h>
+
+#include <machine/frame.h>
+
+#if defined(_KERN_DO_ECOFF)
+#include <sys/exec_ecoff.h>
+
+void cpu_exec_ecoff_setregs __P((struct proc *, struct exec_package *,
+ u_long, register_t *));
+void
+cpu_exec_ecoff_setregs(p, pack, stack, retval)
+ struct proc *p;
+ struct exec_package *pack;
+ u_long stack;
+ register_t *retval;
+{
+ struct ecoff_aouthdr *eap;
+
+ setregs(p, pack, stack, retval);
+ eap = (struct ecoff_aouthdr *)
+ ((caddr_t)pack->ep_hdr + sizeof(struct ecoff_filehdr));
+#if defined(pmax)
+ p->p_md.md_regs[GP] = eap->ea_gp_value;
+#else
+ p->p_md.md_regs->gp = eap->ea_gp_value;
+#endif
+}
+
+/*
+ * cpu_exec_ecoff_hook():
+ * cpu-dependent ECOFF format hook for execve().
+ *
+ * Do any machine-dependent diddling of the exec package when doing ECOFF.
+ *
+ */
+int
+cpu_exec_ecoff_hook(p, epp)
+ struct proc *p;
+ struct exec_package *epp;
+{
+#ifdef COMPAT_ULTRIX
+ extern struct emul emul_ultrix;
+
+ epp->ep_emul = &emul_ultrix;
+#endif
+ return 0;
+}
+
+#endif /* _KERN_DO_ECOFF */
diff --git a/sys/arch/mips64/mips64/db_disasm.c b/sys/arch/mips64/mips64/db_disasm.c
new file mode 100644
index 00000000000..03558f198ce
--- /dev/null
+++ b/sys/arch/mips64/mips64/db_disasm.c
@@ -0,0 +1,414 @@
+/* $OpenBSD: db_disasm.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)kadb.c 8.1 (Berkeley) 6/10/93
+ * $Id: db_disasm.c,v 1.1 2004/08/06 20:56:03 pefo Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/mips_opcode.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_interface.h>
+#include <ddb/db_output.h>
+
+static char *op_name[64] = {
+/* 0 */ "spec", "bcond","j", "jal", "beq", "bne", "blez", "bgtz",
+/* 8 */ "addi", "addiu","slti", "sltiu","andi", "ori", "xori", "lui",
+/*16 */ "cop0", "cop1", "cop2", "cop3", "beql", "bnel", "blezl","bgtzl",
+/*24 */ "daddi","daddiu","ldl", "ldr", "op34", "op35", "op36", "op37",
+/*32 */ "lb", "lh", "lwl", "lw", "lbu", "lhu", "lwr", "lwu",
+/*40 */ "sb", "sh", "swl", "sw", "sdl", "sdr", "swr", "cache",
+/*48 */ "ll", "lwc1", "lwc2", "lwc3", "lld", "ldc1", "ldc2", "ld",
+/*56 */ "sc", "swc1", "swc2", "swc3", "scd", "sdc1", "sdc2", "sd"
+};
+
+static char *spec_name[64] = {
+/* 0 */ "sll", "spec01","srl", "sra", "sllv", "spec05","srlv","srav",
+/* 8 */ "jr", "jalr", "spec12","spec13","syscall","break","spec16","sync",
+/*16 */ "mfhi", "mthi", "mflo", "mtlo", "dsllv","spec25","dsrlv","dsrav",
+/*24 */ "mult", "multu","div", "divu", "dmult","dmultu","ddiv","ddivu",
+/*32 */ "add", "addu", "sub", "subu", "and", "or", "xor", "nor",
+/*40 */ "spec50","spec51","slt","sltu", "dadd","daddu","dsub","dsubu",
+/*48 */ "tge","tgeu","tlt","tltu","teq","spec65","tne","spec67",
+/*56 */ "dsll","spec71","dsrl","dsra","dsll32","spec75","dsrl32","dsra32"
+};
+
+static char *bcond_name[32] = {
+/* 0 */ "bltz", "bgez", "bltzl", "bgezl", "?", "?", "?", "?",
+/* 8 */ "tgei", "tgeiu", "tlti", "tltiu", "teqi", "?", "tnei", "?",
+/*16 */ "bltzal", "bgezal", "bltzall", "bgezall", "?", "?", "?", "?",
+/*24 */ "?", "?", "?", "?", "?", "?", "?", "?",
+};
+
+static char *cop1_name[64] = {
+/* 0 */ "fadd", "fsub", "fmpy", "fdiv", "fsqrt","fabs", "fmov", "fneg",
+/* 8 */ "fop08","fop09","fop0a","fop0b","fop0c","fop0d","fop0e","fop0f",
+/*16 */ "fop10","fop11","fop12","fop13","fop14","fop15","fop16","fop17",
+/*24 */ "fop18","fop19","fop1a","fop1b","fop1c","fop1d","fop1e","fop1f",
+/*32 */ "fcvts","fcvtd","fcvte","fop23","fcvtw","fop25","fop26","fop27",
+/*40 */ "fop28","fop29","fop2a","fop2b","fop2c","fop2d","fop2e","fop2f",
+/*48 */ "fcmp.f","fcmp.un","fcmp.eq","fcmp.ueq","fcmp.olt","fcmp.ult",
+ "fcmp.ole","fcmp.ule",
+/*56 */ "fcmp.sf","fcmp.ngle","fcmp.seq","fcmp.ngl","fcmp.lt","fcmp.nge",
+ "fcmp.le","fcmp.ngt"
+};
+
+static char *fmt_name[16] = {
+ "s", "d", "e", "fmt3",
+ "w", "fmt5", "fmt6", "fmt7",
+ "fmt8", "fmt9", "fmta", "fmtb",
+ "fmtc", "fmtd", "fmte", "fmtf"
+};
+
+static char *reg_name[32] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
+};
+
+static char *c0_opname[64] = {
+ "c0op00","tlbr", "tlbwi", "c0op03","c0op04","c0op05","tlbwr", "c0op07",
+ "tlbp", "c0op11","c0op12","c0op13","c0op14","c0op15","c0op16","c0op17",
+ "rfe", "c0op21","c0op22","c0op23","c0op24","c0op25","c0op26","c0op27",
+ "eret","c0op31","c0op32","c0op33","c0op34","c0op35","c0op36","c0op37",
+ "c0op40","c0op41","c0op42","c0op43","c0op44","c0op45","c0op46","c0op47",
+ "c0op50","c0op51","c0op52","c0op53","c0op54","c0op55","c0op56","c0op57",
+ "c0op60","c0op61","c0op62","c0op63","c0op64","c0op65","c0op66","c0op67",
+ "c0op70","c0op71","c0op72","c0op73","c0op74","c0op75","c0op77","c0op77",
+};
+
+static char *c0_reg[32] = {
+ "index","random","tlblo0","tlblo1","context","tlbmask","wired","c0r7",
+ "badvaddr","count","tlbhi","c0r11","sr","cause","epc", "prid",
+ "config","lladr","watchlo","watchhi","xcontext","c0r21","c0r22","c0r23",
+ "c0r24","c0r25","ecc","cacheerr","taglo","taghi","errepc","c0r31"
+};
+
+int kdbpeek __P((void *));
+
+static int md_printins __P((int ins, int mdbdot));
+
+db_addr_t
+db_disasm(loc, altfmt)
+ db_addr_t loc;
+ boolean_t altfmt;
+
+{
+ if (md_printins(kdbpeek((void *)loc), loc)) {
+ loc += 4;
+ printf("\t\t");
+ md_printins(kdbpeek((void *)loc), loc);
+ }
+ loc += 4;
+ return loc;
+}
+
+/* ARGSUSED */
+static int
+md_printins(int ins, int mdbdot)
+{
+ InstFmt i;
+ int delay = 0;
+
+ i.word = ins;
+
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ if (i.word == 0) {
+ db_printf("nop");
+ break;
+ }
+ if (i.RType.func == OP_ADDU && i.RType.rt == 0) {
+ db_printf("move\t%s,%s",
+ reg_name[i.RType.rd],
+ reg_name[i.RType.rs]);
+ break;
+ }
+ db_printf("%s", spec_name[i.RType.func]);
+ switch (i.RType.func) {
+ case OP_SLL:
+ case OP_SRL:
+ case OP_SRA:
+ case OP_DSLL:
+ case OP_DSRL:
+ case OP_DSRA:
+ case OP_DSLL32:
+ case OP_DSRL32:
+ case OP_DSRA32:
+ db_printf("\t%s,%s,%d",
+ reg_name[i.RType.rd],
+ reg_name[i.RType.rt],
+ i.RType.shamt);
+ break;
+
+ case OP_SLLV:
+ case OP_SRLV:
+ case OP_SRAV:
+ case OP_DSLLV:
+ case OP_DSRLV:
+ case OP_DSRAV:
+ db_printf("\t%s,%s,%s",
+ reg_name[i.RType.rd],
+ reg_name[i.RType.rt],
+ reg_name[i.RType.rs]);
+ break;
+
+ case OP_MFHI:
+ case OP_MFLO:
+ db_printf("\t%s", reg_name[i.RType.rd]);
+ break;
+
+ case OP_JR:
+ case OP_JALR:
+ delay = 1;
+ /* FALLTHROUGH */
+ case OP_MTLO:
+ case OP_MTHI:
+ db_printf("\t%s", reg_name[i.RType.rs]);
+ break;
+
+ case OP_MULT:
+ case OP_MULTU:
+ case OP_DMULT:
+ case OP_DMULTU:
+ case OP_DIV:
+ case OP_DIVU:
+ case OP_DDIV:
+ case OP_DDIVU:
+ db_printf("\t%s,%s",
+ reg_name[i.RType.rs],
+ reg_name[i.RType.rt]);
+ break;
+
+ case OP_SYSCALL:
+ case OP_SYNC:
+ break;
+
+ case OP_BREAK:
+ db_printf("\t%d", (i.RType.rs << 5) | i.RType.rt);
+ break;
+
+ default:
+ db_printf("\t%s,%s,%s",
+ reg_name[i.RType.rd],
+ reg_name[i.RType.rs],
+ reg_name[i.RType.rt]);
+ };
+ break;
+
+ case OP_BCOND:
+ db_printf("%s\t%s,", bcond_name[i.IType.rt],
+ reg_name[i.IType.rs]);
+ goto pr_displ;
+
+ case OP_BLEZ:
+ case OP_BLEZL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ db_printf("%s\t%s,", op_name[i.IType.op],
+ reg_name[i.IType.rs]);
+ goto pr_displ;
+
+ case OP_BEQ:
+ case OP_BEQL:
+ if (i.IType.rs == 0 && i.IType.rt == 0) {
+ db_printf("b\t");
+ goto pr_displ;
+ }
+ /* FALLTHROUGH */
+ case OP_BNE:
+ case OP_BNEL:
+ db_printf("%s\t%s,%s,", op_name[i.IType.op],
+ reg_name[i.IType.rs],
+ reg_name[i.IType.rt]);
+ pr_displ:
+ delay = 1;
+ db_printf("0x%08x", mdbdot + 4 + ((short)i.IType.imm << 2));
+ break;
+
+ case OP_COP0:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ db_printf("bc0%c\t",
+ "ft"[i.RType.rt & COPz_BC_TF_MASK]);
+ goto pr_displ;
+
+ case OP_MT:
+ db_printf("mtc0\t%s,%s",
+ reg_name[i.RType.rt],
+ c0_reg[i.RType.rd]);
+ break;
+
+ case OP_DMT:
+ db_printf("dmtc0\t%s,%s",
+ reg_name[i.RType.rt],
+ c0_reg[i.RType.rd]);
+ break;
+
+ case OP_MF:
+ db_printf("mfc0\t%s,%s",
+ reg_name[i.RType.rt],
+ c0_reg[i.RType.rd]);
+ break;
+
+ case OP_DMF:
+ db_printf("dmfc0\t%s,%s",
+ reg_name[i.RType.rt],
+ c0_reg[i.RType.rd]);
+ break;
+
+ default:
+ db_printf("%s", c0_opname[i.FRType.func]);
+ };
+ break;
+
+ case OP_COP1:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ db_printf("bc1%c\t",
+ "ft"[i.RType.rt & COPz_BC_TF_MASK]);
+ goto pr_displ;
+
+ case OP_MT:
+ db_printf("mtc1\t%s,f%d",
+ reg_name[i.RType.rt],
+ i.RType.rd);
+ break;
+
+ case OP_MF:
+ db_printf("mfc1\t%s,f%d",
+ reg_name[i.RType.rt],
+ i.RType.rd);
+ break;
+
+ case OP_CT:
+ db_printf("ctc1\t%s,f%d",
+ reg_name[i.RType.rt],
+ i.RType.rd);
+ break;
+
+ case OP_CF:
+ db_printf("cfc1\t%s,f%d",
+ reg_name[i.RType.rt],
+ i.RType.rd);
+ break;
+
+ default:
+ db_printf("%s.%s\tf%d,f%d,f%d",
+ cop1_name[i.FRType.func],
+ fmt_name[i.FRType.fmt],
+ i.FRType.fd, i.FRType.fs, i.FRType.ft);
+ };
+ break;
+
+ case OP_J:
+ case OP_JAL:
+ db_printf("%s\t", op_name[i.JType.op]);
+ db_printf("0x%8x",(mdbdot & 0xF0000000) | (i.JType.target << 2));
+ delay = 1;
+ break;
+
+ case OP_LWC1:
+ case OP_SWC1:
+ db_printf("%s\tf%d,", op_name[i.IType.op],
+ i.IType.rt);
+ goto loadstore;
+
+ case OP_LB:
+ case OP_LH:
+ case OP_LW:
+ case OP_LD:
+ case OP_LBU:
+ case OP_LHU:
+ case OP_LWU:
+ case OP_SB:
+ case OP_SH:
+ case OP_SW:
+ case OP_SD:
+ db_printf("%s\t%s,", op_name[i.IType.op],
+ reg_name[i.IType.rt]);
+ loadstore:
+ db_printf("%d(%s)", (short)i.IType.imm,
+ reg_name[i.IType.rs]);
+ break;
+
+ case OP_ORI:
+ case OP_XORI:
+ if (i.IType.rs == 0) {
+ db_printf("li\t%s,0x%x",
+ reg_name[i.IType.rt],
+ i.IType.imm);
+ break;
+ }
+ /* FALLTHROUGH */
+ case OP_ANDI:
+ db_printf("%s\t%s,%s,0x%x", op_name[i.IType.op],
+ reg_name[i.IType.rt],
+ reg_name[i.IType.rs],
+ i.IType.imm);
+ break;
+
+ case OP_LUI:
+ db_printf("%s\t%s,0x%x", op_name[i.IType.op],
+ reg_name[i.IType.rt],
+ i.IType.imm);
+ break;
+
+ case OP_ADDI:
+ case OP_DADDI:
+ case OP_ADDIU:
+ case OP_DADDIU:
+ if (i.IType.rs == 0) {
+ db_printf("li\t%s,%d",
+ reg_name[i.IType.rt],
+ (short)i.IType.imm);
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ db_printf("%s\t%s,%s,%d", op_name[i.IType.op],
+ reg_name[i.IType.rt],
+ reg_name[i.IType.rs],
+ (short)i.IType.imm);
+ }
+ db_printf("\n");
+ return(delay);
+}
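The disassembler above prints branch and jump targets by reproducing the architectural address calculation. A small illustrative C version of the two cases (names are ad hoc, not kernel interfaces):

	/*
	 * Conditional branch: signed 16-bit word offset, relative to the
	 * instruction following the branch (the delay slot).
	 */
	static unsigned long
	branch_target(unsigned long pc, short imm)
	{
		return pc + 4 + ((long)imm << 2);
	}

	/*
	 * j/jal: the 26-bit target replaces the low 28 bits of the address,
	 * staying within the current 256MB region.
	 */
	static unsigned long
	jump_target(unsigned long pc, unsigned int target)
	{
		return (pc & 0xF0000000UL) | ((unsigned long)target << 2);
	}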
diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c
new file mode 100644
index 00000000000..da13380f195
--- /dev/null
+++ b/sys/arch/mips64/mips64/db_machdep.c
@@ -0,0 +1,656 @@
+/* $OpenBSD: db_machdep.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <dev/cons.h>
+
+#include <machine/autoconf.h>
+#include <machine/db_machdep.h>
+#include <machine/cpu.h>
+#include <machine/mips_opcode.h>
+#include <machine/pte.h>
+#include <machine/frame.h>
+#include <machine/regnum.h>
+
+#include <ddb/db_sym.h>
+#include <ddb/db_extern.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_interface.h>
+
+#define MIPS_JR_RA 0x03e00008 /* instruction code for jr ra */
+
+extern void trapDump(char *);
+u_long MipsEmulateBranch(db_regs_t *, int, int, u_int);
+void stacktrace_subr(db_regs_t *, int (*)(const char*, ...));
+
+int kdbpeek(void *);
+short kdbpeekw(void *);
+char kdbpeekb(void *);
+void kdbpoke(int, int);
+void kdbpokew(int, short);
+void kdbpokeb(int, char);
+int kdb_trap(int, struct trap_frame *);
+
+void db_trap_trace_cmd(db_expr_t, int, db_expr_t, char *);
+void db_dump_tlb_cmd(db_expr_t, int, db_expr_t, char *);
+
+int db_active = 0;
+db_regs_t ddb_regs;
+
+struct db_variable db_regs[] = {
+ { "at", (long *)&ddb_regs.ast, FCN_NULL },
+ { "v0", (long *)&ddb_regs.v0, FCN_NULL },
+ { "v1", (long *)&ddb_regs.v1, FCN_NULL },
+ { "a0", (long *)&ddb_regs.a0, FCN_NULL },
+ { "a1", (long *)&ddb_regs.a1, FCN_NULL },
+ { "a2", (long *)&ddb_regs.a2, FCN_NULL },
+ { "a3", (long *)&ddb_regs.a3, FCN_NULL },
+ { "t0", (long *)&ddb_regs.t0, FCN_NULL },
+ { "t1", (long *)&ddb_regs.t1, FCN_NULL },
+ { "t2", (long *)&ddb_regs.t2, FCN_NULL },
+ { "t3", (long *)&ddb_regs.t3, FCN_NULL },
+ { "t4", (long *)&ddb_regs.t4, FCN_NULL },
+ { "t5", (long *)&ddb_regs.t5, FCN_NULL },
+ { "t6", (long *)&ddb_regs.t6, FCN_NULL },
+ { "t7", (long *)&ddb_regs.t7, FCN_NULL },
+ { "s0", (long *)&ddb_regs.s0, FCN_NULL },
+ { "s1", (long *)&ddb_regs.s1, FCN_NULL },
+ { "s2", (long *)&ddb_regs.s2, FCN_NULL },
+ { "s3", (long *)&ddb_regs.s3, FCN_NULL },
+ { "s4", (long *)&ddb_regs.s4, FCN_NULL },
+ { "s5", (long *)&ddb_regs.s5, FCN_NULL },
+ { "s6", (long *)&ddb_regs.s6, FCN_NULL },
+ { "s7", (long *)&ddb_regs.s7, FCN_NULL },
+ { "t8", (long *)&ddb_regs.t8, FCN_NULL },
+ { "t9", (long *)&ddb_regs.t9, FCN_NULL },
+ { "k0", (long *)&ddb_regs.k0, FCN_NULL },
+ { "k1", (long *)&ddb_regs.k1, FCN_NULL },
+ { "gp", (long *)&ddb_regs.gp, FCN_NULL },
+ { "sp", (long *)&ddb_regs.sp, FCN_NULL },
+ { "s8", (long *)&ddb_regs.s8, FCN_NULL },
+ { "ra", (long *)&ddb_regs.ra, FCN_NULL },
+ { "sr", (long *)&ddb_regs.sr, FCN_NULL },
+ { "lo", (long *)&ddb_regs.mullo, FCN_NULL },
+ { "hi", (long *)&ddb_regs.mulhi, FCN_NULL },
+ { "bad", (long *)&ddb_regs.badvaddr,FCN_NULL },
+ { "cs", (long *)&ddb_regs.cause, FCN_NULL },
+ { "pc", (long *)&ddb_regs.pc, FCN_NULL },
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+extern label_t *db_recover;
+
+int
+kdb_trap(type, fp)
+ int type;
+ struct trap_frame *fp;
+{
+ switch(type) {
+ case T_BREAK: /* breakpoint */
+ if(db_get_value((fp)->pc, sizeof(int), FALSE) == BREAK_SOVER) {
+ (fp)->pc += BKPT_SIZE;
+ }
+ break;
+ case -1:
+ break;
+ default:
+#if 0
+ if (!db_panic)
+ return (0);
+#endif
+ if(db_recover != 0) {
+ db_error("Caught exception in ddb.\n");
+ /*NOTREACHED*/
+ }
+ printf("stoped on non ddb fault\n");
+ }
+
+ bcopy((void *)fp, (void *)&ddb_regs, NUMSAVEREGS * sizeof(register_t));
+
+ db_active++;
+ cnpollc(TRUE);
+ db_trap(type, 0);
+ cnpollc(FALSE);
+ db_active--;
+
+ bcopy((void *)&ddb_regs, (void *)fp, NUMSAVEREGS * sizeof(register_t));
+ return(TRUE);
+}
+void
+db_read_bytes(addr, size, data)
+ vaddr_t addr;
+ size_t size;
+ char *data;
+{
+ while(size >= sizeof(int)) {
+ *((int *)data)++ = kdbpeek((void *)addr);
+ addr += sizeof(int);
+ size -= sizeof(int);
+ }
+
+ if (size >= sizeof(short)) {
+ *((short *)data)++ = kdbpeekw((void *)addr);
+ addr += sizeof(short);
+ size -= sizeof(short);
+ }
+
+ if (size) {
+ *data++ = kdbpeekb((void *)addr);
+ }
+}
+
+void
+db_write_bytes(addr, size, data)
+ vaddr_t addr;
+ size_t size;
+ char *data;
+{
+ vaddr_t ptr = addr;
+ size_t len = size;
+
+ while (len >= sizeof(int)) {
+ kdbpoke(ptr, *((int *)data)++);
+ ptr += sizeof(int);
+ len -= sizeof(int);
+ }
+
+ if (len >= sizeof(short)) {
+ kdbpokew(ptr, *((short *)data)++);
+ ptr += sizeof(short);
+ len -= sizeof(short);
+ }
+
+ if(len) {
+ kdbpokeb(ptr, *data++);
+ }
+ if(addr < VM_MIN_KERNEL_ADDRESS) {
+ Mips_HitSyncDCache(addr, size);
+ Mips_InvalidateICache(PHYS_TO_KSEG0(addr & 0xffff), size);
+ }
+}
+
+void
+db_stack_trace_print(addr, have_addr, count, modif, pr)
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char *modif;
+ int (*pr)(const char *, ...);
+{
+ db_sym_t sym;
+ db_expr_t diff;
+ db_addr_t subr;
+ char *symname;
+ register_t pc, sp, ra, va;
+ register_t a0, a1, a2, a3;
+ unsigned instr, mask;
+ InstFmt i;
+ int more, stksize;
+ extern char edata[];
+ extern char k_intr[];
+ extern char k_general[];
+ extern char idle[];
+ struct trap_frame *regs = &ddb_regs;
+
+ /* get initial values from the exception frame */
+ sp = regs->sp;
+ pc = regs->pc;
+ ra = regs->ra; /* May be a 'leaf' function */
+ a0 = regs->a0;
+ a1 = regs->a1;
+ a2 = regs->a2;
+ a3 = regs->a3;
+
+/* Jump here when done with a frame, to start a new one */
+loop:
+
+/* Jump here after a nonstandard (interrupt handler) frame */
+ stksize = 0;
+
+ /* check for bad SP: could foul up next frame */
+ if (sp & 3 || sp < 0xffffffff80000000) {
+ (*pr)("SP %p: not in kernel\n", sp);
+ ra = 0;
+ subr = 0;
+ goto done;
+ }
+
+#if 0
+ /* Backtraces should continue through interrupts from kernel mode */
+ if (pc >= (unsigned)MipsKernIntr && pc < (unsigned)MipsUserIntr) {
+ (*pr)("MipsKernIntr+%x: (%x, %x ,%x) -------\n",
+ pc-(unsigned)MipsKernIntr, a0, a1, a2);
+ regs = (struct trap_frame *)(sp + STAND_ARG_SIZE);
+ a0 = kdbpeek(&regs->a0);
+ a1 = kdbpeek(&regs->a1);
+ a2 = kdbpeek(&regs->a2);
+ a3 = kdbpeek(&regs->a3);
+
+ pc = kdbpeek(&regs->pc); /* exc_pc - pc at time of exception */
+ ra = kdbpeek(&regs->ra); /* ra at time of exception */
+ sp = kdbpeek(&regs->sp);
+ goto specialframe;
+ }
+#endif
+
+
+ /* check for bad PC */
+ if (pc & 3 || pc < 0xffffffff80000000 || pc >= (unsigned)edata) {
+ (*pr)("PC 0x%x: not in kernel\n", pc);
+ ra = 0;
+ goto done;
+ }
+
+ /*
+ * Dig out the function from the symbol table.
+ */
+ sym = db_search_symbol(pc, DB_STGY_ANY, &diff);
+ db_symbol_values(sym, &symname, 0);
+ if (sym != DB_SYM_NULL) {
+ subr = pc - diff;
+ } else {
+ subr = 0;
+ }
+
+ /*
+ * Find the beginning of the current subroutine by scanning backwards
+ * from the current PC for the end of the previous subroutine.
+ */
+ if (!subr) {
+ va = pc - sizeof(int);
+ while ((instr = kdbpeek((int *)va)) != MIPS_JR_RA)
+ va -= sizeof(int);
+ va += 2 * sizeof(int); /* skip back over branch & delay slot */
+ /* skip over nulls which might separate .o files */
+ while ((instr = kdbpeek((int *)va)) == 0)
+ va += sizeof(int);
+ subr = va;
+ }
+
+ /*
+ * Jump here for locore entry points for which the preceding
+ * function doesn't end in "j ra"
+ */
+ /* scan forwards to find stack size and any saved registers */
+ stksize = 0;
+ more = 3;
+ mask = 0;
+ for (va = subr; more; va += sizeof(int),
+ more = (more == 3) ? 3 : more - 1) {
+ /* stop if hit our current position */
+ if (va >= pc)
+ break;
+ instr = kdbpeek((int *)va);
+ i.word = instr;
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ switch (i.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_SYSCALL:
+ case OP_BREAK:
+ more = 1; /* stop now */
+ };
+ break;
+
+ case OP_BCOND:
+ case OP_J:
+ case OP_JAL:
+ case OP_BEQ:
+ case OP_BNE:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_COP0:
+ case OP_COP1:
+ case OP_COP2:
+ case OP_COP3:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ more = 2; /* stop after next instruction */
+ };
+ break;
+
+ case OP_SW:
+ case OP_SD:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ switch (i.IType.rt) {
+ case 4: /* a0 */
+ a0 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 5: /* a1 */
+ a1 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 6: /* a2 */
+ a2 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 7: /* a3 */
+ a3 = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+
+ case 31: /* ra */
+ ra = kdbpeek((int *)(sp + (short)i.IType.imm));
+ break;
+ }
+ break;
+
+ case OP_ADDI:
+ case OP_ADDIU:
+ case OP_DADDI:
+ case OP_DADDIU:
+ /* look for stack pointer adjustment */
+ if (i.IType.rs != 29 || i.IType.rt != 29)
+ break;
+ stksize = - ((short)i.IType.imm);
+ }
+ }
+
+done:
+ if(symname == NULL) {
+ if (subr == (long)idle)
+ (*pr)("idle ");
+ else
+ (*pr)("%p ", subr);
+ } else {
+ (*pr)("%s+%p ", symname, diff);
+ }
+ (*pr)("(%x,%x,%x,%x) sp %x ra %x, sz %d\n", a0, a1, a2, a3, sp, ra, stksize);
+
+ if (subr == (long)k_intr || subr == (long)k_general) {
+ if (subr == (long)k_intr)
+ (*pr)("<-> KERNEL INTERRUPT <->\n");
+ else
+ (*pr)("<-> KERNEL TRAP <->\n");
+ sp = *(register_t *)sp;
+ pc = ((struct trap_frame *)sp)->pc;
+ ra = ((struct trap_frame *)sp)->ra;
+ sp = ((struct trap_frame *)sp)->sp; /* last */
+ goto loop;
+ }
+
+ if (ra) {
+ if (pc == ra && stksize == 0)
+ (*pr)("stacktrace: loop!\n");
+ else {
+ pc = ra;
+ sp += stksize;
+ ra = 0;
+ goto loop;
+ }
+ } else {
+ if (curproc)
+ (*pr)("User-level: pid %d\n", curproc->p_pid);
+ else
+ (*pr)("User-level: curproc NULL\n");
+ }
+}
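The backtracer above recovers each frame by scanning the function prologue for two instruction patterns: the stack adjustment ("addiu sp,sp,-framesize") and the store that saves ra. A self-contained C illustration of just those two decodes, using raw shifts and masks instead of the InstFmt union (opcode values 9/25/43/63 are addiu/daddiu/sw/sd, registers 29 and 31 are sp and ra):

	struct frame_sketch {
		int stksize;		/* bytes the prologue subtracts from sp */
		int ra_off;		/* stack offset of the saved ra, if any */
		int ra_saved;
	};

	static void
	scan_prologue_insn(unsigned int insn, struct frame_sketch *f)
	{
		unsigned int op = insn >> 26;
		unsigned int rs = (insn >> 21) & 0x1f;	/* base/source register */
		unsigned int rt = (insn >> 16) & 0x1f;	/* target register */
		short imm = (short)(insn & 0xffff);	/* sign-extended immediate */

		if ((op == 9 || op == 25) && rs == 29 && rt == 29)
			f->stksize = -imm;		/* addiu/daddiu sp,sp,-N */
		else if ((op == 43 || op == 63) && rs == 29 && rt == 31) {
			f->ra_off = imm;		/* sw/sd ra,off(sp) */
			f->ra_saved = 1;
		}
	}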
+
+/*
+ * To single step, ddb needs to know the next address that we
+ * will get to. That would normally mean finding out both the
+ * address for a branch taken and for a branch not taken, NOT! :-)
+ * MipsEmulateBranch will do the job of finding out _exactly_ which
+ * address we will end up at, so the 'dual bp' method is not
+ * required.
+ */
+db_addr_t
+next_instr_address(db_addr_t pc, boolean_t bd)
+{
+ db_addr_t next;
+
+ next = MipsEmulateBranch(&ddb_regs, pc, 0, 0);
+ return(next);
+}
+
+
+/*
+ * Decode instruction and figure out type.
+ */
+int
+db_inst_type(ins)
+ int ins;
+{
+ InstFmt inst;
+ int ityp = 0;
+
+ inst.word = ins;
+ switch ((int)inst.JType.op) {
+ case OP_SPECIAL:
+ switch ((int)inst.RType.func) {
+ case OP_JR:
+ ityp = IT_BRANCH;
+ break;
+ case OP_JALR:
+ case OP_SYSCALL:
+ ityp = IT_CALL;
+ break;
+ }
+ break;
+
+ case OP_BCOND:
+ switch ((int)inst.IType.rt) {
+ case OP_BLTZ:
+ case OP_BLTZL:
+ case OP_BGEZ:
+ case OP_BGEZL:
+ ityp = IT_BRANCH;
+ break;
+
+ case OP_BLTZAL:
+ case OP_BLTZALL:
+ case OP_BGEZAL:
+ case OP_BGEZALL:
+ ityp = IT_CALL;
+ break;
+ }
+ break;
+
+ case OP_JAL:
+ ityp = IT_CALL;
+ break;
+
+ case OP_J:
+ case OP_BEQ:
+ case OP_BEQL:
+ case OP_BNE:
+ case OP_BNEL:
+ case OP_BLEZ:
+ case OP_BLEZL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ ityp = IT_BRANCH;
+ break;
+
+ case OP_COP1:
+ switch (inst.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ ityp = IT_BRANCH;
+ break;
+ }
+ break;
+
+ case OP_LB:
+ case OP_LH:
+ case OP_LW:
+ case OP_LD:
+ case OP_LBU:
+ case OP_LHU:
+ case OP_LWU:
+ case OP_LWC1:
+ ityp = IT_LOAD;
+ break;
+
+ case OP_SB:
+ case OP_SH:
+ case OP_SW:
+ case OP_SD:
+ case OP_SWC1:
+ ityp = IT_STORE;
+ break;
+ }
+ return (ityp);
+}
+
+/*
+ * MIPS machine dependent DDB commands.
+ */
+
+/*
+ * Do a trap traceback.
+ */
+void
+db_trap_trace_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *m)
+{
+ trapDump("ddb trap trace");
+}
+
+/*
+ * Dump TLB contents.
+ */
+void
+db_dump_tlb_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *m)
+{
+ int tlbno, last, check, pid;
+ struct tlb tlb, tlbp;
+char *attr[] = {
+ "CWTNA", "CWTA ", "UCBL ", "CWB ", "RES ", "RES ", "UCNB ", "BPASS"
+};
+
+ pid = -1;
+
+ if (m[0] == 'p') {
+ if(have_addr && addr < 256) {
+ pid = addr;
+ tlbno = 0;
+ count = sys_config.cpu.tlbsize;
+ }
+ } else if (m[0] == 'c') {
+ last = sys_config.cpu.tlbsize;
+ for (tlbno = 0; tlbno < last; tlbno++) {
+ tlb_read(tlbno, &tlb);
+ for (check = tlbno + 1; check < last; check++) {
+ tlb_read(check, &tlbp);
+if ((tlbp.tlb_hi == tlb.tlb_hi && (tlb.tlb_lo0 & PG_V || tlb.tlb_lo1 & PG_V)) ||
+(pfn_to_pad(tlb.tlb_lo0) == pfn_to_pad(tlbp.tlb_lo0) && tlb.tlb_lo0 & PG_V) ||
+(pfn_to_pad(tlb.tlb_lo1) == pfn_to_pad(tlbp.tlb_lo1) && tlb.tlb_lo1 & PG_V)) {
+ printf("MATCH:\n");
+ db_dump_tlb_cmd(tlbno, 1, 1, "");
+ db_dump_tlb_cmd(check, 1, 1, "");
+ }
+ }
+ }
+ return;
+ } else {
+ if(have_addr && addr < sys_config.cpu.tlbsize) {
+ tlbno = addr;
+ }
+ else {
+ tlbno = 0;
+ count = sys_config.cpu.tlbsize;
+ }
+ }
+ last = tlbno + count;
+
+ for (; tlbno < sys_config.cpu.tlbsize && tlbno < last; tlbno++) {
+ tlb_read(tlbno, &tlb);
+
+ if (pid >= 0 && (tlb.tlb_hi & 0xff) != pid)
+ continue;
+
+ if(tlb.tlb_lo0 & PG_V || tlb.tlb_lo1 & PG_V) {
+ printf("%2d v=0x%08x", tlbno, tlb.tlb_hi & ~0xff);
+ printf("/%02x ", tlb.tlb_hi & 0xff);
+
+ if(tlb.tlb_lo0 & PG_V) {
+ printf("0x%08x ", pfn_to_pad(tlb.tlb_lo0));
+ printf("%c", tlb.tlb_lo0 & PG_M ? 'M' : ' ');
+ printf("%c", tlb.tlb_lo0 & PG_G ? 'G' : ' ');
+ printf(" %s ", attr[(tlb.tlb_lo0 >> 3) & 7]);
+ } else {
+ printf("invalid ");
+ }
+
+ if(tlb.tlb_lo1 & PG_V) {
+ printf("0x%08x ", pfn_to_pad(tlb.tlb_lo1));
+ printf("%c", tlb.tlb_lo1 & PG_M ? 'M' : ' ');
+ printf("%c", tlb.tlb_lo1 & PG_G ? 'G' : ' ');
+ printf(" %s ", attr[(tlb.tlb_lo1 >> 3) & 7]);
+ } else {
+ printf("invalid ");
+ }
+ printf(" sz=%x", tlb.tlb_mask);
+ }
+ else if (pid < 0) {
+ printf("%2d v=invalid ", tlbno);
+ }
+ printf("\n");
+ }
+}
+
+
+struct db_command mips_db_command_table[] = {
+ { "tlb", db_dump_tlb_cmd, 0, NULL },
+ { "trap", db_trap_trace_cmd, 0, NULL },
+ { NULL, NULL, 0, NULL }
+};
+
+void
+db_machine_init()
+{
+extern char *ssym;
+ db_machine_commands_install(mips_db_command_table);
+ if (ssym != NULL) {
+ ddb_init(); /* Init symbols */
+ }
+}
diff --git a/sys/arch/mips64/mips64/disksubr.c b/sys/arch/mips64/mips64/disksubr.c
new file mode 100644
index 00000000000..88aaf6b2551
--- /dev/null
+++ b/sys/arch/mips64/mips64/disksubr.c
@@ -0,0 +1,553 @@
+/* $OpenBSD: disksubr.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/* $NetBSD: disksubr.c,v 1.21 1996/05/03 19:42:03 christos Exp $ */
+
+/*
+ * Copyright (c) 1996 Theo de Raadt
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <sys/syslog.h>
+#include <sys/disk.h>
+
+#include <mips64/archtype.h>
+
+#define b_cylin b_resid
+
+#define BOOT_MAGIC 0xAA55
+#define BOOT_MAGIC_OFF (DOSPARTOFF+NDOSPART*sizeof(struct dos_partition))
+
+void
+dk_establish(dk, dev)
+ struct disk *dk;
+ struct device *dev;
+{
+}
+
+/*
+ * Attempt to read a disk label from a device
+ * using the indicated strategy routine.
+ * The label must be partly set up before this:
+ * secpercyl, secsize and anything required for a block i/o read
+ * operation in the driver's strategy/start routines
+ * must be filled in before calling us.
+ *
+ * If dos partition table requested, attempt to load it and
+ * find disklabel inside a DOS partition. Also, if bad block
+ * table needed, attempt to extract it as well. Return buffer
+ * for use in signalling errors if requested.
+ *
+ * We would like to check if each MBR has a valid BOOT_MAGIC, but
+ * we cannot because it doesn't always exist. So.. we assume the
+ * MBR is valid.
+ *
+ * Returns null on success and an error string on failure.
+ */
+char *
+readdisklabel(dev, strat, lp, osdep, spoofonly)
+ dev_t dev;
+ void (*strat) __P((struct buf *));
+ struct disklabel *lp;
+ struct cpu_disklabel *osdep;
+ int spoofonly;
+{
+ struct dos_partition *dp = osdep->dosparts, *dp2;
+ struct dkbad *bdp = &DKBAD(osdep);
+ struct buf *bp;
+ struct disklabel *dlp;
+ char *msg = NULL, *cp;
+ int dospartoff, cyl, i, ourpart = -1;
+
+ /* minimal requirements for archetypal disk label */
+ if (lp->d_secsize == 0)
+ lp->d_secsize = DEV_BSIZE;
+ if (lp->d_secperunit == 0)
+ lp->d_secperunit = 0x1fffffff;
+ if (lp->d_secpercyl == 0) {
+ msg = "invalid geometry";
+ goto done;
+ }
+ lp->d_npartitions = RAW_PART + 1;
+ for (i = 0; i < RAW_PART; i++) {
+ lp->d_partitions[i].p_size = 0;
+ lp->d_partitions[i].p_offset = 0;
+ }
+ if (lp->d_partitions[i].p_size == 0)
+ lp->d_partitions[i].p_size = 0x1fffffff;
+ lp->d_partitions[i].p_offset = 0;
+
+ /* get a buffer and initialize it */
+ bp = geteblk((int)lp->d_secsize);
+ bp->b_dev = dev;
+
+ /* do dos partitions in the process of getting disklabel? */
+ dospartoff = 0;
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ daddr_t part_blkno = DOSBBSECTOR;
+ unsigned long extoff = 0;
+ int wander = 1, n = 0, loop = 0;
+
+ /*
+ * Read dos partition table, follow extended partitions.
+ * Map the partitions to disklabel entries i-p
+ */
+ while (wander && n < 8 && loop < 8) {
+ loop++;
+ wander = 0;
+
+ /* read boot record */
+ bp->b_blkno = part_blkno;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylin = part_blkno / lp->d_secpercyl;
+ (*strat)(bp);
+
+ /* if successful, wander through dos partition table */
+ if (biowait(bp)) {
+ msg = "dos partition I/O error";
+ goto done;
+ }
+ if(((int *)bp->b_data)[0] == 0x01084025 &&
+ ((int *)bp->b_data)[1] == 0x01294825) {
+ goto nodoslabel;
+ }
+ bcopy(bp->b_data + DOSPARTOFF, dp, NDOSPART * sizeof(*dp));
+
+ if (ourpart == -1) {
+ /* Search for our MBR partition */
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_OPENBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_FREEBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_NETBSD)
+ ourpart = i;
+ if (ourpart == -1)
+ goto donot;
+ /*
+ * This is our MBR partition. need sector address
+ * for SCSI/IDE, cylinder for ESDI/ST506/RLL
+ */
+ dp2 = &dp[ourpart];
+ dospartoff = get_le(&dp2->dp_start) + part_blkno;
+ cyl = DPCYL(dp2->dp_scyl, dp2->dp_ssect);
+
+ /* XXX build a temporary disklabel */
+ lp->d_partitions[0].p_size = get_le(&dp2->dp_size);
+ lp->d_partitions[0].p_offset =
+ get_le(&dp2->dp_start) + part_blkno;
+ if (lp->d_ntracks == 0)
+ lp->d_ntracks = dp2->dp_ehd + 1;
+ if (lp->d_nsectors == 0)
+ lp->d_nsectors = DPSECT(dp2->dp_esect);
+ if (lp->d_secpercyl == 0)
+ lp->d_secpercyl = lp->d_ntracks *
+ lp->d_nsectors;
+ }
+donot:
+ /*
+ * In case the disklabel read below fails, we want to
+ * provide a fake label in i-p.
+ */
+ for (dp2=dp, i=0; i < NDOSPART && n < 8; i++, dp2++) {
+ struct partition *pp = &lp->d_partitions[8+n];
+
+ if (dp2->dp_typ == DOSPTYP_OPENBSD)
+ continue;
+ if (get_le(&dp2->dp_size) > lp->d_secperunit)
+ continue;
+ if (get_le(&dp2->dp_size))
+ pp->p_size = get_le(&dp2->dp_size);
+ if (get_le(&dp2->dp_start))
+ pp->p_offset =
+ get_le(&dp2->dp_start) + part_blkno;
+
+ switch (dp2->dp_typ) {
+ case DOSPTYP_UNUSED:
+ for (cp = (char *)dp2;
+ cp < (char *)(dp2 + 1); cp++)
+ if (*cp)
+ break;
+ /*
+ * Was it all zeroes? If so, it is
+ * an unused entry that we don't
+ * want to show.
+ */
+ if (cp == (char *)(dp2 + 1))
+ continue;
+ lp->d_partitions[8 + n++].p_fstype =
+ FS_UNUSED;
+ break;
+
+ case DOSPTYP_LINUX:
+ pp->p_fstype = FS_EXT2FS;
+ n++;
+ break;
+
+ case DOSPTYP_FAT12:
+ case DOSPTYP_FAT16S:
+ case DOSPTYP_FAT16B:
+ case DOSPTYP_FAT32:
+ case DOSPTYP_FAT32L:
+ case DOSPTYP_FAT16L:
+ pp->p_fstype = FS_MSDOS;
+ n++;
+ break;
+ case DOSPTYP_EXTEND:
+ part_blkno = get_le(&dp2->dp_start) + extoff;
+ if (!extoff)
+ extoff = get_le(&dp2->dp_start);
+ wander = 1;
+ break;
+ default:
+ pp->p_fstype = FS_OTHER;
+ n++;
+ break;
+ }
+ }
+ }
+ lp->d_bbsize = 8192;
+ lp->d_sbsize = 64*1024; /* XXX ? */
+ lp->d_npartitions = MAXPARTITIONS;
+ }
+
+nodoslabel:
+ /* don't read the on-disk label if we are in spoofed-only mode */
+ if (spoofonly)
+ goto done;
+
+ /* next, dig out disk label */
+ bp->b_blkno = dospartoff + LABELSECTOR;
+ bp->b_cylin = cyl;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ (*strat)(bp);
+
+ /* if successful, locate disk label within block and validate */
+ if (biowait(bp)) {
+ /* XXX we return the faked label built so far */
+ msg = "disk label I/O error";
+ goto done;
+ }
+ for (dlp = (struct disklabel *)bp->b_data;
+ dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp));
+ dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
+ if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) {
+ if (msg == NULL)
+ msg = "no disk label";
+ } else if (dlp->d_npartitions > MAXPARTITIONS ||
+ dkcksum(dlp) != 0)
+ msg = "disk label corrupted";
+ else {
+ *lp = *dlp;
+ msg = NULL;
+ break;
+ }
+ }
+
+ if (msg) {
+#if defined(CD9660)
+ if (iso_disklabelspoof(dev, strat, lp) == 0)
+ msg = NULL;
+#endif
+ goto done;
+ }
+
+ /* obtain bad sector table if requested and present */
+ if (bdp && (lp->d_flags & D_BADSECT)) {
+ struct dkbad *db;
+
+ i = 0;
+ do {
+ /* read a bad sector table */
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
+ if (lp->d_secsize > DEV_BSIZE)
+ bp->b_blkno *= lp->d_secsize / DEV_BSIZE;
+ else
+ bp->b_blkno /= DEV_BSIZE / lp->d_secsize;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_cylin = lp->d_ncylinders - 1;
+ (*strat)(bp);
+
+ /* if successful, validate, otherwise try another */
+ if (biowait(bp)) {
+ msg = "bad sector table I/O error";
+ } else {
+ db = (struct dkbad *)(bp->b_data);
+#define DKBAD_MAGIC 0x4321
+ if (db->bt_mbz == 0
+ && db->bt_flag == DKBAD_MAGIC) {
+ msg = NULL;
+ *bdp = *db;
+ break;
+ } else
+ msg = "bad sector table corrupted";
+ }
+ } while ((bp->b_flags & B_ERROR) && (i += 2) < 10 &&
+ i < lp->d_nsectors);
+ }
+
+done:
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ return (msg);
+}
+
+/*
+ * Check new disk label for sensibility
+ * before setting it.
+ */
+int
+setdisklabel(olp, nlp, openmask, osdep)
+ struct disklabel *olp, *nlp;
+ u_long openmask;
+ struct cpu_disklabel *osdep;
+{
+ int i;
+ struct partition *opp, *npp;
+
+ /* sanity clause */
+ if (nlp->d_secpercyl == 0 || nlp->d_secsize == 0 ||
+ (nlp->d_secsize % DEV_BSIZE) != 0)
+ return(EINVAL);
+
+ /* special case to allow disklabel to be invalidated */
+ if (nlp->d_magic == 0xffffffff) {
+ *olp = *nlp;
+ return (0);
+ }
+
+ if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC ||
+ dkcksum(nlp) != 0)
+ return (EINVAL);
+
+ /* XXX missing check if other dos partitions will be overwritten */
+
+ while (openmask != 0) {
+ i = ffs(openmask) - 1;
+ openmask &= ~(1 << i);
+ if (nlp->d_npartitions <= i)
+ return (EBUSY);
+ opp = &olp->d_partitions[i];
+ npp = &nlp->d_partitions[i];
+ if (npp->p_offset != opp->p_offset || npp->p_size < opp->p_size)
+ return (EBUSY);
+ /*
+ * Copy internally-set partition information
+ * if new label doesn't include it. XXX
+ */
+ if (npp->p_fstype == FS_UNUSED && opp->p_fstype != FS_UNUSED) {
+ npp->p_fstype = opp->p_fstype;
+ npp->p_fsize = opp->p_fsize;
+ npp->p_frag = opp->p_frag;
+ npp->p_cpg = opp->p_cpg;
+ }
+ }
+ nlp->d_checksum = 0;
+ nlp->d_checksum = dkcksum(nlp);
+ *olp = *nlp;
+ return (0);
+}
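+/*
+ * Sketch of the usual caller (the sc_dk field names below are
+ * illustrative, not taken from this file): a driver's DIOCSDINFO
+ * ioctl handler passes the user-supplied label through here and
+ * only keeps it if the checks above succeed:
+ *
+ *	error = setdisklabel(sc->sc_dk.dk_label,
+ *	    (struct disklabel *)addr, sc->sc_dk.dk_openmask,
+ *	    sc->sc_dk.dk_cpulabel);
+ *
+ * A zero return means *olp (the in-core label) has been replaced.
+ */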
+
+
+/*
+ * Write disk label back to device after modification.
+ * XXX cannot handle OpenBSD partitions in extended partitions!
+ */
+int
+writedisklabel(dev, strat, lp, osdep)
+ dev_t dev;
+ void (*strat) __P((struct buf *));
+ struct disklabel *lp;
+ struct cpu_disklabel *osdep;
+{
+ struct dos_partition *dp = osdep->dosparts, *dp2;
+ struct buf *bp;
+ struct disklabel *dlp;
+ int error, dospartoff, cyl, i;
+ int ourpart = -1;
+
+ /* get a buffer and initialize it */
+ bp = geteblk((int)lp->d_secsize);
+ bp->b_dev = dev;
+
+ /* do dos partitions in the process of getting disklabel? */
+ dospartoff = 0;
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ /* read master boot record */
+ bp->b_blkno = DOSBBSECTOR;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylin = DOSBBSECTOR / lp->d_secpercyl;
+ (*strat)(bp);
+
+ if ((error = biowait(bp)) != 0)
+ goto done;
+
+ if (((int *)bp->b_data)[0] == 0x01084025 &&
+ ((int *)bp->b_data)[1] == 0x01294825) {
+ goto nodoslabel;
+ }
+ /* XXX how do we check veracity/bounds of this? */
+ bcopy(bp->b_data + DOSPARTOFF, dp,
+ NDOSPART * sizeof(*dp));
+
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ == DOSPTYP_OPENBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ == DOSPTYP_FREEBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ == DOSPTYP_NETBSD)
+ ourpart = i;
+
+ if (ourpart != -1) {
+ dp2 = &dp[ourpart];
+
+ /*
+ * need sector address for SCSI/IDE,
+ * cylinder for ESDI/ST506/RLL
+ */
+ dospartoff = get_le(&dp2->dp_start);
+ cyl = DPCYL(dp2->dp_scyl, dp2->dp_ssect);
+ }
+ }
+
+nodoslabel:
+ /* next, dig out disk label */
+ bp->b_blkno = dospartoff + LABELSECTOR;
+ bp->b_cylin = cyl;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ (*strat)(bp);
+
+ /* if successful, locate disk label within block and validate */
+ if ((error = biowait(bp)) != 0)
+ goto done;
+ for (dlp = (struct disklabel *)bp->b_data;
+ dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp));
+ dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
+ if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
+ dkcksum(dlp) == 0) {
+ *dlp = *lp;
+ bp->b_flags = B_BUSY | B_WRITE;
+ (*strat)(bp);
+ error = biowait(bp);
+ goto done;
+ }
+ }
+
+ /* Write it in the regular place. */
+ *(struct disklabel *)bp->b_data = *lp;
+ bp->b_flags = B_BUSY | B_WRITE;
+ (*strat)(bp);
+ error = biowait(bp);
+ goto done;
+
+done:
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ return (error);
+}
+
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(bp, lp, osdep, wlabel)
+ struct buf *bp;
+ struct disklabel *lp;
+ struct cpu_disklabel *osdep;
+ int wlabel;
+{
+#define blockpersec(count, lp) ((count) * (((lp)->d_secsize) / DEV_BSIZE))
+ struct partition *p = lp->d_partitions + DISKPART(bp->b_dev);
+ int labelsector = blockpersec(lp->d_partitions[RAW_PART].p_offset, lp) +
+ LABELSECTOR;
+ int sz = howmany(bp->b_bcount, DEV_BSIZE);
+
+ if (bp->b_blkno + sz > blockpersec(p->p_size, lp)) {
+ sz = blockpersec(p->p_size, lp) - bp->b_blkno;
+ if (sz == 0) {
+ /* If exactly at end of disk, return EOF. */
+ bp->b_resid = bp->b_bcount;
+ goto done;
+ }
+ if (sz < 0) {
+ /* If past end of disk, return EINVAL. */
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ /* Otherwise, truncate request. */
+ bp->b_bcount = sz << DEV_BSHIFT;
+ }
+
+ /* Overwriting disk label? */
+ if (bp->b_blkno + blockpersec(p->p_offset, lp) <= labelsector &&
+#if LABELSECTOR != 0
+ bp->b_blkno + blockpersec(p->p_offset, lp) + sz > labelsector &&
+#endif
+ (bp->b_flags & B_READ) == 0 && !wlabel) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+
+ /* calculate cylinder for disksort to order transfers with */
+ bp->b_cylin = (bp->b_blkno + blockpersec(p->p_offset, lp)) /
+ lp->d_secpercyl;
+ return (1);
+
+bad:
+ bp->b_flags |= B_ERROR;
+done:
+ return (0);
+}
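+/*
+ * Typical use, sketched only (the softc fields are illustrative,
+ * not defined in this file): a driver's strategy routine clips the
+ * request against the open partition before queueing it:
+ *
+ *	if (bounds_check_with_label(bp, sc->sc_dk.dk_label,
+ *	    sc->sc_dk.dk_cpulabel, (sc->sc_flags & SC_WLABEL) != 0) <= 0)
+ *		goto done;
+ *
+ * A zero return means the transfer was rejected or ends exactly at
+ * the end of the partition; b_error/b_resid are already set, so the
+ * caller just finishes the buffer with biodone().
+ */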
diff --git a/sys/arch/mips64/mips64/exception.S b/sys/arch/mips64/mips64/exception.S
new file mode 100644
index 00000000000..61d20a8fba7
--- /dev/null
+++ b/sys/arch/mips64/mips64/exception.S
@@ -0,0 +1,681 @@
+/* $OpenBSD: exception.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code handles exceptions and dispatches to the
+ * correct handler depending on the exception type.
+ *
+ * Exceptions are directed to the following addresses:
+ * 0xffffffffbfc00000 Reset, NMI etc. Not handled by the kernel.
+ * 0xffffffff80000000 TLB refill, not in exception.
+ * 0xffffffff80000080 XTLB refill, not in exception.
+ * 0xffffffffa0000100 Cache errors.
+ * 0xffffffff80000180 Interrupts. Same as next.
+ * 0xffffffff80000180 Everything else...
+ */
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+
+#include "assym.h"
+
+/*
+ * DDB stack backtrace uses 'jr ra' to find function starts.
+ * Put this after functions that do not end with 'jr ra'.
+ */
+#define DDB_BARRIER \
+ jr ra; nop
+
+ .set mips3
+
+k_exception_table:
+ PTR_VAL k_intr
+ PTR_VAL k_general
+ PTR_VAL k_tlb_inv
+ PTR_VAL k_tlb_inv
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+ PTR_VAL k_general
+
+u_exception_table:
+ PTR_VAL u_intr
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+ PTR_VAL u_general
+
+ .set noreorder # Noreorder is default style!
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*---------------------------------------------------------------- exception
+ * General exception handler dispatcher. This code is copied
+ * to the vector area and must thus be PIC and less than 128
+ * bytes long to fit. Only k0 and k1 may be used at this time.
+ */
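+/*
+ * In rough C terms (illustrative only) the dispatch below is:
+ *
+ *	code  = cause & CR_EXC_CODE;	-- ExcCode lives in bits 6..2,
+ *					   so the value is already * 4
+ *	table = (status & SR_KSU_USER) ?
+ *	    u_exception_table : k_exception_table;
+ *	handler = *(table + code), with code added twice when the
+ *	    table entries are 8-byte pointers (_MIPS_SZPTR == 64);
+ *	jump to handler.
+ */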
+ .globl exception
+exception:
+ .set noat
+#if 0
+ dmfc0 k0, COP_0_EXC_PC
+ PTR_L k1, tlbtrcptr
+ PTR_S k0, 0(k1)
+ dmfc0 k0, COP_0_BAD_VADDR
+ PTR_S k0, REGSZ(k1)
+ mfc0 k0, COP_0_CAUSE_REG
+ PTR_S k0, 2*REGSZ(k1)
+ mfc0 k0, COP_0_STATUS_REG
+ PTR_S k0, 3*REGSZ(k1)
+
+ PTR_L k1, tlbtrcptr
+ PTR_ADDU k1, 4*REGSZ
+ LI k0, 0x100
+ nor k0, zero, k0
+ and k1, k0
+ LA k0, tlbtrcptr
+ PTR_S k1, 0(k0)
+#endif
+ mfc0 k0, COP_0_STATUS_REG
+ mfc0 k1, COP_0_CAUSE_REG
+ and k0, k0, SR_KSU_USER
+ beqz k0, k_exception # Kernel mode
+ and k1, k1, CR_EXC_CODE
+
+ LA k0, u_exception_table
+ PTR_ADDU k0, k0, k1
+#if (_MIPS_SZPTR == 64)
+ PTR_ADDU k0, k0, k1 # yes, twice...
+#endif
+ PTR_L k0, 0(k0)
+ j k0
+ nop
+
+k_exception:
+ LA k0, k_exception_table
+ PTR_ADDU k0, k0, k1
+#if (_MIPS_SZPTR == 64)
+ PTR_ADDU k0, k0, k1 # yes, twice...
+#endif
+ PTR_L k0, 0(k0)
+ j k0
+ nop
+ .set at
+ .globl e_exception
+e_exception:
+
+ DDB_BARRIER
+
+/*---------------------------------------------------------------- k_intr
+ * Handle an interrupt in kernel mode. This is easy since we
+ * just need to save away the 'save' registers and state.
+ * State is saved on kernel stack.
+ */
+
+NNON_LEAF(k_intr, FRAMESZ(KERN_EXC_FRAME_SIZE), ra)
+ .set noat
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(KERN_EXC_FRAME_SIZE))
+ PTR_SUB k0, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
+ SAVE_CPU(k0, CF_RA_OFFS)
+#if 0
+ cfc0 v1, COP_0_ICR
+ SAVE_REG(v1, IC, k0, CF_RA_OFFS)
+#endif
+ .set at
+ move sp, k0 # Already on kernel stack
+ LA gp, _gp
+ and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ PTR_S a0, 0(sp)
+ jal interrupt
+ PTR_S a3, CF_RA_OFFS + KERN_REG_SIZE(sp)
+
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+
+ PTR_L a0, CF_RA_OFFS + KERN_REG_SIZE(sp)
+ .set noat
+#if 0
+ RESTORE_REG(t0, IC, sp, CF_RA_OFFS)
+ ctc0 t0, COP_0_ICR
+#endif
+ RESTORE_CPU(sp, CF_RA_OFFS)
+ PTR_ADDU sp, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
+ sync
+ eret
+ DDB_BARRIER
+ .set at
+END(k_intr)
+
+/*---------------------------------------------------------------- u_intr
+ * Handle an interrupt in user mode. Save the relevant user
+ * registers into the u.u_pcb struct. This will allow us
+ * to preempt the interrupted process. A full save is
+ * deferred until a switch() really is required.
+ */
+NNON_LEAF(u_intr, FRAMESZ(CF_SZ), ra)
+ .set noat
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_L k0, curprocpaddr
+ SAVE_CPU(k0, 0)
+#if 0
+ cfc0 v1, COP_0_ICR
+ SAVE_REG(v1, IC, k0, 0)
+#endif
+ PTR_ADDU sp, k0, USPACE-FRAMESZ(CF_SZ)
+ LA gp, _gp
+ .set at
+ and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ PTR_S a0, 0(sp)
+ jal interrupt
+ PTR_S a3, CF_RA_OFFS(sp) # for debugging
+
+ lw v0, astpending # any pending ASTs?
+ beq v0, zero, 4f
+ nop
+
+ PTR_L t0, curprocpaddr
+ SAVE_CPU_SREG(t0, 0)
+
+#ifdef PERFCNTRS
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf save.
+
+ mfc0 v0, COP_0_PC_CTRL
+ PTR_L t0, curproc
+ sw v0, P_PC_CTRL(t0)
+ dmfc0 v0, COP_0_WATCH_1
+ dmfc0 v1, COP_0_WATCH_2
+ sd v0, P_WATCH_1(t0)
+ sd v1, P_WATCH_2(t0)
+ mfc0 v0, COP_0_WATCH_M
+ mfc0 v1, COP_0_PC_COUNT
+ sw v0, P_WATCH_M(t0)
+ sw v1, P_PC_COUNT(t0)
+ mtc0 zero, COP_0_PC_CTRL
+ dmtc0 zero, COP_0_WATCH_1
+ dmtc0 zero, COP_0_WATCH_2
+ nop;nop;nop;nop
+1:
+#endif
+ jal softintr
+ nop
+/*
+ * Restore user registers and return. NOTE: interrupts are enabled.
+ */
+#ifdef PERFCNTRS
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf setup.
+
+ PTR_L t1, curproc # set up rm7k.
+ ld v0, P_WATCH_1(t1)
+ dmtc0 v0, COP_0_WATCH_1
+ ld v0, P_WATCH_2(t1)
+ dmtc0 v0, COP_0_WATCH_2
+ lw v0, P_WATCH_M(t1)
+ mtc0 v0, COP_0_WATCH_M
+ lw v0, P_PC_CTRL(t1)
+ lw v1, P_PC_COUNT(t1)
+ nop;nop
+ mtc0 v0, COP_0_PC_CTRL
+ nop;nop;nop;nop
+ mtc0 v1, COP_0_PC_COUNT
+ nop;nop;nop;nop
+1:
+#endif
+ PTR_L t0, curprocpaddr
+ RESTORE_CPU_SREG(t0, 0)
+
+4:
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ ori t0, SR_EXL # restoring to user mode.
+ mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
+ ITLBNOPFIX
+
+ PTR_L k0, curprocpaddr
+ RESTORE_REG(a3, CPL, k0, 0)
+ sw a3, cpl
+ .set noat
+ RESTORE_REG(a0, PC, k0, 0)
+#if 0
+ RESTORE_REG(t0, IC, k0, 0)
+ ctc0 t0, COP_0_ICR
+#endif
+ RESTORE_CPU(k0, 0)
+ RESTORE_REG(sp, SP, k0, 0)
+ LI k0, 0
+ LI k1, 0
+ sync
+ eret
+ DDB_BARRIER
+ .set at
+END(u_intr)
+
+/*---------------------------------------------------------------- set_sint
+ * Atomic ipending update
+ */
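+/*
+ * The ll/sc loop below is the usual atomic read-modify-write:
+ * in effect `ipending |= a0', retried whenever the store-conditional
+ * fails because the word was written between the ll and the sc.
+ */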
+LEAF(set_sint)
+ LA v1, ipending
+1:
+ ll v0, 0(v1)
+ or v0, a0
+ sc v0, 0(v1)
+ beqz v0, 1b
+ nop # fill the beqz delay slot; a jump must not sit in it
+ j ra
+ nop
+END(set_sint)
+
+/*---------------------------------------------------------------- k_general
+ * Handle a kernel general trap. This is very much like
+ * k_intr except that we call trap() instead of interrupt().
+ */
+
+NNON_LEAF(k_general, FRAMESZ(KERN_EXC_FRAME_SIZE), ra)
+ .set noat
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(KERN_EXC_FRAME_SIZE))
+ PTR_SUB k0, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
+ SAVE_CPU(k0, CF_RA_OFFS)
+#if 0
+ cfc0 v1, COP_0_ICR
+ SAVE_REG(v1, IC, k0, CF_RA_OFFS)
+#endif
+#if defined(DDB)
+ SAVE_CPU_SREG(k0, CF_RA_OFFS)
+#endif
+ .set at
+ move sp, k0 # Already on kernel stack
+ LA gp, _gp
+ and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ PTR_S a0, 0(sp)
+ jal trap
+ PTR_S a3, CF_RA_OFFS + KERN_REG_SIZE(sp)
+
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+
+ move a0, v0
+ .set noat
+#if 0
+ RESTORE_REG(t0, IC, sp, CF_RA_OFFS)
+ ctc0 t0, COP_0_ICR
+#endif
+ RESTORE_CPU(sp, CF_RA_OFFS)
+ PTR_ADDU sp, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
+ sync
+ eret
+ DDB_BARRIER
+ .set at
+END(k_general)
+
+/*---------------------------------------------------------------- u_general
+ * Handle a user general trap.
+ */
+NNON_LEAF(u_general, FRAMESZ(CF_SZ), ra)
+ .set noat
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+
+ PTR_L k0, curprocpaddr
+ SAVE_CPU(k0, 0)
+#if 0
+ cfc0 v1, COP_0_ICR
+ SAVE_REG(v1, IC, k0, 0)
+#endif
+ SAVE_CPU_SREG(k0, 0)
+ PTR_ADDU sp, k0, USPACE-FRAMESZ(CF_SZ)
+ LA gp, _gp
+ .set at
+ and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+#ifdef PERFCNTRS
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf save.
+
+ mfc0 v0, COP_0_PC_CTRL
+ PTR_L t0, curproc
+ sw v0, P_PC_CTRL(t0)
+ dmfc0 v0, COP_0_WATCH_1
+ dmfc0 v1, COP_0_WATCH_2
+ sd v0, P_WATCH_1(t0)
+ sd v1, P_WATCH_2(t0)
+ mfc0 v0, COP_0_WATCH_M
+ mfc0 v1, COP_0_PC_COUNT
+ sw v0, P_WATCH_M(t0)
+ sw v1, P_PC_COUNT(t0)
+ mtc0 zero, COP_0_PC_CTRL
+ nop;nop;nop;nop
+1:
+#endif
+
+ jal trap
+ PTR_S a3, CF_RA_OFFS(sp) # for debugging
+
+#ifdef PERFCNTRS
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf setup.
+
+ LOAD t0, curproc # set up rm7k.
+ ld v0, P_WATCH_1(t0)
+ dmtc0 v0, COP_0_WATCH_1
+ ld v0, P_WATCH_2(t0)
+ dmtc0 v0, COP_0_WATCH_2
+ lw v0, P_WATCH_M(t0)
+ mtc0 v0, COP_0_WATCH_M
+ lw v0, P_PC_CTRL(t0)
+ lw v1, P_PC_COUNT(t0)
+ nop;nop
+ mtc0 v0, COP_0_PC_CTRL
+ nop;nop;nop;nop
+ mtc0 v1, COP_0_PC_COUNT
+ nop;nop;nop;nop
+1:
+#endif
+
+4:
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ ori t0, SR_EXL # restoring to user mode.
+ mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
+ ITLBNOPFIX
+
+ PTR_L k0, curprocpaddr
+ RESTORE_REG(a3, CPL, k0, 0)
+ sw a3, cpl
+ .set noat
+ RESTORE_CPU_SREG(k0, 0)
+ RESTORE_REG(a0, PC, k0, 0)
+#if 0
+ RESTORE_REG(t0, IC, k0, 0)
+ ctc0 t0, COP_0_ICR
+#endif
+ RESTORE_CPU(k0, 0)
+ RESTORE_REG(sp, SP, k0, 0)
+ LI k0, 0
+ LI k1, 0
+ sync
+ eret
+ DDB_BARRIER
+ .set at
+END(u_general)
+
+#ifdef notyet
+/*---------------------------------------------------------------- u_syscall
+ * Syscall exceptions are special such that they can be
+ * optimized by not saving more than what is really needed.
+ * Syscalls are actually 'function calls' from the user
+ * program's point of view and thus it does not expect us to
+ * save away all temporary registers etc. Just save state and
+ * args to avoid a lot of overhead.
+ */
+NNON_LEAF(u_syscall, FRAMESZ(CF_SZ), ra)
+ .set noat
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+
+ REG_S a0, UADDR+U_PCB_REGS+(A0 * REGSZ)
+ REG_S a1, UADDR+U_PCB_REGS+(A1 * REGSZ)
+ REG_S a2, UADDR+U_PCB_REGS+(A2 * REGSZ)
+ REG_S a3, UADDR+U_PCB_REGS+(A3 * REGSZ)
+ mfc0 a0, COP_0_STATUS_REG # First arg is the status reg.
+ mfc0 a1, COP_0_CAUSE_REG # Second arg is the cause reg.
+ dmfc0 a3, COP_0_EXC_PC # Fourth arg is the pc.
+ REG_S sp, UADDR+U_PCB_REGS+(SP * REGSZ)
+ LA sp, KERNELSTACK - FRAMESZ(CF_SZ) # switch to kernel SP
+ REG_S ra, UADDR+U_PCB_REGS+(RA * REGSZ)
+ REG_S a0, UADDR+U_PCB_REGS+(SR * REGSZ)
+ REG_S a1, UADDR+U_PCB_REGS+(CAUSE * REGSZ)
+ REG_S a3, UADDR+U_PCB_REGS+(PC * REGSZ)
+ REG_S a3, CF_RA_OFFS(sp) # for debugging
+ LA gp, _gp # switch to kernel GP
+ lw a3, cpl
+ sw a3, UADDR+U_PCB_REGS+(CPL * REGSZ)
+ .set at
+# Turn off fpu and enter kernel mode
+ and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_KSU_MASK | SR_INT_ENAB)
+ mtc0 t0, COP_0_STATUS_REG
+ li a0, UADDR+U_PCB_REGS
+ ITLBNOPFIX
+/*
+ * If CPU is a RM7000 save away performance stuff.
+ */
+#if 0
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf save.
+ mfc0 v0, COP_0_PC_CTRL
+ lw t0, curproc
+ sw v0, P_PC_CTRL(t0)
+ dmfc0 v0, COP_0_WATCH_1
+ dmfc0 v1, COP_0_WATCH_2
+ sd v0, P_WATCH_1(t0)
+ sd v1, P_WATCH_2(t0)
+ mfc0 v0, COP_0_WATCH_M
+ mfc0 v1, COP_0_PC_COUNT
+ sw v0, P_WATCH_M(t0)
+ sw v1, P_PC_COUNT(t0)
+ mtc0 zero, COP_0_PC_CTRL
+ dmtc0 zero, COP_0_WATCH_1
+ dmtc0 zero, COP_0_WATCH_2
+1:
+#endif
+
+ jal trap
+ nop
+
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ ori t0, SR_EXL
+ mtc0 t0, COP_0_STATUS_REG # set exception level
+ ITLBNOPFIX
+
+#if 0
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do perf setup.
+
+ PTR_L t0, curproc # set up rm7k.
+ ld v0, P_WATCH_1(t0)
+ dmtc0 v0, COP_0_WATCH_1
+ ld v0, P_WATCH_2(t0)
+ dmtc0 v0, COP_0_WATCH_2
+ lw v0, P_WATCH_M(t0)
+ mtc0 v0, COP_0_WATCH_M
+ lw v0, P_PC_CTRL(t0)
+ lw v1, P_PC_COUNT(t0)
+ nop;nop
+ mtc0 v0, COP_0_PC_CTRL
+ nop;nop;nop;nop
+ mtc0 v1, COP_0_PC_COUNT
+ nop;nop;nop;nop
+1:
+#endif
+ lw a3, UADDR+U_PCB_REGS+(CPL * REGSZ)
+ sw a3, cpl
+
+ .set noat
+
+ REG_L a0, UADDR+U_PCB_REGS+(SR * REGSZ)
+ mtc0 a0, COP_0_STATUS_REG # still at exception level
+ REG_L a0, UADDR+U_PCB_REGS+(PC * REGSZ)
+ REG_L v0, UADDR+U_PCB_REGS+(V0 * REGSZ)
+ dmtc0 a0, COP_0_EXC_PC # set return address
+ REG_L v1, UADDR+U_PCB_REGS+(V1 * REGSZ)
+ REG_L gp, UADDR+U_PCB_REGS+(GP * REGSZ)
+ REG_L sp, UADDR+U_PCB_REGS+(SP * REGSZ)
+ REG_L ra, UADDR+U_PCB_REGS+(RA * REGSZ)
+ sync
+ eret
+ DDB_BARRIER
+ .set at
+END(u_syscall)
+#endif
+
+
+/*-------------------------------------------------------------- proc_trampoline
+ * Set up for and return to user mode: run the function in s0 with
+ * argument s1, then restore user state and return with eret.
+ */
+LEAF(proc_trampoline)
+ sw zero, cpl # lower to spl0
+ lw t0, ipending
+ beq t0, zero, 0f
+ nop
+
+ jal setsoftintr0 # process any pending ints
+ nop
+0:
+ jal s0
+ move a0,s1 # set up for return to user.
+
+#if 0
+ lw t0, cpu_is_rm7k
+ beqz t0, 1f # not an RM7K. Don't do IC reg.
+
+ LOAD t0, curproc # set up rm7k.
+ ld v0, P_WATCH_1(t0)
+ dmtc0 v0, COP_0_WATCH_1
+ ld v0, P_WATCH_2(t0)
+ dmtc0 v0, COP_0_WATCH_2
+ lw v0, P_WATCH_M(t0)
+ mtc0 v0, COP_0_WATCH_M
+ lw v0, P_PC_CTRL(t0)
+ lw v1, P_PC_COUNT(t0)
+ nop;nop
+ mtc0 v0, COP_0_PC_CTRL
+ nop;nop;nop;nop
+ mtc0 v1, COP_0_PC_COUNT
+ nop;nop;nop;nop
+ li v0, IC_INT_PERF
+ ctc0 v0, COP_0_ICR # enable perfcntr interrupt.
+1:
+#endif
+ mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
+ li t1, ~SR_INT_ENAB
+ and t0, t0, t1
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ ori t0, SR_EXL # restoring to user mode.
+ mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
+ ITLBNOPFIX
+
+ .set noat
+ PTR_L k0, curprocpaddr
+ RESTORE_CPU_SREG(k0, 0)
+ RESTORE_REG(a0, PC, k0, 0)
+#if 0
+ RESTORE_REG(t0, IC, k0, 0)
+ ctc0 t0, COP_0_ICR
+#endif
+ RESTORE_CPU(k0, 0)
+ RESTORE_REG(sp, SP, k0, 0)
+ LI k0, 0
+ LI k1, 0
+ sync
+ eret
+ .set at
+END(proc_trampoline)
diff --git a/sys/arch/mips64/mips64/fp.S b/sys/arch/mips64/mips64/fp.S
new file mode 100644
index 00000000000..cd1a04398e2
--- /dev/null
+++ b/sys/arch/mips64/mips64/fp.S
@@ -0,0 +1,3612 @@
+/* $OpenBSD: fp.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)fp.s 8.1 (Berkeley) 6/10/93
+ * $Id: fp.S,v 1.1 2004/08/06 20:56:03 pefo Exp $
+ */
+
+/*
+ * Standard header stuff.
+ */
+
+#include <machine/regdef.h>
+#include <machine/asm.h>
+#include <machine/regnum.h>
+#include <machine/cpu.h>
+
+#include "assym.h"
+
+#define SEXP_INF 0xff
+#define DEXP_INF 0x7ff
+#define SEXP_BIAS 127
+#define DEXP_BIAS 1023
+#define SEXP_MIN -126
+#define DEXP_MIN -1022
+#define SEXP_MAX 127
+#define DEXP_MAX 1023
+#define WEXP_MAX 30 /* maximum unbiased exponent for int */
+#define WEXP_MIN -1 /* minimum unbiased exponent for int */
+#define SFRAC_BITS 23
+#define DFRAC_BITS 52
+#define SIMPL_ONE 0x00800000
+#define DIMPL_ONE 0x00100000
+#define SLEAD_ZEROS 31 - 23
+#define DLEAD_ZEROS 31 - 20
+#define STICKYBIT 1
+#define GUARDBIT 0x80000000
+#define SSIGNAL_NAN 0x00400000
+#define DSIGNAL_NAN 0x00080000
+#define SQUIET_NAN 0x003fffff
+#define DQUIET_NAN0 0x0007ffff
+#define DQUIET_NAN1 0xffffffff
+#define INT_MIN 0x80000000
+#define INT_MAX 0x7fffffff
+
+#define COND_UNORDERED 0x1
+#define COND_EQUAL 0x2
+#define COND_LESS 0x4
+#define COND_SIGNAL 0x8
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsEmulateFP --
+ *
+ * Emulate unimplemented floating point operations.
+ * This routine should only be called by MipsFPInterrupt().
+ *
+ * MipsEmulateFP(instr)
+ * unsigned instr;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Floating point registers are modified according to instruction.
+ *
+ *----------------------------------------------------------------------------
+ */
+NON_LEAF(MipsEmulateFP, FRAMESZ(CF_SZ), ra)
+ PTR_SUB sp, sp, FRAMESZ(CF_SZ)
+ PTR_S ra, CF_RA_OFFS(sp)
+/*
+ * Decode the FMT field (bits 24-21) and FUNCTION field (bits 5-0).
+ */
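+/*
+ * Roughly (illustrative only):
+ *	fmt     = (instr >> 21) & 0xf;	-- 0 = S, 1 = D, 4 = W
+ *	func    = instr & 0x3f;
+ *	handler = func_fmt_tbl[func * 8 + fmt];
+ */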
+ srl v0, a0, 21 - 2 # get FMT field
+ and v0, v0, 0xF << 2 # mask FMT field
+ and v1, a0, 0x3F # mask FUNC field
+ sll v1, v1, 5 # align for table lookup
+ bgt v0, 4 << 2, ill # illegal format
+
+ or v1, v1, v0
+ cfc1 a1, FPC_CSR # get exception register
+ lw a3, func_fmt_tbl(v1) # switch on FUNC & FMT
+ and a1, a1, ~FPC_EXCEPTION_UNIMPL # clear exception
+ ctc1 a1, FPC_CSR
+ j a3
+
+ .rdata
+func_fmt_tbl:
+ .word add_s # 0
+ .word add_d # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word ill # 0
+ .word sub_s # 1
+ .word sub_d # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word ill # 1
+ .word mul_s # 2
+ .word mul_d # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word ill # 2
+ .word div_s # 3
+ .word div_d # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 3
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word ill # 4
+ .word abs_s # 5
+ .word abs_d # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word ill # 5
+ .word mov_s # 6
+ .word mov_d # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word ill # 6
+ .word neg_s # 7
+ .word neg_d # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 7
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 8
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 9
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 10
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 11
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 12
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 13
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 14
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 15
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 16
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 17
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 18
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 19
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 20
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 21
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 22
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 23
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 24
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 25
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 26
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 27
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 28
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 29
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 30
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 31
+ .word ill # 32
+ .word cvt_s_d # 32
+ .word ill # 32
+ .word ill # 32
+ .word cvt_s_w # 32
+ .word ill # 32
+ .word ill # 32
+ .word ill # 32
+ .word cvt_d_s # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 33
+ .word cvt_d_w # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 33
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 34
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word ill # 35
+ .word cvt_w_s # 36
+ .word cvt_w_d # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 36
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 37
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 38
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 39
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 40
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 41
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 42
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 43
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 44
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 45
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 46
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word ill # 47
+ .word cmp_s # 48
+ .word cmp_d # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word ill # 48
+ .word cmp_s # 49
+ .word cmp_d # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word ill # 49
+ .word cmp_s # 50
+ .word cmp_d # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word ill # 50
+ .word cmp_s # 51
+ .word cmp_d # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word ill # 51
+ .word cmp_s # 52
+ .word cmp_d # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word ill # 52
+ .word cmp_s # 53
+ .word cmp_d # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word ill # 53
+ .word cmp_s # 54
+ .word cmp_d # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word ill # 54
+ .word cmp_s # 55
+ .word cmp_d # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word ill # 55
+ .word cmp_s # 56
+ .word cmp_d # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word ill # 56
+ .word cmp_s # 57
+ .word cmp_d # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word ill # 57
+ .word cmp_s # 58
+ .word cmp_d # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word ill # 58
+ .word cmp_s # 59
+ .word cmp_d # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word ill # 59
+ .word cmp_s # 60
+ .word cmp_d # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word ill # 60
+ .word cmp_s # 61
+ .word cmp_d # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word ill # 61
+ .word cmp_s # 62
+ .word cmp_d # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word ill # 62
+ .word cmp_s # 63
+ .word cmp_d # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .word ill # 63
+ .text
+
+/*
+ * Single precision subtract.
+ */
+sub_s:
+ jal get_ft_fs_s
+ xor t4, t4, 1 # negate FT sign bit
+ b add_sub_s
+/*
+ * Single precision add.
+ */
+add_s:
+ jal get_ft_fs_s
+add_sub_s:
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t5, SEXP_INF, result_fs_s # if FT is not inf, result=FS
+ bne t2, zero, result_fs_s # if FS is NAN, result is FS
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ bne t0, t4, invalid_s # both infinities same sign?
+ b result_fs_s # result is in FS
+1:
+ beq t5, SEXP_INF, result_ft_s # if FT is inf, result=FT
+ bne t1, zero, 4f # is FS a denormalized num?
+ beq t2, zero, 3f # is FS zero?
+ bne t5, zero, 2f # is FT a denormalized num?
+ beq t6, zero, result_fs_s # FT is zero, result=FS
+ jal renorm_fs_s
+ jal renorm_ft_s
+ b 5f
+2:
+ jal renorm_fs_s
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+ b 5f
+3:
+ bne t5, zero, result_ft_s # if FT != 0, result=FT
+ bne t6, zero, result_ft_s
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_s
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_s
+4:
+ bne t5, zero, 2f # is FT a denormalized num?
+ beq t6, zero, result_fs_s # FT is zero, result=FS
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+ jal renorm_ft_s
+ b 5f
+2:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+/*
+ * Perform the addition.
+ */
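+/*
+ * (Roughly: both fractions now carry their implied one bit and an
+ * unbiased exponent.  The smaller operand is shifted right by the
+ * exponent difference, the bits shifted out are collected in t8 as
+ * sticky bits, then the fractions are added, or subtracted when the
+ * signs differ, and the result falls through to norm_s.)
+ */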
+5:
+ move t8, zero # no shifted bits (sticky reg)
+ beq t1, t5, 4f # no shift needed
+ subu v0, t1, t5 # v0 = difference of exponents
+ move v1, v0 # v1 = abs(difference)
+ bge v0, zero, 1f
+ negu v1
+1:
+ ble v1, SFRAC_BITS+2, 2f # is difference too great?
+ li t8, STICKYBIT # set the sticky bit
+ bge v0, zero, 1f # check which exp is larger
+ move t1, t5 # result exp is FTs
+ move t2, zero # FSs fraction shifted is zero
+ b 4f
+1:
+ move t6, zero # FTs fraction shifted is zero
+ b 4f
+2:
+ li t9, 32 # compute 32 - abs(exp diff)
+ subu t9, t9, v1
+ bgt v0, zero, 3f # if FS > FT, shift FTs frac
+ move t1, t5 # FT > FS, result exp is FTs
+ sll t8, t2, t9 # save bits shifted out
+ srl t2, t2, v1 # shift FSs fraction
+ b 4f
+3:
+ sll t8, t6, t9 # save bits shifted out
+ srl t6, t6, v1 # shift FTs fraction
+4:
+ bne t0, t4, 1f # if signs differ, subtract
+ addu t2, t2, t6 # add fractions
+ b norm_s
+1:
+ blt t2, t6, 3f # subtract larger from smaller
+ bne t2, t6, 2f # if same, result=0
+ move t1, zero # result=0
+ move t2, zero
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_s
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_s
+2:
+ sltu t9, zero, t8 # compute t2:zero - t6:t8
+ subu t8, zero, t8
+ subu t2, t2, t6 # subtract fractions
+ subu t2, t2, t9 # subtract borrow
+ b norm_s
+3:
+ move t0, t4 # sign of result = FTs
+ sltu t9, zero, t8 # compute t6:zero - t2:t8
+ subu t8, zero, t8
+ subu t2, t6, t2 # subtract fractions
+ subu t2, t2, t9 # subtract borrow
+ b norm_s
+
+/*
+ * Double precision subtract.
+ */
+sub_d:
+ jal get_ft_fs_d
+ xor t4, t4, 1 # negate sign bit
+ b add_sub_d
+/*
+ * Double precision add.
+ */
+add_d:
+ jal get_ft_fs_d
+add_sub_d:
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t5, DEXP_INF, result_fs_d # if FT is not inf, result=FS
+ bne t2, zero, result_fs_d # if FS is NAN, result is FS
+ bne t3, zero, result_fs_d
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ bne t0, t4, invalid_d # both infinities same sign?
+ b result_fs_d # result is in FS
+1:
+ beq t5, DEXP_INF, result_ft_d # if FT is inf, result=FT
+ bne t1, zero, 4f # is FS a denormalized num?
+ bne t2, zero, 1f # is FS zero?
+ beq t3, zero, 3f
+1:
+ bne t5, zero, 2f # is FT a denormalized num?
+ bne t6, zero, 1f
+ beq t7, zero, result_fs_d # FT is zero, result=FS
+1:
+ jal renorm_fs_d
+ jal renorm_ft_d
+ b 5f
+2:
+ jal renorm_fs_d
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+ b 5f
+3:
+ bne t5, zero, result_ft_d # if FT != 0, result=FT
+ bne t6, zero, result_ft_d
+ bne t7, zero, result_ft_d
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_d
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_d
+4:
+ bne t5, zero, 2f # is FT a denormalized num?
+ bne t6, zero, 1f
+ beq t7, zero, result_fs_d # FT is zero, result=FS
+1:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+ jal renorm_ft_d
+ b 5f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+/*
+ * Perform the addition.
+ */
+5:
+ move t8, zero # no shifted bits (sticky reg)
+ beq t1, t5, 4f # no shift needed
+ subu v0, t1, t5 # v0 = difference of exponents
+ move v1, v0 # v1 = abs(difference)
+ bge v0, zero, 1f
+ negu v1
+1:
+ ble v1, DFRAC_BITS+2, 2f # is difference too great?
+ li t8, STICKYBIT # set the sticky bit
+ bge v0, zero, 1f # check which exp is larger
+ move t1, t5 # result exp is FTs
+ move t2, zero # FSs fraction shifted is zero
+ move t3, zero
+ b 4f
+1:
+ move t6, zero # FTs fraction shifted is zero
+ move t7, zero
+ b 4f
+2:
+ li t9, 32
+ bge v0, zero, 3f # if FS > FT, shift FTs frac
+ move t1, t5 # FT > FS, result exp is FTs
+ blt v1, t9, 1f # shift right by < 32?
+ subu v1, v1, t9
+ subu t9, t9, v1
+ sll t8, t2, t9 # save bits shifted out
+ sltu t9, zero, t3 # don't lose any one bits
+ or t8, t8, t9 # save sticky bit
+ srl t3, t2, v1 # shift FSs fraction
+ move t2, zero
+ b 4f
+1:
+ subu t9, t9, v1
+ sll t8, t3, t9 # save bits shifted out
+ srl t3, t3, v1 # shift FSs fraction
+ sll t9, t2, t9 # save bits shifted out of t2
+ or t3, t3, t9 # and put into t3
+ srl t2, t2, v1
+ b 4f
+3:
+ blt v1, t9, 1f # shift right by < 32?
+ subu v1, v1, t9
+ subu t9, t9, v1
+ sll t8, t6, t9 # save bits shifted out
+ srl t7, t6, v1 # shift FTs fraction
+ move t6, zero
+ b 4f
+1:
+ subu t9, t9, v1
+ sll t8, t7, t9 # save bits shifted out
+ srl t7, t7, v1 # shift FTs fraction
+ sll t9, t6, t9 # save bits shifted out of t6
+ or t7, t7, t9 # and put into t7
+ srl t6, t6, v1
+4:
+ bne t0, t4, 1f # if signs differ, subtract
+ addu t3, t3, t7 # add fractions
+ sltu t9, t3, t7 # compute carry
+ addu t2, t2, t6 # add fractions
+ addu t2, t2, t9 # add carry
+ b norm_d
+1:
+ blt t2, t6, 3f # subtract larger from smaller
+ bne t2, t6, 2f
+ bltu t3, t7, 3f
+ bne t3, t7, 2f # if same, result=0
+ move t1, zero # result=0
+ move t2, zero
+ move t3, zero
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ bne v0, FPC_ROUND_RM, 1f # round to -infinity?
+ or t0, t0, t4 # compute result sign
+ b result_fs_d
+1:
+ and t0, t0, t4 # compute result sign
+ b result_fs_d
+2:
+ beq t8, zero, 1f # compute t2:t3:zero - t6:t7:t8
+ subu t8, zero, t8
+ sltu v0, t3, 1 # compute borrow out
+ subu t3, t3, 1 # subtract borrow
+ subu t2, t2, v0
+1:
+ sltu v0, t3, t7
+ subu t3, t3, t7 # subtract fractions
+ subu t2, t2, t6 # subtract fractions
+ subu t2, t2, v0 # subtract borrow
+ b norm_d
+3:
+ move t0, t4 # sign of result = FTs
+ beq t8, zero, 1f # compute t6:t7:zero - t2:t3:t8
+ subu t8, zero, t8
+ sltu v0, t7, 1 # compute borrow out
+ subu t7, t7, 1 # subtract borrow
+ subu t6, t6, v0
+1:
+ sltu v0, t7, t3
+ subu t3, t7, t3 # subtract fractions
+ subu t2, t6, t2 # subtract fractions
+ subu t2, t2, v0 # subtract borrow
+ b norm_d
+
+/*
+ * Single precision multiply.
+ */
+mul_s:
+ jal get_ft_fs_s
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, SEXP_INF, 2f # is FS an infinity?
+ bne t2, zero, result_fs_s # if FS is a NAN, result=FS
+ bne t5, SEXP_INF, 1f # FS is inf, is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is a NAN, result=FT
+ b result_fs_s # result is infinity
+1:
+ bne t5, zero, result_fs_s # inf * zero? if no, result=FS
+ bne t6, zero, result_fs_s
+ b invalid_s # infinity * zero is invalid
+2:
+ bne t5, SEXP_INF, 1f # FS != inf, is FT an infinity?
+ bne t1, zero, result_ft_s # zero * inf? if no, result=FT
+ bne t2, zero, result_ft_s
+ bne t6, zero, result_ft_s # if FT is a NAN, result=FT
+ b invalid_s # zero * infinity is invalid
+1:
+ bne t1, zero, 1f # is FS zero?
+ beq t2, zero, result_fs_s # result is zero
+ jal renorm_fs_s
+ b 2f
+1:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+2:
+ bne t5, zero, 1f # is FT zero?
+ beq t6, zero, result_ft_s # result is zero
+ jal renorm_ft_s
+ b 2f
+1:
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+2:
+ addu t1, t1, t5 # compute result exponent
+ addu t1, t1, 9 # account for binary point
+ multu t2, t6 # multiply fractions
+ mflo t8
+ mfhi t2
+ b norm_s
+
+/*
+ * Double precision multiply.
+ */
+mul_d:
+ jal get_ft_fs_d
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, DEXP_INF, 2f # is FS an infinity?
+ bne t2, zero, result_fs_d # if FS is a NAN, result=FS
+ bne t3, zero, result_fs_d
+ bne t5, DEXP_INF, 1f # FS is inf, is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is a NAN, result=FT
+ bne t7, zero, result_ft_d
+ b result_fs_d # result is infinity
+1:
+ bne t5, zero, result_fs_d # inf * zero? if no, result=FS
+ bne t6, zero, result_fs_d
+ bne t7, zero, result_fs_d
+ b invalid_d # infinity * zero is invalid
+2:
+ bne t5, DEXP_INF, 1f # FS != inf, is FT an infinity?
+ bne t1, zero, result_ft_d # zero * inf? if no, result=FT
+ bne t2, zero, result_ft_d # if FS is a NAN, result=FS
+ bne t3, zero, result_ft_d
+ bne t6, zero, result_ft_d # if FT is a NAN, result=FT
+ bne t7, zero, result_ft_d
+ b invalid_d # zero * infinity is invalid
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_d # result is zero
+1:
+ jal renorm_fs_d
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ beq t7, zero, result_ft_d # result is zero
+1:
+ jal renorm_ft_d
+ b 3f
+2:
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+3:
+ addu t1, t1, t5 # compute result exponent
+ addu t1, t1, 12 # ???
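+/*
+ * (The two 53-bit fractions are multiplied as four 32x32->64 partial
+ * products -- low*low, high*low, low*high, high*high -- accumulated
+ * into t2:t3 with t8 holding the guard/sticky bits; any nonzero bits
+ * left in a3 are folded into t8 so norm_d can round correctly.)
+ */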
+ multu t3, t7 # multiply fractions (low * low)
+ move t4, t2 # free up t2,t3 for result
+ move t5, t3
+ mflo a3 # save low order bits
+ mfhi t8
+ not v0, t8
+ multu t4, t7 # multiply FS(high) * FT(low)
+ mflo v1
+ mfhi t3 # init low result
+ sltu v0, v0, v1 # compute carry
+ addu t8, v1
+ multu t5, t6 # multiply FS(low) * FT(high)
+ addu t3, t3, v0 # add carry
+ not v0, t8
+ mflo v1
+ mfhi t2
+ sltu v0, v0, v1
+ addu t8, v1
+ multu t4, t6 # multiply FS(high) * FT(high)
+ addu t3, v0
+ not v1, t3
+ sltu v1, v1, t2
+ addu t3, t2
+ not v0, t3
+ mfhi t2
+ addu t2, v1
+ mflo v1
+ sltu v0, v0, v1
+ addu t2, v0
+ addu t3, v1
+ sltu a3, zero, a3 # reduce t8,a3 to just t8
+ or t8, a3
+ b norm_d
+
+/*
+ * Single precision divide.
+ */
+div_s:
+ jal get_ft_fs_s
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, result_fs_s # if FS is NAN, result is FS
+ bne t5, SEXP_INF, result_fs_s # is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ b invalid_s # infinity/infinity is invalid
+1:
+ bne t5, SEXP_INF, 1f # is FT an infinity?
+ bne t6, zero, result_ft_s # if FT is NAN, result is FT
+ move t1, zero # x / infinity is zero
+ move t2, zero
+ b result_fs_s
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ bne t5, zero, result_fs_s # FS=zero, is FT zero?
+ beq t6, zero, invalid_s # 0 / 0
+ b result_fs_s # result = zero
+1:
+ jal renorm_fs_s
+ b 3f
+2:
+ subu t1, t1, SEXP_BIAS # unbias FS exponent
+ or t2, t2, SIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ or a1, a1, FPC_EXCEPTION_DIV0 | FPC_STICKY_DIV0
+ and v0, a1, FPC_ENABLE_DIV0 # trap enabled?
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ li t1, SEXP_INF # result is infinity
+ move t2, zero
+ b result_fs_s
+1:
+ jal renorm_ft_s
+ b 3f
+2:
+ subu t5, t5, SEXP_BIAS # unbias FT exponent
+ or t6, t6, SIMPL_ONE # set implied one bit
+3:
+ subu t1, t1, t5 # compute exponent
+ subu t1, t1, 3 # compensate for result position
+ li v0, SFRAC_BITS+3 # number of bits to divide
+ move t8, t2 # init dividend
+ move t2, zero # init result
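+/*
+ * (Bit-by-bit shift-and-subtract division: v0 counts the SFRAC_BITS+3
+ * quotient bits, t8 holds the running remainder and t2 accumulates the
+ * quotient, which then falls through to norm_s for rounding.)
+ */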
+1:
+ bltu t8, t6, 3f # is dividend >= divisor?
+2:
+ subu t8, t8, t6 # subtract divisor from dividend
+ or t2, t2, 1 # remember that we did
+ bne t8, zero, 3f # if not done, continue
+ sll t2, t2, v0 # shift result to final position
+ b norm_s
+3:
+ sll t8, t8, 1 # shift dividend
+ sll t2, t2, 1 # shift result
+ subu v0, v0, 1 # are we done?
+ bne v0, zero, 1b # no, continue
+ b norm_s
+
+/*
+ * Double precision divide.
+ */
+div_d:
+ jal get_ft_fs_d
+ xor t0, t0, t4 # compute sign of result
+ move t4, t0
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, result_fs_d # if FS is NAN, result is FS
+ bne t3, zero, result_fs_d
+ bne t5, DEXP_INF, result_fs_d # is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ b invalid_d # infinity/infinity is invalid
+1:
+ bne t5, DEXP_INF, 1f # is FT an infinity?
+ bne t6, zero, result_ft_d # if FT is NAN, result is FT
+ bne t7, zero, result_ft_d
+ move t1, zero # x / infinity is zero
+ move t2, zero
+ move t3, zero
+ b result_fs_d
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ bne t3, zero, 1f
+ bne t5, zero, result_fs_d # FS=zero, is FT zero?
+ bne t6, zero, result_fs_d
+ beq t7, zero, invalid_d # 0 / 0
+ b result_fs_d # result = zero
+1:
+ jal renorm_fs_d
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias FS exponent
+ or t2, t2, DIMPL_ONE # set implied one bit
+3:
+ bne t5, zero, 2f # is FT zero?
+ bne t6, zero, 1f
+ bne t7, zero, 1f
+ or a1, a1, FPC_EXCEPTION_DIV0 | FPC_STICKY_DIV0
+ and v0, a1, FPC_ENABLE_DIV0 # trap enabled?
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # Save exceptions
+ li t1, DEXP_INF # result is infinity
+ move t2, zero
+ move t3, zero
+ b result_fs_d
+1:
+ jal renorm_ft_d
+ b 3f
+2:
+ subu t5, t5, DEXP_BIAS # unbias FT exponent
+ or t6, t6, DIMPL_ONE # set implied one bit
+3:
+ subu t1, t1, t5 # compute exponent
+ subu t1, t1, 3 # compensate for result position
+ li v0, DFRAC_BITS+3 # number of bits to divide
+ move t8, t2 # init dividend
+ move t9, t3
+ move t2, zero # init result
+ move t3, zero
+1:
+ bltu t8, t6, 3f # is dividend >= divisor?
+ bne t8, t6, 2f
+ bltu t9, t7, 3f
+2:
+ sltu v1, t9, t7 # subtract divisor from dividend
+ subu t9, t9, t7
+ subu t8, t8, t6
+ subu t8, t8, v1
+ or t3, t3, 1 # remember that we did
+ bne t8, zero, 3f # if not done, continue
+ bne t9, zero, 3f
+ li v1, 32 # shift result to final position
+ blt v0, v1, 2f # shift < 32 bits?
+ subu v0, v0, v1 # shift by > 32 bits
+ sll t2, t3, v0 # shift upper part
+ move t3, zero
+ b norm_d
+2:
+ subu v1, v1, v0 # shift by < 32 bits
+ sll t2, t2, v0 # shift upper part
+ srl t9, t3, v1 # save bits shifted out
+ or t2, t2, t9 # and put into upper part
+ sll t3, t3, v0
+ b norm_d
+3:
+ sll t8, t8, 1 # shift dividend
+ srl v1, t9, 31 # save bit shifted out
+ or t8, t8, v1 # and put into upper part
+ sll t9, t9, 1
+ sll t2, t2, 1 # shift result
+ srl v1, t3, 31 # save bit shifted out
+ or t2, t2, v1 # and put into upper part
+ sll t3, t3, 1
+ subu v0, v0, 1 # are we done?
+ bne v0, zero, 1b # no, continue
+ sltu v0, zero, t9 # be sure to save any one bits
+ or t8, t8, v0 # from the lower remainder
+ b norm_d
+
+/*
+ * Single precision absolute value.
+ */
+abs_s:
+ jal get_fs_s
+ move t0, zero # set sign positive
+ b result_fs_s
+
+/*
+ * Double precision absolute value.
+ */
+abs_d:
+ jal get_fs_d
+ move t0, zero # set sign positive
+ b result_fs_d
+
+/*
+ * Single precision move.
+ */
+mov_s:
+ jal get_fs_s
+ b result_fs_s
+
+/*
+ * Double precision move.
+ */
+mov_d:
+ jal get_fs_d
+ b result_fs_d
+
+/*
+ * Single precision negate.
+ */
+neg_s:
+ jal get_fs_s
+ xor t0, t0, 1 # reverse sign
+ b result_fs_s
+
+/*
+ * Double precision negate.
+ */
+neg_d:
+ jal get_fs_d
+ xor t0, t0, 1 # reverse sign
+ b result_fs_d
+
+/*
+ * Convert double to single.
+ */
+cvt_s_d:
+ jal get_fs_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ li t1, SEXP_INF # convert to single
+ sll t2, t2, 3 # convert D fraction to S
+ srl t8, t3, 32 - 3
+ or t2, t2, t8
+ b result_fs_s
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_s # result=0
+1:
+ jal renorm_fs_d
+ subu t1, t1, 3 # correct exp for shift below
+ b 3f
+2:
+ subu t1, t1, DEXP_BIAS # unbias exponent
+ or t2, t2, DIMPL_ONE # add implied one bit
+3:
+ sll t2, t2, 3 # convert D fraction to S
+ srl t8, t3, 32 - 3
+ or t2, t2, t8
+ sll t8, t3, 3
+ b norm_noshift_s
+
+/*
+ * Convert integer to single.
+ */
+cvt_s_w:
+ jal get_fs_int
+ bne t2, zero, 1f # check for zero
+ move t1, zero
+ b result_fs_s
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
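+/*
+ * (A branchy binary search: test whether the upper 16, 8, 4, 2 and 1
+ * bits are zero, shifting left and adding to the count in t9 each
+ * time they are.)
+ */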
+1:
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+ subu t9, t9, SLEAD_ZEROS # don't count leading zeros
+ li t1, 23 # init exponent
+ subu t1, t1, t9 # compute exponent
+ beq t9, zero, 1f
+ li v0, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ subu v0, v0, t9
+ sll t2, t2, t9 # shift left
+1:
+ add t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ b result_fs_s
+2:
+ negu t9 # shift right by t9
+ subu v0, v0, t9
+ sll t8, t2, v0 # save bits shifted out
+ srl t2, t2, t9
+ b norm_noshift_s
+
+/*
+ * Convert single to double.
+ */
+cvt_d_s:
+ jal get_fs_s
+ move t3, zero
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ li t1, DEXP_INF # convert to double
+ b result_fs_d
+1:
+ bne t1, zero, 2f # is FS denormalized or zero?
+ beq t2, zero, result_fs_d # is FS zero?
+ jal renorm_fs_s
+ move t8, zero
+ b norm_d
+2:
+ addu t1, t1, DEXP_BIAS - SEXP_BIAS # bias exponent correctly
+ sll t3, t2, 32 - 3 # convert S fraction to D
+ srl t2, t2, 3
+ b result_fs_d
+
+/*
+ * Convert integer to double.
+ */
+cvt_d_w:
+ jal get_fs_int
+ bne t2, zero, 1f # check for zero
+ move t1, zero # result=0
+ move t3, zero
+ b result_fs_d
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
+1:
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+ subu t9, t9, DLEAD_ZEROS # don't count leading zeros
+ li t1, DEXP_BIAS + 20 # init exponent
+ subu t1, t1, t9 # compute exponent
+ beq t9, zero, 1f
+ li v0, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ subu v0, v0, t9
+ sll t2, t2, t9 # shift left
+1:
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ move t3, zero
+ b result_fs_d
+2:
+ negu t9 # shift right by t9
+ subu v0, v0, t9
+ sll t3, t2, v0
+ srl t2, t2, t9
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ b result_fs_d
+
+/*
+ * Convert single to integer.
+ */
+cvt_w_s:
+ jal get_fs_s
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, invalid_w # invalid conversion
+1:
+ bne t1, zero, 1f # is FS zero?
+ beq t2, zero, result_fs_w # result is zero
+ move t2, zero # result is an inexact zero
+ b inexact_w
+1:
+ subu t1, t1, SEXP_BIAS # unbias exponent
+ or t2, t2, SIMPL_ONE # add implied one bit
+ sll t3, t2, 32 - 3 # convert S fraction to D
+ srl t2, t2, 3
+ b cvt_w
+
+/*
+ * Convert double to integer.
+ */
+cvt_w_d:
+ jal get_fs_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, invalid_w # invalid conversion
+ bne t3, zero, invalid_w # invalid conversion
+1:
+ bne t1, zero, 2f # is FS zero?
+ bne t2, zero, 1f
+ beq t3, zero, result_fs_w # result is zero
+1:
+ move t2, zero # result is an inexact zero
+ b inexact_w
+2:
+ subu t1, t1, DEXP_BIAS # unbias exponent
+ or t2, t2, DIMPL_ONE # add implied one bit
+cvt_w:
+ blt t1, WEXP_MIN, underflow_w # is exponent too small?
+ li v0, WEXP_MAX+1
+ bgt t1, v0, overflow_w # is exponent too large?
+ bne t1, v0, 1f # special check for INT_MIN
+ beq t0, zero, overflow_w # if positive, overflow
+ bne t2, DIMPL_ONE, overflow_w
+ bne t3, zero, overflow_w
+ li t2, INT_MIN # result is INT_MIN
+ b result_fs_w
+1:
+ subu v0, t1, 20 # compute amount to shift
+ beq v0, zero, 2f # is shift needed?
+ li v1, 32
+ blt v0, zero, 1f # if shift < 0, shift right
+ subu v1, v1, v0 # shift left
+ sll t2, t2, v0
+ srl t9, t3, v1 # save bits shifted out of t3
+ or t2, t2, t9 # and put into t2
+ sll t3, t3, v0 # shift FSs fraction
+ b 2f
+1:
+ negu v0 # shift right by v0
+ subu v1, v1, v0
+ sll t8, t3, v1 # save bits shifted out
+ sltu t8, zero, t8 # don't lose any ones
+ srl t3, t3, v0 # shift FSs fraction
+ or t3, t3, t8
+ sll t9, t2, v1 # save bits shifted out of t2
+ or t3, t3, t9 # and put into t3
+ srl t2, t2, v0
+/*
+ * round result (t0 is sign, t2 is integer part, t3 is fractional part).
+ */
+2:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t3, zero, 5f # if no fraction bits, continue
+ addu t2, t2, 1 # add rounding bit
+ blt t2, zero, overflow_w # overflow?
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t3 # add remainder
+ sltu v1, v0, t3 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+ blt t2, zero, overflow_w # overflow?
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ beq t0, zero, 1f # result positive?
+ negu t2 # convert to negative integer
+1:
+ beq t3, zero, result_fs_w # is result exact?
+/*
+ * Handle inexact exception.
+ */
+inexact_w:
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b result_fs_w
+
+/*
+ * Conversions to integer which overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an invalid exception.
+ */
+overflow_w:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ bne v0, zero, fpe_trap
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, inexact_w # inexact traps enabled?
+ b invalid_w
+
+/*
+ * Conversions to integer which underflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an invalid exception.
+ */
+underflow_w:
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ bne v0, zero, fpe_trap
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, inexact_w # inexact traps enabled?
+ b invalid_w
+
+/*
+ * Compare single.
+ */
+cmp_s:
+ jal get_cmp_s
+ bne t1, SEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, unordered # FS is a NAN
+1:
+ bne t5, SEXP_INF, 2f # is FT an infinity?
+ bne t6, zero, unordered # FT is a NAN
+2:
+ sll t1, t1, 23 # reassemble exp & frac
+ or t1, t1, t2
+ sll t5, t5, 23 # reassemble exp & frac
+ or t5, t5, t6
+ beq t0, zero, 1f # is FS positive?
+ negu t1
+1:
+ beq t4, zero, 1f # is FT positive?
+ negu t5
+1:
+ li v0, COND_LESS
+ blt t1, t5, test_cond # is FS < FT?
+ li v0, COND_EQUAL
+ beq t1, t5, test_cond # is FS == FT?
+ move v0, zero # FS > FT
+ b test_cond
+
+/*
+ * Compare double.
+ */
+cmp_d:
+ jal get_cmp_d
+ bne t1, DEXP_INF, 1f # is FS an infinity?
+ bne t2, zero, unordered
+ bne t3, zero, unordered # FS is a NAN
+1:
+ bne t5, DEXP_INF, 2f # is FT an infinity?
+ bne t6, zero, unordered
+ bne t7, zero, unordered # FT is a NAN
+2:
+ sll t1, t1, 20 # reassemble exp & frac
+ or t1, t1, t2
+ sll t5, t5, 20 # reassemble exp & frac
+ or t5, t5, t6
+ beq t0, zero, 1f # is FS positive?
+ not t3 # negate t1,t3
+ not t1
+ addu t3, t3, 1
+ seq v0, t3, zero # compute carry
+ addu t1, t1, v0
+1:
+ beq t4, zero, 1f # is FT positive?
+ not t7 # negate t5,t7
+ not t5
+ addu t7, t7, 1
+ seq v0, t7, zero # compute carry
+ addu t5, t5, v0
+1:
+ li v0, COND_LESS
+ blt t1, t5, test_cond # is FS(MSW) < FT(MSW)?
+ move v0, zero
+ bne t1, t5, test_cond # is FS(MSW) > FT(MSW)?
+ li v0, COND_LESS
+ bltu t3, t7, test_cond # is FS(LSW) < FT(LSW)?
+ li v0, COND_EQUAL
+ beq t3, t7, test_cond # is FS(LSW) == FT(LSW)?
+ move v0, zero # FS > FT
+test_cond:
+ and v0, v0, a0 # condition match instruction?
+set_cond:
+ bne v0, zero, 1f
+ and a1, a1, ~FPC_COND_BIT # clear condition bit
+ b 2f
+1:
+ or a1, a1, FPC_COND_BIT # set condition bit
+2:
+ ctc1 a1, FPC_CSR # save condition bit
+ b done
+
+unordered:
+ and v0, a0, COND_UNORDERED # this cmp match unordered?
+ bne v0, zero, 1f
+ and a1, a1, ~FPC_COND_BIT # clear condition bit
+ b 2f
+1:
+ or a1, a1, FPC_COND_BIT # set condition bit
+2:
+ and v0, a0, COND_SIGNAL
+ beq v0, zero, 1f # is this a signaling cmp?
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+1:
+ ctc1 a1, FPC_CSR # save condition bit
+ b done
+
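The cmp_s/cmp_d code above reduces the floating-point comparison to an ordinary signed integer compare: exponent and fraction are glued back together into a magnitude, the magnitude is negated when the sign bit is set (so +0 and -0 both compare equal), and NaN operands are diverted to the unordered path first. A rough C restatement of the single-precision case, working on the fields get_cmp_s produces; the helper name is made up:

#include <stdint.h>

/*
 * Compare two single-precision operands already split into
 * sign/exponent/fraction fields.  Returns -1, 0 or 1 for
 * less/equal/greater; NaNs must have been filtered out
 * (the unordered case) before calling this.
 */
static int
cmp_single(uint32_t s_sign, int32_t s_exp, int32_t s_frac,
    uint32_t t_sign, int32_t t_exp, int32_t t_frac)
{
	int32_t a = (s_exp << 23) | s_frac;	/* reassemble magnitude */
	int32_t b = (t_exp << 23) | t_frac;

	if (s_sign)
		a = -a;			/* +0 and -0 both map to 0 */
	if (t_sign)
		b = -b;
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}
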
+/*
+ * Determine the amount to shift the fraction in order to restore the
+ * normalized position. After that, round and handle exceptions.
+ */
+norm_s:
+ move v0, t2
+ move t9, zero # t9 = num of leading zeros
+ bne t2, zero, 1f
+ move v0, t8
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t8 the correct number of bits.
+ */
+1:
+ subu t9, t9, SLEAD_ZEROS # don't count leading zeros
+ subu t1, t1, t9 # adjust the exponent
+ beq t9, zero, norm_noshift_s
+ li v1, 32
+ blt t9, zero, 1f # if shift < 0, shift right
+ subu v1, v1, t9
+ sll t2, t2, t9 # shift t2,t8 left
+ srl v0, t8, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t8, t8, t9
+ b norm_noshift_s
+1:
+ negu t9 # shift t2,t8 right by t9
+ subu v1, v1, t9
+ sll v0, t8, v1 # save bits shifted out
+ sltu v0, zero, v0 # be sure to save any one bits
+ srl t8, t8, t9
+ or t8, t8, v0
+ sll v0, t2, v1 # save bits shifted out
+ or t8, t8, v0
+ srl t2, t2, t9
+norm_noshift_s:
+ move t5, t1 # save unrounded exponent
+ move t6, t2 # save unrounded fraction
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t2, t2, 1 # add rounding bit
+ bne t2, SIMPL_ONE<<1, 5f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+ bne t2, SIMPL_ONE<<1, 4f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ bgt t1, SEXP_MAX, overflow_s # overflow?
+ blt t1, SEXP_MIN, underflow_s # underflow?
+ bne t8, zero, inexact_s # is result inexact?
+ addu t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ b result_fs_s
+
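The round-to-nearest branch above (label 3 of norm_noshift_s) uses a guard-bit trick that recurs throughout this file: add GUARDBIT to the 32-bit sticky remainder, feed the carry into the fraction, renormalize if the fraction overflows, and clear the low bit when the remainder was exactly one half so ties go to even. A hedged C sketch of that step, assuming GUARDBIT is 1u << 31 and that the single-precision implied-one bit is 1 << 23 (both inferred from the surrounding code, not quoted from the definitions earlier in the file):

#include <stdint.h>

#define GUARDBIT	0x80000000u	/* assumed: half of the 32-bit remainder range */

/*
 * Round a 24-bit fraction (implied one still set) to nearest/even,
 * given the 32-bit remainder of bits shifted out.  Returns the
 * possibly incremented fraction; *expp is bumped if it overflows.
 */
static uint32_t
round_nearest_even(uint32_t frac, uint32_t rem, int *expp)
{
	uint32_t sum = rem + GUARDBIT;

	if (sum < rem) {			/* carry out: remainder >= 1/2 */
		frac++;
		if (frac == (1u << 24)) {	/* fraction overflowed */
			frac >>= 1;
			(*expp)++;
		}
	}
	if (sum == 0)				/* remainder was exactly 1/2: a tie */
		frac &= ~1u;			/* round to even */
	return frac;
}
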
+/*
+ * Handle inexact exception.
+ */
+inexact_s:
+ addu t1, t1, SEXP_BIAS # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+inexact_nobias_s:
+ jal set_fd_s # save result
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an infinity.
+ */
+overflow_s:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ beq v0, zero, 1f
+ subu t1, t1, 192 # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ jal set_fd_s # save result
+ b fpe_trap
+1:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 1f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 2f # round to +infinity
+ bne t0, zero, 3f
+1:
+ li t1, SEXP_MAX # result is max finite
+ li t2, 0x007fffff
+ b inexact_s
+2:
+ bne t0, zero, 1b
+3:
+ li t1, SEXP_MAX + 1 # result is infinity
+ move t2, zero
+ b inexact_s
+
+/*
+ * In this implementation, "tininess" is detected "after rounding" and
+ * "loss of accuracy" is detected as "an inexact result".
+ */
+underflow_s:
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ beq v0, zero, 1f
+/*
+ * Underflow is enabled so compute the result and trap.
+ */
+ addu t1, t1, 192 # bias exponent
+ and t2, t2, ~SIMPL_ONE # clear implied one bit
+ jal set_fd_s # save result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ b fpe_trap
+/*
+ * Underflow is not enabled so compute the result,
+ * signal inexact result (if it is) and trap (if enabled).
+ */
+1:
+ move t1, t5 # get unrounded exponent
+ move t2, t6 # get unrounded fraction
+ li t9, SEXP_MIN # compute shift amount
+ subu t9, t9, t1 # shift t2,t8 right by t9
+ blt t9, SFRAC_BITS+2, 3f # shift all the bits out?
+ move t1, zero # result is inexact zero
+ move t2, zero
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+/*
+ * Now round the zero result.
+ * Only need to worry about rounding to +- infinity when the sign matches.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, inexact_nobias_s # round to nearest
+ beq v0, FPC_ROUND_RZ, inexact_nobias_s # round to zero
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, inexact_nobias_s # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, inexact_nobias_s # if sign is negative, truncate
+2:
+ addu t2, t2, 1 # add rounding bit
+ b inexact_nobias_s
+3:
+ li v1, 32
+ subu v1, v1, t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t2, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t2, t2, t9
+/*
+ * Now round the denormalized result.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t2, t2, 1 # add rounding bit
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry to result
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t2, t2, ~1 # clear LSB (round to nearest)
+5:
+ move t1, zero # denorm or zero exponent
+ jal set_fd_s # save result
+ beq t8, zero, done # check for exact result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Determine the amount to shift the fraction in order to restore the
+ * normalized position. After that, round and handle exceptions.
+ */
+norm_d:
+ move v0, t2
+ move t9, zero # t9 = num of leading zeros
+ bne t2, zero, 1f
+ move v0, t3
+ addu t9, 32
+ bne t3, zero, 1f
+ move v0, t8
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t3,t8 the correct number of bits.
+ */
+1:
+ subu t9, t9, DLEAD_ZEROS # don't count leading zeros
+ subu t1, t1, t9 # adjust the exponent
+ beq t9, zero, norm_noshift_d
+ li v1, 32
+ blt t9, zero, 2f # if shift < 0, shift right
+ blt t9, v1, 1f # shift by < 32?
+ subu t9, t9, v1 # shift by >= 32
+ subu v1, v1, t9
+ sll t2, t3, t9 # shift left by t9
+ srl v0, t8, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t3, t8, t9
+ move t8, zero
+ b norm_noshift_d
+1:
+ subu v1, v1, t9
+ sll t2, t2, t9 # shift left by t9
+ srl v0, t3, v1 # save bits shifted out
+ or t2, t2, v0
+ sll t3, t3, t9
+ srl v0, t8, v1 # save bits shifted out
+ or t3, t3, v0
+ sll t8, t8, t9
+ b norm_noshift_d
+2:
+ negu t9 # shift right by t9
+ subu v1, v1, t9 # (known to be < 32 bits)
+ sll v0, t8, v1 # save bits shifted out
+ sltu v0, zero, v0 # be sure to save any one bits
+ srl t8, t8, t9
+ or t8, t8, v0
+ sll v0, t3, v1 # save bits shifted out
+ or t8, t8, v0
+ srl t3, t3, t9
+ sll v0, t2, v1 # save bits shifted out
+ or t3, t3, v0
+ srl t2, t2, t9
+norm_noshift_d:
+ move t5, t1 # save unrounded exponent
+ move t6, t2 # save unrounded fraction (MS)
+ move t7, t3 # save unrounded fraction (LS)
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 5f # branch if no carry
+ addu t2, t2, 1 # add carry
+ bne t2, DIMPL_ONE<<1, 5f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # branch if no carry
+ addu t3, t3, 1 # add carry
+ bne t3, zero, 4f # branch if no carry
+ addu t2, t2, 1 # add carry to result
+ bne t2, DIMPL_ONE<<1, 4f # need to adjust exponent?
+ addu t1, t1, 1 # adjust exponent
+ srl t2, t2, 1 # renormalize fraction
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t3, t3, ~1 # clear LSB (round to nearest)
+5:
+ bgt t1, DEXP_MAX, overflow_d # overflow?
+ blt t1, DEXP_MIN, underflow_d # underflow?
+ bne t8, zero, inexact_d # is result inexact?
+ addu t1, t1, DEXP_BIAS # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ b result_fs_d
+
+/*
+ * Handle inexact exception.
+ */
+inexact_d:
+ addu t1, t1, DEXP_BIAS # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+inexact_nobias_d:
+ jal set_fd_d # save result
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Overflow will trap (if enabled),
+ * or generate an inexact trap (if enabled),
+ * or generate an infinity.
+ */
+overflow_d:
+ or a1, a1, FPC_EXCEPTION_OVERFLOW | FPC_STICKY_OVERFLOW
+ and v0, a1, FPC_ENABLE_OVERFLOW
+ beq v0, zero, 1f
+ subu t1, t1, 1536 # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ jal set_fd_d # save result
+ b fpe_trap
+1:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 1f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 2f # round to +infinity
+ bne t0, zero, 3f
+1:
+ li t1, DEXP_MAX # result is max finite
+ li t2, 0x000fffff
+ li t3, 0xffffffff
+ b inexact_d
+2:
+ bne t0, zero, 1b
+3:
+ li t1, DEXP_MAX + 1 # result is infinity
+ move t2, zero
+ move t3, zero
+ b inexact_d
+
+/*
+ * In this implementation, "tininess" is detected "after rounding" and
+ * "loss of accuracy" is detected as "an inexact result".
+ */
+underflow_d:
+ and v0, a1, FPC_ENABLE_UNDERFLOW
+ beq v0, zero, 1f
+/*
+ * Underflow is enabled so compute the result and trap.
+ */
+ addu t1, t1, 1536 # bias exponent
+ and t2, t2, ~DIMPL_ONE # clear implied one bit
+ jal set_fd_d # save result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ b fpe_trap
+/*
+ * Underflow is not enabled so compute the result,
+ * signal inexact result (if it is) and trap (if enabled).
+ */
+1:
+ move t1, t5 # get unrounded exponent
+ move t2, t6 # get unrounded fraction (MS)
+ move t3, t7 # get unrounded fraction (LS)
+ li t9, DEXP_MIN # compute shift amount
+ subu t9, t9, t1 # shift t2,t8 right by t9
+ blt t9, DFRAC_BITS+2, 3f # shift all the bits out?
+ move t1, zero # result is inexact zero
+ move t2, zero
+ move t3, zero
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+/*
+ * Now round the zero result.
+ * Only need to worry about rounding to +- infinity when the sign matches.
+ */
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, inexact_nobias_d # round to nearest
+ beq v0, FPC_ROUND_RZ, inexact_nobias_d # round to zero
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, inexact_nobias_d # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, inexact_nobias_d # if sign is negative, truncate
+2:
+ addu t3, t3, 1 # add rounding bit
+ b inexact_nobias_d
+3:
+ li v1, 32
+ blt t9, v1, 1f # shift by < 32?
+ subu t9, t9, v1 # shift right by >= 32
+ subu v1, v1, t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t2, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t3, t2, t9
+ move t2, zero
+ b 2f
+1:
+ subu v1, v1, t9 # shift right by t9
+ sltu v0, zero, t8 # be sure to save any one bits
+ sll t8, t3, v1 # save bits shifted out
+ or t8, t8, v0 # include sticky bits
+ srl t3, t3, t9
+ sll v0, t2, v1 # save bits shifted out
+ or t3, t3, v0
+ srl t2, t2, t9
+/*
+ * Now round the denormalized result.
+ */
+2:
+ and v0, a1, FPC_ROUNDING_BITS # get rounding mode
+ beq v0, FPC_ROUND_RN, 3f # round to nearest
+ beq v0, FPC_ROUND_RZ, 5f # round to zero (truncate)
+ beq v0, FPC_ROUND_RP, 1f # round to +infinity
+ beq t0, zero, 5f # if sign is positive, truncate
+ b 2f
+1:
+ bne t0, zero, 5f # if sign is negative, truncate
+2:
+ beq t8, zero, 5f # if exact, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 5f # if no carry, continue
+ addu t2, t2, 1 # add carry
+ b 5f
+3:
+ li v0, GUARDBIT # load guard bit for rounding
+ addu v0, v0, t8 # add remainder
+ sltu v1, v0, t8 # compute carry out
+ beq v1, zero, 4f # if no carry, continue
+ addu t3, t3, 1 # add rounding bit
+ bne t3, zero, 4f # if no carry, continue
+ addu t2, t2, 1 # add carry
+4:
+ bne v0, zero, 5f # if rounded remainder is zero
+ and t3, t3, ~1 # clear LSB (round to nearest)
+5:
+ move t1, zero # denorm or zero exponent
+ jal set_fd_d # save result
+ beq t8, zero, done # check for exact result
+ or a1, a1, FPC_EXCEPTION_UNDERFLOW | FPC_STICKY_UNDERFLOW
+ or a1, a1, FPC_EXCEPTION_INEXACT | FPC_STICKY_INEXACT
+ and v0, a1, FPC_ENABLE_INEXACT
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ b done
+
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is a quiet NAN.
+ */
+invalid_s: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ move t0, zero # result is a quiet NAN
+ li t1, SEXP_INF
+ li t2, SQUIET_NAN
+ jal set_fd_s # save result (in t0,t1,t2)
+ b done
+
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is a quiet NAN.
+ */
+invalid_d: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ move t0, zero # result is a quiet NAN
+ li t1, DEXP_INF
+ li t2, DQUIET_NAN0
+ li t3, DQUIET_NAN1
+ jal set_fd_d # save result (in t0,t1,t2,t3)
+ b done
+
+/*
+ * Signal an invalid operation if the trap is enabled; otherwise,
+ * the result is INT_MAX or INT_MIN.
+ */
+invalid_w: # trap invalid operation
+ or a1, a1, FPC_EXCEPTION_INVALID | FPC_STICKY_INVALID
+ and v0, a1, FPC_ENABLE_INVALID
+ bne v0, zero, fpe_trap
+ ctc1 a1, FPC_CSR # save exceptions
+ bne t0, zero, 1f
+ li t2, INT_MAX # result is INT_MAX
+ b result_fs_w
+1:
+ li t2, INT_MIN # result is INT_MIN
+ b result_fs_w
+
+/*
+ * Trap if the hardware should have handled this case.
+ */
+fpe_trap:
+ move a2, a1 # code = FP CSR
+ ctc1 a1, FPC_CSR # save exceptions
+ break 0
+
+/*
+ * Send an illegal instruction signal to the current process.
+ */
+ill:
+ ctc1 a1, FPC_CSR # save exceptions
+ move a2, a0 # code = FP instruction
+ break 0
+
+result_ft_s:
+ move t0, t4 # result is FT
+ move t1, t5
+ move t2, t6
+result_fs_s: # result is FS
+ jal set_fd_s # save result (in t0,t1,t2)
+ b done
+
+result_fs_w:
+ jal set_fd_word # save result (in t2)
+ b done
+
+result_ft_d:
+ move t0, t4 # result is FT
+ move t1, t5
+ move t2, t6
+ move t3, t7
+result_fs_d: # result is FS
+ jal set_fd_d # save result (in t0,t1,t2,t3)
+
+done:
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_ADD sp, sp, FRAMESZ(CF_SZ)
+ j ra
+END(MipsEmulateFP)
+
+/*----------------------------------------------------------------------------
+ * get_fs_int --
+ *
+ * Read (integer) the FS register (bits 15-11).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t2 contains the fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_fs_int)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_int_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_int_tbl:
+ .word get_fs_int_f0
+ .word get_fs_int_f2
+ .word get_fs_int_f4
+ .word get_fs_int_f6
+ .word get_fs_int_f8
+ .word get_fs_int_f10
+ .word get_fs_int_f12
+ .word get_fs_int_f14
+ .word get_fs_int_f16
+ .word get_fs_int_f18
+ .word get_fs_int_f20
+ .word get_fs_int_f22
+ .word get_fs_int_f24
+ .word get_fs_int_f26
+ .word get_fs_int_f28
+ .word get_fs_int_f30
+ .text
+
+get_fs_int_f0:
+ mfc1 t2, $f0
+ b get_fs_int_done
+get_fs_int_f2:
+ mfc1 t2, $f2
+ b get_fs_int_done
+get_fs_int_f4:
+ mfc1 t2, $f4
+ b get_fs_int_done
+get_fs_int_f6:
+ mfc1 t2, $f6
+ b get_fs_int_done
+get_fs_int_f8:
+ mfc1 t2, $f8
+ b get_fs_int_done
+get_fs_int_f10:
+ mfc1 t2, $f10
+ b get_fs_int_done
+get_fs_int_f12:
+ mfc1 t2, $f12
+ b get_fs_int_done
+get_fs_int_f14:
+ mfc1 t2, $f14
+ b get_fs_int_done
+get_fs_int_f16:
+ mfc1 t2, $f16
+ b get_fs_int_done
+get_fs_int_f18:
+ mfc1 t2, $f18
+ b get_fs_int_done
+get_fs_int_f20:
+ mfc1 t2, $f20
+ b get_fs_int_done
+get_fs_int_f22:
+ mfc1 t2, $f22
+ b get_fs_int_done
+get_fs_int_f24:
+ mfc1 t2, $f24
+ b get_fs_int_done
+get_fs_int_f26:
+ mfc1 t2, $f26
+ b get_fs_int_done
+get_fs_int_f28:
+ mfc1 t2, $f28
+ b get_fs_int_done
+get_fs_int_f30:
+ mfc1 t2, $f30
+get_fs_int_done:
+ srl t0, t2, 31 # init the sign bit
+ bge t2, zero, 1f
+ negu t2
+1:
+ j ra
+END(get_fs_int)
+
+/*----------------------------------------------------------------------------
+ * get_ft_fs_s --
+ *
+ * Read (single precision) the FT register (bits 20-16) and
+ * the FS register (bits 15-11) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_ft_fs_s)
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, get_ft_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_ft_s_tbl:
+ .word get_ft_s_f0
+ .word get_ft_s_f2
+ .word get_ft_s_f4
+ .word get_ft_s_f6
+ .word get_ft_s_f8
+ .word get_ft_s_f10
+ .word get_ft_s_f12
+ .word get_ft_s_f14
+ .word get_ft_s_f16
+ .word get_ft_s_f18
+ .word get_ft_s_f20
+ .word get_ft_s_f22
+ .word get_ft_s_f24
+ .word get_ft_s_f26
+ .word get_ft_s_f28
+ .word get_ft_s_f30
+ .text
+
+get_ft_s_f0:
+ mfc1 t4, $f0
+ b get_ft_s_done
+get_ft_s_f2:
+ mfc1 t4, $f2
+ b get_ft_s_done
+get_ft_s_f4:
+ mfc1 t4, $f4
+ b get_ft_s_done
+get_ft_s_f6:
+ mfc1 t4, $f6
+ b get_ft_s_done
+get_ft_s_f8:
+ mfc1 t4, $f8
+ b get_ft_s_done
+get_ft_s_f10:
+ mfc1 t4, $f10
+ b get_ft_s_done
+get_ft_s_f12:
+ mfc1 t4, $f12
+ b get_ft_s_done
+get_ft_s_f14:
+ mfc1 t4, $f14
+ b get_ft_s_done
+get_ft_s_f16:
+ mfc1 t4, $f16
+ b get_ft_s_done
+get_ft_s_f18:
+ mfc1 t4, $f18
+ b get_ft_s_done
+get_ft_s_f20:
+ mfc1 t4, $f20
+ b get_ft_s_done
+get_ft_s_f22:
+ mfc1 t4, $f22
+ b get_ft_s_done
+get_ft_s_f24:
+ mfc1 t4, $f24
+ b get_ft_s_done
+get_ft_s_f26:
+ mfc1 t4, $f26
+ b get_ft_s_done
+get_ft_s_f28:
+ mfc1 t4, $f28
+ b get_ft_s_done
+get_ft_s_f30:
+ mfc1 t4, $f30
+get_ft_s_done:
+ srl t5, t4, 23 # get exponent
+ and t5, t5, 0xFF
+ and t6, t4, 0x7FFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ bne t5, SEXP_INF, 1f # is it a signaling NAN?
+ and v0, t6, SSIGNAL_NAN
+ bne v0, zero, invalid_s
+1:
+ /* fall through to get FS */
+
+/*----------------------------------------------------------------------------
+ * get_fs_s --
+ *
+ * Read (single precision) the FS register (bits 15-11) and
+ * break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+ALEAF(get_fs_s)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_s_tbl:
+ .word get_fs_s_f0
+ .word get_fs_s_f2
+ .word get_fs_s_f4
+ .word get_fs_s_f6
+ .word get_fs_s_f8
+ .word get_fs_s_f10
+ .word get_fs_s_f12
+ .word get_fs_s_f14
+ .word get_fs_s_f16
+ .word get_fs_s_f18
+ .word get_fs_s_f20
+ .word get_fs_s_f22
+ .word get_fs_s_f24
+ .word get_fs_s_f26
+ .word get_fs_s_f28
+ .word get_fs_s_f30
+ .text
+
+get_fs_s_f0:
+ mfc1 t0, $f0
+ b get_fs_s_done
+get_fs_s_f2:
+ mfc1 t0, $f2
+ b get_fs_s_done
+get_fs_s_f4:
+ mfc1 t0, $f4
+ b get_fs_s_done
+get_fs_s_f6:
+ mfc1 t0, $f6
+ b get_fs_s_done
+get_fs_s_f8:
+ mfc1 t0, $f8
+ b get_fs_s_done
+get_fs_s_f10:
+ mfc1 t0, $f10
+ b get_fs_s_done
+get_fs_s_f12:
+ mfc1 t0, $f12
+ b get_fs_s_done
+get_fs_s_f14:
+ mfc1 t0, $f14
+ b get_fs_s_done
+get_fs_s_f16:
+ mfc1 t0, $f16
+ b get_fs_s_done
+get_fs_s_f18:
+ mfc1 t0, $f18
+ b get_fs_s_done
+get_fs_s_f20:
+ mfc1 t0, $f20
+ b get_fs_s_done
+get_fs_s_f22:
+ mfc1 t0, $f22
+ b get_fs_s_done
+get_fs_s_f24:
+ mfc1 t0, $f24
+ b get_fs_s_done
+get_fs_s_f26:
+ mfc1 t0, $f26
+ b get_fs_s_done
+get_fs_s_f28:
+ mfc1 t0, $f28
+ b get_fs_s_done
+get_fs_s_f30:
+ mfc1 t0, $f30
+get_fs_s_done:
+ srl t1, t0, 23 # get exponent
+ and t1, t1, 0xFF
+ and t2, t0, 0x7FFFFF # get fraction
+ srl t0, t0, 31 # get sign
+ bne t1, SEXP_INF, 1f # is it a signaling NAN?
+ and v0, t2, SSIGNAL_NAN
+ bne v0, zero, invalid_s
+1:
+ j ra
+END(get_ft_fs_s)
+
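get_ft_fs_s and get_fs_s above unpack a raw single-precision word into sign, biased exponent and 23-bit fraction, and bail out to invalid_s when the operand is a signaling NaN. The same field extraction in C (struct and function names are illustrative; the exact value of SSIGNAL_NAN is defined earlier in fp.S and not repeated here):

#include <stdint.h>

struct sfields {
	uint32_t sign;	/* 1 bit */
	uint32_t exp;	/* 8 bits, still biased */
	uint32_t frac;	/* 23 bits, implied one not included */
};

/* Split a raw IEEE 754 single into the fields get_fs_s produces. */
static struct sfields
unpack_single(uint32_t w)
{
	struct sfields f;

	f.exp = (w >> 23) & 0xff;
	f.frac = w & 0x7fffff;
	f.sign = w >> 31;
	return f;
}

A signaling NaN is then the case where exp is all ones and the bit tested by "and v0, t2, SSIGNAL_NAN" is set in frac.
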
+/*----------------------------------------------------------------------------
+ * get_ft_fs_d --
+ *
+ * Read (double precision) the FT register (bits 20-16) and
+ * the FS register (bits 15-11) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t3 contains the FS remaining fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ * t7 contains the FT remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_ft_fs_d)
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, get_ft_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_ft_d_tbl:
+ .word get_ft_d_f0
+ .word get_ft_d_f2
+ .word get_ft_d_f4
+ .word get_ft_d_f6
+ .word get_ft_d_f8
+ .word get_ft_d_f10
+ .word get_ft_d_f12
+ .word get_ft_d_f14
+ .word get_ft_d_f16
+ .word get_ft_d_f18
+ .word get_ft_d_f20
+ .word get_ft_d_f22
+ .word get_ft_d_f24
+ .word get_ft_d_f26
+ .word get_ft_d_f28
+ .word get_ft_d_f30
+ .text
+
+get_ft_d_f0:
+ mfc1 t7, $f0
+ mfc1 t4, $f1
+ b get_ft_d_done
+get_ft_d_f2:
+ mfc1 t7, $f2
+ mfc1 t4, $f3
+ b get_ft_d_done
+get_ft_d_f4:
+ mfc1 t7, $f4
+ mfc1 t4, $f5
+ b get_ft_d_done
+get_ft_d_f6:
+ mfc1 t7, $f6
+ mfc1 t4, $f7
+ b get_ft_d_done
+get_ft_d_f8:
+ mfc1 t7, $f8
+ mfc1 t4, $f9
+ b get_ft_d_done
+get_ft_d_f10:
+ mfc1 t7, $f10
+ mfc1 t4, $f11
+ b get_ft_d_done
+get_ft_d_f12:
+ mfc1 t7, $f12
+ mfc1 t4, $f13
+ b get_ft_d_done
+get_ft_d_f14:
+ mfc1 t7, $f14
+ mfc1 t4, $f15
+ b get_ft_d_done
+get_ft_d_f16:
+ mfc1 t7, $f16
+ mfc1 t4, $f17
+ b get_ft_d_done
+get_ft_d_f18:
+ mfc1 t7, $f18
+ mfc1 t4, $f19
+ b get_ft_d_done
+get_ft_d_f20:
+ mfc1 t7, $f20
+ mfc1 t4, $f21
+ b get_ft_d_done
+get_ft_d_f22:
+ mfc1 t7, $f22
+ mfc1 t4, $f23
+ b get_ft_d_done
+get_ft_d_f24:
+ mfc1 t7, $f24
+ mfc1 t4, $f25
+ b get_ft_d_done
+get_ft_d_f26:
+ mfc1 t7, $f26
+ mfc1 t4, $f27
+ b get_ft_d_done
+get_ft_d_f28:
+ mfc1 t7, $f28
+ mfc1 t4, $f29
+ b get_ft_d_done
+get_ft_d_f30:
+ mfc1 t7, $f30
+ mfc1 t4, $f31
+get_ft_d_done:
+ srl t5, t4, 20 # get exponent
+ and t5, t5, 0x7FF
+ and t6, t4, 0xFFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ bne t5, DEXP_INF, 1f # is it a signaling NAN?
+ and v0, t6, DSIGNAL_NAN
+ bne v0, zero, invalid_d
+1:
+ /* fall through to get FS */
+
+/*----------------------------------------------------------------------------
+ * get_fs_d --
+ *
+ * Read (double precision) the FS register (bits 15-11) and
+ * break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ * t3 contains the remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+ALEAF(get_fs_d)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, get_fs_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+get_fs_d_tbl:
+ .word get_fs_d_f0
+ .word get_fs_d_f2
+ .word get_fs_d_f4
+ .word get_fs_d_f6
+ .word get_fs_d_f8
+ .word get_fs_d_f10
+ .word get_fs_d_f12
+ .word get_fs_d_f14
+ .word get_fs_d_f16
+ .word get_fs_d_f18
+ .word get_fs_d_f20
+ .word get_fs_d_f22
+ .word get_fs_d_f24
+ .word get_fs_d_f26
+ .word get_fs_d_f28
+ .word get_fs_d_f30
+ .text
+
+get_fs_d_f0:
+ mfc1 t3, $f0
+ mfc1 t0, $f1
+ b get_fs_d_done
+get_fs_d_f2:
+ mfc1 t3, $f2
+ mfc1 t0, $f3
+ b get_fs_d_done
+get_fs_d_f4:
+ mfc1 t3, $f4
+ mfc1 t0, $f5
+ b get_fs_d_done
+get_fs_d_f6:
+ mfc1 t3, $f6
+ mfc1 t0, $f7
+ b get_fs_d_done
+get_fs_d_f8:
+ mfc1 t3, $f8
+ mfc1 t0, $f9
+ b get_fs_d_done
+get_fs_d_f10:
+ mfc1 t3, $f10
+ mfc1 t0, $f11
+ b get_fs_d_done
+get_fs_d_f12:
+ mfc1 t3, $f12
+ mfc1 t0, $f13
+ b get_fs_d_done
+get_fs_d_f14:
+ mfc1 t3, $f14
+ mfc1 t0, $f15
+ b get_fs_d_done
+get_fs_d_f16:
+ mfc1 t3, $f16
+ mfc1 t0, $f17
+ b get_fs_d_done
+get_fs_d_f18:
+ mfc1 t3, $f18
+ mfc1 t0, $f19
+ b get_fs_d_done
+get_fs_d_f20:
+ mfc1 t3, $f20
+ mfc1 t0, $f21
+ b get_fs_d_done
+get_fs_d_f22:
+ mfc1 t3, $f22
+ mfc1 t0, $f23
+ b get_fs_d_done
+get_fs_d_f24:
+ mfc1 t3, $f24
+ mfc1 t0, $f25
+ b get_fs_d_done
+get_fs_d_f26:
+ mfc1 t3, $f26
+ mfc1 t0, $f27
+ b get_fs_d_done
+get_fs_d_f28:
+ mfc1 t3, $f28
+ mfc1 t0, $f29
+ b get_fs_d_done
+get_fs_d_f30:
+ mfc1 t3, $f30
+ mfc1 t0, $f31
+get_fs_d_done:
+ srl t1, t0, 20 # get exponent
+ and t1, t1, 0x7FF
+ and t2, t0, 0xFFFFF # get fraction
+ srl t0, t0, 31 # get sign
+ bne t1, DEXP_INF, 1f # is it a signaling NAN?
+ and v0, t2, DSIGNAL_NAN
+ bne v0, zero, invalid_d
+1:
+ j ra
+END(get_ft_fs_d)
+
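get_ft_fs_d and get_fs_d do the same for double precision, except that the operand spans two 32-bit FP registers: the odd register holds the sign, the 11-bit biased exponent and the top 20 fraction bits, while the even register holds the remaining 32 fraction bits. In C (illustrative names):

#include <stdint.h>

struct dfields {
	uint32_t sign;		/* 1 bit */
	uint32_t exp;		/* 11 bits, still biased */
	uint32_t frac_hi;	/* upper 20 fraction bits */
	uint32_t frac_lo;	/* lower 32 fraction bits */
};

/* hi is the word holding sign/exponent ($f1, $f3, ...), lo the other half. */
static struct dfields
unpack_double(uint32_t hi, uint32_t lo)
{
	struct dfields f;

	f.exp = (hi >> 20) & 0x7ff;
	f.frac_hi = hi & 0xfffff;
	f.frac_lo = lo;
	f.sign = hi >> 31;
	return f;
}
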
+/*----------------------------------------------------------------------------
+ * get_cmp_s --
+ *
+ * Read (single precision) the FS register (bits 15-11) and
+ * the FT register (bits 20-16) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_cmp_s)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, cmp_fs_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_fs_s_tbl:
+ .word cmp_fs_s_f0
+ .word cmp_fs_s_f2
+ .word cmp_fs_s_f4
+ .word cmp_fs_s_f6
+ .word cmp_fs_s_f8
+ .word cmp_fs_s_f10
+ .word cmp_fs_s_f12
+ .word cmp_fs_s_f14
+ .word cmp_fs_s_f16
+ .word cmp_fs_s_f18
+ .word cmp_fs_s_f20
+ .word cmp_fs_s_f22
+ .word cmp_fs_s_f24
+ .word cmp_fs_s_f26
+ .word cmp_fs_s_f28
+ .word cmp_fs_s_f30
+ .text
+
+cmp_fs_s_f0:
+ mfc1 t0, $f0
+ b cmp_fs_s_done
+cmp_fs_s_f2:
+ mfc1 t0, $f2
+ b cmp_fs_s_done
+cmp_fs_s_f4:
+ mfc1 t0, $f4
+ b cmp_fs_s_done
+cmp_fs_s_f6:
+ mfc1 t0, $f6
+ b cmp_fs_s_done
+cmp_fs_s_f8:
+ mfc1 t0, $f8
+ b cmp_fs_s_done
+cmp_fs_s_f10:
+ mfc1 t0, $f10
+ b cmp_fs_s_done
+cmp_fs_s_f12:
+ mfc1 t0, $f12
+ b cmp_fs_s_done
+cmp_fs_s_f14:
+ mfc1 t0, $f14
+ b cmp_fs_s_done
+cmp_fs_s_f16:
+ mfc1 t0, $f16
+ b cmp_fs_s_done
+cmp_fs_s_f18:
+ mfc1 t0, $f18
+ b cmp_fs_s_done
+cmp_fs_s_f20:
+ mfc1 t0, $f20
+ b cmp_fs_s_done
+cmp_fs_s_f22:
+ mfc1 t0, $f22
+ b cmp_fs_s_done
+cmp_fs_s_f24:
+ mfc1 t0, $f24
+ b cmp_fs_s_done
+cmp_fs_s_f26:
+ mfc1 t0, $f26
+ b cmp_fs_s_done
+cmp_fs_s_f28:
+ mfc1 t0, $f28
+ b cmp_fs_s_done
+cmp_fs_s_f30:
+ mfc1 t0, $f30
+cmp_fs_s_done:
+ srl t1, t0, 23 # get exponent
+ and t1, t1, 0xFF
+ and t2, t0, 0x7FFFFF # get fraction
+ srl t0, t0, 31 # get sign
+
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, cmp_ft_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_ft_s_tbl:
+ .word cmp_ft_s_f0
+ .word cmp_ft_s_f2
+ .word cmp_ft_s_f4
+ .word cmp_ft_s_f6
+ .word cmp_ft_s_f8
+ .word cmp_ft_s_f10
+ .word cmp_ft_s_f12
+ .word cmp_ft_s_f14
+ .word cmp_ft_s_f16
+ .word cmp_ft_s_f18
+ .word cmp_ft_s_f20
+ .word cmp_ft_s_f22
+ .word cmp_ft_s_f24
+ .word cmp_ft_s_f26
+ .word cmp_ft_s_f28
+ .word cmp_ft_s_f30
+ .text
+
+cmp_ft_s_f0:
+ mfc1 t4, $f0
+ b cmp_ft_s_done
+cmp_ft_s_f2:
+ mfc1 t4, $f2
+ b cmp_ft_s_done
+cmp_ft_s_f4:
+ mfc1 t4, $f4
+ b cmp_ft_s_done
+cmp_ft_s_f6:
+ mfc1 t4, $f6
+ b cmp_ft_s_done
+cmp_ft_s_f8:
+ mfc1 t4, $f8
+ b cmp_ft_s_done
+cmp_ft_s_f10:
+ mfc1 t4, $f10
+ b cmp_ft_s_done
+cmp_ft_s_f12:
+ mfc1 t4, $f12
+ b cmp_ft_s_done
+cmp_ft_s_f14:
+ mfc1 t4, $f14
+ b cmp_ft_s_done
+cmp_ft_s_f16:
+ mfc1 t4, $f16
+ b cmp_ft_s_done
+cmp_ft_s_f18:
+ mfc1 t4, $f18
+ b cmp_ft_s_done
+cmp_ft_s_f20:
+ mfc1 t4, $f20
+ b cmp_ft_s_done
+cmp_ft_s_f22:
+ mfc1 t4, $f22
+ b cmp_ft_s_done
+cmp_ft_s_f24:
+ mfc1 t4, $f24
+ b cmp_ft_s_done
+cmp_ft_s_f26:
+ mfc1 t4, $f26
+ b cmp_ft_s_done
+cmp_ft_s_f28:
+ mfc1 t4, $f28
+ b cmp_ft_s_done
+cmp_ft_s_f30:
+ mfc1 t4, $f30
+cmp_ft_s_done:
+ srl t5, t4, 23 # get exponent
+ and t5, t5, 0xFF
+ and t6, t4, 0x7FFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ j ra
+END(get_cmp_s)
+
+/*----------------------------------------------------------------------------
+ * get_cmp_d --
+ *
+ * Read (double precision) the FS register (bits 15-11) and
+ * the FT register (bits 20-16) and break up into fields.
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Results:
+ * t0 contains the FS sign
+ * t1 contains the FS (biased) exponent
+ * t2 contains the FS fraction
+ * t3 contains the FS remaining fraction
+ * t4 contains the FT sign
+ * t5 contains the FT (biased) exponent
+ * t6 contains the FT fraction
+ * t7 contains the FT remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(get_cmp_d)
+ srl a3, a0, 12 - 2 # get FS field (even regs only)
+ and a3, a3, 0xF << 2 # mask FS field
+ lw a3, cmp_fs_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_fs_d_tbl:
+ .word cmp_fs_d_f0
+ .word cmp_fs_d_f2
+ .word cmp_fs_d_f4
+ .word cmp_fs_d_f6
+ .word cmp_fs_d_f8
+ .word cmp_fs_d_f10
+ .word cmp_fs_d_f12
+ .word cmp_fs_d_f14
+ .word cmp_fs_d_f16
+ .word cmp_fs_d_f18
+ .word cmp_fs_d_f20
+ .word cmp_fs_d_f22
+ .word cmp_fs_d_f24
+ .word cmp_fs_d_f26
+ .word cmp_fs_d_f28
+ .word cmp_fs_d_f30
+ .text
+
+cmp_fs_d_f0:
+ mfc1 t3, $f0
+ mfc1 t0, $f1
+ b cmp_fs_d_done
+cmp_fs_d_f2:
+ mfc1 t3, $f2
+ mfc1 t0, $f3
+ b cmp_fs_d_done
+cmp_fs_d_f4:
+ mfc1 t3, $f4
+ mfc1 t0, $f5
+ b cmp_fs_d_done
+cmp_fs_d_f6:
+ mfc1 t3, $f6
+ mfc1 t0, $f7
+ b cmp_fs_d_done
+cmp_fs_d_f8:
+ mfc1 t3, $f8
+ mfc1 t0, $f9
+ b cmp_fs_d_done
+cmp_fs_d_f10:
+ mfc1 t3, $f10
+ mfc1 t0, $f11
+ b cmp_fs_d_done
+cmp_fs_d_f12:
+ mfc1 t3, $f12
+ mfc1 t0, $f13
+ b cmp_fs_d_done
+cmp_fs_d_f14:
+ mfc1 t3, $f14
+ mfc1 t0, $f15
+ b cmp_fs_d_done
+cmp_fs_d_f16:
+ mfc1 t3, $f16
+ mfc1 t0, $f17
+ b cmp_fs_d_done
+cmp_fs_d_f18:
+ mfc1 t3, $f18
+ mfc1 t0, $f19
+ b cmp_fs_d_done
+cmp_fs_d_f20:
+ mfc1 t3, $f20
+ mfc1 t0, $f21
+ b cmp_fs_d_done
+cmp_fs_d_f22:
+ mfc1 t3, $f22
+ mfc1 t0, $f23
+ b cmp_fs_d_done
+cmp_fs_d_f24:
+ mfc1 t3, $f24
+ mfc1 t0, $f25
+ b cmp_fs_d_done
+cmp_fs_d_f26:
+ mfc1 t3, $f26
+ mfc1 t0, $f27
+ b cmp_fs_d_done
+cmp_fs_d_f28:
+ mfc1 t3, $f28
+ mfc1 t0, $f29
+ b cmp_fs_d_done
+cmp_fs_d_f30:
+ mfc1 t3, $f30
+ mfc1 t0, $f31
+cmp_fs_d_done:
+ srl t1, t0, 20 # get exponent
+ and t1, t1, 0x7FF
+ and t2, t0, 0xFFFFF # get fraction
+ srl t0, t0, 31 # get sign
+
+ srl a3, a0, 17 - 2 # get FT field (even regs only)
+ and a3, a3, 0xF << 2 # mask FT field
+ lw a3, cmp_ft_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+cmp_ft_d_tbl:
+ .word cmp_ft_d_f0
+ .word cmp_ft_d_f2
+ .word cmp_ft_d_f4
+ .word cmp_ft_d_f6
+ .word cmp_ft_d_f8
+ .word cmp_ft_d_f10
+ .word cmp_ft_d_f12
+ .word cmp_ft_d_f14
+ .word cmp_ft_d_f16
+ .word cmp_ft_d_f18
+ .word cmp_ft_d_f20
+ .word cmp_ft_d_f22
+ .word cmp_ft_d_f24
+ .word cmp_ft_d_f26
+ .word cmp_ft_d_f28
+ .word cmp_ft_d_f30
+ .text
+
+cmp_ft_d_f0:
+ mfc1 t7, $f0
+ mfc1 t4, $f1
+ b cmp_ft_d_done
+cmp_ft_d_f2:
+ mfc1 t7, $f2
+ mfc1 t4, $f3
+ b cmp_ft_d_done
+cmp_ft_d_f4:
+ mfc1 t7, $f4
+ mfc1 t4, $f5
+ b cmp_ft_d_done
+cmp_ft_d_f6:
+ mfc1 t7, $f6
+ mfc1 t4, $f7
+ b cmp_ft_d_done
+cmp_ft_d_f8:
+ mfc1 t7, $f8
+ mfc1 t4, $f9
+ b cmp_ft_d_done
+cmp_ft_d_f10:
+ mfc1 t7, $f10
+ mfc1 t4, $f11
+ b cmp_ft_d_done
+cmp_ft_d_f12:
+ mfc1 t7, $f12
+ mfc1 t4, $f13
+ b cmp_ft_d_done
+cmp_ft_d_f14:
+ mfc1 t7, $f14
+ mfc1 t4, $f15
+ b cmp_ft_d_done
+cmp_ft_d_f16:
+ mfc1 t7, $f16
+ mfc1 t4, $f17
+ b cmp_ft_d_done
+cmp_ft_d_f18:
+ mfc1 t7, $f18
+ mfc1 t4, $f19
+ b cmp_ft_d_done
+cmp_ft_d_f20:
+ mfc1 t7, $f20
+ mfc1 t4, $f21
+ b cmp_ft_d_done
+cmp_ft_d_f22:
+ mfc1 t7, $f22
+ mfc1 t4, $f23
+ b cmp_ft_d_done
+cmp_ft_d_f24:
+ mfc1 t7, $f24
+ mfc1 t4, $f25
+ b cmp_ft_d_done
+cmp_ft_d_f26:
+ mfc1 t7, $f26
+ mfc1 t4, $f27
+ b cmp_ft_d_done
+cmp_ft_d_f28:
+ mfc1 t7, $f28
+ mfc1 t4, $f29
+ b cmp_ft_d_done
+cmp_ft_d_f30:
+ mfc1 t7, $f30
+ mfc1 t4, $f31
+cmp_ft_d_done:
+ srl t5, t4, 20 # get exponent
+ and t5, t5, 0x7FF
+ and t6, t4, 0xFFFFF # get fraction
+ srl t4, t4, 31 # get sign
+ j ra
+END(get_cmp_d)
+
+/*----------------------------------------------------------------------------
+ * set_fd_s --
+ *
+ * Write (single precision) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ *
+ * set_fd_word --
+ *
+ * Write (integer) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t2 contains the integer
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(set_fd_s)
+ sll t0, t0, 31 # position sign
+ sll t1, t1, 23 # position exponent
+ or t2, t2, t0
+ or t2, t2, t1
+ALEAF(set_fd_word)
+ srl a3, a0, 7 - 2 # get FD field (even regs only)
+ and a3, a3, 0xF << 2 # mask FD field
+ lw a3, set_fd_s_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+set_fd_s_tbl:
+ .word set_fd_s_f0
+ .word set_fd_s_f2
+ .word set_fd_s_f4
+ .word set_fd_s_f6
+ .word set_fd_s_f8
+ .word set_fd_s_f10
+ .word set_fd_s_f12
+ .word set_fd_s_f14
+ .word set_fd_s_f16
+ .word set_fd_s_f18
+ .word set_fd_s_f20
+ .word set_fd_s_f22
+ .word set_fd_s_f24
+ .word set_fd_s_f26
+ .word set_fd_s_f28
+ .word set_fd_s_f30
+ .text
+
+set_fd_s_f0:
+ mtc1 t2, $f0
+ j ra
+set_fd_s_f2:
+ mtc1 t2, $f2
+ j ra
+set_fd_s_f4:
+ mtc1 t2, $f4
+ j ra
+set_fd_s_f6:
+ mtc1 t2, $f6
+ j ra
+set_fd_s_f8:
+ mtc1 t2, $f8
+ j ra
+set_fd_s_f10:
+ mtc1 t2, $f10
+ j ra
+set_fd_s_f12:
+ mtc1 t2, $f12
+ j ra
+set_fd_s_f14:
+ mtc1 t2, $f14
+ j ra
+set_fd_s_f16:
+ mtc1 t2, $f16
+ j ra
+set_fd_s_f18:
+ mtc1 t2, $f18
+ j ra
+set_fd_s_f20:
+ mtc1 t2, $f20
+ j ra
+set_fd_s_f22:
+ mtc1 t2, $f22
+ j ra
+set_fd_s_f24:
+ mtc1 t2, $f24
+ j ra
+set_fd_s_f26:
+ mtc1 t2, $f26
+ j ra
+set_fd_s_f28:
+ mtc1 t2, $f28
+ j ra
+set_fd_s_f30:
+ mtc1 t2, $f30
+ j ra
+END(set_fd_s)
+
+/*----------------------------------------------------------------------------
+ * set_fd_d --
+ *
+ * Write (double precision) the FD register (bits 10-6).
+ * This is an internal routine used by MipsEmulateFP only.
+ *
+ * Arguments:
+ * a0 contains the FP instruction
+ * t0 contains the sign
+ * t1 contains the (biased) exponent
+ * t2 contains the fraction
+ * t3 contains the remaining fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(set_fd_d)
+ sll t0, t0, 31 # set sign
+ sll t1, t1, 20 # set exponent
+ or t0, t0, t1
+ or t0, t0, t2 # set fraction
+ srl a3, a0, 7 - 2 # get FD field (even regs only)
+ and a3, a3, 0xF << 2 # mask FD field
+ lw a3, set_fd_d_tbl(a3) # switch on register number
+ j a3
+
+ .rdata
+set_fd_d_tbl:
+ .word set_fd_d_f0
+ .word set_fd_d_f2
+ .word set_fd_d_f4
+ .word set_fd_d_f6
+ .word set_fd_d_f8
+ .word set_fd_d_f10
+ .word set_fd_d_f12
+ .word set_fd_d_f14
+ .word set_fd_d_f16
+ .word set_fd_d_f18
+ .word set_fd_d_f20
+ .word set_fd_d_f22
+ .word set_fd_d_f24
+ .word set_fd_d_f26
+ .word set_fd_d_f28
+ .word set_fd_d_f30
+ .text
+
+set_fd_d_f0:
+ mtc1 t3, $f0
+ mtc1 t0, $f1
+ j ra
+set_fd_d_f2:
+ mtc1 t3, $f2
+ mtc1 t0, $f3
+ j ra
+set_fd_d_f4:
+ mtc1 t3, $f4
+ mtc1 t0, $f5
+ j ra
+set_fd_d_f6:
+ mtc1 t3, $f6
+ mtc1 t0, $f7
+ j ra
+set_fd_d_f8:
+ mtc1 t3, $f8
+ mtc1 t0, $f9
+ j ra
+set_fd_d_f10:
+ mtc1 t3, $f10
+ mtc1 t0, $f11
+ j ra
+set_fd_d_f12:
+ mtc1 t3, $f12
+ mtc1 t0, $f13
+ j ra
+set_fd_d_f14:
+ mtc1 t3, $f14
+ mtc1 t0, $f15
+ j ra
+set_fd_d_f16:
+ mtc1 t3, $f16
+ mtc1 t0, $f17
+ j ra
+set_fd_d_f18:
+ mtc1 t3, $f18
+ mtc1 t0, $f19
+ j ra
+set_fd_d_f20:
+ mtc1 t3, $f20
+ mtc1 t0, $f21
+ j ra
+set_fd_d_f22:
+ mtc1 t3, $f22
+ mtc1 t0, $f23
+ j ra
+set_fd_d_f24:
+ mtc1 t3, $f24
+ mtc1 t0, $f25
+ j ra
+set_fd_d_f26:
+ mtc1 t3, $f26
+ mtc1 t0, $f27
+ j ra
+set_fd_d_f28:
+ mtc1 t3, $f28
+ mtc1 t0, $f29
+ j ra
+set_fd_d_f30:
+ mtc1 t3, $f30
+ mtc1 t0, $f31
+ j ra
+END(set_fd_d)
+
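set_fd_s and set_fd_d are the inverse of the unpack routines: the sign goes back to bit 31, the biased exponent to bit 23 (single) or to bit 20 of the high word (double), and the fraction fills the remaining bits before the words are written back with mtc1. The same packing in C (helper names are illustrative):

#include <stdint.h>

static uint32_t
pack_single(uint32_t sign, uint32_t exp, uint32_t frac)
{
	return (sign << 31) | (exp << 23) | frac;
}

/* The double result occupies two words; *hi gets sign/exponent/frac_hi. */
static void
pack_double(uint32_t sign, uint32_t exp, uint32_t frac_hi, uint32_t frac_lo,
    uint32_t *hi, uint32_t *lo)
{
	*hi = (sign << 31) | (exp << 20) | frac_hi;
	*lo = frac_lo;
}
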
+/*----------------------------------------------------------------------------
+ * renorm_fs_s --
+ *
+ * Results:
+ * t1 unbiased exponent
+ * t2 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_fs_s)
+/*
+ * Find out how many leading zero bits are in t2 and put in t9.
+ */
+ move v0, t2
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2 the correct number of bits.
+ */
+1:
+ subu t9, t9, SLEAD_ZEROS # don't count normal leading zeros
+ li t1, SEXP_MIN
+ subu t1, t1, t9 # adjust exponent
+ sll t2, t2, t9
+ j ra
+END(renorm_fs_s)
+
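renorm_fs_s and the three variants that follow normalize a denormalized operand: count the leading zeros of the fraction with the binary-search pattern used throughout this file, subtract the SLEAD_ZEROS positions a normal fraction already has, shift the fraction up and pull the unbiased exponent below the normal minimum by the same amount. A C sketch, assuming SLEAD_ZEROS is 8 and SEXP_MIN is -126 (the usual single-precision values; the real definitions appear earlier in fp.S):

#include <stdint.h>

#define SLEAD_ZEROS	8	/* assumed: leading zeros of a normal 24-bit fraction */
#define SEXP_MIN	(-126)	/* assumed: minimum normal single exponent */

/*
 * Normalize a denormalized single: shift the (nonzero) fraction until
 * the implied one sits at bit 23 and adjust the unbiased exponent.
 */
static void
renorm_single(uint32_t *fracp, int *expp)
{
	uint32_t frac = *fracp;
	int lz = 0;

	/* Binary-search count of leading zeros, as the assembly does. */
	if ((frac >> 16) == 0) { lz += 16; frac <<= 16; }
	if ((frac >> 24) == 0) { lz += 8; frac <<= 8; }
	if ((frac >> 28) == 0) { lz += 4; frac <<= 4; }
	if ((frac >> 30) == 0) { lz += 2; frac <<= 2; }
	if ((frac >> 31) == 0) { lz += 1; }

	lz -= SLEAD_ZEROS;		/* don't count the normal leading zeros */
	*expp = SEXP_MIN - lz;		/* exponent drops by the extra shift */
	*fracp = *fracp << lz;
}
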
+/*----------------------------------------------------------------------------
+ * renorm_fs_d --
+ *
+ * Results:
+ * t1 unbiased exponent
+ * t2,t3 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_fs_d)
+/*
+ * Find out how many leading zero bits are in t2,t3 and put in t9.
+ */
+ move v0, t2
+ move t9, zero
+ bne t2, zero, 1f
+ move v0, t3
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t2,t3 the correct number of bits.
+ */
+1:
+ subu t9, t9, DLEAD_ZEROS # don't count normal leading zeros
+ li t1, DEXP_MIN
+ subu t1, t1, t9 # adjust exponent
+ li v0, 32
+ blt t9, v0, 1f
+ subu t9, t9, v0 # shift fraction left >= 32 bits
+ sll t2, t3, t9
+ move t3, zero
+ j ra
+1:
+ subu v0, v0, t9 # shift fraction left < 32 bits
+ sll t2, t2, t9
+ srl v1, t3, v0
+ or t2, t2, v1
+ sll t3, t3, t9
+ j ra
+END(renorm_fs_d)
+
+/*----------------------------------------------------------------------------
+ * renorm_ft_s --
+ *
+ * Results:
+ * t5 unbiased exponent
+ * t6 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_ft_s)
+/*
+ * Find out how many leading zero bits are in t6 and put in t9.
+ */
+ move v0, t6
+ move t9, zero
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t6 the correct number of bits.
+ */
+1:
+ subu t9, t9, SLEAD_ZEROS # don't count normal leading zeros
+ li t5, SEXP_MIN
+ subu t5, t5, t9 # adjust exponent
+ sll t6, t6, t9
+ j ra
+END(renorm_ft_s)
+
+/*----------------------------------------------------------------------------
+ * renorm_ft_d --
+ *
+ * Results:
+ * t5 unbiased exponent
+ * t6,t7 normalized fraction
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(renorm_ft_d)
+/*
+ * Find out how many leading zero bits are in t6,t7 and put in t9.
+ */
+ move v0, t6
+ move t9, zero
+ bne t6, zero, 1f
+ move v0, t7
+ addu t9, 32
+1:
+ srl v1, v0, 16
+ bne v1, zero, 1f
+ addu t9, 16
+ sll v0, 16
+1:
+ srl v1, v0, 24
+ bne v1, zero, 1f
+ addu t9, 8
+ sll v0, 8
+1:
+ srl v1, v0, 28
+ bne v1, zero, 1f
+ addu t9, 4
+ sll v0, 4
+1:
+ srl v1, v0, 30
+ bne v1, zero, 1f
+ addu t9, 2
+ sll v0, 2
+1:
+ srl v1, v0, 31
+ bne v1, zero, 1f
+ addu t9, 1
+/*
+ * Now shift t6,t7 the correct number of bits.
+ */
+1:
+ subu t9, t9, DLEAD_ZEROS # don't count normal leading zeros
+ li t5, DEXP_MIN
+ subu t5, t5, t9 # adjust exponent
+ li v0, 32
+ blt t9, v0, 1f
+ subu t9, t9, v0 # shift fraction left >= 32 bits
+ sll t6, t7, t9
+ move t7, zero
+ j ra
+1:
+ subu v0, v0, t9 # shift fraction left < 32 bits
+ sll t6, t6, t9
+ srl v1, t7, v0
+ or t6, t6, v1
+ sll t7, t7, t9
+ j ra
+END(renorm_ft_d)
diff --git a/sys/arch/mips64/mips64/interrupt.c b/sys/arch/mips64/mips64/interrupt.c
new file mode 100644
index 00000000000..9a47f98d504
--- /dev/null
+++ b/sys/arch/mips64/mips64/interrupt.c
@@ -0,0 +1,607 @@
+/* $OpenBSD: interrupt.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/signalvar.h>
+#include <sys/user.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+#include <net/netisr.h>
+
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/cpu.h>
+#include <machine/pio.h>
+#include <machine/intr.h>
+#include <machine/autoconf.h>
+#include <machine/frame.h>
+#include <machine/regnum.h>
+
+#include <machine/rm7000.h>
+
+#include <mips64/archtype.h>
+
+#ifdef DDB
+#include <mips64/db_machdep.h>
+#include <ddb/db_sym.h>
+#endif
+
+int netisr;
+
+/*
+ * Modern versions of MIPS processors have extended interrupt
+ * capabilities. How these are handled differs from implementation
+ * to implementation. This code tries to hide away some of these
+ * differences in "higher level" interrupt code.
+ *
+ * Basically there are <n> interrupt inputs to the processor and
+ * typically the HW designer ties these interrupts to various
+ * sources in the HW. The low level code does no more than dispatch
+ * handling to the code that has registered an interrupt handler for
+ * that particular interrupt. More than one handler can register for
+ * an interrupt input, and one handler may register for more than one
+ * interrupt input. A handler is only called once even if it registers
+ * for more than one interrupt input.
+ *
+ * The interrupt mechanism in this port uses a delayed masking model
+ * where interrupts are not really masked when doing an spl(). Instead
+ * a masked interrupt will be taken and validated in the various
+ * handlers. If the handler finds that an interrupt is masked it will
+ * register this interrupt as pending and return a new mask to this
+ * code, which will then turn the interrupt off in hardware. Later, when
+ * the pending interrupt is unmasked it will be processed as usual
+ * and the hardware mask will be restored.
+ */
+
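Under this delayed-masking model the other half of the work happens when the spl level is lowered again: interrupts that were parked in the software pending word have to be replayed and their hardware mask bits restored. The platform's splx() does that; a very rough sketch of the idea, with hypothetical helper names (only "ipending" and intrmask_t below come from this file, the rest is illustrative):

extern intrmask_t ipending;		/* declared elsewhere in the port */

/* Hypothetical sketch of the splx() side of the delayed-masking model. */
static void
splx_sketch(intrmask_t new_mask)
{
	intrmask_t replay;

	replay = ipending & ~new_mask;		/* pending ints now unmasked */
	if (replay != 0) {
		ipending &= ~replay;
		dispatch_pending(replay);	/* hypothetical: run their handlers */
	}
	hw_set_spl(new_mask);			/* hypothetical: restore the hw mask */
}
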
+/*
+ * Interrupt mapping is as follows:
+ *
+ * irq can be between 1 and 10. This maps to CPU IPL2..IPL11.
+ * The two software interrupts IPL0 and IPL1 are reserved for
+ * kernel functions. IPL13 is used for the performance counters
+ * in the RM7000. IPL12 extra timer is currently not used.
+ *
+ * irqs map into the software spl register at the bit corresponding
+ * to their status/mask bit in the cause/sr register, shifted right
+ * eight places.
+ *
+ * A well designed system uses the CPU's interrupt inputs in such a way
+ * that masking can be done according to the IPL in the CPU status and
+ * interrupt control registers. Support for an external masking
+ * register is also provided, but it causes slightly higher overhead
+ * when used. When an external masking register is used, no masking in
+ * the CPU is done. Instead a fixed mask is set and used throughout.
+ */
+
+void interrupt (struct trap_frame *);
+void softintr (void);
+
+/*
+ * Handle an interrupt. Both kernel and user mode are handled here.
+ *
+ * The interrupt handler is called with those CR_INT bits set that
+ * were given when the handler was registered and that need servicing.
+ * The handler should return a similar word with a mask indicating
+ * which CR_INT bits have been served.
+ */
+
+void
+interrupt(struct trap_frame *trapframe)
+{
+ u_int32_t pending;
+ u_int32_t cause;
+ int i;
+ intrmask_t pcpl;
+
+ /*
+ * Paranoid? Perhaps. But if we got here with the enable
+ * bit reset, a mtc0 COP_0_STATUS_REG may have been interrupted.
+ * If this was a disable and the pipeline had advanced far
+ * enough... I don't know, but better safe than sorry.
+ * The main effect is not the interrupts but the spl mechanism.
+ */
+ if (!(trapframe->sr & SR_INT_ENAB)) {
+ return;
+ }
+
+ uvmexp.intrs++;
+ pcpl = splhigh() ; /* Turn off all and get current SW mask */
+
+#ifdef DEBUG_INTERRUPT
+ trapdebug_enter(trapframe, 0);
+#endif
+
+ pending = trapframe->cause & CR_IPEND;
+#ifdef IMASK_EXTERNAL
+ pending &= idle_mask << 8;
+#else
+ ipending |= (pending >> 8) & pcpl;
+ pending &= ~(pcpl << 8);
+#endif
+ cause = pending;
+
+ if (cause & CR_INT_PERF) {
+ rm7k_perfintr(trapframe);
+ cause &= ~CR_INT_PERF;
+ }
+
+ for (i = 0; i <= last_low_int; i++) {
+ intrmask_t active;
+ active = cpu_int_tab[i].int_mask & pending;
+ if (active) {
+ cause &= ~(*cpu_int_tab[i].int_hand)(active, trapframe);
+ }
+ }
+#if 0
+if ((pending & cause & ~(SOFT_INT_MASK_1|SOFT_INT_MASK_0)) != 0) {
+printf("Unhandled interrupt %x:%x\n", cause, pending);
+//Debugger();
+}
+#endif
+ /*
+ * Reenable all non served hardware levels.
+ */
+#if 0
+ splx((trapframe->sr & ~cause & SR_INT_MASK) | SR_INT_ENAB);
+#endif
+
+ if (pending & SOFT_INT_MASK_0) {
+ clearsoftintr0();
+ uvmexp.softs++;
+ }
+
+#ifndef IMASK_EXTERNAL
+ trapframe->sr &= ~((pcpl << 8) & SR_INT_MASK);
+ trapframe->ic &= ~(pcpl & IC_INT_MASK);
+#endif
+ splx(pcpl); /* Process pendings. */
+}
+
+
+/*
+ * Set up handler for external interrupt events.
+ * Use CR_INT_<n> to select the proper interrupt
+ * condition to dispatch on. We also enable the
+ * software ints here since they are always on.
+ */
+void
+set_intr(int pri, intrmask_t mask,
+ intrmask_t (*int_hand)(intrmask_t, struct trap_frame *))
+{
+ if (pri < 0 || pri >= NLOWINT) {
+ panic("set_intr: too high priority");
+ }
+
+ if (pri > last_low_int)
+ last_low_int = pri;
+
+ if ((mask & ~CR_IPEND) != 0) {
+ panic("set_intr: invalid mask 0x%x", mask);
+ }
+
+ if (cpu_int_tab[pri].int_mask != 0 &&
+ (cpu_int_tab[pri].int_mask != mask ||
+ cpu_int_tab[pri].int_hand != int_hand)) {
+ panic("set_intr: int already set at pri %d", pri);
+ }
+
+ cpu_int_tab[pri].int_hand = int_hand;
+ cpu_int_tab[pri].int_mask = mask;
+ idle_mask |= (mask | SOFT_INT_MASK) >> 8;
+}
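+/*
+ * For illustration, a board specific dispatcher could be hooked up
+ * roughly as follows; the handler name and the priority used here are
+ * only examples, not definitions from this file:
+ *
+ *	intrmask_t
+ *	clock_dispatch(intrmask_t hwpend, struct trap_frame *cf)
+ *	{
+ *		... serve the device ...
+ *		return CR_INT_5;	(the CR_INT bits actually served)
+ *	}
+ *
+ *	set_intr(INTPRI_CLOCK, CR_INT_5, clock_dispatch);
+ */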
+
+/*
+ * This is called from MipsUserIntr() if astpending is set.
+ * This is very similar to the tail of trap().
+ */
+void
+softintr()
+{
+ struct proc *p = curproc;
+ int sig;
+
+ uvmexp.softs++;
+ /* take pending signals */
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ p->p_priority = p->p_usrpri;
+ astpending = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ if (want_resched) {
+ int s;
+
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ splx(s);
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ }
+ curpriority = p->p_priority;
+}
+
+
+
+intrmask_t intem = 0x0;
+intrmask_t intrtype[INTMASKSIZE], intrmask[INTMASKSIZE], intrlevel[INTMASKSIZE];
+struct intrhand *intrhand[INTMASKSIZE];
+
+/*======================================================================*/
+
+/*
+ * Generic interrupt handling code.
+ * ================================
+ *
+ * This code can be used for interrupt models where only the
+ * processor status register has to be changed to mask/unmask.
+ * HW specific setup can be done in a MD function that can then
+ * call this function to use the generic interrupt code.
+ */
+static int fakeintr(void *);
+static int fakeintr(void *a) {return 0;}
+
+/*
+ * Establish an interrupt handler called from the dispatcher.
+ * The interrupt function established should return zero if
+ * there was nothing to serve (no interrupt) and non-zero when an
+ * interrupt was serviced.
+ * Interrupts are numbered from 1 and up, where 1 maps to HW int 0.
+ */
+void *
+generic_intr_establish(icp, irq, type, level, ih_fun, ih_arg, ih_what)
+ void *icp;
+ u_long irq; /* XXX pci_intr_handle_t compatible XXX */
+ int type;
+ int level;
+ int (*ih_fun) __P((void *));
+ void *ih_arg;
+ char *ih_what;
+{
+ struct intrhand **p, *q, *ih;
+ static struct intrhand fakehand = {NULL, fakeintr};
+	int edge = 0;
+	extern int cold;
+
+	static int initialized = 0;
+
+ if (!initialized) {
+		/* INIT CODE HERE */
+ initialized = 1;
+ }
+
+ if (irq > 62 || irq < 1) {
+		panic("intr_establish: illegal irq %lu", irq);
+ }
+ irq += 1; /* Adjust for softint 1 and 0 */
+
+ /* no point in sleeping unless someone can free memory. */
+ ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
+ if (ih == NULL)
+ panic("intr_establish: can't malloc handler info");
+
+ if (type == IST_NONE || type == IST_PULSE)
+ panic("intr_establish: bogus type");
+
+ switch (intrtype[irq]) {
+ case IST_EDGE:
+ case IST_LEVEL:
+ if (type == intrtype[irq])
+ break;
+ }
+
+ switch (type) {
+ case IST_EDGE:
+ edge |= 1 << irq;
+ break;
+ case IST_LEVEL:
+ edge &= ~(1 << irq);
+ break;
+ }
+
+ /*
+ * Figure out where to put the handler.
+ * This is O(N^2), but we want to preserve the order, and N is
+ * generally small.
+ */
+ for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
+ ;
+
+ /*
+ * Actually install a fake handler momentarily, since we might be doing
+ * this with interrupts enabled and don't want the real routine called
+ * until masking is set up.
+ */
+ fakehand.ih_level = level;
+ *p = &fakehand;
+
+ generic_intr_makemasks();
+
+ /*
+ * Poke the real handler in now.
+ */
+ ih->ih_fun = ih_fun;
+ ih->ih_arg = ih_arg;
+ ih->ih_count = 0;
+ ih->ih_next = NULL;
+ ih->ih_level = level;
+ ih->ih_irq = irq;
+ ih->ih_what = ih_what;
+ *p = ih;
+
+ return (ih);
+}
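+/*
+ * A typical caller, for instance a PCI bridge's interrupt mapping
+ * code, would register its handler roughly as follows; the driver
+ * variables used here are hypothetical:
+ *
+ *	sc->sc_ih = generic_intr_establish(NULL, irq, IST_LEVEL, IPL_NET,
+ *	    xx_intr, sc, sc->sc_dev.dv_xname);
+ */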
+
+void
+generic_intr_disestablish(void *p1, void *p2)
+{
+}
+
+/*
+ * Regenerate interrupt masks to reflect reality.
+ */
+void
+generic_intr_makemasks()
+{
+ int irq, level;
+ struct intrhand *q;
+
+ /* First, figure out which levels each IRQ uses. */
+ for (irq = 0; irq < INTMASKSIZE; irq++) {
+ int levels = 0;
+ for (q = intrhand[irq]; q; q = q->ih_next)
+ levels |= 1 << q->ih_level;
+ intrlevel[irq] = levels;
+ }
+
+ /* Then figure out which IRQs use each level. */
+ for (level = 0; level < 5; level++) {
+ register int irqs = 0;
+ for (irq = 0; irq < INTMASKSIZE; irq++)
+ if (intrlevel[irq] & (1 << level))
+ irqs |= 1 << irq;
+ imask[level] = irqs | SINT_ALLMASK;
+ }
+
+ /*
+ * There are tty, network and disk drivers that use free() at interrupt
+ * time, so imp > (tty | net | bio).
+ */
+ imask[IPL_VM] |= imask[IPL_TTY] | imask[IPL_NET] | imask[IPL_BIO];
+
+ /*
+ * Enforce a hierarchy that gives slow devices a better chance at not
+ * dropping data.
+ */
+ imask[IPL_TTY] |= imask[IPL_NET] | imask[IPL_BIO];
+ imask[IPL_NET] |= imask[IPL_BIO];
+
+ /*
+ * These are pseudo-levels.
+ */
+ imask[IPL_NONE] = 0;
+ imask[IPL_HIGH] = -1;
+
+ /* And eventually calculate the complete masks. */
+ for (irq = 0; irq < INTMASKSIZE; irq++) {
+ register int irqs = 1 << irq;
+ for (q = intrhand[irq]; q; q = q->ih_next)
+ irqs |= imask[q->ih_level];
+ intrmask[irq] = irqs | SINT_ALLMASK;
+ }
+
+ /* Lastly, determine which IRQs are actually in use. */
+ irq = 0;
+ for (level = 0; level < INTMASKSIZE; level++) {
+ if (intrhand[level]) {
+ irq |= 1 << level;
+ }
+ }
+ intem = irq;
+}
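+/*
+ * A small worked example of the computation above: if the only handler
+ * on IRQ 3 runs at IPL_BIO, then intrlevel[3] has the IPL_BIO bit set,
+ * imask[IPL_BIO] (and every level above it in the enforced hierarchy)
+ * contains bit 3, and intrmask[3] becomes imask[IPL_BIO] plus the
+ * software interrupt bits, so raising the spl to IPL_BIO is enough to
+ * block that source.
+ */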
+
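+/*
+ * Run handlers for interrupts that were deferred while masked and have
+ * now become unmasked, then the pending soft interrupts (clock, net).
+ * A trap frame is faked for the benefit of the handlers; this is
+ * normally reached from the spl lowering path rather than directly
+ * from the dispatcher.
+ */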
+void
+generic_do_pending_int()
+{
+ struct intrhand *ih;
+ int vector;
+ intrmask_t pcpl;
+ intrmask_t hwpend;
+ struct trap_frame cf;
+static volatile int processing;
+
+ /* Don't recurse... */
+ if (processing)
+ return;
+ processing = 1;
+
+/* XXX interrupt vulnerable when changing ipending */
+ pcpl = splhigh(); /* Turn off all */
+
+ /* XXX Fake a trapframe for clock pendings... */
+ cf.pc = (int)&generic_do_pending_int;
+ cf.sr = 0;
+ cf.cpl = pcpl;
+
+ hwpend = ipending & ~pcpl; /* Do now unmasked pendings */
+ hwpend &= ~(SINT_ALLMASK);
+ ipending &= ~hwpend;
+ intem |= hwpend;
+ while (hwpend) {
+ vector = ffs(hwpend) - 1;
+ hwpend &= ~(1L << vector);
+ ih = intrhand[vector];
+ while (ih) {
+ ih->frame = &cf;
+ if ((*ih->ih_fun)(ih->ih_arg)) {
+ ih->ih_count++;
+ }
+ ih = ih->ih_next;
+ }
+ }
+ if ((ipending & SINT_CLOCKMASK) & ~pcpl) {
+ ipending &= ~SINT_CLOCKMASK;
+ softclock();
+ }
+ if ((ipending & SINT_NETMASK) & ~pcpl) {
+ int isr = netisr;
+ netisr = 0;
+ ipending &= ~SINT_NETMASK;
+#ifdef INET
+#include "ether.h"
+ if (NETHER > 0 && isr & (1 << NETISR_ARP)) {
+ arpintr();
+ }
+
+ if (isr & (1 << NETISR_IP)) {
+ ipintr();
+ }
+#endif
+#ifdef INET6
+ if (isr & (1 << NETISR_IPV6)) {
+ ip6intr();
+ }
+#endif
+#ifdef NETATALK
+ if (isr & (1 << NETISR_ATALK)) {
+ atintr();
+ }
+#endif
+#ifdef IMP
+ if (isr & (1 << NETISR_IMP)) {
+ impintr();
+ }
+#endif
+#ifdef NS
+ if (isr & (1 << NETISR_NS)) {
+ nsintr();
+ }
+#endif
+#ifdef ISO
+ if (isr & (1 << NETISR_ISO)) {
+ clnlintr();
+ }
+#endif
+#ifdef CCITT
+ if (isr & (1 << NETISR_CCITT)) {
+ ccittintr();
+ }
+#endif
+#include "ppp.h"
+ if (NPPP > 0 && isr & (1 << NETISR_PPP)) {
+ pppintr();
+ }
+
+#include "bridge.h"
+ if (NBRIDGE > 0 && isr & (1 << NETISR_BRIDGE)) {
+ bridgeintr();
+ }
+ }
+
+#ifdef NOTYET
+ if ((ipending & SINT_TTYMASK) & ~pcpl) {
+ ipending &= ~SINT_TTYMASK;
+ compoll(NULL);
+ }
+#endif
+
+ cpl = pcpl; /* Don't use splx... we are here already! */
+	updateimask(pcpl);	/* Update CPU mask in SR register */
+ processing = 0;
+}
+
+/*
+ * splinit() is special in the sense that it requires us to update
+ * the interrupt mask in the CPU since it may be the first time we arm
+ * the interrupt system. This function is called right after
+ * autoconfiguration has completed in autoconf.c.
+ * We enable everything in idle_mask.
+ */
+void
+splinit()
+{
+ u_int32_t sr;
+
+ spl0();
+ sr = updateimask(0);
+ sr |= SR_INT_ENAB;
+ setsr(sr);
+#ifdef IMASK_EXTERNAL
+ hw_setintrmask(0);
+#endif
+}
+
+/*
+ * Process interrupts. The pending argument holds the currently
+ * unmasked pending interrupts.
+ */
+intrmask_t
+generic_iointr(intrmask_t pending, struct trap_frame *cf)
+{
+ struct intrhand *ih;
+ intrmask_t catched, vm;
+ int v;
+
+ catched = 0;
+
+ for (v = 2, vm = 0x400; pending != 0 && v < 16 ; v++, vm <<= 1) {
+ if (pending & vm) {
+ ih = intrhand[v];
+
+ while (ih) {
+ ih->frame = cf;
+ if ((*ih->ih_fun)(ih->ih_arg)) {
+ catched |= vm;
+ ih->ih_count++;
+ }
+ ih = ih->ih_next;
+ }
+ }
+ }
+ return catched;
+}
diff --git a/sys/arch/mips64/mips64/lcore_access.S b/sys/arch/mips64/mips64/lcore_access.S
new file mode 100644
index 00000000000..0eeb4b9bb15
--- /dev/null
+++ b/sys/arch/mips64/mips64/lcore_access.S
@@ -0,0 +1,581 @@
+/* $OpenBSD: lcore_access.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+#include <machine/trap.h>
+
+#include "assym.h"
+
+ .set mips3
+
+ .set noreorder # Noreorder is default style!
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*
+ * Primitives
+ */
+
+/*
+ * This table is indexed by u.u_pcb.pcb_onfault in trap().
+ * The reason for using this table rather than storing an address in
+ * u.u_pcb.pcb_onfault is simply to make the code faster.
+ * This table must match the definitions in trap.h.  (A sketch of how
+ * trap() uses the table follows below.)
+ */
+ .globl onfault_table
+ .data
+ .align 3
+onfault_table:
+ PTR_VAL 0 # invalid index number
+ PTR_VAL baderr
+ PTR_VAL copyerr
+ PTR_VAL copyerr
+ PTR_VAL fswberr
+ PTR_VAL fswintrberr
+#if defined(DDB) || defined(DEBUG)
+ PTR_VAL kt_ddberr
+#else
+ PTR_VAL 0
+#endif
+ .text
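+/*
+ * For reference, trap() consumes the table along these lines (a sketch
+ * of the idea only, not the exact code in trap.c):
+ *
+ *	if (p->p_addr->u_pcb.pcb_onfault != 0) {
+ *		frame->pc = onfault_table[p->p_addr->u_pcb.pcb_onfault];
+ *		return;
+ *	}
+ */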
+
+/*
+ * See if access to addr with a len type instruction causes a machine check.
+ * len is length of access (1=byte, 2=short, 4=long)
+ *
+ * badaddr(addr, len)
+ * char *addr;
+ * int len;
+ */
+LEAF(badaddr)
+ li v0, KT_BADERR
+ PTR_L t3, curprocpaddr
+ bne a1, 1, 2f
+ sw v0, U_PCB_ONFAULT(t3)
+ lbu v0, (a0) # don't put in bd-slot!
+ b 5f
+ nop
+2:
+ bne a1, 2, 4f
+ nop
+ lhu v0, (a0) # don't put in bd-slot!
+ b 5f
+ nop
+4:
+ lw v0, (a0)
+5:
+ sync
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero # made it w/o errors
+baderr:
+ j ra
+ li v0, 1 # trap sends us here
+END(badaddr)
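+/*
+ * Typical use from C is to probe for optional hardware before touching
+ * it, for example (illustrative only):
+ *
+ *	if (badaddr((char *)va, 4) == 0)
+ *		reg = *(volatile u_int32_t *)va;
+ */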
+
+/*
+ * This code is copied to the user's stack for returning from signal handlers
+ * (see sendsig() and sigreturn()). We have to compute the address
+ * of the sigcontext struct for the sigreturn call.
+ */
+ .globl sigcode
+sigcode:
+ PTR_ADDU a0, sp, 4*REGSZ # address of sigcontext
+ LI v0, SYS_sigreturn # sigreturn(scp)
+ syscall
+ break 0 # just in case sigreturn fails
+ .globl esigcode
+esigcode:
+
+/* Mips o32 ABI sigcode. 32 bit pointers. */
+ .globl sigcode_o32
+sigcode_o32:
+ addu a0, sp, 16 # address of sigcontext
+ li v0, SYS_sigreturn # sigreturn(scp)
+ syscall
+ break 0 # just in case sigreturn fails
+ .globl esigcode_o32
+esigcode_o32:
+
+/*
+ * Copy a null terminated string within the kernel address space.
+ * The lencopied pointer may be null if the count is not wanted.
+ * copystr(fromaddr, toaddr, maxlength, &lencopied)
+ * caddr_t fromaddr;
+ * caddr_t toaddr;
+ * u_int maxlength;
+ * u_long *lencopied;
+ */
+LEAF(copystr)
+ move t2, a2 # Save the number of bytes
+1:
+ lbu t0, 0(a0)
+ PTR_SUBU a2, a2, 1
+ beq t0, zero, 2f
+ sb t0, 0(a1)
+ PTR_ADDU a0, a0, 1
+ bne a2, zero, 1b
+ PTR_ADDU a1, a1, 1
+2:
+ beq a3, zero, 3f
+ PTR_SUBU a2, t2, a2 # compute length copied
+ REG_S a2, 0(a3)
+3:
+ j ra
+ move v0, zero
+END(copystr)
+
+/*
+ * Read 64 bits from bus in non LP64 mode.
+ */
+LEAF(lp32_read8)
+#if defined(__MIPSEB__)
+ ld v1, 0(a0)
+ jr ra
+ dsrl v0, v1, 32
+#else
+ ld v0, 0(a0)
+ jr ra
+ dsrl v1, v0, 32
+#endif
+END(lp32_read8)
+
+/*
+ * Write 64 bits to bus in non LP64 mode.
+ */
+LEAF(lp32_write8)
+#if defined(__MIPSEB__)
+ dsll a2, 32
+ dsll a3, 32
+ dsrl a3, 32
+ or a2, a3
+#else
+ dsll a3, 32
+ dsll a2, 32
+ dsrl a2, 32
+ or a3, a2
+#endif
+ jr ra
+ sd a2, 0(a0)
+END(lp32_write8)
+
+/*
+ * fillw(pat, addr, count)
+ */
+LEAF(fillw)
+1:
+ PTR_ADDU a2, a2, -1
+ sh a0, 0(a1)
+ bne a2,zero, 1b
+ PTR_ADDU a1, a1, 2
+
+ jr ra
+ nop
+END(fillw)
+
+/*
+ * Optimized memory zero code.
+ * mem_zero_page(addr);
+ */
+LEAF(mem_zero_page)
+ LI v0, NBPG
+1:
+ dsubu v0, 8
+ sd zero, 0(a0)
+ bne zero, v0, 1b
+ daddu a0, 8
+ jr ra
+ nop
+END(mem_zero_page)
+
+/*
+ * Block I/O routines mainly used by I/O drivers.
+ *
+ * Args as: a0 = port
+ * a1 = memory address
+ * a2 = count
+ */
+LEAF(insb)
+ beq a2, zero, 2f
+ PTR_ADDU a2, a1
+1:
+ lbu v0, 0(a0)
+ PTR_ADDU a1, 1
+ bne a1, a2, 1b
+ sb v0, -1(a1)
+2:
+ jr ra
+ nop
+END(insb)
+
+LEAF(insw)
+ beq a2, zero, 2f
+ PTR_ADDU a2, a2
+ PTR_ADDU a2, a1
+1:
+ lhu v0, 0(a0)
+ PTR_ADDU a1, 2
+ bne a1, a2, 1b
+ sh v0, -2(a1)
+2:
+ jr ra
+ nop
+END(insw)
+
+LEAF(insl)
+ beq a2, zero, 2f
+ PTR_SLL a2, 2
+ PTR_ADDU a2, a1
+1:
+ lw v0, 0(a0)
+ PTR_ADDU a1, 4
+ bne a1, a2, 1b
+ sw v0, -4(a1)
+2:
+ jr ra
+ nop
+END(insl)
+
+LEAF(outsb)
+ beq a2, zero, 2f
+ PTR_ADDU a2, a1
+1:
+ lbu v0, 0(a1)
+ PTR_ADDU a1, 1
+ bne a1, a2, 1b
+ sb v0, 0(a0)
+2:
+ jr ra
+ nop
+END(outsb)
+
+LEAF(outsw)
+ beq a2, zero, 2f
+ PTR_ADDU a2, a2
+ LI v0, 1
+ and v0, a1
+ bne v0, zero, 3f # arghh, unaligned.
+ PTR_ADDU a2, a1
+1:
+ lhu v0, 0(a1)
+ PTR_ADDU a1, 2
+ bne a1, a2, 1b
+ sh v0, 0(a0)
+2:
+ jr ra
+ nop
+3:
+ LWHI v0, 0(a1)
+ LWLO v0, 3(a1)
+ PTR_ADDU a1, 2
+ bne a1, a2, 3b
+ sh v0, 0(a0)
+
+ jr ra
+ nop
+END(outsw)
+
+LEAF(outsl)
+ beq a2, zero, 2f
+ PTR_SLL a2, 2
+ LI v0, 3
+ and v0, a1
+ bne v0, zero, 3f # arghh, unaligned.
+ PTR_ADDU a2, a1
+1:
+ lw v0, 0(a1)
+ PTR_ADDU a1, 4
+ bne a1, a2, 1b
+ sw v0, 0(a0)
+2:
+ jr ra
+ nop
+3:
+ LWHI v0, 0(a1)
+ LWLO v0, 3(a1)
+ PTR_ADDU a1, 4
+ bne a1, a2, 3b
+ sw v0, 0(a0)
+
+ jr ra
+ nop
+END(outsl)
+
+/*
+ * Copy a null terminated string from the user address space into
+ * the kernel address space.
+ *
+ * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
+ * caddr_t fromaddr;
+ * caddr_t toaddr;
+ * u_int maxlength;
+ * u_int *lencopied;
+ */
+NON_LEAF(copyinstr, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_S ra, CF_RA_OFFS(sp)
+ blt a0, zero, copyerr # make sure address is in user space
+ li v0, KT_COPYERR
+ PTR_L t3, curprocpaddr
+ jal copystr
+ sw v0, U_PCB_ONFAULT(t3)
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ move v0, zero
+END(copyinstr)
+
+/*
+ * Copy a null terminated string from the kernel address space into
+ * the user address space.
+ *
+ * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
+ * caddr_t fromaddr;
+ * caddr_t toaddr;
+ * u_int maxlength;
+ * u_int *lencopied;
+ */
+NON_LEAF(copyoutstr, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_S ra, CF_RA_OFFS(sp)
+ blt a1, zero, copyerr # make sure address is in user space
+ li v0, KT_COPYERR
+ PTR_L t3, curprocpaddr
+ jal copystr
+ sw v0, U_PCB_ONFAULT(t3)
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ move v0, zero
+END(copyoutstr)
+
+/*
+ * Copy specified amount of data from user space into the kernel
+ * copyin(from, to, len)
+ * caddr_t *from; (user source address)
+ * caddr_t *to; (kernel destination address)
+ * unsigned len;
+ */
+NON_LEAF(copyin, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_S ra, CF_RA_OFFS(sp)
+ blt a0, zero, copyerr # make sure address is in user space
+ li v0, KT_COPYERR
+ PTR_L t3, curprocpaddr
+ jal bcopy
+ sw v0, U_PCB_ONFAULT(t3)
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ move v0, zero
+END(copyin)
+
+/*
+ * Copy specified amount of data from kernel to the user space
+ * copyout(from, to, len)
+ * caddr_t *from; (kernel source address)
+ * caddr_t *to; (user destination address)
+ * unsigned len;
+ */
+NON_LEAF(copyout, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_S ra, CF_RA_OFFS(sp)
+ blt a1, zero, copyerr # make sure address is in user space
+ li v0, KT_COPYERR
+ PTR_L t3, curprocpaddr
+ jal bcopy
+ sw v0, U_PCB_ONFAULT(t3)
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ move v0, zero
+END(copyout)
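+/*
+ * These are the usual user/kernel copy primitives. A system call
+ * handler would use them along these lines (the argument structure
+ * below is illustrative only):
+ *
+ *	struct foo_args a;
+ *	int error;
+ *
+ *	if ((error = copyin(SCARG(uap, argp), &a, sizeof a)) != 0)
+ *		return (error);
+ */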
+
+LEAF(copyerr)
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ li v0, EFAULT # return error
+END(copyerr)
+
+/*
+ * kcopy is a wrapper around bcopy that catches bad memory references.
+ */
+NON_LEAF(kcopy, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+ PTR_S ra, CF_RA_OFFS(sp)
+ li v0, KT_KCOPYERR
+ PTR_L t3, curprocpaddr
+ jal bcopy
+ sw v0, U_PCB_ONFAULT(t3)
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ PTR_L t3, curprocpaddr
+ sw zero, U_PCB_ONFAULT(t3)
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+ j ra
+ move v0, zero
+END(kcopy)
+
+/*
+ * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
+ * user space.
+ */
+LEAF(fuword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ lw v0, 0(a0) # fetch word
+ j ra
+ sw zero, U_PCB_ONFAULT(t3)
+END(fuword)
+
+LEAF(fusword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ lhu v0, 0(a0) # fetch short
+ j ra
+ sw zero, U_PCB_ONFAULT(t3)
+END(fusword)
+
+LEAF(fubyte)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ lbu v0, 0(a0) # fetch byte
+ j ra
+ sw zero, U_PCB_ONFAULT(t3)
+END(fubyte)
+
+LEAF(suword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ sw a1, 0(a0) # store word
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero
+END(suword)
+
+/*
+ * Will have to flush the instruction cache if byte merging is done in hardware.
+ */
+LEAF(susword)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ sh a1, 0(a0) # store short
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero
+END(susword)
+
+LEAF(subyte)
+ blt a0, zero, fswberr # make sure address is in user space
+ li v0, KT_FSWBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ sb a1, 0(a0) # store byte
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero
+END(subyte)
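+/*
+ * The store primitives above return -1 on fault, so callers check for
+ * that explicitly, for example (illustrative only):
+ *
+ *	if (suword((caddr_t)usp, v) == -1)
+ *		... the user address was not writable ...
+ */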
+
+LEAF(fswberr)
+ j ra
+ li v0, -1
+END(fswberr)
+
+/*
+ * fuswintr and suswintr are just like fusword and susword except that if
+ * the page is not in memory or would cause a trap, then we return an error.
+ * The important thing is to prevent sleep() and switch().
+ */
+LEAF(fuswintr)
+ blt a0, zero, fswintrberr # make sure address is in user space
+ li v0, KT_FSWINTRBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ lhu v0, 0(a0) # fetch short
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero
+END(fuswintr)
+
+LEAF(suswintr)
+ blt a0, zero, fswintrberr # make sure address is in user space
+ li v0, KT_FSWINTRBERR
+ PTR_L t3, curprocpaddr
+ sw v0, U_PCB_ONFAULT(t3)
+ sh a1, 0(a0) # store short
+ sw zero, U_PCB_ONFAULT(t3)
+ j ra
+ move v0, zero
+END(suswintr)
+
+LEAF(fswintrberr)
+ j ra
+ li v0, -1
+END(fswintrberr)
diff --git a/sys/arch/mips64/mips64/lcore_ddb.S b/sys/arch/mips64/mips64/lcore_ddb.S
new file mode 100644
index 00000000000..45241f13a48
--- /dev/null
+++ b/sys/arch/mips64/mips64/lcore_ddb.S
@@ -0,0 +1,184 @@
+/* $OpenBSD: lcore_ddb.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+#include <machine/trap.h>
+
+#include "assym.h"
+
+ .set noreorder # Noreorder is default style!
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+LEAF(kdbpeek)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ and v1, a0, 3 # unaligned ?
+ bne v1, zero, 1f
+ sw v0, U_PCB_ONFAULT(t0)
+
+ lw v0, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+
+1:
+ LWHI v0, 0(a0)
+ LWLO v0, 3(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpeek)
+
+LEAF(kdbpeekw)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ and v1, a0, 1 # unaligned ?
+ bne v1, zero, 1f
+ sw v0, U_PCB_ONFAULT(t0)
+
+ lh v0, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+
+1:
+ li v0, -1 # error!
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpeekw)
+
+LEAF(kdbpeekb)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ sw v0, U_PCB_ONFAULT(t0)
+ lb v0, 0(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpeekb)
+
+ .globl kt_ddberr
+kt_ddberr:
+ jr ra
+ li v0, -1
+
+LEAF(kdbpoke)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ and v1, a0, 3 # unaligned ?
+ bne v1, zero, 1f
+ sw v0, U_PCB_ONFAULT(t0)
+
+ sw a1, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+
+1:
+ SWHI a1, 0(a0)
+ SWLO a1, 3(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpoke)
+
+LEAF(kdbpokew)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ and v1, a0, 1 # unaligned ?
+ bne v1, zero, 1f
+ sw v0, U_PCB_ONFAULT(t0)
+
+ sh a1, (a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+
+1:
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpokew)
+
+LEAF(kdbpokeb)
+ PTR_L t0, curprocpaddr
+ li v0, KT_DDBERR
+ sw v0, U_PCB_ONFAULT(t0)
+ sb a1, 0(a0)
+ jr ra
+ sw zero, U_PCB_ONFAULT(t0)
+END(kdbpokeb)
+
+LEAF(Debugger)
+ break BREAK_SOVER_VAL
+ jr ra
+ nop
+END(Debugger)
+
+LEAF(setjmp)
+ mfc0 v0, COP_0_STATUS_REG # Later the "real" spl value!
+ REG_S s0, REGSZ * 0(a0)
+ REG_S s1, REGSZ * 1(a0)
+ REG_S s2, REGSZ * 2(a0)
+ REG_S s3, REGSZ * 3(a0)
+ REG_S s4, REGSZ * 4(a0)
+ REG_S s5, REGSZ * 5(a0)
+ REG_S s6, REGSZ * 6(a0)
+ REG_S s7, REGSZ * 7(a0)
+ REG_S s8, REGSZ * 8(a0)
+ REG_S sp, REGSZ * 9(a0)
+ REG_S ra, REGSZ * 10(a0)
+ REG_S v0, REGSZ * 11(a0)
+ jr ra
+ li v0, 0 # setjmp return
+END(setjmp)
+
+LEAF(longjmp)
+ REG_L v0, REGSZ * 11(a0)
+ REG_L ra, REGSZ * 10(a0)
+ REG_L s0, REGSZ * 0(a0)
+ REG_L s1, REGSZ * 1(a0)
+ REG_L s2, REGSZ * 2(a0)
+ REG_L s3, REGSZ * 3(a0)
+ REG_L s4, REGSZ * 4(a0)
+ REG_L s5, REGSZ * 5(a0)
+ REG_L s6, REGSZ * 6(a0)
+ REG_L s7, REGSZ * 7(a0)
+ REG_L s8, REGSZ * 8(a0)
+ REG_L sp, REGSZ * 9(a0)
+ mtc0 v0, COP_0_STATUS_REG # Later the "real" spl value!
+ ITLBNOPFIX
+ jr ra
+ li v0, 1 # longjmp return
+END(longjmp)
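+/*
+ * These kernel setjmp/longjmp variants are used for fault recovery,
+ * for instance by ddb. A sketch of the pattern, with a hypothetical
+ * buffer name:
+ *
+ *	label_t jb;
+ *
+ *	if (setjmp(&jb) == 0) {
+ *		... code that may fault; the trap handler can longjmp(&jb) ...
+ *	} else {
+ *		... recover ...
+ *	}
+ */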
+
diff --git a/sys/arch/mips64/mips64/lcore_float.S b/sys/arch/mips64/mips64/lcore_float.S
new file mode 100644
index 00000000000..d9f03e7cd51
--- /dev/null
+++ b/sys/arch/mips64/mips64/lcore_float.S
@@ -0,0 +1,519 @@
+/* $OpenBSD: lcore_float.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/errno.h>
+#include <sys/syscall.h>
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+#include <machine/pte.h>
+
+#include "assym.h"
+
+ .set noreorder # Noreorder is default style!
+
+ .set mips3
+
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsSwitchFPState --
+ *
+ * Save the current state into 'from' and restore it from 'to'.
+ *
+ * MipsSwitchFPState(from, to)
+ * struct proc *from;
+ * struct user *to;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(MipsSwitchFPState)
+ mfc0 t1, COP_0_STATUS_REG # Save old SR
+ li t0, SR_COP_1_BIT|SR_FR_32 # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ beq a0, zero, 1f # skip save if NULL pointer
+ nop
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ PTR_L a0, P_ADDR(a0) # get pointer to pcb for proc
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ li t3, ~SR_COP_1_BIT
+ lw t2, U_PCB_REGS+(PS * REGSZ)(a0) # get CPU status register
+ sw t0, U_PCB_FPREGS+(32 * REGSZ)(a0) # save FP status
+ and t2, t2, t3 # clear COP_1 enable bit
+ sw t2, U_PCB_REGS+(PS * REGSZ)(a0) # save new status register
+/*
+ * Save the floating point registers.
+ */
+ sdc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a0)
+ sdc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a0)
+ sdc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a0)
+ sdc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a0)
+ sdc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a0)
+ sdc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a0)
+ sdc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a0)
+ sdc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a0)
+ sdc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a0)
+ sdc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a0)
+ sdc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a0)
+ sdc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a0)
+ sdc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a0)
+ sdc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a0)
+ sdc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a0)
+ sdc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a0)
+ sdc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a0)
+ sdc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a0)
+ sdc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a0)
+ sdc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a0)
+ sdc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a0)
+ sdc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a0)
+ sdc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a0)
+ sdc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a0)
+ sdc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a0)
+ sdc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a0)
+ sdc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a0)
+ sdc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a0)
+ sdc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a0)
+ sdc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a0)
+ sdc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a0)
+ sdc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a0)
+
+1:
+/*
+ * Restore the floating point registers.
+ */
+ lw t0, U_PCB_FPREGS+(32 * REGSZ)(a1) # get status register
+ ldc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a1)
+ ldc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a1)
+ ldc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a1)
+ ldc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a1)
+ ldc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a1)
+ ldc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a1)
+ ldc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a1)
+ ldc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a1)
+ ldc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a1)
+ ldc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a1)
+ ldc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a1)
+ ldc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a1)
+ ldc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a1)
+ ldc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a1)
+ ldc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a1)
+ ldc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a1)
+ ldc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a1)
+ ldc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a1)
+ ldc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a1)
+ ldc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a1)
+ ldc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a1)
+ ldc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a1)
+ ldc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a1)
+ ldc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a1)
+ ldc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a1)
+ ldc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a1)
+ ldc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a1)
+ ldc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a1)
+ ldc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a1)
+ ldc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a1)
+ ldc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a1)
+ ldc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a1)
+
+ and t0, t0, ~FPC_EXCEPTION_BITS
+ ctc1 t0, FPC_CSR
+ nop
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSwitchFPState)
+
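+/*
+ * Same as MipsSwitchFPState, but for the 32 bit (FR=0) floating point
+ * register model: the coprocessor is enabled without SR_FR_32 and the
+ * registers are saved and restored with swc1/lwc1 instead of
+ * sdc1/ldc1.
+ */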
+LEAF(MipsSwitchFPState16)
+ mfc0 t1, COP_0_STATUS_REG # Save old SR
+ li t0, SR_COP_1_BIT # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+
+ beq a0, zero, 1f # skip save if NULL pointer
+ nop
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ PTR_L a0, P_ADDR(a0) # get pointer to pcb for proc
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ li t3, ~SR_COP_1_BIT
+ lw t2, U_PCB_REGS+(PS * REGSZ)(a0) # get CPU status register
+ sw t0, U_PCB_FPREGS+(32 * REGSZ)(a0) # save FP status
+ and t2, t2, t3 # clear COP_1 enable bit
+ sw t2, U_PCB_REGS+(PS * REGSZ)(a0) # save new status register
+/*
+ * Save the floating point registers.
+ */
+ swc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a0)
+ swc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a0)
+ swc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a0)
+ swc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a0)
+ swc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a0)
+ swc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a0)
+ swc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a0)
+ swc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a0)
+ swc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a0)
+ swc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a0)
+ swc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a0)
+ swc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a0)
+ swc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a0)
+ swc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a0)
+ swc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a0)
+ swc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a0)
+ swc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a0)
+ swc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a0)
+ swc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a0)
+ swc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a0)
+ swc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a0)
+ swc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a0)
+ swc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a0)
+ swc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a0)
+ swc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a0)
+ swc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a0)
+ swc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a0)
+ swc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a0)
+ swc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a0)
+ swc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a0)
+ swc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a0)
+ swc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a0)
+
+1:
+/*
+ * Restore the floating point registers.
+ */
+ lw t0, U_PCB_FPREGS+(32 * REGSZ)(a1) # get status register
+ lwc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a1)
+ lwc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a1)
+ lwc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a1)
+ lwc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a1)
+ lwc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a1)
+ lwc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a1)
+ lwc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a1)
+ lwc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a1)
+ lwc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a1)
+ lwc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a1)
+ lwc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a1)
+ lwc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a1)
+ lwc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a1)
+ lwc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a1)
+ lwc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a1)
+ lwc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a1)
+ lwc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a1)
+ lwc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a1)
+ lwc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a1)
+ lwc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a1)
+ lwc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a1)
+ lwc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a1)
+ lwc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a1)
+ lwc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a1)
+ lwc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a1)
+ lwc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a1)
+ lwc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a1)
+ lwc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a1)
+ lwc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a1)
+ lwc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a1)
+ lwc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a1)
+ lwc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a1)
+
+ and t0, t0, ~FPC_EXCEPTION_BITS
+ ctc1 t0, FPC_CSR
+ nop
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSwitchFPState16)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsSaveCurFPState --
+ *
+ * Save the current floating point coprocessor state.
+ *
+ * MipsSaveCurFPState(p)
+ * struct proc *p;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * machFPCurProcPtr is cleared.
+ *
+ *----------------------------------------------------------------------------
+ */
+LEAF(MipsSaveCurFPState)
+ lw a0, P_ADDR(a0) # get pointer to pcb for proc
+ mfc0 t1, COP_0_STATUS_REG # Disable interrupts and
+ li t0, SR_COP_1_BIT|SR_FR_32 # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ PTR_S zero, machFPCurProcPtr # indicate state has been saved
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ lw t2, U_PCB_REGS+(PS * REGSZ)(a0) # get CPU status register
+ li t3, ~SR_COP_1_BIT
+ and t2, t2, t3 # clear COP_1 enable bit
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ sw t2, U_PCB_REGS+(PS * REGSZ)(a0) # save new status register
+ sw t0, U_PCB_FPREGS+(32 * REGSZ)(a0) # save FP status
+/*
+ * Save the floating point registers.
+ */
+ sdc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a0)
+ sdc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a0)
+ sdc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a0)
+ sdc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a0)
+ sdc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a0)
+ sdc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a0)
+ sdc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a0)
+ sdc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a0)
+ sdc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a0)
+ sdc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a0)
+ sdc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a0)
+ sdc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a0)
+ sdc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a0)
+ sdc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a0)
+ sdc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a0)
+ sdc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a0)
+ sdc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a0)
+ sdc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a0)
+ sdc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a0)
+ sdc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a0)
+ sdc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a0)
+ sdc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a0)
+ sdc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a0)
+ sdc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a0)
+ sdc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a0)
+ sdc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a0)
+ sdc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a0)
+ sdc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a0)
+ sdc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a0)
+ sdc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a0)
+ sdc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a0)
+ sdc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a0)
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSaveCurFPState)
+
+LEAF(MipsSaveCurFPState16)
+ lw a0, P_ADDR(a0) # get pointer to pcb for proc
+ mfc0 t1, COP_0_STATUS_REG # Disable interrupts and
+ li t0, SR_COP_1_BIT # enable the coprocessor
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ PTR_S zero, machFPCurProcPtr # indicate state has been saved
+/*
+ * First read out the status register to make sure that all FP operations
+ * have completed.
+ */
+ lw t2, U_PCB_REGS+(PS * REGSZ)(a0) # get CPU status register
+ li t3, ~SR_COP_1_BIT
+ and t2, t2, t3 # clear COP_1 enable bit
+ cfc1 t0, FPC_CSR # stall til FP done
+ cfc1 t0, FPC_CSR # now get status
+ sw t2, U_PCB_REGS+(PS * REGSZ)(a0) # save new status register
+ sw t0, U_PCB_FPREGS+(32 * REGSZ)(a0) # save FP status
+/*
+ * Save the floating point registers.
+ */
+ swc1 $f0, U_PCB_FPREGS+(0 * REGSZ)(a0)
+ swc1 $f1, U_PCB_FPREGS+(1 * REGSZ)(a0)
+ swc1 $f2, U_PCB_FPREGS+(2 * REGSZ)(a0)
+ swc1 $f3, U_PCB_FPREGS+(3 * REGSZ)(a0)
+ swc1 $f4, U_PCB_FPREGS+(4 * REGSZ)(a0)
+ swc1 $f5, U_PCB_FPREGS+(5 * REGSZ)(a0)
+ swc1 $f6, U_PCB_FPREGS+(6 * REGSZ)(a0)
+ swc1 $f7, U_PCB_FPREGS+(7 * REGSZ)(a0)
+ swc1 $f8, U_PCB_FPREGS+(8 * REGSZ)(a0)
+ swc1 $f9, U_PCB_FPREGS+(9 * REGSZ)(a0)
+ swc1 $f10, U_PCB_FPREGS+(10 * REGSZ)(a0)
+ swc1 $f11, U_PCB_FPREGS+(11 * REGSZ)(a0)
+ swc1 $f12, U_PCB_FPREGS+(12 * REGSZ)(a0)
+ swc1 $f13, U_PCB_FPREGS+(13 * REGSZ)(a0)
+ swc1 $f14, U_PCB_FPREGS+(14 * REGSZ)(a0)
+ swc1 $f15, U_PCB_FPREGS+(15 * REGSZ)(a0)
+ swc1 $f16, U_PCB_FPREGS+(16 * REGSZ)(a0)
+ swc1 $f17, U_PCB_FPREGS+(17 * REGSZ)(a0)
+ swc1 $f18, U_PCB_FPREGS+(18 * REGSZ)(a0)
+ swc1 $f19, U_PCB_FPREGS+(19 * REGSZ)(a0)
+ swc1 $f20, U_PCB_FPREGS+(20 * REGSZ)(a0)
+ swc1 $f21, U_PCB_FPREGS+(21 * REGSZ)(a0)
+ swc1 $f22, U_PCB_FPREGS+(22 * REGSZ)(a0)
+ swc1 $f23, U_PCB_FPREGS+(23 * REGSZ)(a0)
+ swc1 $f24, U_PCB_FPREGS+(24 * REGSZ)(a0)
+ swc1 $f25, U_PCB_FPREGS+(25 * REGSZ)(a0)
+ swc1 $f26, U_PCB_FPREGS+(26 * REGSZ)(a0)
+ swc1 $f27, U_PCB_FPREGS+(27 * REGSZ)(a0)
+ swc1 $f28, U_PCB_FPREGS+(28 * REGSZ)(a0)
+ swc1 $f29, U_PCB_FPREGS+(29 * REGSZ)(a0)
+ swc1 $f30, U_PCB_FPREGS+(30 * REGSZ)(a0)
+ swc1 $f31, U_PCB_FPREGS+(31 * REGSZ)(a0)
+
+ mtc0 t1, COP_0_STATUS_REG # Restore the status register.
+ ITLBNOPFIX
+ j ra
+ nop
+END(MipsSaveCurFPState16)
+
+/*----------------------------------------------------------------------------
+ *
+ * MipsFPTrap --
+ *
+ * Handle a floating point Trap.
+ *
+ * MipsFPTrap(statusReg, causeReg, pc)
+ * unsigned statusReg;
+ * unsigned causeReg;
+ * unsigned pc;
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------------
+ */
+NON_LEAF(MipsFPTrap, FRAMESZ(CF_SZ), ra)
+ PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
+ mfc0 t0, COP_0_STATUS_REG
+ PTR_S ra, CF_RA_OFFS(sp)
+ .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+
+ or t1, t0, SR_COP_1_BIT
+ mtc0 t1, COP_0_STATUS_REG
+ ITLBNOPFIX
+ cfc1 t1, FPC_CSR # stall til FP done
+ cfc1 t1, FPC_CSR # now get status
+ nop
+ sll t2, t1, (31 - 17) # unimplemented operation?
+ bgez t2, 3f # no, normal trap
+ nop
+/*
+ * We got an unimplemented operation trap so
+ * fetch the instruction, compute the next PC and emulate the instruction.
+ */
+ bgez a1, 1f # Check the branch delay bit.
+ nop
+/*
+ * The instruction is in the branch delay slot so the branch will have to
+ * be emulated to get the resulting PC.
+ */
+ PTR_S a2, FRAMESZ(CF_SZ) + 2 * REGSZ(sp)
+ PTR_L a0, curprocpaddr # first arg is ptr to CPU regs
+ move a1, a2 # second arg is instruction PC
+ move a2, t1 # third arg is floating point CSR
+ jal MipsEmulateBranch # compute PC after branch
+ move a3, zero # fourth arg is FALSE
+/*
+ * Now load the floating-point instruction in the branch delay slot
+ * to be emulated.
+ */
+ PTR_L a2, FRAMESZ(CF_SZ) + 2 * REGSZ(sp) # restore EXC pc
+ b 2f
+ lw a0, 4(a2) # a0 = coproc instruction
+/*
+ * This is not in the branch delay slot so calculate the resulting
+ * PC (epc + 4) into v0 and continue to MipsEmulateFP().
+ */
+1:
+ lw a0, 0(a2) # a0 = coproc instruction
+ addu v0, a2, 4 # v0 = next pc
+2:
+ PTR_L a3, curprocpaddr # first arg is ptr to CPU regs
+ PTR_S v0, U_PCB_REGS+(PC * REGSZ)(a3) # save new pc
+/*
+ * Check to see if the instruction to be emulated is a floating-point
+ * instruction.
+ */
+ srl a3, a0, OPCODE_SHIFT
+ beq a3, OPCODE_C1, 4f # this should never fail
+ nop
+/*
+ * Send a floating point exception signal to the current process.
+ */
+3:
+ PTR_L a0, curproc # get current process
+	cfc1	a2, FPC_CSR			# code = FP exceptions
+ ctc1 zero, FPC_CSR # Clear exceptions
+ jal trapsignal
+ li a1, SIGFPE
+ b FPReturn
+ nop
+
+/*
+ * Finally, we can call MipsEmulateFP() where a0 is the instruction to emulate.
+ */
+4:
+ jal MipsEmulateFP
+ nop
+
+/*
+ * Turn off the floating point coprocessor and return.
+ */
+FPReturn:
+ mfc0 t0, COP_0_STATUS_REG
+ PTR_L ra, CF_RA_OFFS(sp)
+ and t0, t0, ~SR_COP_1_BIT
+ mtc0 t0, COP_0_STATUS_REG
+ ITLBNOPFIX
+ j ra
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+END(MipsFPTrap)
diff --git a/sys/arch/mips64/mips64/mainbus.c b/sys/arch/mips64/mips64/mainbus.c
new file mode 100644
index 00000000000..02e4841e2cc
--- /dev/null
+++ b/sys/arch/mips64/mips64/mainbus.c
@@ -0,0 +1,192 @@
+/* $OpenBSD: mainbus.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/reboot.h>
+
+#include <mips64/archtype.h>
+#include <machine/autoconf.h>
+
+struct mainbus_softc {
+ struct device sc_dv;
+ struct abus sc_bus;
+};
+
+/* Definition of the mainbus driver. */
+static int mbmatch __P((struct device *, void *, void *));
+static void mbattach __P((struct device *, struct device *, void *));
+static int mbprint __P((void *, const char *));
+
+struct cfattach mainbus_ca = {
+ sizeof(struct mainbus_softc), mbmatch, mbattach
+};
+struct cfdriver mainbus_cd = {
+ NULL, "mainbus", DV_DULL, NULL, 0
+};
+
+void *mb_intr_establish __P((void *, u_long, int, int, int (*)(void *), void *, char
+ *));
+void mb_intr_disestablish __P((void *, void *));
+caddr_t mb_cvtaddr __P((struct confargs *));
+int mb_matchname __P((struct confargs *, char *));
+
+static int
+mbmatch(parent, cfdata, aux)
+ struct device *parent;
+ void *cfdata;
+ void *aux;
+{
+ struct cfdata *cf = cfdata;
+
+ if (cf->cf_unit > 0)
+ return(0);
+ return(1);
+}
+
+static void
+mbattach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ struct mainbus_softc *sc = (struct mainbus_softc *)self;
+ struct confargs nca;
+
+ printf("\n");
+
+ sc->sc_bus.ab_dv = (struct device *)sc;
+ sc->sc_bus.ab_type = BUS_MAIN;
+ sc->sc_bus.ab_intr_establish = mb_intr_establish;
+ sc->sc_bus.ab_intr_disestablish = mb_intr_disestablish;
+ sc->sc_bus.ab_cvtaddr = mb_cvtaddr;
+ sc->sc_bus.ab_matchname = mb_matchname;
+
+ /*
+ * Try to find and attach all of the CPUs in the machine.
+	 * (Right now there is only one CPU, so the code is simple.)
+ */
+
+ nca.ca_name = "cpu";
+ nca.ca_bus = &sc->sc_bus;
+ config_found(self, &nca, mbprint);
+
+ /*
+ * Attach the clocks.
+ */
+ nca.ca_name = "clock";
+ nca.ca_bus = &sc->sc_bus;
+ config_found(self, &nca, mbprint);
+
+ if (sys_config.system_type == SGI_INDY) {
+ nca.ca_name = "indy";
+ nca.ca_bus = &sc->sc_bus;
+ config_found(self, &nca, mbprint);
+ }
+ else if (sys_config.system_type == SGI_O2) {
+ nca.ca_name = "macebus";
+ nca.ca_bus = &sc->sc_bus;
+ config_found(self, &nca, mbprint);
+
+ nca.ca_name = "macepcibr";
+ nca.ca_bus = &sc->sc_bus;
+ nca.ca_num = 0;
+ config_found(self, &nca, mbprint);
+ }
+ else if (sys_config.system_type == ALGOR_P4032 ||
+ sys_config.system_type == ALGOR_P5064 ||
+ sys_config.system_type == MOMENTUM_CP7000 ||
+ sys_config.system_type == MOMENTUM_CP7000G ||
+ sys_config.system_type == MOMENTUM_JAGUAR ||
+ sys_config.system_type == GALILEO_EV64240) {
+
+ nca.ca_name = "localbus";
+ nca.ca_bus = &sc->sc_bus;
+ config_found(self, &nca, mbprint);
+
+ nca.ca_name = "pcibr";
+ nca.ca_bus = &sc->sc_bus;
+ nca.ca_num = 0;
+ config_found(self, &nca, mbprint);
+
+ nca.ca_name = "pcibr";
+ nca.ca_bus = &sc->sc_bus;
+ nca.ca_num = 1;
+ config_found(self, &nca, mbprint);
+ }
+}
+
+static int
+mbprint(aux, pnp)
+ void *aux;
+ const char *pnp;
+{
+ if (pnp)
+ return (QUIET);
+ return (UNCONF);
+}
+
+void *
+mb_intr_establish(icp, irq, type, level, ih_fun, ih_arg, ih_what)
+ void *icp;
+ u_long irq; /* XXX pci_intr_handle_t compatible XXX */
+ int type;
+ int level;
+ int (*ih_fun) __P((void *));
+ void *ih_arg;
+ char *ih_what;
+{
+ panic("can never mb_intr_establish");
+}
+
+void
+mb_intr_disestablish(void *p1, void *p2)
+{
+ panic("can never mb_intr_disestablish");
+}
+
+caddr_t
+mb_cvtaddr(ca)
+ struct confargs *ca;
+{
+ return (NULL);
+}
+
+int
+mb_matchname(ca, name)
+ struct confargs *ca;
+ char *name;
+{
+ return (strcmp(name, ca->ca_name) == 0);
+}
diff --git a/sys/arch/mips64/mips64/mem.c b/sys/arch/mips64/mips64/mem.c
new file mode 100644
index 00000000000..47320f2fdc6
--- /dev/null
+++ b/sys/arch/mips64/mips64/mem.c
@@ -0,0 +1,234 @@
+/* $OpenBSD: mem.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/* $NetBSD: mem.c,v 1.6 1995/04/10 11:55:03 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mem.c 8.3 (Berkeley) 1/12/94
+ */
+
+/*
+ * Memory special file
+ */
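+/*
+ * Minor device layout handled below:
+ *	0	physical memory (/dev/mem)
+ *	1	kernel virtual memory (/dev/kmem)
+ *	2	EOF on read, rathole on write
+ *	4	PCI I/O and memory aperture (only with option APERTURE)
+ *	12	zeroes on read, rathole on write (/dev/zero)
+ */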
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/msgbuf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+
+#include <machine/autoconf.h>
+#include <machine/pte.h>
+#include <machine/cpu.h>
+
+#include <uvm/uvm_extern.h>
+
+#ifdef APERTURE
+static int ap_open_count = 0;
+extern int allowaperture;
+#endif
+extern vaddr_t avail_end;
+caddr_t zeropage;
+
+#define mmread mmrw
+#define mmwrite mmrw
+cdev_decl(mm);
+
+/*ARGSUSED*/
+int
+mmopen(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+
+ switch (minor(dev)) {
+ case 0:
+ case 1:
+ case 2:
+ case 12:
+ return (0);
+#ifdef APERTURE
+ case 4:
+ if (suser(p->p_ucred, &p->p_acflag) != 0 || !allowaperture)
+ return (EPERM);
+
+ /* authorize only one simultaneous open() */
+ if (ap_open_count > 0)
+ return(EPERM);
+ ap_open_count++;
+ return (0);
+#endif
+ default:
+ return (ENXIO);
+ }
+}
+
+/*ARGSUSED*/
+int
+mmclose(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+#ifdef APERTURE
+ if (minor(dev) == 4)
+ ap_open_count--;
+#endif
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+mmrw(dev, uio, flags)
+ dev_t dev;
+ struct uio *uio;
+ int flags;
+{
+ vaddr_t v;
+ int c;
+ struct iovec *iov;
+ int error = 0;
+
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("mmrw");
+ continue;
+ }
+ switch (minor(dev)) {
+
+/* minor device 0 is physical memory */
+ case 0:
+ v = uio->uio_offset;
+ c = iov->iov_len;
+ if (v + c > ctob(physmem))
+ return (EFAULT);
+ v += KSEG0_BASE;
+ error = uiomove((caddr_t)v, c, uio);
+ continue;
+
+/* minor device 1 is kernel memory */
+ case 1:
+ v = uio->uio_offset;
+ c = min(iov->iov_len, MAXPHYS);
+ if (v < KSEG0_BASE)
+ return (EFAULT);
+ if (v + c > PHYS_TO_KSEG0(avail_end +
+ sizeof (struct msgbuf)) &&
+ (v < VM_MIN_KERNEL_ADDRESS ||
+ !uvm_kernacc((caddr_t)v, c,
+ uio->uio_rw == UIO_READ ? B_READ : B_WRITE)))
+ return (EFAULT);
+
+ error = uiomove((caddr_t)v, c, uio);
+ continue;
+
+/* minor device 2 is EOF/RATHOLE */
+ case 2:
+ if (uio->uio_rw == UIO_WRITE)
+ uio->uio_resid = 0;
+ return (0);
+
+/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
+ case 12:
+ if (uio->uio_rw == UIO_WRITE) {
+ c = iov->iov_len;
+ break;
+ }
+ if (zeropage == NULL) {
+ zeropage = (caddr_t)
+ malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
+ bzero(zeropage, PAGE_SIZE);
+ }
+ c = min(iov->iov_len, PAGE_SIZE);
+ error = uiomove(zeropage, c, uio);
+ continue;
+
+ default:
+ return (ENODEV);
+ }
+ if (error)
+ break;
+ iov->iov_base += c;
+ iov->iov_len -= c;
+ uio->uio_offset += c;
+ uio->uio_resid -= c;
+ }
+ return error;
+}
+
+/*ARGSUSED*/
+paddr_t
+mmmmap(dev, off, prot)
+ dev_t dev;
+ off_t off;
+ int prot;
+{
+#ifdef APERTURE
+ if (minor(dev) == 4) {
+ if (off >= 0x0000 && off < 0x10000) {
+ off += sys_config.pci_io[0].bus_base;
+ return mips_btop(off);
+ } else if (off >= 0xa0000 && off < 0x10000000) {
+ off += sys_config.pci_mem[0].bus_base;
+ return mips_btop(off);
+ } else {
+ return -1;
+ }
+ }
+#endif
+ return -1;
+}
+
+int
+mmioctl(dev, cmd, data, flags, p)
+ dev_t dev;
+ u_long cmd;
+ caddr_t data;
+ int flags;
+ struct proc *p;
+{
+ return (EOPNOTSUPP);
+}
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
new file mode 100644
index 00000000000..c522ffad4c1
--- /dev/null
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -0,0 +1,1683 @@
+/* $OpenBSD: pmap.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * XXX This code needs some major rewriting.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/pool.h>
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
+
+#include <uvm/uvm.h>
+
+#include <machine/pte.h>
+#include <machine/cpu.h>
+#include <machine/autoconf.h>
+#include <machine/memconf.h>
+#include <mips64/archtype.h>
+
+extern void mem_zero_page (vaddr_t);
+
+typedef struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vaddr_t pv_va; /* virtual address for mapping */
+ int pv_flags; /* Some flags for the mapping */
+} *pv_entry_t;
+#define	PV_UNCACHED	0x0001		/* Page is mapped uncached */
+#define PV_CACHED 0x0002 /* Page has been cached */
+
+/*
+ * Local pte bits used only here
+ */
+#define PG_RO 0x40000000
+
+pv_entry_t pv_table; /* array of entries, one per page */
+
+struct pool pmap_pmap_pool;
+struct pool pmap_pv_pool;
+
+void *pmap_pv_page_alloc(u_long, int, int);
+void pmap_pv_page_free(void *, u_long, int);
+
+#define pmap_pv_alloc() (pv_entry_t)pool_get(&pmap_pv_pool, PR_NOWAIT)
+#define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv))
+
+#ifndef PMAP_PV_LOWAT
+#define PMAP_PV_LOWAT 16
+#endif
+int pmap_pv_lowat = PMAP_PV_LOWAT;
+
+void pmap_pinit __P((struct pmap *pmap));
+void pmap_release __P((pmap_t pmap));
+boolean_t pmap_physpage_alloc(paddr_t *);
+void pmap_physpage_free(paddr_t);
+
+#ifdef DIAGNOSTIC
+struct {
+ int kernel; /* entering kernel mapping */
+ int user; /* entering user mapping */
+ int ptpneeded; /* needed to allocate a PT page */
+ int pwchange; /* no mapping change, just wiring or protection */
+ int wchange; /* no mapping change, just wiring */
+ int mchange; /* was mapped but mapping to different page */
+ int managed; /* a managed page */
+ int firstpv; /* first mapping for this PA */
+ int secondpv; /* second mapping for this PA */
+ int ci; /* cache inhibited */
+ int unmanaged; /* not a managed page */
+ int flushes; /* cache flushes */
+ int cachehit; /* new entry forced valid entry out */
+} enter_stats;
+struct {
+ int calls;
+ int removes;
+ int flushes;
+ int pidflushes; /* HW pid stolen */
+ int pvfirst;
+ int pvsearch;
+} remove_stats;
+
+#define stat_count(what) (what)++
+
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_PVENTRY 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_TLBPID 0x0400
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+
+int pmapdebugflag = 0x0;
+#define pmapdebug pmapdebugflag
+
+#else
+
+#define stat_count(what)
+#define pmapdebug (0)
+
+#endif /* DIAGNOSTIC */
+
+
+struct pmap kernel_pmap_store;
+
+psize_t mem_size; /* memory size in bytes */
+vaddr_t virtual_start; /* VA of first avail page (after kernel bss)*/
+vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
+
+struct segtab *free_segtab; /* free list kept locally */
+u_int tlbpid_gen = 1; /* TLB PID generation count */
+int tlbpid_cnt = 2; /* next available TLB PID */
+
+pt_entry_t *Sysmap; /* kernel pte table */
+u_int Sysmapsize; /* number of pte's in Sysmap */
+
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ */
+void
+pmap_bootstrap()
+{
+ int i;
+ pt_entry_t *spte;
+ int n;
+
+
+ /*
+ * Create a mapping table for kernel virtual memory. This
+ * table is a linear table in contrast to the user process
+ * mapping tables which are built with segment/page tables.
+ * Create at least 256MB of map even if physmem is smaller.
+ */
+ if (physmem < 65536)
+ Sysmapsize = 65536;
+ else
+ Sysmapsize = physmem;
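+	/*
+	 * For illustration only (this assumes the usual 4KB page size on
+	 * this port): 65536 PTEs * 4KB per page = 256MB of mapped kernel
+	 * virtual space, which is where the "at least 256MB" figure in
+	 * the comment above comes from.
+	 */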
+
+ virtual_start = VM_MIN_KERNEL_ADDRESS;
+ virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
+
+ Sysmap = (pt_entry_t *)uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);
+
+ /*
+ * Allocate memory for pv_table.
+ * This will allocate more entries than we really need.
+ * We could do this in pmap_init when we know the actual
+ * phys_start and phys_end but its better to use kseg0 addresses
+	 * phys_start and phys_end, but it's better to use kseg0 addresses
+ */
+ i = 0;
+ for( n = 0; n < MAXMEMSEGS; n++) {
+ i += mem_layout[n].mem_size;
+ }
+ pv_table = (struct pv_entry *)uvm_pageboot_alloc(sizeof(struct pv_entry) * i);
+
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0,"pmappl", NULL);
+ pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0,"pvpl", NULL);
+
+ /* XXX need to decide how to set cnt.v_page_size */
+
+ simple_lock_init(&pmap_kernel()->pm_lock);
+ pmap_kernel()->pm_count = 1;
+
+ /*
+ * The R4?00 stores only one copy of the Global bit in the
+ * translation lookaside buffer for each 2 page entry.
+	 * Thus invalid entries must have the Global bit set so
+	 * that when the G bits of the paired EntryLo0 and EntryLo1
+	 * entries are ANDed together they produce a global bit to
+	 * store in the tlb.
+ */
+ for(i = 0, spte = Sysmap; i < Sysmapsize; i++, spte++)
+ spte->pt_entry = PG_G;
+}
+
+/*
+ * Page steal allocator used during bootup.
+ */
+vaddr_t
+pmap_steal_memory(size, vstartp, vendp)
+ vsize_t size;
+ vaddr_t *vstartp, *vendp;
+{
+	int i, j;
+ int npgs;
+ vaddr_t va;
+ paddr_t pa;
+
+ if (uvm.page_init_done) {
+		panic("pmap_steal_memory: too late, vm is running!");
+ }
+
+ size = round_page(size);
+ npgs = atop(size);
+ va = 0;
+
+ for(i = 0; i < vm_nphysseg && va == 0; i++) {
+ if(vm_physmem[i].avail_start != vm_physmem[i].start ||
+ vm_physmem[i].avail_start >= vm_physmem[i].avail_end) {
+ continue;
+ }
+
+ if((vm_physmem[i].avail_end - vm_physmem[i].avail_start) < npgs) {
+ continue;
+ }
+
+ pa = ptoa(vm_physmem[i].avail_start);
+ vm_physmem[i].avail_start += npgs;
+ vm_physmem[i].start += npgs;
+
+ if(vm_physmem[i].avail_start == vm_physmem[i].end) {
+ if(vm_nphysseg == 1) {
+ panic("pmap_steal_memory: out of memory!");
+ }
+
+ vm_nphysseg--;
+			for (j = i; j < vm_nphysseg; j++) {
+				vm_physmem[j] = vm_physmem[j + 1];
+			}
+ }
+ if(vstartp) {
+ *vstartp = round_page(virtual_start);
+ }
+ if(vendp) {
+ *vendp = virtual_end;
+ }
+ va = PHYS_TO_KSEG0(pa);
+ memset((caddr_t)va, 0, size);
+ }
+
+ if(va == 0) {
+ panic("pmap_steal_memory: no memory to steal");
+ }
+
+ return(va);
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init()
+{
+ vsize_t s;
+ int bank;
+ pv_entry_t pv;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) {
+ printf("pmap_init()\n");
+ }
+
+ pv = pv_table;
+ for(bank = 0; bank < vm_nphysseg; bank++) {
+ s = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ pv += s;
+ }
+
+#if 0 /* too early */
+ pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
+#endif
+}
+
+inline struct pv_entry *pa_to_pvh __P((paddr_t));
+inline struct pv_entry *
+pa_to_pvh(pa)
+ paddr_t pa;
+{
+ int i, p;
+
+ i = vm_physseg_find(atop((pa)), &p);
+ return(&vm_physmem[i].pmseg.pvent[p]);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * The map is an actual physical map and may be referenced by the
+ * hardware.
+ */
+pmap_t
+pmap_create()
+{
+ pmap_t pmap;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) {
+ printf("pmap_create()\n");
+ }
+
+ pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
+ bzero(pmap, sizeof(*pmap));
+ pmap_pinit(pmap);
+ return (pmap);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+void
+pmap_pinit(pmap)
+ struct pmap *pmap;
+{
+ int i;
+ int s;
+extern struct vmspace vmspace0;
+extern struct user *proc0paddr;
+
+ simple_lock_init(&pmap->pm_lock);
+ pmap->pm_count = 1;
+ if (free_segtab) {
+ s = splimp();
+ pmap->pm_segtab = free_segtab;
+ free_segtab = *(struct segtab **)free_segtab;
+ pmap->pm_segtab->seg_tab[0] = NULL;
+ splx(s);
+ }
+ else {
+ struct segtab *stp;
+ vm_page_t mem;
+ pv_entry_t pv;
+
+ do {
+ mem = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+ if (mem == NULL) {
+ /* XXX What else can we do? Deadlocks? */
+ uvm_wait("ppinit");
+ }
+ } while (mem == NULL);
+
+ pv = pa_to_pvh(VM_PAGE_TO_PHYS(mem));
+ if(pv->pv_flags & PV_CACHED &&
+ ((pv->pv_va ^ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem))) & CpuCacheAliasMask) != 0) {
+ Mips_SyncDCachePage(pv->pv_va);
+ }
+ pv->pv_va = PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+ pv->pv_flags = PV_CACHED;
+
+ pmap->pm_segtab = stp = (struct segtab *)
+ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+
+ i = NBPG / sizeof(struct segtab);
+ s = splimp();
+ while (--i != 0) {
+ stp++;
+ *(struct segtab **)stp = free_segtab;
+ free_segtab = stp;
+ }
+ splx(s);
+ }
+ if (pmap == vmspace0.vm_map.pmap) {
+ /*
+ * The initial process has already been allocated a TLBPID
+ * in mach_init().
+ */
+ pmap->pm_tlbpid = 1;
+ pmap->pm_tlbgen = tlbpid_gen;
+ proc0paddr->u_pcb.pcb_segtab = (void *)pmap->pm_segtab;
+ }
+ else {
+ pmap->pm_tlbpid = 0;
+ pmap->pm_tlbgen = 0;
+ }
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ pmap_t pmap;
+{
+ int count;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) {
+ printf("pmap_destroy(%x)\n", pmap);
+ }
+ if (pmap) {
+ simple_lock(&pmap->pm_lock);
+ count = --pmap->pm_count;
+ simple_unlock(&pmap->pm_lock);
+ if (count == 0) {
+ pmap_release(pmap);
+ pool_put(&pmap_pmap_pool, pmap);
+ }
+ }
+}
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap)
+ pmap_t pmap;
+{
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) {
+ printf("pmap_release(%x)\n", pmap);
+ }
+
+ if (pmap->pm_segtab) {
+ pt_entry_t *pte;
+ int i;
+ int s;
+#ifdef PARANIOA
+ int j;
+#endif
+
+ for (i = 0; i < PMAP_SEGTABSIZE; i++) {
+ /* get pointer to segment map */
+ pte = pmap->pm_segtab->seg_tab[i];
+ if (!pte)
+ continue;
+#ifdef PARANOIA
+ for (j = 0; j < NPTEPG; j++) {
+ if ((pte+j)->pt_entry)
+ panic("pmap_release: segmap not empty");
+ }
+#endif
+ Mips_HitInvalidateDCache((vaddr_t)pte, PAGE_SIZE);
+ uvm_pagefree(PHYS_TO_VM_PAGE(KSEG0_TO_PHYS(pte)));
+ pmap->pm_segtab->seg_tab[i] = NULL;
+ }
+ s = splimp();
+ *(struct segtab **)pmap->pm_segtab = free_segtab;
+ free_segtab = pmap->pm_segtab;
+ splx(s);
+ pmap->pm_segtab = NULL;
+ }
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_reference(%x)\n", pmap);
+ }
+ if (pmap) {
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+ }
+}
+
+/*
+ * Make a new pmap (vmspace) active for the given process.
+ */
+void
+pmap_activate(p)
+ struct proc *p;
+{
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+
+ p->p_addr->u_pcb.pcb_segtab = pmap->pm_segtab;
+
+ pmap_alloc_tlbpid(p);
+}
+
+/*
+ * Make a previously active pmap (vmspace) inactive.
+ */
+void
+pmap_deactivate(p)
+ struct proc *p;
+{
+ /* Empty */
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap, sva, eva)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+{
+ vaddr_t nssva;
+ pt_entry_t *pte;
+ unsigned entry;
+
+ stat_count(remove_stats.calls);
+
+ if(pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) {
+ printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
+ }
+ if(pmap == NULL) {
+ return;
+ }
+
+ if(pmap == pmap_kernel()) {
+ pt_entry_t *pte;
+
+ /* remove entries from kernel pmap */
+#ifdef DIAGNOSTIC
+ if(sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
+ panic("pmap_remove: kva not in range");
+#endif
+ pte = kvtopte(sva);
+ for(; sva < eva; sva += NBPG, pte++) {
+ entry = pte->pt_entry;
+ if(!(entry & PG_V))
+ continue;
+ pmap->pm_stats.resident_count--;
+ pmap_remove_pv(pmap, sva, pfn_to_pad(entry));
+ /*
+ * Flush the TLB for the given address.
+ */
+ pte->pt_entry = PG_NV | PG_G; /* See above about G bit */
+ tlb_flush_addr(sva);
+ stat_count(remove_stats.flushes);
+ }
+ return;
+ }
+
+#ifdef DIAGNOSTIC
+ if (eva > VM_MAXUSER_ADDRESS)
+ panic("pmap_remove: uva not in range");
+#endif
+ while (sva < eva) {
+ nssva = mips_trunc_seg(sva) + NBSEG;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!(pte = pmap_segmap(pmap, sva))) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Invalidate every valid mapping within this segment.
+ */
+ pte += uvtopte(sva);
+ for (; sva < nssva; sva += NBPG, pte++) {
+ entry = pte->pt_entry;
+ if (!(entry & PG_V))
+ continue;
+ pmap->pm_stats.resident_count--;
+ if(!pfn_is_ext(entry)) {/* padr > 32 bits */
+ pmap_remove_pv(pmap, sva, pfn_to_pad(entry));
+ }
+ pte->pt_entry = PG_NV;
+ /*
+ * Flush the TLB for the given address.
+ */
+ if (pmap->pm_tlbgen == tlbpid_gen) {
+ tlb_flush_addr(sva | (pmap->pm_tlbpid <<
+ VMTLB_PID_SHIFT));
+ stat_count(remove_stats.flushes);
+ }
+ }
+ }
+}
+
+/*
+ * pmap_page_protect:
+ *
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(pg, prot)
+ struct vm_page *pg;
+ vm_prot_t prot;
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ pv_entry_t pv;
+ vaddr_t va;
+ int s, i;
+
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+ ((prot == VM_PROT_NONE) && (pmapdebug & PDB_REMOVE))) {
+ printf("pmap_page_protect(%x, %x)\n", pa, prot);
+ }
+ if (!IS_VM_PHYSADDR(pa)) {
+ return;
+ }
+
+ switch (prot) {
+ case VM_PROT_READ|VM_PROT_WRITE:
+ case VM_PROT_ALL:
+ break;
+
+ /* copy_on_write */
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ /*
+ * Loop over all current mappings setting/clearing as apropos.
+ */
+ if (pv->pv_pmap != NULL) {
+ for (; pv; pv = pv->pv_next) {
+ va = pv->pv_va;
+
+ /*
+ * XXX don't write protect pager mappings
+ */
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+ pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, prot);
+ }
+ }
+ splx(s);
+ break;
+
+ /* remove_all */
+ default:
+ i = 0;
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ while (pv->pv_pmap != NULL && i < 10) {
+ i++;
+ pmap_remove(pv->pv_pmap, pv->pv_va,
+ pv->pv_va + PAGE_SIZE);
+ }
+ splx(s);
+ }
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap, sva, eva, prot)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
+{
+ vaddr_t nssva;
+ pt_entry_t *pte;
+ unsigned entry;
+ u_int p;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) {
+ printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
+ }
+ if (pmap == NULL)
+ return;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
+ p = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
+
+ if (!pmap->pm_segtab) {
+ /*
+ * Change entries in kernel pmap.
+ * This will trap if the page is writeable (in order to set
+ * the dirty bit) even if the dirty bit is already set. The
+ * optimization isn't worth the effort since this code isn't
+ * executed much. The common case is to make a user page
+ * read-only.
+ */
+#ifdef DIAGNOSTIC
+ if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
+ panic("pmap_protect: kva not in range");
+#endif
+ pte = kvtopte(sva);
+ for (; sva < eva; sva += NBPG, pte++) {
+ entry = pte->pt_entry;
+ if (!(entry & PG_V))
+ continue;
+ entry = (entry & ~(PG_M | PG_RO)) | p;
+ pte->pt_entry = entry;
+ /*
+ * Update the TLB if the given address is in the cache.
+ */
+ tlb_update(sva, entry);
+ }
+ return;
+ }
+
+#ifdef DIAGNOSTIC
+ if (eva > VM_MAXUSER_ADDRESS)
+ panic("pmap_protect: uva not in range");
+#endif
+ while (sva < eva) {
+ nssva = mips_trunc_seg(sva) + NBSEG;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+ if (!(pte = pmap_segmap(pmap, sva))) {
+ sva = nssva;
+ continue;
+ }
+ /*
+ * Change protection on every valid mapping within this segment.
+ */
+ pte += (sva >> PGSHIFT) & (NPTEPG - 1);
+ for (; sva < nssva; sva += NBPG, pte++) {
+ entry = pte->pt_entry;
+ if (!(entry & PG_V))
+ continue;
+ entry = (entry & ~(PG_M | PG_RO)) | p;
+ pte->pt_entry = entry;
+ /*
+ * Update the TLB if the given address is in the cache.
+ */
+ if (pmap->pm_tlbgen == tlbpid_gen)
+ tlb_update(sva | (pmap->pm_tlbpid <<
+ VMTLB_PID_SHIFT), entry);
+ }
+ }
+}
+
+/*
+ * Return RO protection of page.
+ */
+int
+pmap_is_page_ro(pmap, va, entry)
+ pmap_t pmap;
+ vaddr_t va;
+ int entry;
+{
+ return(entry & PG_RO);
+}
+
+/*
+ * pmap_page_cache:
+ *
+ * Change all mappings of a page to cached/uncached.
+ */
+void
+pmap_page_cache(pa,mode)
+ vaddr_t pa;
+ int mode;
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ unsigned entry;
+ unsigned newmode;
+ int s;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) {
+		printf("pmap_page_cache(%x)\n", pa);
+ }
+ if (!IS_VM_PHYSADDR(pa)) {
+ return;
+ }
+
+ newmode = mode & PV_UNCACHED ? PG_UNCACHED : PG_CACHED;
+ pv = pa_to_pvh(pa);
+ s = splimp();
+ while (pv) {
+ pv->pv_flags = (pv->pv_flags & ~PV_UNCACHED) | mode;
+ if (!pv->pv_pmap->pm_segtab) {
+ /*
+ * Change entries in kernel pmap.
+ */
+ pte = kvtopte(pv->pv_va);
+ entry = pte->pt_entry;
+ if (entry & PG_V) {
+ entry = (entry & ~PG_CACHEMODE) | newmode;
+ pte->pt_entry = entry;
+ tlb_update(pv->pv_va, entry);
+ }
+ }
+ else {
+ if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va))) {
+ pte += (pv->pv_va >> PGSHIFT) & (NPTEPG - 1);
+ entry = pte->pt_entry;
+ if (entry & PG_V) {
+ entry = (entry & ~PG_CACHEMODE) | newmode;
+ pte->pt_entry = entry;
+ if (pv->pv_pmap->pm_tlbgen == tlbpid_gen)
+ tlb_update(pv->pv_va | (pv->pv_pmap->pm_tlbpid <<
+ VMTLB_PID_SHIFT), entry);
+ }
+ }
+ }
+ pv = pv->pv_next;
+ }
+
+ splx(s);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+int
+pmap_enter(pmap, va, pa, prot, stat)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int stat;
+{
+ pt_entry_t *pte;
+ u_int npte;
+ vm_page_t mem;
+ int is_physaddr;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) {
+ printf("pmap_enter(%x, %x, %x, %x, %x)\n",
+ pmap, va, pa, prot, stat);
+ }
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel()) {
+ enter_stats.kernel++;
+ if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
+ panic("pmap_enter: kva %p", va);
+ } else {
+ enter_stats.user++;
+ if (va >= VM_MAXUSER_ADDRESS)
+ panic("pmap_enter: uva %p", va);
+ }
+#endif
+
+ is_physaddr = IS_VM_PHYSADDR(pa);
+
+ if(is_physaddr) {
+ pv_entry_t pv, npv;
+ int s;
+
+ if (!(prot & VM_PROT_WRITE)) {
+ npte = PG_ROPAGE;
+ } else {
+ vm_page_t mem;
+
+ mem = PHYS_TO_VM_PAGE(pa);
+ if ((int)va < 0) {
+ /*
+ * Don't bother to trap on kernel writes,
+ * just record page as dirty.
+ */
+ npte = PG_RWPAGE;
+#if 0 /*XXX*/
+ mem->flags &= ~PG_CLEAN;
+#endif
+ } else {
+ if (!(mem->flags & PG_CLEAN)) {
+ npte = PG_RWPAGE;
+ } else {
+ npte = PG_CWPAGE;
+ }
+ }
+ }
+
+ stat_count(enter_stats.managed);
+ /*
+ * Enter the pmap and virtual address into the
+ * physical to virtual map table.
+ */
+ pv = pa_to_pvh(pa);
+ s = splimp();
+
+ if (pmapdebug & PDB_ENTER) {
+ printf("pmap_enter: pv %x: was %x/%x/%x\n",
+ pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
+ }
+
+ if (pv->pv_pmap == NULL) {
+ /*
+ * No entries yet, use header as the first entry
+ */
+
+ if (pmapdebug & PDB_PVENTRY) {
+ printf("pmap_enter: first pv: pmap %x va %x pa %p\n",
+ pmap, va, pa);
+ }
+ stat_count(enter_stats.firstpv);
+
+ Mips_SyncDCachePage(pv->pv_va);
+
+ pv->pv_va = va;
+ pv->pv_flags = PV_CACHED;
+ pv->pv_pmap = pmap;
+ pv->pv_next = NULL;
+ } else {
+ if (pv->pv_flags & PV_UNCACHED) {
+ npte = (npte & ~PG_CACHEMODE) | PG_UNCACHED;
+ } else if (CpuCacheAliasMask != 0) {
+ /*
+ * There is at least one other VA mapping this page.
+ * Check if they are cache index compatible. If not
+ * remove all mappings, flush the cache and set page
+ * to be mapped uncached. Caching will be restored
+ * when pages are mapped compatible again. NOT!
+ */
+ for (npv = pv; npv; npv = npv->pv_next) {
+ /*
+ * Check cache aliasing incompatibility
+ */
+ if(((npv->pv_va ^ va) & CpuCacheAliasMask) != 0) {
+ printf("pmap_enter: uncached mapping for pa %p, va %p != %p.\n", pa, npv->pv_va, va);
+ pmap_page_cache(pa,PV_UNCACHED);
+ Mips_SyncCache();
+ pv->pv_flags &= ~PV_CACHED;
+ npte = (npte & ~PG_CACHEMODE) | PG_UNCACHED;
+ break;
+ }
+ }
+ }
+
+ /*
+ * There is at least one other VA mapping this page.
+ * Place this entry after the header.
+ *
+ * Note: the entry may already be in the table if
+ * we are only changing the protection bits.
+ */
+ for (npv = pv; npv; npv = npv->pv_next) {
+ if (pmap == npv->pv_pmap && va == npv->pv_va) {
+ goto fnd;
+ }
+ }
+
+ if (pmapdebug & PDB_PVENTRY) {
+ printf("pmap_enter: new pv: pmap %x va %x pa %p\n",
+ pmap, va, pa);
+ }
+
+ /* can this cause us to recurse forever? */
+ npv = pmap_pv_alloc();
+ if(npv == NULL) {
+ panic("pmap_pv_alloc() failed");
+ }
+ npv->pv_va = va;
+ npv->pv_pmap = pmap;
+ npv->pv_next = pv->pv_next;
+ npv->pv_flags = pv->pv_flags;
+ pv->pv_next = npv;
+
+ if (!npv->pv_next)
+ stat_count(enter_stats.secondpv);
+ fnd:
+ ;
+ }
+ splx(s);
+ }
+ else {
+ /*
+ * Assumption: if it is not part of our managed memory
+		 * then it must be device memory which may be volatile.
+ */
+ stat_count(enter_stats.unmanaged);
+ if (prot & VM_PROT_WRITE) {
+ npte = PG_IOPAGE & ~PG_G;
+ } else {
+ npte = PG_IOPAGE & ~(PG_G | PG_M);
+ }
+ }
+
+ if (pmap == pmap_kernel()) {
+ pte = kvtopte(va);
+ npte |= vad_to_pfn(pa) | PG_ROPAGE | PG_G;
+ if (!(pte->pt_entry & PG_V)) {
+ pmap->pm_stats.resident_count++;
+ }
+ if(pa != pfn_to_pad(pte->pt_entry)) {
+ pmap_remove(pmap, va, va + NBPG);
+ stat_count(enter_stats.mchange);
+ }
+
+ /*
+ * Update the same virtual address entry.
+ */
+ pte->pt_entry = npte;
+ tlb_update(va, npte);
+ return (KERN_SUCCESS);
+ }
+
+ /*
+ * User space mapping. Do table build.
+ */
+ if (!(pte = pmap_segmap(pmap, va))) {
+ pv_entry_t pv;
+ do {
+ mem = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+ if (mem == NULL) {
+ /* XXX What else can we do? Deadlocks? */
+ uvm_wait("penter");
+ }
+ } while (mem == NULL);
+
+ pv = pa_to_pvh(VM_PAGE_TO_PHYS(mem));
+ if(pv->pv_flags & PV_CACHED &&
+ ((pv->pv_va ^ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem))) & CpuCacheAliasMask) != 0) {
+ Mips_SyncDCachePage(pv->pv_va);
+ }
+ pv->pv_va = PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+ pv->pv_flags = PV_CACHED;
+
+ pmap_segmap(pmap, va) = pte = (pt_entry_t *)
+ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem));
+ }
+ pte += (va >> PGSHIFT) & (NPTEPG - 1);
+
+ /*
+ * Now validate mapping with desired protection/wiring.
+ * Assume uniform modified and referenced status for all
+	 * MIPS pages in an OpenBSD page.
+ */
+	npte |= vad_to_pfn(pa);
+ if (pmapdebug & PDB_ENTER) {
+ printf("pmap_enter: new pte %x", npte);
+ if (pmap->pm_tlbgen == tlbpid_gen)
+ printf(" tlbpid %d", pmap->pm_tlbpid);
+ printf("\n");
+ }
+
+ if(pa != pfn_to_pad(pte->pt_entry)) {
+ pmap_remove(pmap, va, va + NBPG);
+ stat_count(enter_stats.mchange);
+ }
+
+ if (!(pte->pt_entry & PG_V)) {
+ pmap->pm_stats.resident_count++;
+ }
+ pte->pt_entry = npte;
+ if (pmap->pm_tlbgen == tlbpid_gen) {
+ int s, i;
+ s = splimp();
+ i = tlb_update(va | (pmap->pm_tlbpid << VMTLB_PID_SHIFT), npte);
+
+ /*
+ * If mapping a memory space address invalidate ICache.
+ */
+ if (is_physaddr) {
+ Mips_InvalidateICachePage(va);
+ }
+ splx(s);
+ }
+
+ return (KERN_SUCCESS);
+}
+
+void
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+{
+ pt_entry_t *pte;
+ u_int npte;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) {
+ printf("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot);
+ }
+
+
+ npte = vad_to_pfn(pa) | PG_G;
+ if (prot & VM_PROT_WRITE) {
+ npte |= PG_RWPAGE;
+ }
+ else {
+ npte |= PG_ROPAGE;
+ }
+ pte = kvtopte(va);
+ pte->pt_entry = npte;
+ tlb_update(va, npte);
+}
+
+void
+pmap_kenter_cache(va, pa, prot, cache)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int cache;
+{
+ pt_entry_t *pte;
+ u_int npte;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) {
+ printf("pmap_kenter_cache(%lx, %lx, %x)\n", va, pa, prot);
+ }
+
+
+ npte = vad_to_pfn(pa) | PG_G;
+ if (prot & VM_PROT_WRITE) {
+ npte |= PG_M | cache;
+ }
+ else {
+ npte |= PG_RO | cache;
+ }
+ pte = kvtopte(va);
+ pte->pt_entry = npte;
+ tlb_update(va, npte);
+}
+
+void
+pmap_kremove(va, len)
+ vaddr_t va;
+ vsize_t len;
+{
+ pt_entry_t *pte;
+ vaddr_t eva;
+ u_int entry;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE)) {
+ printf("pmap_kremove(%lx, %lx)\n", va, len);
+ }
+
+ pte = kvtopte(va);
+ eva = va + len;
+ for (; va < eva; va += PAGE_SIZE, pte++) {
+ entry = pte->pt_entry;
+		if (!(entry & PG_V)) {
+			continue;
+		}
+ Mips_SyncDCachePage(va);
+ pte->pt_entry = PG_NV | PG_G;
+ tlb_flush_addr(va);
+ }
+}
+
+void
+pmap_unwire(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
+{
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+boolean_t
+pmap_extract(pmap, va, pa)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t *pa;
+{
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_extract(%x, %x) -> ", pmap, va);
+ }
+
+ if (!pmap->pm_segtab) {
+ if (va >= KSEG0_BASE && va < (KSEG0_BASE + KSEG_SIZE)) {
+ *pa = KSEG0_TO_PHYS(va);
+ }
+ else {
+#ifdef DIAGNOSTIC
+ if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) {
+ printf("pmap_extract(%x, %x) -> ", pmap, va);
+ panic("pmap_extract");
+ }
+#endif
+ *pa = pfn_to_pad(kvtopte(va)->pt_entry);
+ }
+ }
+ else {
+ pt_entry_t *pte;
+
+ if (!(pte = pmap_segmap(pmap, va))) {
+ *pa = 0;
+ }
+ else {
+ pte += (va >> PGSHIFT) & (NPTEPG - 1);
+ *pa = pfn_to_pad(pte->pt_entry);
+ }
+ }
+ if (*pa)
+ *pa |= va & PGOFSET;
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_extract: pa %x\n", *pa);
+ }
+
+ return (TRUE);
+}
+
+/*
+ * Find first virtual address >= *vap that
+ * will not cause cache aliases.
+ */
+void
+pmap_prefer(foff, vap)
+ paddr_t foff;
+ vaddr_t *vap;
+{
+#if 1
+ vaddr_t va = *vap;
+ long d, m;
+
+ m = CpuCacheAliasMask;
+ if (m == 0) /* m=0 => no cache aliasing */
+ return;
+
+ m = (m | (m - 1)) + 1; /* Value from mask */
+ d = foff - va;
+ d &= (m - 1);
+ *vap = va + d;
+#else
+ *vap += (*vap ^ foff) & CpuCacheAliasMask;
+#endif
+}
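+
+/*
+ * A worked example of the computation above, assuming for illustration
+ * that CpuCacheAliasMask == 0x7000 (a 32KB virtually indexed cache way
+ * with 4KB pages; the real value is set up by the cache probe code):
+ *
+ *	m    = (0x7000 | 0x6fff) + 1 = 0x8000
+ *	va   = 0x40000000, foff = 0x123000
+ *	d    = (foff - va) & 0x7fff = 0x3000
+ *	*vap = 0x40003000
+ *
+ * The returned address has the same cache color as foff, so a mapping
+ * placed there cannot alias another mapping of the same object.
+ */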
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vaddr_t dst_addr;
+ vsize_t len;
+ vaddr_t src_addr;
+{
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_copy(%x, %x, %x, %x, %x)\n",
+ dst_pmap, src_pmap, dst_addr, len, src_addr);
+ }
+}
+
+#ifndef pmap_update
+/*
+ * Require that all active physical maps contain no
+ * incorrect entries NOW. [This update includes
+ * forcing updates of any address map caching.]
+ *
+ * Generally used to ensure that a thread about
+ * to run will see a semantically correct world.
+ */
+void
+pmap_update(pmap)
+ pmap_t pmap;
+{
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_update()\n");
+ }
+}
+#endif
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+void
+pmap_zero_page(struct vm_page *pg)
+{
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+ vaddr_t p;
+ pv_entry_t pv;
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_zero_page(%x)\n", phys);
+ }
+
+ p = (vaddr_t)PHYS_TO_KSEG0(phys);
+ pv = pa_to_pvh(phys);
+ if(pv->pv_flags & PV_CACHED &&
+ ((pv->pv_va ^ (int)p) & CpuCacheAliasMask) != 0) {
+ Mips_SyncDCachePage(pv->pv_va);
+ }
+ mem_zero_page(p);
+ Mips_HitSyncDCache(p, PAGE_SIZE);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent) page.
+ *
+ * We do the copy phys to phys and need to check if there may be
+ * a virtual coherence problem. If so flush the cache for the
+ * areas before copying, and flush afterwards.
+ */
+void
+pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
+{
+ paddr_t src = VM_PAGE_TO_PHYS(srcpg);
+ paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
+#if 0
+ int *s, *d;
+/* if (CpuCacheAliasMask == 0) { XXX */
+ s = (int *)PHYS_TO_KSEG0(src);
+ d = (int *)PHYS_TO_KSEG0(dst);
+
+ memcpy(d, s, PAGE_SIZE);
+
+#else
+ int *s, *d, *end;
+ int df = 1;
+ int sf = 1;
+ int tmp0, tmp1, tmp2, tmp3;
+ pv_entry_t pv;
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_copy_page(%x, %x)\n", src, dst);
+ }
+ s = (int *)PHYS_TO_KSEG0(src);
+ d = (int *)PHYS_TO_KSEG0(dst);
+
+ pv = pa_to_pvh(src);
+ if(pv->pv_flags & PV_CACHED &&
+ (sf = ((pv->pv_va ^ (int)s) & CpuCacheAliasMask) != 0)) {
+ Mips_SyncDCachePage(pv->pv_va);
+ }
+ pv = pa_to_pvh(dst);
+ if(pv->pv_flags & PV_CACHED &&
+ (df = ((pv->pv_va ^ (int)d) & CpuCacheAliasMask) != 0)) {
+ Mips_SyncDCachePage(pv->pv_va);
+ }
+
+ end = s + PAGE_SIZE / sizeof(int);
+ do {
+ tmp0 = s[0]; tmp1 = s[1]; tmp2 = s[2]; tmp3 = s[3];
+ d[0] = tmp0; d[1] = tmp1; d[2] = tmp2; d[3] = tmp3;
+ s += 4;
+ d += 4;
+ } while (s != end);
+
+ if(sf) {
+ Mips_HitSyncDCache((vaddr_t)PHYS_TO_KSEG0(src), PAGE_SIZE);
+ }
+#if 0 /* XXX TODO: Why can't we trust the following? */
+ if(df || (pv->pv_pmap == NULL) || (pv->pv_flags & PV_EXEC)) {
+ Mips_HitSyncDCachePage(dst);
+ }
+#else
+ Mips_HitSyncDCache((vaddr_t)PHYS_TO_KSEG0(dst), PAGE_SIZE);
+#endif
+#endif
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+boolean_t
+pmap_clear_modify(pg)
+ struct vm_page *pg;
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t rv = FALSE;
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_clear_modify(%x)\n", pa);
+ }
+ return(rv);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+boolean_t
+pmap_clear_reference(pg)
+ struct vm_page *pg;
+{
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_clear_reference(%x)\n", pa);
+ }
+ return(FALSE);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+boolean_t
+pmap_is_referenced(pg)
+ struct vm_page *pg;
+{
+ return (FALSE);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+boolean_t
+pmap_is_modified(pg)
+ struct vm_page *pg;
+{
+ return (FALSE);
+}
+
+paddr_t
+pmap_phys_address(ppn)
+ int ppn;
+{
+
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_phys_address(%x)\n", ppn);
+ }
+ return (ptoa(ppn));
+}
+
+/*
+ * Miscellaneous support routines
+ */
+
+/*
+ * Allocate a hardware PID and return it.
+ * It takes almost as much or more time to search the TLB for a
+ * specific PID and flush those entries as it does to flush the entire TLB.
+ * Therefore, when we allocate a new PID, we just take the next number. When
+ * we run out of numbers, we flush the TLB, increment the generation count
+ * and start over. PID zero is reserved for kernel use.
+ * This is called only by switch().
+ */
+int
+pmap_alloc_tlbpid(p)
+ struct proc *p;
+{
+ pmap_t pmap;
+ int id;
+
+ pmap = p->p_vmspace->vm_map.pmap;
+ if (pmap->pm_tlbgen != tlbpid_gen) {
+ id = tlbpid_cnt;
+ if (id >= VMNUM_PIDS) {
+ tlb_flush(sys_config.cpu.tlbsize);
+ /* reserve tlbpid_gen == 0 to alway mean invalid */
+			/* reserve tlbpid_gen == 0 to always mean invalid */
+ tlbpid_gen = 1;
+ id = 1;
+ }
+ tlbpid_cnt = id + 1;
+ pmap->pm_tlbpid = id;
+ pmap->pm_tlbgen = tlbpid_gen;
+ }
+ else {
+ id = pmap->pm_tlbpid;
+ }
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) {
+ if (curproc) {
+ printf("pmap_alloc_tlbpid: curproc %d '%s' ",
+ curproc->p_pid, curproc->p_comm);
+ }
+ else {
+ printf("pmap_alloc_tlbpid: curproc <none> ");
+ }
+ printf("segtab %x tlbpid %d pid %d '%s'\n",
+ pmap->pm_segtab, id, p->p_pid, p->p_comm);
+ }
+
+ return (id);
+}
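+
+/*
+ * Illustrative use of the PID/generation pair returned above (a sketch
+ * mirroring the checks already made in pmap_remove() and pmap_protect()):
+ *
+ *	if (pmap->pm_tlbgen == tlbpid_gen)
+ *		tlb_flush_addr(va | (pmap->pm_tlbpid << VMTLB_PID_SHIFT));
+ *
+ * When the generations differ the pmap has no live TLB entries left,
+ * so no per-address flush is necessary.
+ */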
+
+/*
+ * Remove a physical to virtual address translation from the PV table.
+ */
+void
+pmap_remove_pv(pmap, va, pa)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+{
+ pv_entry_t pv, npv;
+ int s;
+
+ if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY)) {
+ printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
+ }
+
+ /*
+ * Remove page from the PV table (raise IPL since we
+ * may be called at interrupt time).
+ */
+ if (!IS_VM_PHYSADDR(pa)) {
+ return;
+ }
+
+ pv = pa_to_pvh(pa);
+ s = splimp(); /* XXX not in nbsd */
+ /*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ npv = pv->pv_next;
+ if (npv) {
+ *pv = *npv;
+ pmap_pv_free(npv);
+ }
+ else {
+ pv->pv_pmap = NULL;
+ }
+ stat_count(remove_stats.pvfirst);
+ }
+ else {
+ for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
+ stat_count(remove_stats.pvsearch);
+ if (pmap == npv->pv_pmap && va == npv->pv_va)
+ goto fnd;
+ }
+#ifdef DIAGNOSTIC
+ printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
+ panic("pmap_remove_pv");
+#endif
+ fnd:
+ pv->pv_next = npv->pv_next;
+ pmap_pv_free(npv);
+ }
+ splx(s);
+ return;
+}
+
+/*
+ * pmap_pv_page_alloc:
+ *
+ * Allocate a page for the pv_entry pool.
+ */
+void *
+pmap_pv_page_alloc(u_long size, int flags, int mtype)
+{
+ paddr_t pg;
+
+ if(pmap_physpage_alloc(&pg))
+ return ((void *) PHYS_TO_KSEG0(pg));
+ return (NULL);
+}
+
+/*
+ * pmap_pv_page_free:
+ *
+ * Free a pv_entry pool page.
+ */
+void
+pmap_pv_page_free(void *v, u_long size, int mtype)
+{
+ pmap_physpage_free(KSEG0_TO_PHYS((vaddr_t)v));
+}
+
+/*
+ * pmap_physpage_alloc:
+ *
+ * Allocate a single page from the VM system and return the
+ * physical address for that page.
+ */
+boolean_t
+pmap_physpage_alloc(paddr_t *pap)
+{
+ struct vm_page *pg;
+ paddr_t pa;
+
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ if (pg != NULL) {
+ pa = VM_PAGE_TO_PHYS(pg);
+ *pap = pa;
+ return (TRUE);
+ }
+ return (FALSE);
+}
+
+/*
+ * pmap_physpage_free:
+ *
+ *	Free a single pmap metadata page at the specified physical address.
+ */
+void
+pmap_physpage_free(paddr_t pa)
+{
+ struct vm_page *pg;
+
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
+ panic("pmap_physpage_free: bogus physical page address");
+ }
+ uvm_pagefree(pg);
+}
+
+/*==================================================================*/
+/* Bus space map utility functions */
+
+int
+bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int cacheable,
+ bus_space_handle_t *bshp)
+{
+ bus_addr_t vaddr;
+ bus_addr_t spa, epa;
+ bus_size_t off;
+ int len;
+
+ spa = trunc_page(bpa);
+ epa = bpa + size;
+ off = bpa - spa;
+ len = size+off;
+
+ if (phys_map == NULL) {
+ printf("ouch, add mapping when phys map not ready!\n");
+ } else {
+ vaddr = uvm_km_valloc_wait(kernel_map, len);
+ }
+ *bshp = vaddr + off;
+#ifdef DEBUG_BUS_MEM_ADD_MAPPING
+ printf("map bus %x size %x to %x vbase %x\n", bpa, size, *bshp, spa);
+#endif
+ for (; len > 0; len -= NBPG) {
+ pmap_kenter_cache(vaddr, spa,
+ VM_PROT_READ | VM_PROT_WRITE,
+ cacheable ? PG_IOPAGE : PG_IOPAGE); /* XXX */
+ spa += NBPG;
+ vaddr += NBPG;
+ }
+ return 0;
+}
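+
+/*
+ * Worked example of the arithmetic above, with illustrative values:
+ * bpa = 0x1fc00123 and size = 0x300 give spa = 0x1fc00000, off = 0x123
+ * and len = 0x423, so one page is entered with pmap_kenter_cache() and
+ * *bshp ends up pointing 0x123 bytes into it.
+ */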
+
diff --git a/sys/arch/mips64/mips64/process_machdep.c b/sys/arch/mips64/mips64/process_machdep.c
new file mode 100644
index 00000000000..eb0347314fc
--- /dev/null
+++ b/sys/arch/mips64/mips64/process_machdep.c
@@ -0,0 +1,118 @@
+/* $OpenBSD: process_machdep.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/*
+ * Copyright (c) 1994 Adam Glass
+ * Copyright (c) 1993 The Regents of the University of California.
+ * Copyright (c) 1993 Jan-Simon Pendry
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From:
+ * Id: procfs_i386.c,v 4.1 1993/12/17 10:47:45 jsp Rel
+ *
+ * $Id: process_machdep.c,v 1.1 2004/08/06 20:56:03 pefo Exp $
+ */
+
+/*
+ * This file may seem a bit stylized, but that is so that it's easier to port.
+ * Functions to be implemented here are:
+ *
+ * process_read_regs(proc, regs)
+ * Get the current user-visible register set from the process
+ * and copy it into the regs structure (<machine/reg.h>).
+ * The process is stopped at the time read_regs is called.
+ *
+ * process_write_regs(proc, regs)
+ * Update the current register set from the passed in regs
+ * structure. Take care to avoid clobbering special CPU
+ * registers or privileged bits in the PSL.
+ * The process is stopped at the time write_regs is called.
+ *
+ * process_sstep(proc)
+ * Arrange for the process to trap after executing a single instruction.
+ *
+ * process_set_pc(proc)
+ * Set the process's program counter.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/vnode.h>
+#include <sys/ptrace.h>
+#include <machine/pte.h>
+#include <machine/psl.h>
+#include <machine/frame.h>
+
+#define REGSIZE sizeof(struct trap_frame)
+
+extern void cpu_singlestep __P((struct proc *));
+int
+process_read_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ bcopy((caddr_t)p->p_md.md_regs, (caddr_t)regs, REGSIZE);
+ return (0);
+}
+
+int
+process_write_regs(p, regs)
+ struct proc *p;
+ struct reg *regs;
+{
+ bcopy((caddr_t)regs, (caddr_t)p->p_md.md_regs, REGSIZE);
+/*XXX Clear to user set bits!! */
+ return (0);
+}
+
+int
+process_sstep(p, sstep)
+	struct proc *p;
+	int sstep;
+{
+ if(sstep)
+ cpu_singlestep(p);
+ return (0);
+}
+
+int
+process_set_pc(p, addr)
+ struct proc *p;
+ caddr_t addr;
+{
+ p->p_md.md_regs->pc = (register_t)addr;
+ return (0);
+}
+
diff --git a/sys/arch/mips64/mips64/sendsig.c b/sys/arch/mips64/mips64/sendsig.c
new file mode 100644
index 00000000000..01442baf648
--- /dev/null
+++ b/sys/arch/mips64/mips64/sendsig.c
@@ -0,0 +1,282 @@
+/* $OpenBSD: sendsig.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz and Don Ahn.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*
+ * Copyright (c) 2001 Opsycon AB (www.opsycon.se)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Opsycon AB, Sweden.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/signalvar.h>
+#include <sys/user.h>
+#include <sys/exec.h>
+#include <sys/mount.h>
+#include <sys/syscallargs.h>
+
+#include <machine/regnum.h>
+
+/*
+ * WARNING: code in locore.s assumes the layout shown for sf_signum
+ * thru sf_handler so... don't screw with them!
+ */
+struct sigframe {
+ int sf_signum; /* signo for handler */
+ siginfo_t *sf_sip; /* pointer to siginfo_t */
+ struct sigcontext *sf_scp; /* context ptr for handler */
+ sig_t sf_handler; /* handler addr for u_sigc */
+ struct sigcontext sf_sc; /* actual context */
+ siginfo_t sf_si;
+};
+
+#ifdef DEBUG
+int sigdebug = 0;
+int sigpid = 0;
+#define SDB_FOLLOW 0x01
+#define SDB_KSTACK 0x02
+#define SDB_FPSTATE 0x04
+#endif
+
+/*
+ * Send an interrupt to process.
+ */
+void
+sendsig(catcher, sig, mask, code, type, val)
+ sig_t catcher;
+ int sig, mask;
+ u_long code;
+ int type;
+ union sigval val;
+{
+ struct proc *p = curproc;
+ struct sigframe *fp;
+ struct trap_frame *regs;
+ struct sigacts *psp = p->p_sigacts;
+ int oonstack, fsize;
+ struct sigcontext ksc;
+
+ regs = p->p_md.md_regs;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in data space, the
+ * call to grow() is a nop, and the copyout()
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ fsize = sizeof(struct sigframe);
+ if (!(psp->ps_siginfo & sigmask(sig)))
+ fsize -= sizeof(siginfo_t);
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
+ psp->ps_sigstk.ss_size - fsize);
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
+ } else
+ fp = (struct sigframe *)(regs->sp - fsize);
+ if ((long)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
+ (void)uvm_grow(p, (long)fp);
+#ifdef DEBUG
+ if ((sigdebug & SDB_FOLLOW) ||
+ ((sigdebug & SDB_KSTACK) && (p->p_pid == sigpid)))
+ printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
+ p->p_pid, sig, &oonstack, fp, &fp->sf_sc);
+#endif
+ /*
+ * Build the signal context to be used by sigreturn.
+ */
+ ksc.sc_onstack = oonstack;
+ ksc.sc_mask = mask;
+ ksc.sc_pc = regs->pc;
+ ksc.mullo = regs->mullo;
+ ksc.mulhi = regs->mulhi;
+ ksc.sc_regs[0] = 0xACEDBADE; /* magic number */
+ bcopy((caddr_t)&regs->ast, (caddr_t)&ksc.sc_regs[1],
+	    sizeof(ksc.sc_regs) - sizeof(register_t));
+ ksc.sc_fpused = p->p_md.md_flags & MDP_FPUSED;
+ if (ksc.sc_fpused) {
+ extern struct proc *machFPCurProcPtr;
+
+ /* if FPU has current state, save it first */
+ if (p == machFPCurProcPtr) {
+ if (regs->sr & SR_FR_32)
+ MipsSaveCurFPState(p);
+ else
+ MipsSaveCurFPState16(p);
+ }
+ bcopy((caddr_t)&p->p_md.md_regs->f0, (caddr_t)ksc.sc_fpregs,
+ sizeof(ksc.sc_fpregs));
+ }
+
+ if (psp->ps_siginfo & sigmask(sig)) {
+ siginfo_t si;
+
+ initsiginfo(&si, sig, code, type, val);
+ if (copyout((caddr_t)&si, (caddr_t)&fp->sf_si, sizeof si))
+ goto bail;
+ }
+
+ if (copyout((caddr_t)&ksc, (caddr_t)&fp->sf_sc, sizeof(ksc))) {
+bail:
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ sig = sigmask(SIGILL);
+ p->p_sigignore &= ~sig;
+ p->p_sigcatch &= ~sig;
+ p->p_sigmask &= ~sig;
+ psignal(p, SIGILL);
+ return;
+ }
+ /*
+ * Build the argument list for the signal handler.
+ */
+ regs->a0 = sig;
+ regs->a1 = (psp->ps_siginfo & sigmask(sig)) ? (register_t)&fp->sf_si : NULL;
+ regs->a2 = (register_t)&fp->sf_sc;
+ regs->a3 = (register_t)catcher;
+
+ regs->pc = (register_t)catcher;
+ regs->t9 = (register_t)catcher;
+ regs->sp = (register_t)fp;
+
+ regs->ra = p->p_sigcode;
+#ifdef DEBUG
+ if ((sigdebug & SDB_FOLLOW) ||
+ ((sigdebug & SDB_KSTACK) && (p->p_pid == sigpid)))
+ printf("sendsig(%d): sig %d returns\n",
+ p->p_pid, sig);
+#endif
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psl to gain improper privileges or to cause
+ * a machine fault.
+ */
+/* ARGSUSED */
+int
+sys_sigreturn(p, v, retval)
+ struct proc *p;
+ void *v;
+ register_t *retval;
+{
+ struct sys_sigreturn_args /* {
+ syscallarg(struct sigcontext *) sigcntxp;
+ } */ *uap = v;
+ struct sigcontext *scp;
+ struct trap_frame *regs;
+ struct sigcontext ksc;
+ int error;
+
+ scp = SCARG(uap, sigcntxp);
+#ifdef DEBUG
+ if (sigdebug & SDB_FOLLOW)
+ printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
+#endif
+ regs = p->p_md.md_regs;
+ /*
+ * Test and fetch the context structure.
+ * We grab it all at once for speed.
+ */
+ error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(ksc));
+ if (error || ksc.sc_regs[ZERO] != 0xACEDBADE) {
+#ifdef DEBUG
+ if (!(sigdebug & SDB_FOLLOW))
+ printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
+ printf(" old sp %x ra %x pc %x\n",
+ regs->sp, regs->ra, regs->pc);
+ printf(" new sp %x ra %x pc %x err %d z %x\n",
+ ksc.sc_regs[SP], ksc.sc_regs[RA], ksc.sc_regs[PC],
+ error, ksc.sc_regs[ZERO]);
+#endif
+ return (EINVAL);
+ }
+ scp = &ksc;
+ /*
+ * Restore the user supplied information
+ */
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+ p->p_sigmask = scp->sc_mask &~ sigcantmask;
+ regs->pc = scp->sc_pc;
+ regs->mullo = scp->mullo;
+ regs->mulhi = scp->mulhi;
+ bcopy((caddr_t)&scp->sc_regs[1], (caddr_t)&regs->ast,
+ sizeof(scp->sc_regs) - sizeof(register_t));
+ if (scp->sc_fpused)
+ bcopy((caddr_t)scp->sc_fpregs, (caddr_t)&p->p_md.md_regs->f0,
+ sizeof(scp->sc_fpregs));
+ return (EJUSTRETURN);
+}
diff --git a/sys/arch/mips64/mips64/sys_machdep.c b/sys/arch/mips64/mips64/sys_machdep.c
new file mode 100644
index 00000000000..52a529772b5
--- /dev/null
+++ b/sys/arch/mips64/mips64/sys_machdep.c
@@ -0,0 +1,129 @@
+/* $OpenBSD: sys_machdep.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)sys_machdep.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ioctl.h>
+#include <sys/file.h>
+#include <sys/time.h>
+#include <sys/proc.h>
+#include <sys/uio.h>
+#include <sys/kernel.h>
+#include <sys/mtio.h>
+#include <sys/buf.h>
+
+#include <sys/mount.h>
+#include <sys/syscallargs.h>
+
+#ifdef TRACE
+int nvualarm;
+
+vtrace(p, uap, retval)
+ struct proc *p;
+ register struct vtrace_args /* {
+ syscallarg(int) request;
+ syscallarg(int) value;
+ } */ *uap;
+ register_t *retval;
+{
+ int vdoualarm();
+
+ switch (SCARG(uap, request)) {
+
+ case VTR_DISABLE: /* disable a trace point */
+ case VTR_ENABLE: /* enable a trace point */
+ if (SCARG(uap, value) < 0 || SCARG(uap, value) >= TR_NFLAGS)
+ return (EINVAL);
+ *retval = traceflags[SCARG(uap, value)];
+ traceflags[SCARG(uap, value)] = SCARG(uap, request);
+ break;
+
+ case VTR_VALUE: /* return a trace point setting */
+ if (SCARG(uap, value) < 0 || SCARG(uap, value) >= TR_NFLAGS)
+ return (EINVAL);
+ *retval = traceflags[SCARG(uap, value)];
+ break;
+
+ case VTR_UALARM: /* set a real-time ualarm, less than 1 min */
+ if (SCARG(uap, value) <= 0 || SCARG(uap, value) > 60 * hz ||
+ nvualarm > 5)
+ return (EINVAL);
+ nvualarm++;
+ timeout(vdoualarm, (caddr_t)p->p_pid, SCARG(uap, value));
+ break;
+
+ case VTR_STAMP:
+ trace(TR_STAMP, SCARG(uap, value), p->p_pid);
+ break;
+ }
+ return (0);
+}
+
+vdoualarm(arg)
+ int arg;
+{
+ register struct proc *p;
+
+ p = pfind(arg);
+ if (p)
+ psignal(p, 16);
+ nvualarm--;
+}
+#endif
+
+int
+sys_sysarch(p, v, retval)
+ struct proc *p;
+ void *v;
+ register_t *retval;
+{
+ struct sys_sysarch_args /* {
+ syscallarg(int) op;
+ syscallarg(char *) parms;
+ } */ *uap = v;
+ int error = 0;
+
+ switch(SCARG(uap, op)) {
+ default:
+ error = EINVAL;
+ break;
+ }
+ return(error);
+}
diff --git a/sys/arch/mips64/mips64/tlbhandler.S b/sys/arch/mips64/mips64/tlbhandler.S
new file mode 100644
index 00000000000..cd7ef33e076
--- /dev/null
+++ b/sys/arch/mips64/mips64/tlbhandler.S
@@ -0,0 +1,647 @@
+/* $OpenBSD: tlbhandler.S,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+
+/*
+ * Copyright (c) 1995-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code handles TLB exceptions and updates.
+ */
+
+#include <machine/param.h>
+#include <machine/psl.h>
+#include <machine/pte.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/regnum.h>
+#include <machine/cpustate.h>
+
+#include "assym.h"
+
+ .set mips3
+
+/* XXX pipeline drainer fix for older RM7000 cpus. */
+#define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
+
+ .set noreorder /* Default reorder mode */
+
+/*---------------------------------------------------------------- tlb_miss
+ * Low level TLB exception handler. TLB and XTLB share some
+ * code for the moment and are copied down at the same time.
+ * This code must be PIC and not more than 64 instructions
+ * for both TLB and XTLB handling or it will overflow the
+ * available storage. If the startup code finds out that it
+ * is larger, the trampoline code is copied instead of panicking.
+ */
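+
+/*
+ * Size-check sketch (informational only; the real decision is made by
+ * the machine-dependent startup code, and 64 instructions == 0x100
+ * bytes is taken from the comment above):
+ *
+ *	if (e_tlb_miss - tlb_miss <= 0x100)
+ *		copy tlb_miss..e_tlb_miss over the refill vectors;
+ *	else
+ *		copy the tlb_miss_tramp / xtlb_miss_tramp trampolines.
+ */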
+/***************************** Start of code copied to exception vector */
+ .globl tlb_miss /* 0xffffffff80000000 */
+ .set noat
+tlb_miss:
+#if 0
+ dmfc0 k0, COP_0_EXC_PC
+ PTR_L k1, tlbtrcptr
+ PTR_S k0, 0(k1)
+ dmfc0 k0, COP_0_BAD_VADDR
+ PTR_S zero, 2*REGSZ(k1)
+ PTR_S zero, 3*REGSZ(k1)
+ bltz k0, 1f # kernel address
+ PTR_S k0, REGSZ(k1)
+
+ PTR_L k1, curprocpaddr
+ PTR_L k1, U_PCB_SEGTAB(k1)
+ PTR_SRL k0, k0, SEGSHIFT - LOGREGSZ
+ andi k0, k0, 0x1ff << LOGREGSZ
+ PTR_ADDU k1, k1, k0
+ PTR_L k0, 0(k1) # get pointer to page table
+ PTR_L k1, tlbtrcptr
+ PTR_S k0, 2*REGSZ(k1)
+ beqz k0, 1f
+ dmfc0 k1, COP_0_BAD_VADDR
+
+ PTR_SRL k1, k1, PGSHIFT - 2
+ andi k1, k1, ((NPTEPG/2) - 1) << 3
+ PTR_ADDU k1, k1, k0 # index into segment map
+ lwu k0, 0(k1) # get page PTE
+ dsll k0, k0, 32
+ lwu k1, 4(k1)
+ or k0, k1
+ PTR_L k1, tlbtrcptr
+ PTR_S k0, 3*REGSZ(k1)
+1:
+ PTR_L k1, tlbtrcptr
+ PTR_ADDU k1, 4*REGSZ
+ LI k0, 0x100
+ nor k0, zero, k0
+ and k1, k0
+ LA k0, tlbtrcptr
+ PTR_S k1, 0(k0)
+#endif
+ PTR_L k1, curprocpaddr
+ dmfc0 k0, COP_0_BAD_VADDR
+ bltz k0, _k_miss # kernel address space
+ PTR_SRL k0, k0, SEGSHIFT - LOGREGSZ
+ PTR_L k1, U_PCB_SEGTAB(k1)
+ andi k0, k0, 0x1ff << LOGREGSZ
+ PTR_ADDU k1, k1, k0
+ PTR_L k1, 0(k1) # get pointer to page table
+ dmfc0 k0, COP_0_BAD_VADDR
+ PTR_SRL k0, k0, PGSHIFT - 2
+ andi k0, k0, ((NPTEPG/2) - 1) << 3
+ beq k1, zero, _inv_seg # invalid segment map
+ PTR_ADDU k1, k1, k0 # index into segment map
+ lw k0, 0(k1) # get page PTE
+tlb_load:
+ lw k1, 4(k1)
+ dsll k0, k0, 34
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO0
+ dsll k1, k1, 34
+ dsrl k1, k1, 34
+ dmtc0 k1, COP_0_TLB_LO1
+ nop # RM7000 needs 4 nops
+ nop
+ nop
+ nop
+ tlbwr # update TLB
+ nop
+ nop
+ nop
+ nop
+	eret			# RM7000 needs 4 for JTLB usage.
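+/*
+ * Note: the dsll/dsrl by 34 pairs in tlb_load above clear everything
+ * over bit 29 of the 32-bit PTE just loaded (the sign extension from
+ * lw as well as software-only flags such as the "wired" bit mentioned
+ * in k_tlb_inv below), leaving only what the EntryLo registers accept.
+ */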
+
+/*---------------------------------------------------------------- xtlb_miss
+ * Low level XTLB exception handler.
+ * XXX! This handler should be changed to handle larger segtabs.
+ */
+ .globl xtlb_miss /* 0xffffffff80000080 */
+ .set noat
+xtlb_miss:
+ PTR_L k1, curprocpaddr
+ dmfc0 k0, COP_0_BAD_VADDR
+ bltz k0, _k_miss
+ PTR_SRL k0, k0, SEGSHIFT - LOGREGSZ
+ PTR_L k1, U_PCB_SEGTAB(k1)
+ andi k0, k0, 0x1ff << LOGREGSZ
+ PTR_ADDU k1, k1, k0
+ PTR_L k1, 0(k1) # get pointer to page table
+ dmfc0 k0, COP_0_BAD_VADDR
+ PTR_SRL k0, k0, PGSHIFT - 2
+ andi k0, k0, ((NPTEPG/2) - 1) << 3
+ beq k1, zero, _inv_seg
+ PTR_ADDU k1, k1, k0
+ b tlb_load # rest is same as 'tlb_miss'
+ lw k0, 0(k1)
+
+_inv_seg:
+ j tlb_miss_nopt # No page table for this segment.
+ nop
+
+_k_miss:
+ j k_tlb_miss # kernel tlbmiss.
+ dmfc0 k0, COP_0_BAD_VADDR # must reload.
+
+ .globl e_tlb_miss
+e_tlb_miss:
+ .set at
+/***************************** End of code copied to exception vector */
+
+tlb_miss_nopt:
+ .set noat
+ mfc0 k0, COP_0_STATUS_REG
+ andi k0, SR_KSU_USER
+ bne k0, zero, go_u_general
+ nop
+ j k_general
+ nop
+ .set at
+
+/*
+ * Trampolines copied to exception vectors when the code is too big.
+ */
+ .globl tlb_miss_tramp
+tlb_miss_tramp:
+ .set noat
+ j tlb_miss
+ nop
+ .set at
+ .globl e_tlb_miss_tramp
+e_tlb_miss_tramp:
+
+ .globl xtlb_miss_tramp
+xtlb_miss_tramp:
+ .set noat
+ j xtlb_miss
+ nop
+ .set at
+ .globl e_xtlb_miss_tramp
+e_xtlb_miss_tramp:
+
+
+/*---------------------------------------------------------------- k_tlb_inv
+ * Handle a TLB invalid exception from kernel mode in kernel
+ * space. This happens when we have a TLB match but an invalid
+ * entry. Try to reload.
+ */
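+/*
+ * Each TLB entry maps an even/odd pair of virtual pages through
+ * EntryLo0/EntryLo1, so both halves are refilled from adjacent Sysmap
+ * PTEs, branching on which page of the pair actually faulted.
+ */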
+NLEAF(k_tlb_inv)
+ .set noat
+ LA k1, (VM_MIN_KERNEL_ADDRESS) # compute index
+ dmfc0 k0, COP_0_BAD_VADDR # get the fault address
+ PTR_SUBU k0, k0, k1
+ lw k1, Sysmapsize # index within range?
+ PTR_SRL k0, k0, PGSHIFT
+ sltu k1, k0, k1
+ beq k1, zero, sys_stk_chk # No. check for valid stack
+ PTR_L k1, Sysmap
+
+ PTR_SLL k0, k0, 2 # compute offset from index
+ tlbp # Probe the invalid entry
+ PTR_ADDU k1, k1, k0
+ and k0, k0, 4 # check even/odd page
+ bne k0, zero, k_tlb_inv_odd
+ nop
+
+ mfc0 k0, COP_0_TLB_INDEX
+ blez k0, sys_stk_chk # probe fail or index 0!
+ lw k0, 0(k1) # get PTE entry
+
+ dsll k0, k0, 34 # get rid of "wired" bit
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO0 # load PTE entry
+ and k0, k0, PG_V # check for valid entry
+ beq k0, zero, go_k_general # PTE invalid
+ lw k0, 4(k1) # get odd PTE entry
+ dsll k0, k0, 34
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO1 # load PTE entry
+ nop
+ nop
+ nop
+ nop
+ tlbwi # write TLB
+ nop
+ nop
+ nop
+ nop
+ eret
+
+k_tlb_inv_odd:
+ mfc0 k0, COP_0_TLB_INDEX
+ blez k0, sys_stk_chk # probe fail or index 0!
+ lw k0, 0(k1) # get PTE entry
+
+ dsll k0, k0, 34 # get rid of wired bit
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO1 # save PTE entry
+ and k0, k0, PG_V # check for valid entry
+ beq k0, zero, go_k_general # PTE invalid
+ lw k0, -4(k1) # get even PTE entry
+ dsll k0, k0, 34
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO0 # save PTE entry
+ nop
+ nop
+ nop
+ nop
+ tlbwi # update TLB
+ nop
+ nop
+ nop
+ nop
+ eret
+END(k_tlb_inv)
+
+/*---------------------------------------------------------------- k_tlb_miss
+ *
+ * Handle a TLB miss exception from kernel mode in kernel space.
+ * We must check that this is coming from kernel mode.  If not,
+ * it's a bad address from user mode, so handle it accordingly.
+ * Load up the correct entry contents from the kernel map.
+ * k0 has bad address.
+ */
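+/*
+ * Kernel virtual addresses from VM_MIN_KERNEL_ADDRESS up are mapped by
+ * Sysmap, a linear array of 32-bit PTEs Sysmapsize pages long, indexed
+ * directly by the faulting page number.
+ */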
+NLEAF(k_tlb_miss)
+ .set noat
+ mfc0 k1, COP_0_STATUS_REG
+ andi k1, SR_KSU_USER
+ bne k1, zero, go_u_general
+ LA k1, (VM_MIN_KERNEL_ADDRESS) # compute index
+ PTR_SUBU k0, k0, k1
+ lw k1, Sysmapsize # index within range?
+ PTR_SRL k0, k0, PGSHIFT
+ sltu k1, k0, k1
+ beq k1, zero, sys_stk_chk # No. check for valid stack
+ PTR_L k1, Sysmap
+ PTR_SRL k0, k0, 1
+ PTR_SLL k0, k0, 3 # compute offset from index
+ PTR_ADDU k1, k1, k0
+ lw k0, 0(k1) # get PTE entry
+ lw k1, 4(k1) # get odd PTE entry
+ dsll k0, k0, 34 # get rid of "wired" bit
+ dsrl k0, k0, 34
+ dmtc0 k0, COP_0_TLB_LO0 # load PTE entry
+ dsll k1, k1, 34
+ dsrl k1, k1, 34
+ dmtc0 k1, COP_0_TLB_LO1 # load PTE entry
+ nop
+ nop
+ nop
+ nop
+ tlbwr # write TLB
+ nop
+ nop
+ nop
+ nop
+ eret
+
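+/*
+ * Reached when a kernel mode fault address is not covered by Sysmap
+ * (or the TLB probe fails).  If sp has dropped into the lowest 2KB of
+ * the U area the kernel stack is assumed to have overflowed: switch to
+ * a scratch stack just below 'start', print the trap state and panic.
+ * Otherwise hand the fault to k_general.
+ */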
+sys_stk_chk:
+ PTR_L k1, curprocpaddr
+ PTR_SUBU k0, sp, k1 # check to see if we have a
+ sltiu k0, 2048 # valid kernel stack
+ beqz k0, go_k_general # yes, handle.
+ nop
+
+ LA a0, start-FRAMESZ(CF_SZ)-4*REGSZ # set sp to a valid place
+ PTR_S sp, CF_ARGSZ+2*REGSZ(a0)
+ move sp, a0
+ LA a0, 1f
+ mfc0 a2, COP_0_STATUS_REG
+ mfc0 a3, COP_0_CAUSE_REG
+ dmfc0 a1, COP_0_EXC_PC
+ REG_S a2, CF_ARGSZ+0*REGSZ(sp)
+ REG_S a3, CF_ARGSZ+1*REGSZ(sp)
+ move a2, ra
+ jal printf
+ dmfc0 a3, COP_0_BAD_VADDR
+
+ la sp, start-FRAMESZ(CF_SZ) # set sp to a valid place
+
+#ifdef DDB
+ LA a0, 2f
+ jal trapDump
+ nop
+#endif
+
+ PANIC("kernel stack overflow")
+ /*noreturn*/
+
+go_k_general:
+ j k_general
+ nop
+
+go_u_general:
+ j u_general
+ nop
+
+
+ .data
+1:
+ .asciiz "\rktlbmiss: PC %p RA %p ADR %p\nSR %p CR %p SP %p\n"
+2:
+ .asciiz "stack ovf"
+ .text
+
+ .set at
+END(k_tlb_miss)
+
+/*---------------------------------------------------------------- tlb_write_i
+ * Write the given entry into the TLB at the given index.
+ */
+LEAF(tlb_write_indexed)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ lw a2, 8(a1)
+ lw a3, 12(a1)
+ dmfc0 t0, COP_0_TLB_HI # Save the current PID.
+
+ dmtc0 a2, COP_0_TLB_LO0 # Set up entry low0.
+ dmtc0 a3, COP_0_TLB_LO1 # Set up entry low1.
+ lw a2, 0(a1)
+ lw a3, 4(a1)
+ mtc0 a0, COP_0_TLB_INDEX # Set the index.
+ dmtc0 a2, COP_0_TLB_PG_MASK # Set up entry mask.
+ dmtc0 a3, COP_0_TLB_HI # Set up entry high.
+ nop
+ nop
+ nop
+ nop
+ tlbwi # Write the TLB
+ nop
+ nop # Delay for effect
+ nop
+ nop
+
+ dmtc0 t0, COP_0_TLB_HI # Restore the PID.
+ nop
+ dmtc0 zero, COP_0_TLB_PG_MASK # Default mask value.
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(tlb_write_indexed)
+
+/*---------------------------------------------------------------- tlb_flush
+ * Flush the "random" entries from the TLB.
+ * Uses the "wired" register to determine which entry to start with.
+ * Arg "tlbsize" is the number of entries to flush.
+ */
+LEAF(tlb_flush)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ mfc0 t1, COP_0_TLB_WIRED
+ LA v0, KSEG0_BASE # invalid address
+ dmfc0 t0, COP_0_TLB_HI # Save the PID
+
+ dmtc0 v0, COP_0_TLB_HI # Mark entry high as invalid
+ dmtc0 zero, COP_0_TLB_LO0 # Zero out low entry0.
+ dmtc0 zero, COP_0_TLB_LO1 # Zero out low entry1.
+ mtc0 zero, COP_0_TLB_PG_MASK # Zero out mask entry.
+/*
+ * Align the starting value (t1) and the upper bound (a0).
+ */
+1:
+ mtc0 t1, COP_0_TLB_INDEX # Set the index register.
+ addu t1, t1, 1 # Increment index.
+ nop
+ nop
+ nop
+ tlbwi # Write the TLB entry.
+ nop
+ nop
+ bne t1, a0, 1b
+ nop
+
+ dmtc0 t0, COP_0_TLB_HI # Restore the PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(tlb_flush)
+
+/*--------------------------------------------------------------- tlb_flush_addr
+ * Flush any TLB entries for the given address and TLB PID.
+ */
+LEAF(tlb_flush_addr)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ li v0, (PG_HVPN | PG_ASID)
+	and	a0, a0, v0		# Make sure the hi value is valid.
+ dmfc0 t0, COP_0_TLB_HI # Get current PID
+ dmtc0 a0, COP_0_TLB_HI # look for addr & PID
+ nop
+ nop
+ nop
+ nop
+ tlbp # Probe for the entry.
+ nop
+ nop # Delay for effect
+ nop
+ LA t1, KSEG0_BASE # Load invalid entry.
+ mfc0 v0, COP_0_TLB_INDEX # See what we got
+ bltz v0, 1f # index < 0 => !found
+ nop
+ dmtc0 t1, COP_0_TLB_HI # Mark entry high as invalid
+
+ dmtc0 zero, COP_0_TLB_LO0 # Zero out low entry.
+ dmtc0 zero, COP_0_TLB_LO1 # Zero out low entry.
+ nop
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+ nop
+ nop
+1:
+ dmtc0 t0, COP_0_TLB_HI # restore PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(tlb_flush_addr)
+
+/*---------------------------------------------------------------- tlb_update
+ * Update the TLB if highreg is found; otherwise, enter the data.
+ */
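+/*
+ * a0 holds VA | ASID, with bit 12 selecting the even or odd page of
+ * the pair; a1 holds the new PTE.  If the probe hits, only the
+ * matching EntryLo half is rewritten in place and 1 is returned;
+ * otherwise a fresh entry goes into a random slot (the unused half
+ * only keeps the PG_G bit, so both halves agree on the global flag)
+ * and 0 is returned.
+ */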
+LEAF(tlb_update)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ and t1, a0, 0x1000 # t1 = Even/Odd flag
+ li v0, (PG_HVPN | PG_ASID)
+ and a0, a0, v0
+ dmfc0 t0, COP_0_TLB_HI # Save current PID
+ dmtc0 a0, COP_0_TLB_HI # Init high reg
+ and a2, a1, PG_G # Copy global bit
+ nop
+ nop
+ nop
+ tlbp # Probe for the entry.
+ dsll a1, a1, 34
+ dsrl a1, a1, 34
+ bne t1, zero, 2f # Decide even odd
+ mfc0 v0, COP_0_TLB_INDEX # See what we got
+# EVEN
+ bltz v0, 1f # index < 0 => !found
+ nop
+
+ tlbr # update, read entry first
+ nop
+ nop
+ nop
+ dmtc0 a1, COP_0_TLB_LO0 # init low reg0.
+ nop
+ nop
+ nop
+ nop
+ tlbwi # update slot found
+ b 4f
+ li v0, 1
+1:
+ mtc0 zero, COP_0_TLB_PG_MASK # init mask.
+ dmtc0 a0, COP_0_TLB_HI # init high reg.
+ dmtc0 a1, COP_0_TLB_LO0 # init low reg0.
+ dmtc0 a2, COP_0_TLB_LO1 # init low reg1.
+ nop
+ nop
+ nop
+ nop
+ tlbwr # enter into a random slot
+ b 4f
+ li v0, 0
+# ODD
+2:
+ nop
+ bltz v0, 3f # index < 0 => !found
+ nop
+
+ tlbr # read the entry first
+ nop
+ nop
+ nop
+ dmtc0 a1, COP_0_TLB_LO1 # init low reg1.
+ nop
+ nop
+ nop
+ nop
+ tlbwi # update slot found
+ b 4f
+ li v0, 1
+3:
+ mtc0 zero, COP_0_TLB_PG_MASK # init mask.
+ dmtc0 a0, COP_0_TLB_HI # init high reg.
+ dmtc0 a2, COP_0_TLB_LO0 # init low reg0.
+ dmtc0 a1, COP_0_TLB_LO1 # init low reg1.
+ nop
+ nop
+ nop
+ nop
+ tlbwr # enter into a random slot
+ nop
+ li v0, 0
+
+4:				# Make sure the pipeline
+	nop			# advances before we
+	nop			# use the TLB.
+ dmtc0 t0, COP_0_TLB_HI # restore PID
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ j ra
+ nop
+END(tlb_update)
+
+/*---------------------------------------------------------------- tlb_read
+ * Read the TLB entry.
+ */
+LEAF(tlb_read)
+ mfc0 v1, COP_0_STATUS_REG # Save the status register.
+ mtc0 zero, COP_0_STATUS_REG # Disable interrupts
+ ITLBNOPFIX
+ dmfc0 t0, COP_0_TLB_HI # Get current PID
+
+ mtc0 a0, COP_0_TLB_INDEX # Set the index register
+ nop
+ nop
+ nop
+ nop
+ tlbr # Read from the TLB
+ nop
+ nop
+ nop
+ mfc0 t2, COP_0_TLB_PG_MASK # fetch the hi entry
+ dmfc0 t3, COP_0_TLB_HI # fetch the hi entry
+ dmfc0 t4, COP_0_TLB_LO0 # See what we got
+ dmfc0 t5, COP_0_TLB_LO1 # See what we got
+ dmtc0 t0, COP_0_TLB_HI # restore PID
+ nop
+ nop
+ nop # wait for PID active
+ mtc0 v1, COP_0_STATUS_REG # Restore the status register
+ ITLBNOPFIX
+ sw t2, 0(a1)
+ sw t3, 4(a1)
+ sw t4, 8(a1)
+ j ra
+ sw t5, 12(a1)
+END(tlb_read)
+
+/*---------------------------------------------------------------- tlb_get_pid
+ * Read the tlb pid value.
+ */
+LEAF(tlb_get_pid)
+ dmfc0 v0, COP_0_TLB_HI # get PID
+	li	v1, VMTLB_PID		# load the PID mask
+ j ra
+ and v0, v0, v1 # mask off PID
+END(tlb_get_pid)
+
+/*---------------------------------------------------------------- tlb_set_pid
+ * Write the given pid into the TLB pid reg.
+ */
+LEAF(tlb_set_pid)
+ dmtc0 a0, COP_0_TLB_HI # Write the hi reg value
+ j ra
+ nop
+END(tlb_set_pid)
+
+/*---------------------------------------------------------------- tlb_get_wired
+ * Get the value from the TLB wired reg.
+ */
+LEAF(tlb_get_wired)
+ mfc0 v0, COP_0_TLB_WIRED
+ j ra
+ nop
+END(tlb_get_wired)
+
+/*---------------------------------------------------------------- tlb_set_wired
+ * Write the given value into the TLB wired reg.
+ */
+LEAF(tlb_set_wired)
+ mtc0 a0, COP_0_TLB_WIRED
+ j ra
+ nop
+END(tlb_set_wired)
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
new file mode 100644
index 00000000000..23758f7148e
--- /dev/null
+++ b/sys/arch/mips64/mips64/trap.c
@@ -0,0 +1,1379 @@
+/* $OpenBSD: trap.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/* tracked to 1.23 */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: trap.c 1.32 91/04/06
+ *
+ * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
+ */
+
+/*
+ * THIS CODE SHOULD BE REWRITTEN!
+ */
+
+#include "ppp.h"
+#include "bridge.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/user.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+#include <net/netisr.h>
+#include <miscfs/procfs/procfs.h>
+
+#include <machine/trap.h>
+#include <machine/psl.h>
+#include <machine/cpu.h>
+#include <machine/pio.h>
+#include <machine/intr.h>
+#include <machine/autoconf.h>
+#include <machine/pte.h>
+#include <machine/pmap.h>
+#include <machine/mips_opcode.h>
+#include <machine/frame.h>
+#include <machine/regnum.h>
+
+#include <machine/rm7000.h>
+
+#include <mips64/archtype.h>
+
+#ifdef DDB
+#include <mips64/db_machdep.h>
+#include <ddb/db_sym.h>
+#endif
+
+#include <sys/cdefs.h>
+#include <sys/syslog.h>
+
+struct proc *machFPCurProcPtr; /* pointer to last proc to use FP */
+
+char *trap_type[] = {
+ "external interrupt",
+ "TLB modification",
+ "TLB miss (load or instr. fetch)",
+ "TLB miss (store)",
+ "address error (load or I-fetch)",
+ "address error (store)",
+ "bus error (I-fetch)",
+ "bus error (load or store)",
+ "system call",
+ "breakpoint",
+ "reserved instruction",
+ "coprocessor unusable",
+ "arithmetic overflow",
+ "trap",
+	"virtual coherency instruction",
+ "floating point",
+ "reserved 16",
+ "reserved 17",
+ "reserved 18",
+ "reserved 19",
+ "reserved 20",
+ "reserved 21",
+ "reserved 22",
+ "watch",
+ "reserved 24",
+ "reserved 25",
+ "reserved 26",
+ "reserved 27",
+ "reserved 28",
+ "reserved 29",
+ "reserved 30",
+	"virtual coherency data",
+};
+
+#if defined(DDB) || defined(DEBUG)
+extern register_t *tlbtrcptr;
+struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
+
+void stacktrace(struct trap_frame *);
+void logstacktrace(struct trap_frame *);
+int kdbpeek(void *);
+/* extern functions printed by name in stack backtraces */
+extern void idle __P((void));
+#endif /* DDB || DEBUG */
+
+#if defined(DDB)
+int kdb_trap(int, db_regs_t *);
+#endif
+
+extern u_long intrcnt[];
+extern void MipsSwitchFPState(struct proc *, struct trap_frame *);
+extern void MipsSwitchFPState16(struct proc *, struct trap_frame *);
+extern void MipsFPTrap(u_int, u_int, u_int);
+
+u_int trap(struct trap_frame *);
+int cpu_singlestep(struct proc *);
+u_long MipsEmulateBranch(struct trap_frame *, long, int, long);
+
+/*
+ * Handle an exception.
+ * In the case of a kernel trap, we return the pc at which to resume
+ * if pcb_onfault is set; otherwise, we return the old pc.
+ */
+unsigned
+trap(trapframe)
+ struct trap_frame *trapframe;
+{
+ int type, i;
+ unsigned ucode = 0;
+ struct proc *p = curproc;
+ u_quad_t sticks;
+ vm_prot_t ftype;
+ extern unsigned onfault_table[];
+ int typ = 0;
+ union sigval sv;
+
+ trapdebug_enter(trapframe, -1);
+
+ type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
+ if (USERMODE(trapframe->sr)) {
+ type |= T_USER;
+ sticks = p->p_sticks;
+ }
+
+ /*
+	 * Enable hardware interrupts if they were on before the trap.
+	 * If they were off, disable all (splhigh) so we don't accidentally
+	 * enable them when doing a spllower().
+ */
+/*XXX do in locore? */
+ if(trapframe->sr & SR_INT_ENAB) {
+#ifndef IMASK_EXTERNAL
+ updateimask(trapframe->cpl);
+#endif
+ enableintr();
+ } else
+ splhigh();
+
+
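+	/*
+	 * Faults taken from user mode had T_USER or'ed into the type
+	 * above, so most exceptions below come in both a kernel and a
+	 * user flavour.
+	 */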
+ switch (type) {
+ case T_TLB_MOD:
+ /* check for kernel address */
+ if ((int)trapframe->badvaddr < 0) {
+ pt_entry_t *pte;
+ unsigned int entry;
+ vaddr_t pa;
+
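+			/*
+			 * A TLB modified exception fires on the first write
+			 * to a page whose TLB entry has the dirty bit clear.
+			 * Set PG_M in the PTE, reload the TLB entry and mark
+			 * the physical page as no longer clean so the VM
+			 * system knows it has been written to.
+			 */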
+ pte = kvtopte(trapframe->badvaddr);
+ entry = pte->pt_entry;
+#ifdef DIAGNOSTIC
+ if (!(entry & PG_V) || (entry & PG_M))
+ panic("trap: ktlbmod: invalid pte");
+#endif
+ if (pmap_is_page_ro(pmap_kernel(), mips_trunc_page(trapframe->badvaddr), entry)) {
+ /* write to read only page in the kernel */
+ ftype = VM_PROT_WRITE;
+ goto kernel_fault;
+ }
+ entry |= PG_M;
+ pte->pt_entry = entry;
+ trapframe->badvaddr &= ~PGOFSET;
+ tlb_update(trapframe->badvaddr, entry);
+ pa = pfn_to_pad(entry);
+ if (!IS_VM_PHYSADDR(pa))
+ panic("trap: ktlbmod: unmanaged page");
+ PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
+ return (trapframe->pc);
+ }
+ /* FALLTHROUGH */
+
+ case T_TLB_MOD+T_USER:
+ {
+ pt_entry_t *pte;
+ unsigned int entry;
+ paddr_t pa;
+ pmap_t pmap = p->p_vmspace->vm_map.pmap;
+
+ if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
+ panic("trap: utlbmod: invalid segmap");
+ pte += (trapframe->badvaddr >> PGSHIFT) & (NPTEPG - 1);
+ entry = pte->pt_entry;
+#ifdef DIAGNOSTIC
+ if (!(entry & PG_V) || (entry & PG_M)) {
+ panic("trap: utlbmod: invalid pte");
+ }
+#endif
+ if (pmap_is_page_ro(pmap, (vaddr_t)mips_trunc_page(trapframe->badvaddr), entry)) {
+ /* write to read only page */
+ ftype = VM_PROT_WRITE;
+ goto dofault;
+ }
+ entry |= PG_M;
+ pte->pt_entry = entry;
+ trapframe->badvaddr = (trapframe->badvaddr & ~PGOFSET) | (pmap->pm_tlbpid << VMTLB_PID_SHIFT);
+ tlb_update(trapframe->badvaddr, entry);
+ pa = pfn_to_pad(entry);
+ if (!IS_VM_PHYSADDR(pa)) {
+ panic("trap: utlbmod: unmanaged page");
+ }
+ PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;
+ if (!USERMODE(trapframe->sr))
+ return (trapframe->pc);
+ goto out;
+ }
+
+ case T_TLB_LD_MISS:
+ case T_TLB_ST_MISS:
+ ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
+ /* check for kernel address */
+ if (trapframe->badvaddr < 0) {
+ vaddr_t va;
+ int rv;
+
+ kernel_fault:
+ va = trunc_page((vaddr_t)trapframe->badvaddr);
+ rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
+ if (rv == KERN_SUCCESS)
+ return (trapframe->pc);
+ if ((i = p->p_addr->u_pcb.pcb_onfault) != 0) {
+ p->p_addr->u_pcb.pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ goto err;
+ }
+ /*
+ * It is an error for the kernel to access user space except
+ * through the copyin/copyout routines. However we allow
+ * accesses to the top of user stack for compat emul data.
+ */
+#define szsigcode ((long)(p->p_emul->e_esigcode - p->p_emul->e_sigcode))
+ if (trapframe->badvaddr < VM_MAXUSER_ADDRESS &&
+ trapframe->badvaddr >= (long)STACKGAPBASE)
+ goto dofault;
+
+ if ((i = p->p_addr->u_pcb.pcb_onfault) == 0) {
+ goto dofault;
+ }
+#undef szsigcode
+ /* check for fuswintr() or suswintr() getting a page fault */
+ if (i == 5)
+ return (onfault_table[i]);
+ goto dofault;
+
+ case T_TLB_LD_MISS+T_USER:
+ ftype = VM_PROT_READ;
+ goto dofault;
+
+ case T_TLB_ST_MISS+T_USER:
+ ftype = VM_PROT_WRITE;
+ dofault:
+ {
+ vaddr_t va;
+ struct vmspace *vm;
+ vm_map_t map;
+ int rv;
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ va = trunc_page((vaddr_t)trapframe->badvaddr);
+ rv = uvm_fault(map, trunc_page(va), 0, ftype);
+#if defined(VMFAULT_TRACE)
+ printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
+ map, &vm->vm_map.pmap, va, trapframe->badvaddr, ftype, FALSE, rv, trapframe->pc);
+#endif
+ /*
+ * If this was a stack access we keep track of the maximum
+ * accessed stack size. Also, if vm_fault gets a protection
+ * failure it is due to accessing the stack region outside
+ * the current limit and we need to reflect that as an access
+ * error.
+ */
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (rv == KERN_SUCCESS) {
+ unsigned nss;
+
+ nss = btoc(USRSTACK-(unsigned)va);
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (rv == KERN_PROTECTION_FAILURE)
+ rv = KERN_INVALID_ADDRESS;
+ }
+ if (rv == KERN_SUCCESS) {
+ if (!USERMODE(trapframe->sr))
+ return (trapframe->pc);
+ goto out;
+ }
+ if (!USERMODE(trapframe->sr)) {
+ if ((i = p->p_addr->u_pcb.pcb_onfault) != 0) {
+ p->p_addr->u_pcb.pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ goto err;
+ }
+
+#ifdef ADEBUG
+printf("SIG-SEGV @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
+#endif
+ ucode = ftype;
+ i = SIGSEGV;
+ typ = SEGV_MAPERR;
+ break;
+ }
+
+ case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */
+ case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */
+ ucode = 0; /* XXX should be VM_PROT_something */
+ i = SIGBUS;
+ typ = BUS_ADRALN;
+#ifdef ADEBUG
+printf("SIG-BUSA @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
+#endif
+ break;
+ case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */
+ case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */
+ ucode = 0; /* XXX should be VM_PROT_something */
+ i = SIGBUS;
+ typ = BUS_OBJERR;
+#ifdef ADEBUG
+printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
+#endif
+ break;
+
+ case T_SYSCALL+T_USER:
+ {
+ struct trap_frame *locr0 = p->p_md.md_regs;
+ struct sysent *callp;
+ unsigned int code;
+ unsigned long tpc;
+ int numsys;
+ struct args {
+ register_t i[8];
+ } args;
+ register_t rval[2];
+
+ uvmexp.syscalls++;
+
+ /* compute next PC after syscall instruction */
+ tpc = trapframe->pc; /* Remember if restart */
+ if ((int)trapframe->cause & CR_BR_DELAY) {
+ locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0);
+ }
+ else {
+ locr0->pc += 4;
+ }
+ callp = p->p_emul->e_sysent;
+ numsys = p->p_emul->e_nsysent;
+ code = locr0->v0;
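+		/*
+		 * The syscall number arrives in v0 and the arguments in
+		 * a0-a3 (a4-a7 as well for 64-bit ABI processes); anything
+		 * beyond that lives on the user stack, which is why the
+		 * O32 (MDP_O32) cases below copyin() the remaining
+		 * arguments from sp.
+		 */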
+ switch (code) {
+ case SYS_syscall:
+ /*
+ * Code is first argument, followed by actual args.
+ */
+ code = locr0->a0;
+ if (code >= numsys)
+ callp += p->p_emul->e_nosys; /* (illegal) */
+ else
+ callp += code;
+ i = callp->sy_argsize / sizeof(register_t);
+ args.i[0] = locr0->a1;
+ args.i[1] = locr0->a2;
+ args.i[2] = locr0->a3;
+ if (i > 3) {
+ if (p->p_md.md_flags & MDP_O32) {
+ int32_t p[5];
+
+ i = copyin((int32_t *)locr0->sp + 4,
+ p, 5 * sizeof(int32_t));
+ args.i[3] = p[0];
+ args.i[4] = p[1];
+ args.i[5] = p[2];
+ args.i[6] = p[3];
+ args.i[7] = p[4];
+ } else {
+ args.i[3] = locr0->a4;
+ args.i[4] = locr0->a5;
+ args.i[5] = locr0->a6;
+ args.i[6] = locr0->a7;
+ i = copyin((void *)(locr0->sp +
+ 7 * sizeof(register_t)), &args.i[7],
+ sizeof(register_t));
+ }
+ }
+ break;
+
+ case SYS___syscall:
+ /*
+ * Like syscall, but code is a quad, so as to maintain
+ * quad alignment for the rest of the arguments.
+ */
+ if (p->p_md.md_flags & MDP_O32) {
+ if(_QUAD_LOWWORD == 0) {
+ code = locr0->a0;
+ } else {
+ code = locr0->a1;
+ }
+ args.i[0] = locr0->a2;
+ args.i[1] = locr0->a3;
+ } else {
+ code = locr0->a0;
+ args.i[0] = locr0->a1;
+ args.i[1] = locr0->a2;
+ args.i[2] = locr0->a3;
+ }
+
+ if (code >= numsys)
+ callp += p->p_emul->e_nosys; /* (illegal) */
+ else
+ callp += code;
+ i = callp->sy_argsize / sizeof(int);
+ if (i > 2 && p->p_md.md_flags & MDP_O32) {
+ int32_t p[6];
+
+ i = copyin((int32_t *)locr0->sp + 4,
+ p, 6 * sizeof(int32_t));
+ args.i[2] = p[0];
+ args.i[3] = p[1];
+ args.i[4] = p[2];
+ args.i[5] = p[3];
+ args.i[6] = p[4];
+ args.i[7] = p[5];
+ } else if (i > 3) {
+ args.i[3] = locr0->a4;
+ args.i[4] = locr0->a5;
+ args.i[5] = locr0->a6;
+ args.i[6] = locr0->a7;
+ i = copyin((void *)(locr0->sp +
+ 7 * sizeof(register_t)), &args.i[7],
+ sizeof(register_t));
+ }
+ break;
+
+ default:
+ if (code >= numsys)
+ callp += p->p_emul->e_nosys; /* (illegal) */
+ else
+ callp += code;
+
+ i = callp->sy_narg;
+ args.i[0] = locr0->a0;
+ args.i[1] = locr0->a1;
+ args.i[2] = locr0->a2;
+ args.i[3] = locr0->a3;
+ if (i > 4) {
+ if (p->p_md.md_flags & MDP_O32) {
+ int32_t p[4];
+
+ i = copyin((int32_t *)locr0->sp + 4,
+ p, 4 * sizeof(int32_t));
+ args.i[4] = p[0];
+ args.i[5] = p[1];
+ args.i[6] = p[2];
+ args.i[7] = p[3];
+ } else {
+ args.i[4] = locr0->a4;
+ args.i[5] = locr0->a5;
+ args.i[6] = locr0->a6;
+ args.i[7] = locr0->a7;
+ }
+ }
+ }
+#ifdef SYSCALL_DEBUG
+ scdebug_call(p, code, args.i);
+#endif
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p, code, callp->sy_argsize, args.i);
+#endif
+ rval[0] = 0;
+ rval[1] = locr0->v1;
+#if defined(DDB) || defined(DEBUG)
+ if (trp == trapdebug)
+ trapdebug[TRAPSIZE - 1].code = code;
+ else
+ trp[-1].code = code;
+#endif
+ i = (*callp->sy_call)(p, &args, rval);
+ /*
+ * Reinitialize proc pointer `p' as it may be different
+ * if this is a child returning from fork syscall.
+ */
+ p = curproc;
+ locr0 = p->p_md.md_regs;
+
+ trapdebug_enter(locr0, -code);
+
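+		/*
+		 * Return convention used by the userland syscall stubs:
+		 * the result is passed back in v0 (and v1), a3 is cleared
+		 * on success and set to 1 on error with the errno in v0.
+		 * ERESTART rewinds the PC so the syscall is re-executed.
+		 */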
+ switch (i) {
+ case 0:
+ locr0->v0 = rval[0];
+ locr0->v1 = rval[1];
+ locr0->a3 = 0;
+ break;
+
+ case ERESTART:
+ locr0->pc = tpc;
+ break;
+
+ case EJUSTRETURN:
+ break; /* nothing to do */
+
+ default:
+ locr0->v0 = i;
+ locr0->a3 = 1;
+ }
+ if(code == SYS_ptrace)
+ Mips_SyncCache();
+#ifdef SYSCALL_DEBUG
+ scdebug_ret(p, code, i, rval);
+#endif
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, code, i, rval[0]);
+#endif
+ goto out;
+ }
+
+#ifdef DDB
+ case T_BREAK:
+ kdb_trap(type, trapframe);
+ return(trapframe->pc);
+#endif
+
+ case T_BREAK+T_USER:
+ {
+ caddr_t va;
+ u_int32_t instr;
+ struct uio uio;
+ struct iovec iov;
+
+ /* compute address of break instruction */
+ va = (caddr_t)trapframe->pc;
+ if ((int)trapframe->cause & CR_BR_DELAY)
+ va += 4;
+
+ /* read break instruction */
+ copyin(&instr, va, sizeof(int32_t));
+#if 0
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, trapframe->pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+#endif
+ if (p->p_md.md_ss_addr != (long)va || instr != BREAK_SSTEP) {
+ i = SIGTRAP;
+ typ = TRAP_TRACE;
+ break;
+ }
+
+ /*
+ * Restore original instruction and clear BP
+ */
+ iov.iov_base = (caddr_t)&p->p_md.md_ss_instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)(int)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ i = procfs_domem(p, p, NULL, &uio);
+ Mips_SyncCache();
+
+ if (i < 0)
+ printf("Warning: can't restore instruction at %x: %x\n",
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr);
+
+ p->p_md.md_ss_addr = 0;
+ i = SIGTRAP;
+ typ = TRAP_BRKPT;
+ break;
+ }
+
+ case T_IWATCH+T_USER:
+ case T_DWATCH+T_USER:
+ {
+ caddr_t va;
+ /* compute address of trapped instruction */
+ va = (caddr_t)trapframe->pc;
+ if ((int)trapframe->cause & CR_BR_DELAY)
+ va += 4;
+ printf("watch exception @ %p\n", va);
+ if(rm7k_watchintr(trapframe)) {
+ /* Return to user, don't add any more overhead */
+ return (trapframe->pc);
+ }
+ i = SIGTRAP;
+ typ = TRAP_BRKPT;
+ break;
+ }
+
+ case T_TRAP+T_USER:
+ {
+ caddr_t va;
+ u_int32_t instr;
+ struct trap_frame *locr0 = p->p_md.md_regs;
+
+ /* compute address of trap instruction */
+ va = (caddr_t)trapframe->pc;
+ if ((int)trapframe->cause & CR_BR_DELAY)
+ va += 4;
+ /* read break instruction */
+ copyin(&instr, va, sizeof(int32_t));
+
+ if ((int)trapframe->cause & CR_BR_DELAY) {
+ locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0);
+ }
+ else {
+ locr0->pc += 4;
+ }
+ if(instr == 0x040c0000) { /* Performance cntr trap */
+ int result;
+
+ result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
+ trapframe->a2, trapframe->a3);
+ locr0->v0 = -result;
+ /* Return to user, don't add any more overhead */
+ return (trapframe->pc);
+ }
+ else {
+ i = SIGEMT; /* Stuff it with something for now */
+ typ = 0;
+ }
+ break;
+ }
+
+ case T_RES_INST+T_USER:
+ i = SIGILL;
+ typ = ILL_ILLOPC;
+ break;
+
+ case T_COP_UNUSABLE+T_USER:
+ if ((trapframe->cause & CR_COP_ERR) != 0x10000000) {
+ i = SIGILL; /* only FPU instructions allowed */
+ typ = ILL_ILLOPC;
+ break;
+ }
+
+ if (p->p_md.md_regs->sr & SR_FR_32)
+ MipsSwitchFPState(machFPCurProcPtr, p->p_md.md_regs);
+ else
+ MipsSwitchFPState16(machFPCurProcPtr, p->p_md.md_regs);
+
+ machFPCurProcPtr = p;
+ p->p_md.md_regs->sr |= SR_COP_1_BIT;
+ p->p_md.md_flags |= MDP_FPUSED;
+ goto out;
+
+ case T_FPE:
+ printf("FPU Trap: PC %x CR %x SR %x\n",
+ trapframe->pc, trapframe->cause, trapframe->sr);
+ goto err;
+
+ case T_FPE+T_USER:
+ MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
+ goto out;
+
+ case T_OVFLOW+T_USER:
+ i = SIGFPE;
+ typ = FPE_FLTOVF;
+ break;
+
+ case T_ADDR_ERR_LD: /* misaligned access */
+ case T_ADDR_ERR_ST: /* misaligned access */
+ case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
+ if ((i = p->p_addr->u_pcb.pcb_onfault) != 0) {
+ p->p_addr->u_pcb.pcb_onfault = 0;
+ return (onfault_table[i]);
+ }
+ /* FALLTHROUGH */
+
+ default:
+ err:
+ disableintr();
+#ifndef DDB
+ trapDump("trap");
+#endif
+ printf("\nTrap cause = %d Frame %p\n", type, trapframe);
+ printf("Trap PC %p RA %p\n", trapframe->pc, trapframe->ra);
+ stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
+#ifdef DDB
+ kdb_trap(type, trapframe);
+#endif
+ panic("trap");
+ }
+ p->p_md.md_regs->pc = trapframe->pc;
+ p->p_md.md_regs->cause = trapframe->cause;
+ p->p_md.md_regs->badvaddr = trapframe->badvaddr;
+ sv.sival_int = trapframe->badvaddr;
+ trapsignal(p, i, ucode, typ, sv);
+out:
+ /*
+ * Note: we should only get here if returning to user mode.
+ */
+ /* take pending signals */
+ while ((i = CURSIG(p)) != 0)
+ postsig(i);
+ p->p_priority = p->p_usrpri;
+ astpending = 0;
+ if (want_resched) {
+ int s;
+
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ splx(s);
+ while ((i = CURSIG(p)) != 0)
+ postsig(i);
+ }
+
+ /*
+ * If profiling, charge system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL) {
+ extern int psratio;
+
+ addupc_task(p, trapframe->pc, (int)(p->p_sticks - sticks) * psratio);
+ }
+
+ curpriority = p->p_priority;
+ return (trapframe->pc);
+}
+
+void
+child_return(arg)
+ void *arg;
+{
+ struct proc *p = arg;
+ struct trap_frame *trapframe;
+ int i;
+
+ trapframe = p->p_md.md_regs;
+ trapframe->v0 = 0;
+ trapframe->v1 = 1;
+ trapframe->a3 = 0;
+
+ /* take pending signals */
+ while ((i = CURSIG(p)) != 0)
+ postsig(i);
+ p->p_priority = p->p_usrpri;
+ astpending = 0;
+ if (want_resched) {
+ int s;
+
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ splx(s);
+ while ((i = CURSIG(p)) != 0)
+ postsig(i);
+ }
+
+#if 0 /* Need sticks */
+ if (p->p_flag & P_PROFIL) {
+ extern int psratio;
+
+ addupc_task(p, trapframe->pc, (int)(p->p_sticks - sticks) * psratio);
+ }
+#endif
+
+ curpriority = p->p_priority;
+
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p, SYS_fork, 0, 0);
+#endif
+}
+
+#if defined(DDB) || defined(DEBUG)
+void
+trapDump(msg)
+ char *msg;
+{
+ int i;
+ int s;
+
+ s = splhigh();
+ printf("trapDump(%s)\n", msg);
+ for (i = 0; i < TRAPSIZE; i++) {
+ if (trp == trapdebug) {
+ trp = &trapdebug[TRAPSIZE - 1];
+ }
+ else {
+ trp--;
+ }
+
+ if (trp->cause == 0)
+ break;
+
+ printf("%s: PC %p CR 0x%x SR 0x%x\n",
+ trap_type[(trp->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT],
+ trp->pc, trp->cause, trp->status);
+
+ printf(" RA %p SP %p ADR %p\n", trp->ra, trp->sp, trp->vadr);
+ }
+
+#ifdef TLBTRACE
+ if (tlbtrcptr != NULL) {
+ register_t *next;
+
+ printf("tlbtrace:\n");
+ next = tlbtrcptr;
+ do {
+ if (next[0] != NULL) {
+ printf("pc %p, va %p segtab %p pte %p\n",
+ next[0], next[1], next[2], next[3]);
+ }
+ next += 4;
+ next = (register_t *)((long)next & ~0x100);
+ } while (next != tlbtrcptr);
+ }
+#endif
+
+ splx(s);
+}
+#endif
+
+
+/*
+ * Return the resulting PC as if the branch was executed.
+ */
+unsigned long
+MipsEmulateBranch(framePtr, instPC, fpcCSR, instptr)
+ struct trap_frame *framePtr;
+ long instPC;
+ int fpcCSR;
+ long instptr;
+{
+ InstFmt inst;
+ unsigned long retAddr;
+ int condition;
+ register_t *regsPtr = (register_t *)framePtr;
+
+#define GetBranchDest(InstPtr, inst) \
+ ((unsigned long)InstPtr + 4 + ((short)inst.IType.imm << 2))
+
+
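+	/*
+	 * A conditional branch that is not taken resumes at instPC + 8,
+	 * i.e. past both the branch and its delay slot; taken branches
+	 * go to GetBranchDest() and jumps take their target directly.
+	 */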
+ if(instptr) {
+ inst = *(InstFmt *)&instptr;
+ }
+ else {
+ inst = *(InstFmt *)instPC;
+ }
+#if 0
+ printf("regsPtr=%x PC=%x Inst=%x fpcCsr=%x\n", regsPtr, instPC,
+ inst.word, fpcCSR); /* XXX */
+#endif
+ regsPtr[ZERO] = 0; /* Make sure zero is 0x0 */
+
+ switch ((int)inst.JType.op) {
+ case OP_SPECIAL:
+ switch ((int)inst.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ retAddr = regsPtr[inst.RType.rs];
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ break;
+ }
+ break;
+
+ case OP_BCOND:
+ switch ((int)inst.IType.rt) {
+ case OP_BLTZ:
+ case OP_BLTZL:
+ case OP_BLTZAL:
+ case OP_BLTZALL:
+ if ((int)(regsPtr[inst.RType.rs]) < 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BGEZ:
+ case OP_BGEZL:
+ case OP_BGEZAL:
+ case OP_BGEZALL:
+ if ((int)(regsPtr[inst.RType.rs]) >= 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_TGEI:
+ case OP_TGEIU:
+ case OP_TLTI:
+ case OP_TLTIU:
+ case OP_TEQI:
+ case OP_TNEI:
+ retAddr = instPC + 4; /* Like syscall... */
+ break;
+
+ default:
+ panic("MipsEmulateBranch: Bad branch cond");
+ }
+ break;
+
+ case OP_J:
+ case OP_JAL:
+ retAddr = (inst.JType.target << 2) | (instPC & ~0x0fffffff);
+ break;
+
+ case OP_BEQ:
+ case OP_BEQL:
+ if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BNE:
+ case OP_BNEL:
+ if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BLEZ:
+ case OP_BLEZL:
+ if ((int)(regsPtr[inst.RType.rs]) <= 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_BGTZ:
+ case OP_BGTZL:
+ if ((int)(regsPtr[inst.RType.rs]) > 0)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ case OP_COP1:
+ switch (inst.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
+ condition = fpcCSR & FPC_COND_BIT;
+ else
+ condition = !(fpcCSR & FPC_COND_BIT);
+ if (condition)
+ retAddr = GetBranchDest(instPC, inst);
+ else
+ retAddr = instPC + 8;
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ }
+ break;
+
+ default:
+ retAddr = instPC + 4;
+ }
+ return (retAddr);
+}
+
+/*
+ * This routine is called by procxmt() to single step one instruction.
+ * We do this by storing a break instruction after the current instruction,
+ * resuming execution, and then restoring the old instruction.
+ */
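+/*
+ * Since MipsEmulateBranch() computes the actual successor of the current
+ * instruction, a single breakpoint at that address is enough even when
+ * the instruction being stepped over is a branch.
+ */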
+int
+cpu_singlestep(p)
+ struct proc *p;
+{
+ unsigned va;
+ struct trap_frame *locr0 = p->p_md.md_regs;
+ int i;
+ int bpinstr = BREAK_SSTEP;
+ int curinstr;
+ struct uio uio;
+ struct iovec iov;
+
+ /*
+ * Fetch what's at the current location.
+ */
+ iov.iov_base = (caddr_t)&curinstr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)locr0->pc;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_procp = curproc;
+ procfs_domem(curproc, p, NULL, &uio);
+
+ /* compute next address after current location */
+ if(curinstr != 0) {
+ va = MipsEmulateBranch(locr0, locr0->pc, locr0->fsr, curinstr);
+ }
+ else {
+ va = locr0->pc + 4;
+ }
+ if (p->p_md.md_ss_addr) {
+ printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
+ p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
+ return (EFAULT);
+ }
+ p->p_md.md_ss_addr = va;
+ /*
+	 * Fetch what's at the breakpoint address so it can be restored later.
+ */
+ iov.iov_base = (caddr_t)&p->p_md.md_ss_instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_procp = curproc;
+ procfs_domem(curproc, p, NULL, &uio);
+
+ /*
+ * Store breakpoint instruction at the "next" location now.
+ */
+ iov.iov_base = (caddr_t)&bpinstr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ i = procfs_domem(curproc, p, NULL, &uio);
+ Mips_SyncCache();
+
+ if (i < 0)
+ return (EFAULT);
+#if 0
+ printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
+ p->p_comm, p->p_pid, p->p_md.md_ss_addr,
+ p->p_md.md_ss_instr, locr0[PC], curinstr); /* XXX */
+#endif
+ return (0);
+}
+
+#if defined(DDB) || defined(DEBUG)
+#define MIPS_JR_RA 0x03e00008 /* instruction code for jr ra */
+
+/* forward */
+char *fn_name(long addr);
+void stacktrace_subr __P((struct trap_frame *, int (*)(const char*, ...)));
+
+/*
+ * Print a stack backtrace.
+ */
+void
+stacktrace(regs)
+ struct trap_frame *regs;
+{
+ stacktrace_subr(regs, printf);
+}
+
+void
+logstacktrace(regs)
+ struct trap_frame *regs;
+{
+ stacktrace_subr(regs, addlog);
+}
+
+void
+stacktrace_subr(regs, printfn)
+ struct trap_frame *regs;
+ int (*printfn) __P((const char*, ...));
+{
+ long pc, sp, fp, ra, va, subr;
+ long a0, a1, a2, a3;
+ unsigned instr, mask;
+ InstFmt i;
+ int more, stksize;
+ extern char edata[];
+ unsigned int frames = 0;
+
+ /* get initial values from the exception frame */
+ sp = regs->sp;
+ pc = regs->pc;
+ fp = regs->s8;
+ ra = regs->ra; /* May be a 'leaf' function */
+ a0 = regs->a0;
+ a1 = regs->a1;
+ a2 = regs->a2;
+ a3 = regs->a3;
+
+/* Jump here when done with a frame, to start a new one */
+loop:
+
+/* Jump here after a nonstandard (interrupt handler) frame */
+ stksize = 0;
+ subr = 0;
+ if (frames++ > 6) {
+ (*printfn)("stackframe count exceeded\n");
+ return;
+ }
+
+ /* check for bad SP: could foul up next frame */
+ if (sp & 3 || sp < 0xffffffff80000000) {
+ (*printfn)("SP 0x%x: not in kernel\n", sp);
+ ra = 0;
+ subr = 0;
+ goto done;
+ }
+
+#if 0
+	/* Backtraces should continue through interrupts from kernel mode */
+ if (pc >= (unsigned)MipsKernIntr && pc < (unsigned)MipsUserIntr) {
+ (*printfn)("MipsKernIntr+%x: (%x, %x ,%x) -------\n",
+ pc-(unsigned)MipsKernIntr, a0, a1, a2);
+ regs = (struct trap_frame *)(sp + STAND_ARG_SIZE);
+ a0 = kdbpeek(&regs->a0);
+ a1 = kdbpeek(&regs->a1);
+ a2 = kdbpeek(&regs->a2);
+ a3 = kdbpeek(&regs->a3);
+
+ pc = kdbpeek(&regs->pc); /* exc_pc - pc at time of exception */
+ ra = kdbpeek(&regs->ra); /* ra at time of exception */
+ sp = kdbpeek(&regs->sp);
+ goto specialframe;
+ }
+#endif
+
+
+# define Between(x, y, z) \
+ ( ((x) <= (y)) && ((y) < (z)) )
+# define pcBetween(a,b) \
+ Between((unsigned)a, pc, (unsigned)b)
+
+ /* check for bad PC */
+ if (pc & 3 || pc < 0xffffffff80000000 || pc >= (unsigned)edata) {
+ (*printfn)("PC 0x%x: not in kernel\n", pc);
+ ra = 0;
+ goto done;
+ }
+
+ /*
+ * Find the beginning of the current subroutine by scanning backwards
+ * from the current PC for the end of the previous subroutine.
+ */
+ if (!subr) {
+ va = pc - sizeof(int);
+ while ((instr = kdbpeek((void *)va)) != MIPS_JR_RA)
+ va -= sizeof(int);
+ va += 2 * sizeof(int); /* skip back over branch & delay slot */
+ /* skip over nulls which might separate .o files */
+ while ((instr = kdbpeek((void *)va)) == 0)
+ va += sizeof(int);
+ subr = va;
+ }
+
+ /*
+ * Jump here for locore entry points for which the preceding
+ * function doesn't end in "j ra"
+ */
+ /* scan forwards to find stack size and any saved registers */
+ stksize = 0;
+ more = 3;
+ mask = 0;
+ for (va = subr; more; va += sizeof(int),
+ more = (more == 3) ? 3 : more - 1) {
+ /* stop if hit our current position */
+ if (va >= pc)
+ break;
+ instr = kdbpeek((void *)va);
+ i.word = instr;
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ switch (i.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_SYSCALL:
+ case OP_BREAK:
+ more = 1; /* stop now */
+ };
+ break;
+
+ case OP_BCOND:
+ case OP_J:
+ case OP_JAL:
+ case OP_BEQ:
+ case OP_BNE:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_COP0:
+ case OP_COP1:
+ case OP_COP2:
+ case OP_COP3:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ more = 2; /* stop after next instruction */
+ };
+ break;
+
+ case OP_SW:
+ case OP_SD:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ switch (i.IType.rt) {
+ case 4: /* a0 */
+ a0 = kdbpeek((void *)(sp + (short)i.IType.imm));
+ break;
+
+ case 5: /* a1 */
+ a1 = kdbpeek((void *)(sp + (short)i.IType.imm));
+ break;
+
+ case 6: /* a2 */
+ a2 = kdbpeek((void *)(sp + (short)i.IType.imm));
+ break;
+
+ case 7: /* a3 */
+ a3 = kdbpeek((void *)(sp + (short)i.IType.imm));
+ break;
+
+ case 30: /* fp */
+ fp = kdbpeek((void *)(sp + (short)i.IType.imm));
+ break;
+
+ case 31: /* ra */
+ ra = kdbpeek((void *)(sp + (short)i.IType.imm));
+ }
+ break;
+
+ case OP_ADDI:
+ case OP_ADDIU:
+ case OP_DADDI:
+ case OP_DADDIU:
+ /* look for stack pointer adjustment */
+ if (i.IType.rs != 29 || i.IType.rt != 29)
+ break;
+ stksize = - ((short)i.IType.imm);
+ }
+ }
+
+done:
+ (*printfn)("%s+%x ra %p sp %p (%p,%p,%p,%p)\n",
+ fn_name(subr), pc - subr, ra, sp, a0, a1, a2, a3);
+#if defined(_LP64)
+ a0 = a1 = a2 = a3 = 0x00dead0000dead00;
+#else
+ a0 = a1 = a2 = a3 = 0x00dead00;
+#endif
+
+ if (ra) {
+ if (pc == ra && stksize == 0)
+ (*printfn)("stacktrace: loop!\n");
+ else {
+ pc = ra;
+ sp += stksize;
+ ra = 0;
+ goto loop;
+ }
+ } else {
+ if (curproc)
+ (*printfn)("User-level: pid %d\n", curproc->p_pid);
+ else
+ (*printfn)("User-level: curproc NULL\n");
+ }
+}
+
+/*
+ * Functions ``special'' enough to print by name
+ */
+#ifdef __STDC__
+#define Name(_fn) { (void*)_fn, # _fn }
+#else
+#define Name(_fn) { _fn, "_fn"}
+#endif
+static struct { void *addr; char *name;} names[] = {
+ Name(trap),
+ {0, 0}
+};
+
+/*
+ * Map a function address to a string name, if known; or a hex string.
+ */
+char *
+fn_name(long addr)
+{
+ static char buf[17];
+ int i = 0;
+
+ for (i = 0; names[i].name; i++)
+ if (names[i].addr == (void*)addr)
+ return (names[i].name);
+ snprintf(buf, sizeof(buf), "%x", addr);
+ return (buf);
+}
+
+#endif /* DDB || DEBUG */
diff --git a/sys/arch/mips64/mips64/vm_machdep.c b/sys/arch/mips64/mips64/vm_machdep.c
new file mode 100644
index 00000000000..85c248eefd3
--- /dev/null
+++ b/sys/arch/mips64/mips64/vm_machdep.c
@@ -0,0 +1,296 @@
+/* $OpenBSD: vm_machdep.c,v 1.1 2004/08/06 20:56:03 pefo Exp $ */
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah Hdr: vm_machdep.c 1.21 91/04/06
+ *
+ * from: @(#)vm_machdep.c 8.3 (Berkeley) 1/4/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/user.h>
+#include <sys/core.h>
+#include <sys/exec.h>
+#include <sys/signalvar.h>
+
+
+#include <machine/pte.h>
+#include <machine/cpu.h>
+
+extern void proc_trampoline __P((void));
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ */
+void
+cpu_fork(p1, p2, stack, stacksize, func, arg)
+ struct proc *p1, *p2;
+ void *stack;
+ size_t stacksize;
+ void (*func)(void *);
+ void *arg;
+{
+ struct pcb *pcb;
+ extern struct proc *machFPCurProcPtr;
+
+ p2->p_md.md_regs = &p2->p_addr->u_pcb.pcb_regs;
+ p2->p_md.md_flags = p1->p_md.md_flags & MDP_FORKSAVE;
+
+ if (p1 == machFPCurProcPtr) {
+ if (p2->p_addr->u_pcb.pcb_regs.sr & SR_FR_32)
+ MipsSaveCurFPState(p1);
+ else
+ MipsSaveCurFPState16(p1);
+ }
+
+#ifdef notyet
+ /*
+ * If specified, give the child a different stack.
+ */
+ if (stack != NULL)
+ /* XXX How??? */;
+#endif
+
+ /*
+ * Copy the process control block to the new proc and
+ * create a clean stack for exit through trampoline.
+ * pcb_context has s0-s7, sp, s8, ra, sr, icr, cpl.
+ */
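+	/*
+	 * Given the layout above, val[10] is the saved ra and val[8] the
+	 * saved sp: the child resumes in proc_trampoline() on a fresh
+	 * stack near the top of its USPACE, with func and arg in s0/s1
+	 * (val[0]/val[1]) for the trampoline to call.
+	 */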
+ pcb = &p2->p_addr->u_pcb;
+ *pcb = p1->p_addr->u_pcb;
+
+ pcb->pcb_context.val[10] = (register_t)proc_trampoline;
+ pcb->pcb_context.val[8] = (register_t)(caddr_t)pcb + USPACE - 64;
+ pcb->pcb_context.val[0] = (register_t)func;
+ pcb->pcb_context.val[1] = (register_t)arg;
+}
+
+/*
+ * Finish a swapin operation. Nothing to do.
+ */
+void
+cpu_swapin(p)
+ struct proc *p;
+{
+}
+
+/*
+ * cpu_exit is called as the last action during exit.
+ * Release address space and machine dependent resources.
+ */
+void
+cpu_exit(p)
+ struct proc *p;
+{
+ extern struct proc *machFPCurProcPtr;
+
+ if (machFPCurProcPtr == p)
+ machFPCurProcPtr = (struct proc *)0;
+
+ (void) splhigh();
+ switch_exit(p);
+ /* NOTREACHED */
+}
+
+/*
+ * Dump the machine specific header information at the start of a core dump.
+ */
+int
+cpu_coredump(p, vp, cred, chdr)
+ struct proc *p;
+ struct vnode *vp;
+ struct ucred *cred;
+ struct core *chdr;
+{
+ int error;
+ /*register struct user *up = p->p_addr;*/
+ struct coreseg cseg;
+ extern struct proc *machFPCurProcPtr;
+
+ CORE_SETMAGIC(*chdr, COREMAGIC, MID_MIPS, 0);
+ chdr->c_hdrsize = ALIGN(sizeof(*chdr));
+ chdr->c_seghdrsize = ALIGN(sizeof(cseg));
+ chdr->c_cpusize = sizeof (p -> p_addr -> u_pcb.pcb_regs);
+
+ /*
+ * Copy floating point state from the FP chip if this process
+ * has state stored there.
+ */
+ if (p == machFPCurProcPtr) {
+ if (p->p_md.md_regs->sr & SR_FR_32)
+ MipsSaveCurFPState(p);
+ else
+ MipsSaveCurFPState16(p);
+ }
+
+ CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MIPS, CORE_CPU);
+ cseg.c_addr = 0;
+ cseg.c_size = chdr->c_cpusize;
+
+ error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
+ (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
+ IO_NODELOCKED|IO_UNIT, cred, NULL, p);
+ if (error)
+ return error;
+
+ error = vn_rdwr(UIO_WRITE, vp,
+ (caddr_t)(&(p -> p_addr -> u_pcb.pcb_regs)),
+ (off_t)chdr -> c_cpusize,
+ (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize),
+ UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
+ cred, NULL, p);
+
+ if (!error)
+ chdr->c_nseg++;
+
+ return error;
+}
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of PAGE_SIZE.
+ */
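+/*
+ * The caches are virtually indexed, so if 'from' and 'to' differ in
+ * cache colour (CpuCacheAliasMask) the data cache is written back
+ * first; each PTE is then moved, the stale TLB entry for 'from'
+ * flushed and the new mapping for 'to' entered.
+ */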
+void
+pagemove(from, to, size)
+ caddr_t from, to;
+ size_t size;
+{
+ pt_entry_t *fpte, *tpte;
+
+ if (size % PAGE_SIZE)
+ panic("pagemove");
+ fpte = kvtopte(from);
+ tpte = kvtopte(to);
+ if(((vaddr_t)from & CpuCacheAliasMask) != ((vaddr_t)to & CpuCacheAliasMask)) {
+ Mips_HitSyncDCache((vaddr_t)from, size);
+ }
+ while (size > 0) {
+ tlb_flush_addr((vaddr_t)from);
+ tlb_update((vaddr_t)to, fpte->pt_entry);
+ *tpte++ = *fpte;
+ fpte->pt_entry = PG_NV | PG_G;
+ fpte++;
+ size -= NBPG;
+ from += NBPG;
+ to += NBPG;
+ }
+}
+
+extern vm_map_t phys_map;
+
+/*
+ * Map an IO request into kernel virtual address space. Requests fall into
+ * one of five categories:
+ *
+ * B_PHYS|B_UAREA: User u-area swap.
+ * Address is relative to start of u-area (p_addr).
+ * B_PHYS|B_PAGET: User page table swap.
+ * Address is a kernel VA in usrpt (Usrptmap).
+ * B_PHYS|B_DIRTY: Dirty page push.
+ * Address is a VA in proc2's address space.
+ * B_PHYS|B_PGIN: Kernel pagein of user pages.
+ * Address is VA in user's address space.
+ * B_PHYS: User "raw" IO request.
+ * Address is VA in user's address space.
+ *
+ * All requests are (re)mapped into kernel VA space via the phys_map
+ */
+
+#define trunc_page_align(x) ((vaddr_t)(x) & ~(CpuCacheAliasMask | PAGE_SIZE))
+
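+/*
+ * The kernel mapping is allocated with uvm_km_valloc_prefer_wait()
+ * using the user VA as the preferred address, presumably so the kernel
+ * alias of the buffer shares the user VA's cache colour and virtual
+ * cache aliases are avoided.
+ */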
+void
+vmapbuf(bp, len)
+ struct buf *bp;
+ vsize_t len;
+{
+ vaddr_t uva, kva;
+ vsize_t sz, off;
+ paddr_t pa;
+ struct pmap *pmap;
+
+ if ((bp->b_flags & B_PHYS) == 0) {
+ panic("vmapbuf");
+ }
+
+ pmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
+ bp->b_saveaddr = bp->b_data;
+ uva = trunc_page((vaddr_t)bp->b_saveaddr);
+ off = (vaddr_t)bp->b_saveaddr - uva;
+ sz = round_page(off + len);
+
+ kva = uvm_km_valloc_prefer_wait(phys_map, sz, uva);
+ bp->b_data = (caddr_t) (kva + off);
+
+ while (sz > 0) {
+ pmap_extract(pmap, uva, &pa);
+ if (pa == 0) {
+ panic("vmapbuf: null page frame");
+ }
+ pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
+ VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+ uva += PAGE_SIZE;
+ kva += PAGE_SIZE;
+ sz -= PAGE_SIZE;
+ }
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_addr.
+ */
+void
+vunmapbuf(bp, len)
+ struct buf *bp;
+ vsize_t len;
+{
+ vsize_t sz;
+ vaddr_t addr;
+
+ if ((bp->b_flags & B_PHYS) == 0) {
+ panic("vunmapbuf");
+ }
+ addr = trunc_page((vaddr_t)bp->b_data);
+ sz = round_page(len + ((vaddr_t)bp->b_data - addr));
+ uvm_km_free_wakeup(phys_map, addr, sz);
+ bp->b_data = bp->b_saveaddr;
+ bp->b_saveaddr = NULL;
+}