path: root/sys/lib/libkern/arch
author    Michael Shalayeff <mickey@cvs.openbsd.org>  1998-06-23 19:39:16 +0000
committer Michael Shalayeff <mickey@cvs.openbsd.org>  1998-06-23 19:39:16 +0000
commit    eae609490d212153bfb18fdecc719446860a4433 (patch)
tree      1b8b33ae8e16f760004b10b0d524c5981d2c0da4 /sys/lib/libkern/arch
parent    3eb962f33405f5d242202b0c16e31ce87c4b6537 (diff)
use HP version
Diffstat (limited to 'sys/lib/libkern/arch')
-rw-r--r--  sys/lib/libkern/arch/hppa/Makefile.inc    10
-rw-r--r--  sys/lib/libkern/arch/hppa/divI.S         211
-rw-r--r--  sys/lib/libkern/arch/hppa/divU.S         199
-rw-r--r--  sys/lib/libkern/arch/hppa/milli.S       1978
-rw-r--r--  sys/lib/libkern/arch/hppa/mulI.S         480
-rw-r--r--  sys/lib/libkern/arch/hppa/mulU.S         131
-rw-r--r--  sys/lib/libkern/arch/hppa/remI.S         164
-rw-r--r--  sys/lib/libkern/arch/hppa/remU.S         152
8 files changed, 1981 insertions, 1344 deletions
diff --git a/sys/lib/libkern/arch/hppa/Makefile.inc b/sys/lib/libkern/arch/hppa/Makefile.inc
index 5364c28199b..7cc23362d1a 100644
--- a/sys/lib/libkern/arch/hppa/Makefile.inc
+++ b/sys/lib/libkern/arch/hppa/Makefile.inc
@@ -1,10 +1,6 @@
-# $OpenBSD: Makefile.inc,v 1.1 1998/06/23 18:56:53 mickey Exp $
+# $OpenBSD: Makefile.inc,v 1.2 1998/06/23 19:39:01 mickey Exp $
SRCS+= __main.c imax.c imin.c lmax.c lmin.c max.c min.c ulmax.c ulmin.c \
- random.c ashrdi3.c
-
-# `source' files built from m4 source
-SRCS+= divU.S divI.S
-SRCS+= remU.S remI.S
-SRCS+= mulU.S mulI.S
+ random.c ashrdi3.c divdi3.c
+SRCS+= milli.S
diff --git a/sys/lib/libkern/arch/hppa/divI.S b/sys/lib/libkern/arch/hppa/divI.S
deleted file mode 100644
index 62ff31a547a..00000000000
--- a/sys/lib/libkern/arch/hppa/divI.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/* $OpenBSD: divI.S,v 1.1 1998/06/23 18:56:53 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- *
- * ROUTINES: $$divI, $$divoI
- *
- * Single precision divide for signed binary integers.
- *
- * The quotient is truncated towards zero.
- * The sign of the quotient is the XOR of the signs of the dividend and
- * divisor.
- * Divide by zero is trapped.
- * Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.
- *
- * INPUT REGISTERS:
- * arg0 == dividend
- * arg1 == divisor
- * r31 == return pc
- * sr0 == return space when called externally
- *
- * OUTPUT REGISTERS:
- * arg0 = undefined
- * arg1 = undefined
- * ret1 = quotient
- *
- * OTHER REGISTERS AFFECTED:
- * r1 = undefined
- *
- * SIDE EFFECTS:
- * Causes a trap under the following conditions:
- * divisor is zero (traps with ADDIT,= 0,25,0)
- * dividend==-2**31 and divisor==-1 and routine is $$divoI
- * (traps with ADDO 26,25,0)
- * Changes memory at the following places:
- * NONE
- *
- * PERMISSIBLE CONTEXT:
- * Unwindable.
- * Suitable for internal or external millicode.
- * Assumes the special millicode register conventions.
- *
- * DISCUSSION:
- * Branches to other millicode routines using BE
- * $$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15
- *
- * For selected divisors, calls a divide by constant routine written by
- * Karl Pettis. Eligible divisors are 1..15 excluding 11 and 13.
- *
- * The only overflow case is -2**31 divided by -1.
- * Both routines return -2**31 but only $$divoI traps.
- */
-
-DEFINE(temp,r1)
-DEFINE(retreg,ret1) ; r29
- .import $$divI_2,millicode
- .import $$divI_3,millicode
- .import $$divI_4,millicode
- .import $$divI_5,millicode
- .import $$divI_6,millicode
- .import $$divI_7,millicode
- .import $$divI_8,millicode
- .import $$divI_9,millicode
- .import $$divI_10,millicode
- .import $$divI_12,millicode
- .import $$divI_14,millicode
- .import $$divI_15,millicode
- .export $$divI,millicode
- .export $$divoI,millicode
- .proc
- .callinfo millicode
-$$divoI
- comib,=,n -1,arg1,negative1 ; when divisor == -1
-$$divI
- comib,>>=,n 15,arg1,small_divisor
- add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
-normal
- sub 0,retreg,retreg ; make it positive
- sub 0,arg1,temp ; clear carry,
- ; negate the divisor
- ds 0,temp,0 ; set V-bit to the comple-
- ; ment of the divisor sign
- add retreg,retreg,retreg ; shift msb bit into carry
- ds r0,arg1,temp ; 1st divide step, if no carry
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 2nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 3rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 4th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 5th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 6th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 7th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 8th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 9th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 10th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 11th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 12th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 13th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 14th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 15th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 16th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 17th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 18th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 19th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 20th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 21st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 22nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 23rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 24th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 25th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 26th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 27th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 28th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 29th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 30th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 31st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 32nd divide step,
- addc retreg,retreg,retreg ; shift last retreg bit into retreg
- xor,>= arg0,arg1,0 ; get correct sign of quotient
- sub 0,retreg,retreg ; based on operand signs
- MILLIRETN
- nop
-;______________________________________________________________________
-small_divisor
- blr,n arg1,r0
- nop
-; table for divisor == 0,1, ... ,15
- addit,= 0,arg1,r0 ; trap if divisor == 0
- nop
- MILLIRET ; divisor == 1
- copy arg0,retreg
- MILLI_BEN($$divI_2) ; divisor == 2
- nop
- MILLI_BEN($$divI_3) ; divisor == 3
- nop
- MILLI_BEN($$divI_4) ; divisor == 4
- nop
- MILLI_BEN($$divI_5) ; divisor == 5
- nop
- MILLI_BEN($$divI_6) ; divisor == 6
- nop
- MILLI_BEN($$divI_7) ; divisor == 7
- nop
- MILLI_BEN($$divI_8) ; divisor == 8
- nop
- MILLI_BEN($$divI_9) ; divisor == 9
- nop
- MILLI_BEN($$divI_10) ; divisor == 10
- nop
- b normal ; divisor == 11
- add,>= 0,arg0,retreg
- MILLI_BEN($$divI_12) ; divisor == 12
- nop
- b normal ; divisor == 13
- add,>= 0,arg0,retreg
- MILLI_BEN($$divI_14) ; divisor == 14
- nop
- MILLI_BEN($$divI_15) ; divisor == 15
- nop
-;______________________________________________________________________
-negative1
- sub 0,arg0,retreg ; result is negation of dividend
- MILLIRET
- addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1
- .procend
- .end
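The header comment in divI.S pins down the contract precisely: truncation toward zero, quotient sign from the XOR of the operand signs, a trap on a zero divisor, and -2**31 divided by -1 returning -2**31 (with $$divoI additionally trapping). A minimal C model of that contract, for reference only (the function name is illustrative, not part of the millicode):

    #include <stdint.h>

    /* C model of the $$divI contract described in the header above.
     * The divisor == 0 trap (ADDIT,= 0,25,0) is not modeled here. */
    int32_t divI_model(int32_t dividend, int32_t divisor)
    {
        if (dividend == INT32_MIN && divisor == -1)
            return INT32_MIN;      /* the one overflow case; $$divoI traps */
        return dividend / divisor; /* C division truncates toward zero */
    }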
diff --git a/sys/lib/libkern/arch/hppa/divU.S b/sys/lib/libkern/arch/hppa/divU.S
deleted file mode 100644
index 025ce1bdf11..00000000000
--- a/sys/lib/libkern/arch/hppa/divU.S
+++ /dev/null
@@ -1,199 +0,0 @@
-/* $OpenBSD: divU.S,v 1.1 1998/06/23 18:56:53 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- * ROUTINE: $$divU
- *
- * Single precision divide for unsigned integers.
- *
- * Quotient is truncated towards zero.
- * Traps on divide by zero.
- *
- * INPUT REGISTERS:
- * arg0 == dividend
- * arg1 == divisor
- * r31 == return pc
- * sr0 == return space when called externally
- *
- * OUTPUT REGISTERS:
- * arg0 = undefined
- * arg1 = undefined
- * ret1 = quotient
- *
- * OTHER REGISTERS AFFECTED:
- * r1 = undefined
- *
- * SIDE EFFECTS:
- * Causes a trap under the following conditions:
- * divisor is zero
- * Changes memory at the following places:
- * NONE
- *
- * PERMISSIBLE CONTEXT:
- * Unwindable.
- * Does not create a stack frame.
- * Suitable for internal or external millicode.
- * Assumes the special millicode register conventions.
- *
- * DISCUSSION:
- * Branches to other millicode routines using BE:
- * $$divU_# for 3,5,6,7,9,10,12,14,15
- *
- * For selected small divisors calls the special divide by constant
- * routines written by Karl Pettis. These are: 3,5,6,7,9,10,12,14,15.
- *
- */
-
-DEFINE(temp,r1)
-DEFINE(retreg,ret1) ; r29
- .subspa $MILLICODE$
- .export $$divU,millicode
- .import $$divU_3,millicode
- .import $$divU_5,millicode
- .import $$divU_6,millicode
- .import $$divU_7,millicode
- .import $$divU_9,millicode
- .import $$divU_10,millicode
- .import $$divU_12,millicode
- .import $$divU_14,millicode
- .import $$divU_15,millicode
- .proc
- .callinfo millicode
-$$divU
-; The subtract is not nullified since it does no harm and can be used
-; by the two cases that branch back to "normal".
- comib,>= 15,arg1,special_divisor
- sub r0,arg1,temp ; clear carry, negate the divisor
- ds r0,temp,r0 ; set V-bit to 1
-normal
- add arg0,arg0,retreg ; shift msb bit into carry
- ds r0,arg1,temp ; 1st divide step, if no carry
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 2nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 3rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 4th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 5th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 6th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 7th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 8th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 9th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 10th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 11th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 12th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 13th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 14th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 15th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 16th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 17th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 18th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 19th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 20th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 21st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 22nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 23rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 24th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 25th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 26th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 27th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 28th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 29th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 30th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 31st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds temp,arg1,temp ; 32nd divide step,
- MILLIRET
- addc retreg,retreg,retreg ; shift last retreg bit into retreg
-;_____________________________________________________________________________
-; handle the cases where divisor is a small constant or has high bit on
-special_divisor
- blr arg1,r0
- comib,>,n 0,arg1,big_divisor ; nullify previous instruction
-zero_divisor ; this label is here to provide external visibility
- addit,= 0,arg1,0 ; trap for zero dvr
- nop
- MILLIRET ; divisor == 1
- copy arg0,retreg
- MILLIRET ; divisor == 2
- extru arg0,30,31,retreg
- MILLI_BEN($$divU_3) ; divisor == 3
- nop
- MILLIRET ; divisor == 4
- extru arg0,29,30,retreg
- MILLI_BEN($$divU_5) ; divisor == 5
- nop
- MILLI_BEN($$divU_6) ; divisor == 6
- nop
- MILLI_BEN($$divU_7) ; divisor == 7
- nop
- MILLIRET ; divisor == 8
- extru arg0,28,29,retreg
- MILLI_BEN($$divU_9) ; divisor == 9
- nop
- MILLI_BEN($$divU_10) ; divisor == 10
- nop
- b normal ; divisor == 11
- ds r0,temp,r0 ; set V-bit to 1
- MILLI_BEN($$divU_12) ; divisor == 12
- nop
- b normal ; divisor == 13
- ds r0,temp,r0 ; set V-bit to 1
- MILLI_BEN($$divU_14) ; divisor == 14
- nop
- MILLI_BEN($$divU_15) ; divisor == 15
- nop
-;_____________________________________________________________________________
-; Handle the case where the high bit is on in the divisor.
-; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
- * Note: dividend>=divisor iff dividend-divisor does not borrow
-; and not borrow iff carry
-big_divisor
- sub arg0,arg1,r0
- MILLIRET
- addc r0,r0,retreg
- .procend
- .end
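The body of $$divU is a fully unrolled bit-serial divide: blr dispatches small divisors through a two-instruction-per-slot branch table, big_divisor handles divisors with the high bit set, and the 32 ds/addc pairs on the normal path each produce one quotient bit. A restoring-division model in C of what the unrolled sequence computes (ds itself is a non-restoring step driven by the PSW V-bit, but the quotient comes out the same; the name is illustrative):

    #include <stdint.h>

    /* Bit-at-a-time model of the 32 "divide step" iterations above.
     * Assumes divisor != 0 (the millicode traps on zero).  rem is
     * widened so divisors with the high bit set also work here; the
     * millicode routes those through big_divisor instead. */
    uint32_t divU_model(uint32_t dividend, uint32_t divisor)
    {
        uint64_t rem = 0;
        uint32_t quot = 0;
        for (int i = 31; i >= 0; i--) {
            rem = (rem << 1) | ((dividend >> i) & 1);
            quot <<= 1;
            if (rem >= divisor) {   /* "not borrow iff carry" */
                rem -= divisor;
                quot |= 1;
            }
        }
        return quot;
    }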
diff --git a/sys/lib/libkern/arch/hppa/milli.S b/sys/lib/libkern/arch/hppa/milli.S
new file mode 100644
index 00000000000..b6b2eeed8e9
--- /dev/null
+++ b/sys/lib/libkern/arch/hppa/milli.S
@@ -0,0 +1,1978 @@
+;
+; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
+;
+; To anyone who acknowledges that this file is provided "AS IS"
+; without any express or implied warranty:
+; permission to use, copy, modify, and distribute this file
+; for any purpose is hereby granted without fee, provided that
+; the above copyright notice and this notice appears in all
+; copies, and that the name of Hewlett-Packard Company not be
+; used in advertising or publicity pertaining to distribution
+; of the software without specific, written prior permission.
+; Hewlett-Packard Company makes no representations about the
+; suitability of this software for any purpose.
+;
+
+; Standard Hardware Register Definitions for Use with Assembler
+; version A.08.06
+; - fr16-31 added at Utah
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+; Hardware General Registers
+r0: .equ 0
+
+r1: .equ 1
+
+r2: .equ 2
+
+r3: .equ 3
+
+r4: .equ 4
+
+r5: .equ 5
+
+r6: .equ 6
+
+r7: .equ 7
+
+r8: .equ 8
+
+r9: .equ 9
+
+r10: .equ 10
+
+r11: .equ 11
+
+r12: .equ 12
+
+r13: .equ 13
+
+r14: .equ 14
+
+r15: .equ 15
+
+r16: .equ 16
+
+r17: .equ 17
+
+r18: .equ 18
+
+r19: .equ 19
+
+r20: .equ 20
+
+r21: .equ 21
+
+r22: .equ 22
+
+r23: .equ 23
+
+r24: .equ 24
+
+r25: .equ 25
+
+r26: .equ 26
+
+r27: .equ 27
+
+r28: .equ 28
+
+r29: .equ 29
+
+r30: .equ 30
+
+r31: .equ 31
+
+; Hardware Space Registers
+sr0: .equ 0
+
+sr1: .equ 1
+
+sr2: .equ 2
+
+sr3: .equ 3
+
+sr4: .equ 4
+
+sr5: .equ 5
+
+sr6: .equ 6
+
+sr7: .equ 7
+
+; Hardware Floating Point Registers
+fr0: .equ 0
+
+fr1: .equ 1
+
+fr2: .equ 2
+
+fr3: .equ 3
+
+fr4: .equ 4
+
+fr5: .equ 5
+
+fr6: .equ 6
+
+fr7: .equ 7
+
+fr8: .equ 8
+
+fr9: .equ 9
+
+fr10: .equ 10
+
+fr11: .equ 11
+
+fr12: .equ 12
+
+fr13: .equ 13
+
+fr14: .equ 14
+
+fr15: .equ 15
+
+fr16: .equ 16
+
+fr17: .equ 17
+
+fr18: .equ 18
+
+fr19: .equ 19
+
+fr20: .equ 20
+
+fr21: .equ 21
+
+fr22: .equ 22
+
+fr23: .equ 23
+
+fr24: .equ 24
+
+fr25: .equ 25
+
+fr26: .equ 26
+
+fr27: .equ 27
+
+fr28: .equ 28
+
+fr29: .equ 29
+
+fr30: .equ 30
+
+fr31: .equ 31
+
+; Hardware Control Registers
+cr0: .equ 0
+
+rctr: .equ 0 ; Recovery Counter Register
+
+
+cr8: .equ 8 ; Protection ID 1
+
+pidr1: .equ 8
+
+
+cr9: .equ 9 ; Protection ID 2
+
+pidr2: .equ 9
+
+
+cr10: .equ 10
+
+ccr: .equ 10 ; Coprocessor Configuration Register
+
+
+cr11: .equ 11
+
+sar: .equ 11 ; Shift Amount Register
+
+
+cr12: .equ 12
+
+pidr3: .equ 12 ; Protection ID 3
+
+
+cr13: .equ 13
+
+pidr4: .equ 13 ; Protection ID 4
+
+
+cr14: .equ 14
+
+iva: .equ 14 ; Interrupt Vector Address
+
+
+cr15: .equ 15
+
+eiem: .equ 15 ; External Interrupt Enable Mask
+
+
+cr16: .equ 16
+
+itmr: .equ 16 ; Interval Timer
+
+
+cr17: .equ 17
+
+pcsq: .equ 17 ; Program Counter Space queue
+
+
+cr18: .equ 18
+
+pcoq: .equ 18 ; Program Counter Offset queue
+
+
+cr19: .equ 19
+
+iir: .equ 19 ; Interruption Instruction Register
+
+
+cr20: .equ 20
+
+isr: .equ 20 ; Interruption Space Register
+
+
+cr21: .equ 21
+
+ior: .equ 21 ; Interruption Offset Register
+
+
+cr22: .equ 22
+
+ipsw: .equ 22 ; Interruption Processor Status Word
+
+
+cr23: .equ 23
+
+eirr: .equ 23 ; External Interrupt Request
+
+
+cr24: .equ 24
+
+ppda: .equ 24 ; Physical Page Directory Address
+
+tr0: .equ 24 ; Temporary register 0
+
+
+cr25: .equ 25
+
+hta: .equ 25 ; Hash Table Address
+
+tr1: .equ 25 ; Temporary register 1
+
+
+cr26: .equ 26
+
+tr2: .equ 26 ; Temporary register 2
+
+
+cr27: .equ 27
+
+tr3: .equ 27 ; Temporary register 3
+
+
+cr28: .equ 28
+
+tr4: .equ 28 ; Temporary register 4
+
+
+cr29: .equ 29
+
+tr5: .equ 29 ; Temporary register 5
+
+
+cr30: .equ 30
+
+tr6: .equ 30 ; Temporary register 6
+
+
+cr31: .equ 31
+
+tr7: .equ 31 ; Temporary register 7
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+; Procedure Call Convention ~
+; Register Definitions for Use with Assembler ~
+; version A.08.06
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+; Software Architecture General Registers
+rp: .equ r2 ; return pointer
+
+mrp: .equ r31 ; millicode return pointer
+
+ret0: .equ r28 ; return value
+
+ret1: .equ r29 ; return value (high part of double)
+
+sl: .equ r29 ; static link
+
+sp: .equ r30 ; stack pointer
+
+dp: .equ r27 ; data pointer
+
+arg0: .equ r26 ; argument
+
+arg1: .equ r25 ; argument or high part of double argument
+
+arg2: .equ r24 ; argument
+
+arg3: .equ r23 ; argument or high part of double argument
+
+;_____________________________________________________________________________
+; Software Architecture Space Registers
+; sr0 ; return link from BLE
+sret: .equ sr1 ; return value
+
+sarg: .equ sr1 ; argument
+
+; sr4 ; PC SPACE tracker
+; sr5 ; process private data
+;_____________________________________________________________________________
+; Software Architecture Pseudo Registers
+previous_sp: .equ 64 ; old stack pointer (locates previous frame)
+
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+; Standard space and subspace definitions. version A.08.06
+; These are generally suitable for programs on HP_UX and HPE.
+; Statements commented out are used when building such things as operating
+; system kernels.
+;;;;;;;;;;;;;;;;
+ .SPACE $TEXT$, SPNUM=0,SORT=8
+ .subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
+ .subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
+ .subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
+; Additional code subspaces should have ALIGN=8 for an interspace BV
+; and should have SORT=24.
+;
+; For an incomplete executable (program bound to shared libraries),
+; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
+; and $PLT$ subspaces respectively.
+;;;;;;;;;;;;;;;
+ .SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
+ .subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
+ .import $global$
+ .subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
+ .subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
+
+ .SPACE $TEXT$
+ .SUBSPA $MILLICODE$
+
+ .EXPORT $$remI,millicode
+; .IMPORT cerror
+$$remI:
+ .PROC
+ .CALLINFO NO_CALLS
+ .ENTRY
+ addit,= 0,arg1,r0
+ add,>= r0,arg0,ret1
+ sub r0,ret1,ret1
+ sub r0,arg1,r1
+ ds r0,r1,r0
+ or r0,r0,r1
+ add ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ ds r1,arg1,r1
+ addc ret1,ret1,ret1
+ movb,>=,n r1,ret1,remI300
+ add,< arg1,r0,r0
+ add,tr r1,arg1,ret1
+ sub r1,arg1,ret1
+remI300: add,>= arg0,r0,r0
+
+ sub r0,ret1,ret1
+ bv r0(r31)
+ nop
+ .EXIT
+ .PROCEND
+
+bit1: .equ 1
+
+bit30: .equ 30
+bit31: .equ 31
+
+len2: .equ 2
+
+len4: .equ 4
+
+
+$$dyncall:
+ .proc
+ .callinfo NO_CALLS
+ .export $$dyncall,MILLICODE
+
+ bb,>=,n 22,bit30,noshlibs
+
+ depi 0,bit31,len2,22
+ ldw 4(22),19
+ ldw 0(22),22
+noshlibs:
+ ldsid (22),r1
+ mtsp r1,sr0
+ be 0(sr0,r22)
+ stw rp,-24(sp)
+ .procend
+
+temp: .EQU r1
+
+retreg: .EQU ret1 ; r29
+
+
+ .export $$divU,millicode
+ .import $$divU_3,millicode
+ .import $$divU_5,millicode
+ .import $$divU_6,millicode
+ .import $$divU_7,millicode
+ .import $$divU_9,millicode
+ .import $$divU_10,millicode
+ .import $$divU_12,millicode
+ .import $$divU_14,millicode
+ .import $$divU_15,millicode
+$$divU:
+ .proc
+ .callinfo NO_CALLS
+; The subtract is not nullified since it does no harm and can be used
+; by the two cases that branch back to "normal".
+ comib,>= 15,arg1,special_divisor
+ sub r0,arg1,temp ; clear carry, negate the divisor
+ ds r0,temp,r0 ; set V-bit to 1
+normal:
+ add arg0,arg0,retreg ; shift msb bit into carry
+ ds r0,arg1,temp ; 1st divide step, if no carry
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 2nd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 3rd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 4th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 5th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 6th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 7th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 8th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 9th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 10th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 11th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 12th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 13th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 14th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 15th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 16th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 17th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 18th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 19th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 20th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 21st divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 22nd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 23rd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 24th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 25th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 26th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 27th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 28th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 29th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 30th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 31st divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 32nd divide step,
+ bv 0(r31)
+ addc retreg,retreg,retreg ; shift last retreg bit into retreg
+;_____________________________________________________________________________
+; handle the cases where divisor is a small constant or has high bit on
+special_divisor:
+ blr arg1,r0
+ comib,>,n 0,arg1,big_divisor ; nullify previous instruction
+zero_divisor: ; this label is here to provide external visibility
+
+ addit,= 0,arg1,0 ; trap for zero dvr
+ nop
+ bv 0(r31) ; divisor == 1
+ copy arg0,retreg
+ bv 0(r31) ; divisor == 2
+ extru arg0,30,31,retreg
+ b,n $$divU_3 ; divisor == 3
+ nop
+ bv 0(r31) ; divisor == 4
+ extru arg0,29,30,retreg
+ b,n $$divU_5 ; divisor == 5
+ nop
+ b,n $$divU_6 ; divisor == 6
+ nop
+ b,n $$divU_7 ; divisor == 7
+ nop
+ bv 0(r31) ; divisor == 8
+ extru arg0,28,29,retreg
+ b,n $$divU_9 ; divisor == 9
+ nop
+ b,n $$divU_10 ; divisor == 10
+ nop
+ b normal ; divisor == 11
+ ds r0,temp,r0 ; set V-bit to 1
+ b,n $$divU_12 ; divisor == 12
+ nop
+ b normal ; divisor == 13
+ ds r0,temp,r0 ; set V-bit to 1
+ b,n $$divU_14 ; divisor == 14
+ nop
+ b,n $$divU_15 ; divisor == 15
+ nop
+;_____________________________________________________________________________
+; Handle the case where the high bit is on in the divisor.
+; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
+; Note: dividend>=divisor iff dividend-divisor does not borrow
+; and not borrow iff carry
+big_divisor:
+ sub arg0,arg1,r0
+ bv 0(r31)
+ addc r0,r0,retreg
+ .procend
+ .end
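The big_divisor epilogue is the carry trick spelled out in the comment above: once the divisor's high bit is set the quotient can only be 0 or 1, so sub arg0,arg1,r0 performs the compare and addc r0,r0,retreg materializes the carry (no-borrow) bit as the result. Modeled in C (illustrative name):

    #include <stdint.h>

    /* if (dividend >= divisor) quotient = 1; else quotient = 0; */
    uint32_t divU_big_model(uint32_t dividend, uint32_t divisor)
    {
        return dividend >= divisor; /* 1 iff the subtract does not borrow */
    }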
+
+t2: .EQU r1
+
+; x2 .EQU arg0 ; r26
+t1: .EQU arg1 ; r25
+
+; x1 .EQU ret1 ; r29
+;_____________________________________________________________________________
+
+$$divide_by_constant:
+ .PROC
+ .CALLINFO NO_CALLS
+
+
+ .export $$divide_by_constant,millicode
+; Provides a "nice" label for the code covered by the unwind descriptor
+; for things like gprof.
+
+
+
+
+
+
+
+
+
+$$divI_2:
+ .EXPORT $$divI_2,MILLICODE
+ COMCLR,>= arg0,0,0
+ ADDI 1,arg0,arg0
+ bv 0(r31)
+ EXTRS arg0,30,31,ret1
+
+
+
+$$divI_4:
+ .EXPORT $$divI_4,MILLICODE
+ COMCLR,>= arg0,0,0
+ ADDI 3,arg0,arg0
+ bv 0(r31)
+ EXTRS arg0,29,30,ret1
+
+
+
+$$divI_8:
+ .EXPORT $$divI_8,MILLICODE
+ COMCLR,>= arg0,0,0
+ ADDI 7,arg0,arg0
+ bv 0(r31)
+ EXTRS arg0,28,29,ret1
+
+
+$$divI_16:
+ .EXPORT $$divI_16,MILLICODE
+ COMCLR,>= arg0,0,0
+ ADDI 15,arg0,arg0
+ bv 0(r31)
+ EXTRS arg0,27,28,ret1
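$$divI_2 through $$divI_16 divide by a power of two with correct truncation toward zero: COMCLR,>= nullifies the ADDI when the dividend is nonnegative; otherwise 2**k - 1 is added before the sign-extending right shift done by EXTRS. A C sketch of the pattern (assuming arithmetic right shift on signed values; the name is illustrative):

    #include <stdint.h>

    int32_t divI_pow2_model(int32_t x, int k)
    {
        if (x < 0)
            x += (1 << k) - 1; /* ADDI (2**k - 1),arg0,arg0; skipped if x >= 0 */
        return x >> k;         /* EXTRS: sign-extending right shift by k */
    }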
+
+
+
+
+
+
+
+
+
+
+
+$$divI_3:
+ .EXPORT $$divI_3,MILLICODE
+ COMB,<,N arg0,0,$neg3
+
+ ADDI 1,arg0,arg0
+ EXTRU arg0,1,2,ret1
+ SH2ADD arg0,arg0,arg0
+ B $pos
+ ADDC ret1,0,ret1
+
+$neg3:
+ SUBI 1,arg0,arg0
+ EXTRU arg0,1,2,ret1
+ SH2ADD arg0,arg0,arg0
+ B $neg
+ ADDC ret1,0,ret1
+
+$$divU_3:
+ .EXPORT $$divU_3,MILLICODE
+ ADDI 1,arg0,arg0
+ ADDC 0,0,ret1
+ SHD ret1,arg0,30,t1
+ SH2ADD arg0,arg0,arg0
+ B $pos
+ ADDC ret1,t1,ret1
+
+
+
+$$divI_5:
+ .EXPORT $$divI_5,MILLICODE
+ COMB,<,N arg0,0,$neg5
+ ADDI 3,arg0,t1
+ SH1ADD arg0,t1,arg0
+ B $pos
+ ADDC 0,0,ret1
+
+$neg5:
+ SUB 0,arg0,arg0
+ ADDI 1,arg0,arg0
+ SHD 0,arg0,31,ret1
+ SH1ADD arg0,arg0,arg0
+ B $neg
+ ADDC ret1,0,ret1
+
+$$divU_5:
+ .EXPORT $$divU_5,MILLICODE
+ ADDI 1,arg0,arg0
+ ADDC 0,0,ret1
+ SHD ret1,arg0,31,t1
+ SH1ADD arg0,arg0,arg0
+ B $pos
+ ADDC t1,ret1,ret1
+
+
+$$divI_6:
+ .EXPORT $$divI_6,MILLICODE
+ COMB,<,N arg0,0,$neg6
+ EXTRU arg0,30,31,arg0
+ ADDI 5,arg0,t1
+ SH2ADD arg0,t1,arg0
+ B $pos
+ ADDC 0,0,ret1
+
+$neg6:
+ SUBI 2,arg0,arg0
+
+
+ EXTRU arg0,30,31,arg0
+ SHD 0,arg0,30,ret1
+ SH2ADD arg0,arg0,arg0
+ B $neg
+ ADDC ret1,0,ret1
+
+$$divU_6:
+ .EXPORT $$divU_6,MILLICODE
+ EXTRU arg0,30,31,arg0
+ ADDI 1,arg0,arg0
+ SHD 0,arg0,30,ret1
+ SH2ADD arg0,arg0,arg0
+ B $pos
+ ADDC ret1,0,ret1
+
+
+$$divU_10:
+ .EXPORT $$divU_10,MILLICODE
+ EXTRU arg0,30,31,arg0
+ ADDI 3,arg0,t1
+ SH1ADD arg0,t1,arg0
+ ADDC 0,0,ret1
+$pos:
+ SHD ret1,arg0,28,t1
+ SHD arg0,0,28,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+$pos_for_17:
+ SHD ret1,arg0,24,t1
+ SHD arg0,0,24,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+
+ SHD ret1,arg0,16,t1
+ SHD arg0,0,16,t2
+ ADD arg0,t2,arg0
+ bv 0(r31)
+ ADDC ret1,t1,ret1
+
+$$divI_10:
+ .EXPORT $$divI_10,MILLICODE
+ COMB,< arg0,0,$neg10
+ COPY 0,ret1
+ EXTRU arg0,30,31,arg0
+ ADDIB,TR 1,arg0,$pos
+ SH1ADD arg0,arg0,arg0
+
+$neg10:
+ SUBI 2,arg0,arg0
+
+
+ EXTRU arg0,30,31,arg0
+ SH1ADD arg0,arg0,arg0
+$neg:
+ SHD ret1,arg0,28,t1
+ SHD arg0,0,28,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+$neg_for_17:
+ SHD ret1,arg0,24,t1
+ SHD arg0,0,24,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+
+ SHD ret1,arg0,16,t1
+ SHD arg0,0,16,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+ bv 0(r31)
+ SUB 0,ret1,ret1
+
+
+$$divI_12:
+ .EXPORT $$divI_12,MILLICODE
+ COMB,< arg0,0,$neg12
+ COPY 0,ret1
+ EXTRU arg0,29,30,arg0
+ ADDIB,TR 1,arg0,$pos
+ SH2ADD arg0,arg0,arg0
+
+$neg12:
+ SUBI 4,arg0,arg0
+
+
+ EXTRU arg0,29,30,arg0
+ B $neg
+ SH2ADD arg0,arg0,arg0
+
+$$divU_12:
+ .EXPORT $$divU_12,MILLICODE
+ EXTRU arg0,29,30,arg0
+ ADDI 5,arg0,t1
+ SH2ADD arg0,t1,arg0
+ B $pos
+ ADDC 0,0,ret1
+
+
+$$divI_15:
+ .EXPORT $$divI_15,MILLICODE
+ COMB,< arg0,0,$neg15
+ COPY 0,ret1
+ ADDIB,TR 1,arg0,$pos+4
+ SHD ret1,arg0,28,t1
+
+$neg15:
+ B $neg
+ SUBI 1,arg0,arg0
+
+$$divU_15:
+ .EXPORT $$divU_15,MILLICODE
+ ADDI 1,arg0,arg0
+ B $pos
+ ADDC 0,0,ret1
+
+
+$$divI_17:
+ .EXPORT $$divI_17,MILLICODE
+ COMB,<,N arg0,0,$neg17
+ ADDI 1,arg0,arg0
+ SHD 0,arg0,28,t1
+ SHD arg0,0,28,t2
+ SUB t2,arg0,arg0
+ B $pos_for_17
+ SUBB t1,0,ret1
+
+$neg17:
+ SUBI 1,arg0,arg0
+ SHD 0,arg0,28,t1
+ SHD arg0,0,28,t2
+ SUB t2,arg0,arg0
+ B $neg_for_17
+ SUBB t1,0,ret1
+
+$$divU_17:
+ .EXPORT $$divU_17,MILLICODE
+ ADDI 1,arg0,arg0
+ ADDC 0,0,ret1
+ SHD ret1,arg0,28,t1
+$u17:
+ SHD arg0,0,28,t2
+ SUB t2,arg0,arg0
+ B $pos_for_17
+ SUBB t1,ret1,ret1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+$$divI_7:
+ .EXPORT $$divI_7,MILLICODE
+ COMB,<,N arg0,0,$neg7
+$7:
+ ADDI 1,arg0,arg0
+ SHD 0,arg0,29,ret1
+ SH3ADD arg0,arg0,arg0
+ ADDC ret1,0,ret1
+$pos7:
+ SHD ret1,arg0,26,t1
+ SHD arg0,0,26,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+
+ SHD ret1,arg0,20,t1
+ SHD arg0,0,20,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,t1
+
+
+
+ COPY 0,ret1
+ SHD,= t1,arg0,24,t1
+$1:
+ ADDB,TR t1,ret1,$2
+ EXTRU arg0,31,24,arg0
+
+ bv,n 0(r31)
+
+$2:
+ ADDB,TR t1,arg0,$1
+ EXTRU,= arg0,7,8,t1
+
+$neg7:
+ SUBI 1,arg0,arg0
+$8:
+ SHD 0,arg0,29,ret1
+ SH3ADD arg0,arg0,arg0
+ ADDC ret1,0,ret1
+
+$neg7_shift:
+ SHD ret1,arg0,26,t1
+ SHD arg0,0,26,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,ret1
+
+ SHD ret1,arg0,20,t1
+ SHD arg0,0,20,t2
+ ADD arg0,t2,arg0
+ ADDC ret1,t1,t1
+
+
+
+ COPY 0,ret1
+ SHD,= t1,arg0,24,t1
+$3:
+ ADDB,TR t1,ret1,$4
+ EXTRU arg0,31,24,arg0
+
+ bv 0(r31)
+ SUB 0,ret1,ret1
+
+$4:
+ ADDB,TR t1,arg0,$3
+ EXTRU,= arg0,7,8,t1
+
+$$divU_7:
+ .EXPORT $$divU_7,MILLICODE
+ ADDI 1,arg0,arg0
+ ADDC 0,0,ret1
+ SHD ret1,arg0,29,t1
+ SH3ADD arg0,arg0,arg0
+ B $pos7
+ ADDC t1,ret1,ret1
+
+
+$$divI_9:
+ .EXPORT $$divI_9,MILLICODE
+ COMB,<,N arg0,0,$neg9
+ ADDI 1,arg0,arg0
+ SHD 0,arg0,29,t1
+ SHD arg0,0,29,t2
+ SUB t2,arg0,arg0
+ B $pos7
+ SUBB t1,0,ret1
+
+$neg9:
+ SUBI 1,arg0,arg0
+ SHD 0,arg0,29,t1
+ SHD arg0,0,29,t2
+ SUB t2,arg0,arg0
+ B $neg7_shift
+ SUBB t1,0,ret1
+
+$$divU_9:
+ .EXPORT $$divU_9,MILLICODE
+ ADDI 1,arg0,arg0
+ ADDC 0,0,ret1
+ SHD ret1,arg0,29,t1
+ SHD arg0,0,29,t2
+ SUB t2,arg0,arg0
+ B $pos7
+ SUBB t1,ret1,ret1
+
+
+$$divI_14:
+ .EXPORT $$divI_14,MILLICODE
+ COMB,<,N arg0,0,$neg14
+$$divU_14:
+ .EXPORT $$divU_14,MILLICODE
+ B $7
+ EXTRU arg0,30,31,arg0
+
+$neg14:
+ SUBI 2,arg0,arg0
+ B $8
+ EXTRU arg0,30,31,arg0
+
+ .PROCEND
+ .END
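The divide-by-constant routines avoid the 32-step loop entirely: they multiply by the binary expansion of 1/d, built up with shift-and-adds and widened to 64 bits by the SHD folding sequences at $pos and $pos_for_17. Read as 64-bit arithmetic, $$divU_3 computes the high word of (x+1) * 0x55555555, where 0x55555555 = (2**32 - 1)/3. A C model of that folding (the name is illustrative, and the mapping of each step to the SHD pairs is an interpretation of the code above):

    #include <stdint.h>

    uint32_t divU3_model(uint32_t x)
    {
        uint64_t y = (uint64_t)x + 1; /* ADDI 1, carry kept in ret1   */
        y += y << 2;                  /* * 5       (SH2ADD + SHD 30)  */
        y += y << 4;                  /* * 0x11    ($pos, SHD 28)     */
        y += y << 8;                  /* * 0x101   (SHD 24)           */
        y += y << 16;                 /* * 0x10001 (SHD 16)           */
        /* y == (x + 1) * 0x55555555, which still fits in 64 bits */
        return (uint32_t)(y >> 32);   /* == x / 3 for all 32-bit x    */
    }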
+
+rmndr: .EQU ret1 ; r29
+
+
+ .export $$remU,millicode
+$$remU:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ comib,>=,n 0,arg1,special_case
+ sub r0,arg1,rmndr ; clear carry, negate the divisor
+ ds r0,rmndr,r0 ; set V-bit to 1
+ add arg0,arg0,temp ; shift msb bit into carry
+ ds r0,arg1,rmndr ; 1st divide step, if no carry
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 2nd divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 3rd divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 4th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 5th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 6th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 7th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 8th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 9th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 10th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 11th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 12th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 13th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 14th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 15th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 16th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 17th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 18th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 19th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 20th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 21st divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 22nd divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 23rd divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 24th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 25th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 26th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 27th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 28th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 29th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 30th divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 31st divide step
+ addc temp,temp,temp ; shift temp with/into carry
+ ds rmndr,arg1,rmndr ; 32nd divide step,
+ comiclr,<= 0,rmndr,r0
+ add rmndr,arg1,rmndr ; correction
+; .exit
+ bv,n 0(r31)
+ nop
+; Putting >= on the last DS and deleting COMICLR does not work!
+;_____________________________________________________________________________
+special_case:
+ addit,= 0,arg1,r0 ; trap on div by zero
+ sub,>>= arg0,arg1,rmndr
+ copy arg0,rmndr
+ .exit
+ bv,n 0(r31)
+ nop
+ .procend
+ .end
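$$remU runs the same 32 ds steps but keeps the remainder side of the divide; because ds is non-restoring, the remainder can leave the loop off by one divisor, which is what the comiclr/add "correction" pair fixes. The special_case path covers a zero divisor (trap) and divisors with the high bit set, where one conditional subtract suffices (the sub,>>= / copy pair). A restoring C model that returns the same value (illustrative name):

    #include <stdint.h>

    uint32_t remU_model(uint32_t dividend, uint32_t divisor)
    {
        /* special_case: divisor == 0 traps in the millicode */
        if (divisor >> 31)
            return dividend >= divisor ? dividend - divisor : dividend;
        uint32_t rem = 0;
        for (int i = 31; i >= 0; i--) {
            rem = (rem << 1) | ((dividend >> i) & 1);
            if (rem >= divisor)
                rem -= divisor;
        }
        return rem;
    }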
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+; Use bv 0(r31) and bv,n 0(r31) instead.
+; #define return bv 0(%mrp)
+; #define return_n bv,n 0(%mrp)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ .subspa $MILLICODE$
+ .align 16
+$$mulI:
+
+ .proc
+ .callinfo NO_CALLS
+ .export $$mulI, millicode
+ combt,<<= %r25,%r26,l4 ; swap args if unsigned %r25>%r26
+ copy 0,%r29 ; zero out the result
+ xor %r26,%r25,%r26 ; swap %r26 & %r25 using the
+ xor %r26,%r25,%r25 ; old xor trick
+ xor %r26,%r25,%r26
+l4: combt,<= 0,%r26,l3 ; if %r26>=0 then proceed like unsigned
+
+ zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
+ sub,> 0,%r25,%r1 ; otherwise negate both and
+ combt,<=,n %r26,%r1,l2 ; swap back if |%r26|<|%r25|
+ sub 0,%r26,%r25
+ movb,tr,n %r1,%r26,l2 ; 10th inst.
+
+l0: add %r29,%r1,%r29 ; add in this partial product
+
+l1: zdep %r26,23,24,%r26 ; %r26 <<= 8 ******************
+
+l2: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
+
+l3: blr %r1,0 ; case on these 8 bits ******
+
+ extru %r25,23,24,%r25 ; %r25 >>= 8 ******************
+
+;16 insts before this.
+; %r26 <<= 8 **************************
+x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop
+
+x1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop
+
+x2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
+
+x3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
+
+x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
+
+x5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
+
+x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0
+
+x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
+
+x9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
+
+x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
+
+x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
+
+x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0
+
+x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
+
+x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0
+
+x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0
+
+x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
+
+x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
+
+x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0
+
+x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
+
+x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
+
+x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
+
+x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1
+
+x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
+
+x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
+
+x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
+
+x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1
+
+x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
+
+x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0
+
+x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1
+
+x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
+
+x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
+
+x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
+
+x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
+
+x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
+
+x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1
+
+x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
+
+x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29
+
+x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
+
+x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
+
+x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
+
+x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x87: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
+
+x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
+
+x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
+
+x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
+
+x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
+
+x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
+
+x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
+
+x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
+
+x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
+
+x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
+
+x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
+
+x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
+
+x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
+
+x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
+
+x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
+
+x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
+
+x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
+
+x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
+
+x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
+
+x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
+
+x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
+
+x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
+
+x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
+
+x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
+
+x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
+
+x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
+
+x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
+
+x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x162: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x163: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
+
+x164: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x165: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x166: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x167: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
+
+x168: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x169: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x170: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x171: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
+
+x172: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
+
+x173: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
+
+x174: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t04a0 ! sh2add %r1,%r1,%r1
+
+x175: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
+
+x176: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0 ! add %r1,%r26,%r1
+
+x177: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
+
+x178: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
+
+x179: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r26,%r1
+
+x180: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x181: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x182: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
+
+x183: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
+
+x184: sh2add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! add %r1,%r26,%r1
+
+x185: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x186: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
+
+x187: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
+
+x188: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r26,%r1,%r1
+
+x189: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
+
+x190: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
+
+x191: zdep %r26,25,26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
+
+x192: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x193: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x194: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x195: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x196: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
+
+x197: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
+
+x198: zdep %r26,25,26,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x199: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
+
+x200: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x201: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x202: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x203: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
+
+x204: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
+
+x205: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x206: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
+
+x207: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
+
+x208: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
+
+x209: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
+
+x210: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
+
+x211: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
+
+x212: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
+
+x213: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0a0 ! sh2add %r1,%r26,%r1
+
+x214: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e2t04a0 ! sh3add %r1,%r26,%r1
+
+x215: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
+
+x216: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x217: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x218: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
+
+x219: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x220: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
+
+x221: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
+
+x222: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
+
+x223: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
+
+x224: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
+
+x225: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
+
+x226: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! zdep %r1,26,27,%r1
+
+x227: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
+
+x228: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
+
+x229: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r1,%r1
+
+x230: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_5t0 ! add %r1,%r26,%r1
+
+x231: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
+
+x232: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0 ! sh2add %r1,%r26,%r1
+
+x233: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0a0 ! sh2add %r1,%r26,%r1
+
+x234: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r1,%r1
+
+x235: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r1,%r1
+
+x236: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e4t08a0 ! sh1add %r1,%r1,%r1
+
+x237: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_3t0 ! sub %r1,%r26,%r1
+
+x238: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e2t04a0 ! sh3add %r1,%r1,%r1
+
+x239: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0ma0 ! sh1add %r1,%r1,%r1
+
+x240: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0 ! sh1add %r1,%r1,%r1
+
+x241: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0a0 ! sh1add %r1,%r1,%r1
+
+x242: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
+
+x243: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
+
+x244: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
+
+x245: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
+
+x246: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
+
+x247: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
+
+x248: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
+
+x249: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
+
+x250: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
+
+x251: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
+
+x252: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
+
+x253: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
+
+x254: zdep %r26,24,25,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
+
+x255: zdep %r26,23,24,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
+
+;1040 insts before this.
+ret_t0: bv 0(r31)
+
+e_t0: add %r29,%r1,%r29
+
+e_shift: comb,<> %r25,0,l2
+
+ zdep %r26,23,24,%r26 ; %r26 <<= 8 ***********
+ bv,n 0(r31)
+e_t0ma0: comb,<> %r25,0,l0
+
+ sub %r1,%r26,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_t0a0: comb,<> %r25,0,l0
+
+ add %r1,%r26,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_t02a0: comb,<> %r25,0,l0
+
+ sh1add %r26,%r1,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_t04a0: comb,<> %r25,0,l0
+
+ sh2add %r26,%r1,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_2t0: comb,<> %r25,0,l1
+
+ sh1add %r1,%r29,%r29
+ bv,n 0(r31)
+e_2t0a0: comb,<> %r25,0,l0
+
+ sh1add %r1,%r26,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e2t04a0: sh1add %r26,%r1,%r1
+
+ comb,<> %r25,0,l1
+ sh1add %r1,%r29,%r29
+ bv,n 0(r31)
+e_3t0: comb,<> %r25,0,l0
+
+ sh1add %r1,%r1,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_4t0: comb,<> %r25,0,l1
+
+ sh2add %r1,%r29,%r29
+ bv,n 0(r31)
+e_4t0a0: comb,<> %r25,0,l0
+
+ sh2add %r1,%r26,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e4t08a0: sh1add %r26,%r1,%r1
+
+ comb,<> %r25,0,l1
+ sh2add %r1,%r29,%r29
+ bv,n 0(r31)
+e_5t0: comb,<> %r25,0,l0
+
+ sh2add %r1,%r1,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+e_8t0: comb,<> %r25,0,l1
+
+ sh3add %r1,%r29,%r29
+ bv,n 0(r31)
+e_8t0a0: comb,<> %r25,0,l0
+
+ sh3add %r1,%r26,%r1
+ bv 0(r31)
+ add %r29,%r1,%r29
+
+ .procend
+ .end
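The table entries above all feed a handful of shared e_* epilogues: each xN recipe builds N*%r26 in %r1, the epilogue folds that partial product into the running result in %r29, and control returns through l0/l1/l2 to consume the next byte of the multiplier. A minimal C sketch of that outer skeleton (illustrative only: mulI_skeleton is not a routine in this file, and products wrap mod 2^32 as in the millicode):

#include <stdint.h>
#include <stdio.h>

static uint32_t
mulI_skeleton(uint32_t a, uint32_t b)
{
	uint32_t r = 0;			/* copy 0,r */

	while (b != 0) {		/* the a1-nonzero tests in each xN slot */
		r += (b & 0xff) * a;	/* xN recipe + e_* epilogue: r += N*a */
		b >>= 8;		/* extru: drop the consumed byte */
		a <<= 8;		/* zdep (a0 <<= 8) on the l1 path */
	}
	return r;
}

int
main(void)
{
	printf("%u\n", mulI_skeleton(7, 300));	/* 2100 */
	return 0;
}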
+
+ .import $$divI_2,millicode
+ .import $$divI_3,millicode
+ .import $$divI_4,millicode
+ .import $$divI_5,millicode
+ .import $$divI_6,millicode
+ .import $$divI_7,millicode
+ .import $$divI_8,millicode
+ .import $$divI_9,millicode
+ .import $$divI_10,millicode
+ .import $$divI_12,millicode
+ .import $$divI_14,millicode
+ .import $$divI_15,millicode
+ .export $$divI,millicode
+ .export $$divoI,millicode
+$$divoI:
+ .proc
+ .callinfo NO_CALLS
+ comib,=,n -1,arg1,negative1 ; when divisor == -1
+$$divI:
+ comib,>>=,n 15,arg1,small_divisor
+ add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
+normal1:
+ sub 0,retreg,retreg ; make it positive
+ sub 0,arg1,temp ; clear carry,
+ ; negate the divisor
+ ds 0,temp,0 ; set V-bit to the comple-
+ ; ment of the divisor sign
+ add retreg,retreg,retreg ; shift msb bit into carry
+ ds r0,arg1,temp ; 1st divide step, if no carry
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 2nd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 3rd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 4th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 5th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 6th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 7th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 8th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 9th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 10th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 11th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 12th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 13th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 14th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 15th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 16th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 17th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 18th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 19th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 20th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 21st divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 22nd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 23rd divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 24th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 25th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 26th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 27th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 28th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 29th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 30th divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 31st divide step
+ addc retreg,retreg,retreg ; shift retreg with/into carry
+ ds temp,arg1,temp ; 32nd divide step,
+ addc retreg,retreg,retreg ; shift last retreg bit into retreg
+ xor,>= arg0,arg1,0 ; get correct sign of quotient
+ sub 0,retreg,retreg ; based on operand signs
+ bv,n 0(r31)
+ nop
+;______________________________________________________________________
+small_divisor:
+ blr,n arg1,r0
+ nop
+; table for divisor == 0,1, ... ,15
+ addit,= 0,arg1,r0 ; trap if divisor == 0
+ nop
+ bv 0(r31) ; divisor == 1
+ copy arg0,retreg
+ b,n $$divI_2 ; divisor == 2
+ nop
+ b,n $$divI_3 ; divisor == 3
+ nop
+ b,n $$divI_4 ; divisor == 4
+ nop
+ b,n $$divI_5 ; divisor == 5
+ nop
+ b,n $$divI_6 ; divisor == 6
+ nop
+ b,n $$divI_7 ; divisor == 7
+ nop
+ b,n $$divI_8 ; divisor == 8
+ nop
+ b,n $$divI_9 ; divisor == 9
+ nop
+ b,n $$divI_10 ; divisor == 10
+ nop
+ b normal1 ; divisor == 11
+ add,>= 0,arg0,retreg
+ b,n $$divI_12 ; divisor == 12
+ nop
+ b normal1 ; divisor == 13
+ add,>= 0,arg0,retreg
+ b,n $$divI_14 ; divisor == 14
+ nop
+ b,n $$divI_15 ; divisor == 15
+ nop
+;______________________________________________________________________
+negative1:
+ sub 0,arg0,retreg ; result is negation of dividend
+ bv 0(r31)
+ addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1
+ .procend
+ .end
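The 32 unrolled ds/addc pairs above use PA-RISC's one-bit divide-step primitive, preceded by sign normalization and followed by a sign fixup taken from the XOR of the operand signs. A rough C equivalent, as a sketch only: divI_sketch is a hypothetical name, the loop uses the simpler restoring formulation (ds itself is a non-restoring conditional add/subtract that yields the same quotient), and the ADDIT/ADDO trap cases are omitted.

#include <stdint.h>
#include <stdio.h>

static int32_t
divI_sketch(int32_t dividend, int32_t divisor)
{
	uint32_t n = dividend < 0 ? -(uint32_t)dividend : (uint32_t)dividend;
	uint32_t d = divisor  < 0 ? -(uint32_t)divisor  : (uint32_t)divisor;
	uint32_t q = 0, r = 0;
	int i;

	for (i = 31; i >= 0; i--) {		/* one "divide step" per bit */
		r = (r << 1) | ((n >> i) & 1);	/* shift next dividend bit in */
		if (r >= d) {			/* subtract when it fits ...  */
			r -= d;
			q |= 1u << i;		/* ... and set a quotient bit */
		}
	}
	/* quotient sign = XOR of the operand signs, as in the millicode */
	return ((dividend ^ divisor) < 0) ? -(int32_t)q : (int32_t)q;
}

int
main(void)
{
	printf("%d\n", divI_sketch(-100, 7));	/* -14, truncated toward zero */
	return 0;
}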
diff --git a/sys/lib/libkern/arch/hppa/mulI.S b/sys/lib/libkern/arch/hppa/mulI.S
deleted file mode 100644
index 3bf9c1d215b..00000000000
--- a/sys/lib/libkern/arch/hppa/mulI.S
+++ /dev/null
@@ -1,480 +0,0 @@
-/* $OpenBSD: mulI.S,v 1.1 1998/06/23 18:56:54 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- * Author: Dan Zuras
- * Editor: Pratap Subrahmanyam
- *
- * ROUTINE: $$mulI
- *
- *
- * DESCRIPTION:
- *
- * $$mulI multiplies two single word integers, giving a single
- * word result.
- *
- *
- * INPUT REGISTERS:
- *
- * arg0 = Operand 1
- * arg1 = Operand 2
- * r31 == return pc
- * sr0 == return space when called externally
- *
- *
- * OUTPUT REGISTERS:
- *
- * arg0 = undefined
- * arg1 = undefined
- * ret1 = result
- *
- * OTHER REGISTERS AFFECTED:
- *
- * r1 = undefined
- *
- * SIDE EFFECTS:
- *
- * Causes a trap under the following conditions: NONE
- * Changes memory at the following places: NONE
- *
- * PERMISSIBLE CONTEXT:
- *
- * Unwindable
- * Does not create a stack frame
- * Is usable for internal or external millicode
- *
- * DISCUSSION:
- *
- * Calls other millicode routines via r31: NONE
- * Calls other millicode routines: NONE
- *
- */
-
-
-#define a0 %arg0
-#define a1 %arg1
-#define rtn %mrp
-#define t0 %r1
-#define r %ret1
-
-#define a0__128a0 zdep a0,24,25,a0
-#define a0__256a0 zdep a0,23,24,a0
-#define a1_ne_0_b_l0 comb,<> a1,0,l0
-#define a1_ne_0_b_l1 comb,<> a1,0,l1
-#define a1_ne_0_b_l2 comb,<> a1,0,l2
-#define b_n_ret_t0 b,n ret_t0
-#define b_e_shift b e_shift
-#define b_e_t0ma0 b e_t0ma0
-#define b_e_t0 b e_t0
-#define b_e_t0a0 b e_t0a0
-#define b_e_t02a0 b e_t02a0
-#define b_e_t04a0 b e_t04a0
-#define b_e_2t0 b e_2t0
-#define b_e_2t0a0 b e_2t0a0
-#define b_e_2t04a0 b e2t04a0
-#define b_e_3t0 b e_3t0
-#define b_e_4t0 b e_4t0
-#define b_e_4t0a0 b e_4t0a0
-#define b_e_4t08a0 b e4t08a0
-#define b_e_5t0 b e_5t0
-#define b_e_8t0 b e_8t0
-#define b_e_8t0a0 b e_8t0a0
-#define r__r_a0 add r,a0,r
-#define r__r_2a0 sh1add a0,r,r
-#define r__r_4a0 sh2add a0,r,r
-#define r__r_8a0 sh3add a0,r,r
-#define r__r_t0 add r,t0,r
-#define r__r_2t0 sh1add t0,r,r
-#define r__r_4t0 sh2add t0,r,r
-#define r__r_8t0 sh3add t0,r,r
-; Use MILLIRET and MILLIRETN instead.
-; #define return bv 0(rtn)
-; #define return_n bv,n 0(rtn)
-#define t0__3a0 sh1add a0,a0,t0
-#define t0__4a0 sh2add a0,0,t0
-#define t0__5a0 sh2add a0,a0,t0
-#define t0__8a0 sh3add a0,0,t0
-#define t0__9a0 sh3add a0,a0,t0
-#define t0__16a0 zdep a0,27,28,t0
-#define t0__32a0 zdep a0,26,27,t0
-#define t0__64a0 zdep a0,25,26,t0
-#define t0__128a0 zdep a0,24,25,t0
-#define t0__t0ma0 sub t0,a0,t0
-#define t0__t0_a0 add t0,a0,t0
-#define t0__t0_2a0 sh1add a0,t0,t0
-#define t0__t0_4a0 sh2add a0,t0,t0
-#define t0__t0_8a0 sh3add a0,t0,t0
-#define t0__2t0_a0 sh1add t0,a0,t0
-#define t0__3t0 sh1add t0,t0,t0
-#define t0__4t0 sh2add t0,0,t0
-#define t0__4t0_a0 sh2add t0,a0,t0
-#define t0__5t0 sh2add t0,t0,t0
-#define t0__8t0 sh3add t0,0,t0
-#define t0__8t0_a0 sh3add t0,a0,t0
-#define t0__9t0 sh3add t0,t0,t0
-#define t0__16t0 zdep t0,27,28,t0
-#define t0__32t0 zdep t0,26,27,t0
-#define t0__256a0 zdep a0,23,24,t0
-
-
- .subspa $MILLICODE$
- .align 16
- .proc
- .callinfo millicode
- .export $$mulI, millicode
-$$mulI
- combt,<<= a1,a0,l4 ; swap args if unsigned a1>a0
- copy 0,r ; zero out the result
- xor a0,a1,a0 ; swap a0 & a1 using the
- xor a0,a1,a1 ; old xor trick
- xor a0,a1,a0
-l4 combt,<= 0,a0,l3 ; if a0>=0 then proceed like unsigned
- zdep a1,30,8,t0 ; t0 = (a1&0xff)<<1 *********
- sub,> 0,a1,t0 ; otherwise negate both and
- combt,<=,n a0,t0,l2 ; swap back if |a0|<|a1|
- sub 0,a0,a1
- movb,tr,n t0,a0,l2 ; 10th inst.
-
-l0 r__r_t0 ; add in this partial product
-l1 a0__256a0 ; a0 <<= 8 ******************
-l2 zdep a1,30,8,t0 ; t0 = (a1&0xff)<<1 *********
-l3 blr t0,0 ; case on these 8 bits ******
- extru a1,23,24,a1 ; a1 >>= 8 ******************
-
-;16 insts before this.
-; a0 <<= 8 **************************
-x0 a1_ne_0_b_l2 ! a0__256a0 ! MILLIRETN ! nop
-x1 a1_ne_0_b_l1 ! r__r_a0 ! MILLIRETN ! nop
-x2 a1_ne_0_b_l1 ! r__r_2a0 ! MILLIRETN ! nop
-x3 a1_ne_0_b_l0 ! t0__3a0 ! MILLIRET ! r__r_t0
-x4 a1_ne_0_b_l1 ! r__r_4a0 ! MILLIRETN ! nop
-x5 a1_ne_0_b_l0 ! t0__5a0 ! MILLIRET ! r__r_t0
-x6 t0__3a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
-x7 t0__3a0 ! a1_ne_0_b_l0 ! r__r_4a0 ! b_n_ret_t0
-x8 a1_ne_0_b_l1 ! r__r_8a0 ! MILLIRETN ! nop
-x9 a1_ne_0_b_l0 ! t0__9a0 ! MILLIRET ! r__r_t0
-x10 t0__5a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
-x11 t0__3a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
-x12 t0__3a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
-x13 t0__5a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
-x14 t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
-x15 t0__5a0 ! a1_ne_0_b_l0 ! t0__3t0 ! b_n_ret_t0
-x16 t0__16a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
-x17 t0__9a0 ! a1_ne_0_b_l0 ! t0__t0_8a0 ! b_n_ret_t0
-x18 t0__9a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
-x19 t0__9a0 ! a1_ne_0_b_l0 ! t0__2t0_a0 ! b_n_ret_t0
-x20 t0__5a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
-x21 t0__5a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
-x22 t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
-x23 t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
-x24 t0__3a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
-x25 t0__5a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
-x26 t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
-x27 t0__3a0 ! a1_ne_0_b_l0 ! t0__9t0 ! b_n_ret_t0
-x28 t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
-x29 t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
-x30 t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
-x31 t0__32a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
-x32 t0__32a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
-x33 t0__8a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
-x34 t0__16a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
-x35 t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__t0_8a0
-x36 t0__9a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
-x37 t0__9a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
-x38 t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
-x39 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
-x40 t0__5a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
-x41 t0__5a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
-x42 t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
-x43 t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
-x44 t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
-x45 t0__9a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
-x46 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_a0
-x47 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_2a0
-x48 t0__3a0 ! a1_ne_0_b_l0 ! t0__16t0 ! b_n_ret_t0
-x49 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_4a0
-x50 t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
-x51 t0__9a0 ! t0__t0_8a0 ! b_e_t0 ! t0__3t0
-x52 t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
-x53 t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
-x54 t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
-x55 t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__2t0_a0
-x56 t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
-x57 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__3t0
-x58 t0__3a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
-x59 t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__3t0
-x60 t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
-x61 t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
-x62 t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
-x63 t0__64a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
-x64 t0__64a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
-x65 t0__8a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
-x66 t0__32a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
-x67 t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
-x68 t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
-x69 t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
-x70 t0__64a0 ! t0__t0_4a0 ! b_e_t0 ! t0__t0_2a0
-x71 t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__t0ma0
-x72 t0__9a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
-x73 t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_t0
-x74 t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
-x75 t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
-x76 t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
-x77 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
-x78 t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__2t0_a0
-x79 t0__16a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
-x80 t0__16a0 ! t0__5t0 ! b_e_shift ! r__r_t0
-x81 t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_t0
-x82 t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
-x83 t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
-x84 t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
-x85 t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
-x86 t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
-x87 t0__9a0 ! t0__9t0 ! b_e_t02a0 ! t0__t0_4a0
-x88 t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
-x89 t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
-x90 t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
-x91 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__2t0_a0
-x92 t0__5a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
-x93 t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__3t0
-x94 t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__t0_2a0
-x95 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
-x96 t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
-x97 t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
-x98 t0__32a0 ! t0__3t0 ! b_e_t0 ! t0__t0_2a0
-x99 t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
-x100 t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
-x101 t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
-x102 t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
-x103 t0__5a0 ! t0__5t0 ! b_e_t02a0 ! t0__4t0_a0
-x104 t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
-x105 t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
-x106 t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__4t0_a0
-x107 t0__9a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__8t0_a0
-x108 t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
-x109 t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
-x110 t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__2t0_a0
-x111 t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
-x112 t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__16t0
-x113 t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__3t0
-x114 t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__3t0
-x115 t0__9a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__3t0
-x116 t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__4t0_a0
-x117 t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
-x118 t0__3a0 ! t0__4t0_a0 ! b_e_t0a0 ! t0__9t0
-x119 t0__3a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__9t0
-x120 t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
-x121 t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
-x122 t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
-x123 t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
-x124 t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
-x125 t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
-x126 t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
-x127 t0__128a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
-x128 t0__128a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
-x129 t0__128a0 ! a1_ne_0_b_l0 ! t0__t0_a0 ! b_n_ret_t0
-x130 t0__64a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
-x131 t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
-x132 t0__8a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
-x133 t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
-x134 t0__8a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
-x135 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__3t0
-x136 t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
-x137 t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
-x138 t0__8a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
-x139 t0__8a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__4t0_a0
-x140 t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__5t0
-x141 t0__8a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__2t0_a0
-x142 t0__9a0 ! t0__8t0 ! b_e_2t0 ! t0__t0ma0
-x143 t0__16a0 ! t0__9t0 ! b_e_t0 ! t0__t0ma0
-x144 t0__9a0 ! t0__8t0 ! b_e_shift ! r__r_2t0
-x145 t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__2t0_a0
-x146 t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
-x147 t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
-x148 t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
-x149 t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
-x150 t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
-x151 t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
-x152 t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
-x153 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
-x154 t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
-x155 t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__5t0
-x156 t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
-x157 t0__32a0 ! t0__t0ma0 ! b_e_t02a0 ! t0__5t0
-x158 t0__16a0 ! t0__5t0 ! b_e_2t0 ! t0__t0ma0
-x159 t0__32a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
-x160 t0__5a0 ! t0__4t0 ! b_e_shift ! r__r_8t0
-x161 t0__8a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
-x162 t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_2t0
-x163 t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__2t0_a0
-x164 t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_4t0
-x165 t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
-x166 t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__2t0_a0
-x167 t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
-x168 t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
-x169 t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__8t0_a0
-x170 t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__5t0
-x171 t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__9t0
-x172 t0__5a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__2t0_a0
-x173 t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__9t0
-x174 t0__32a0 ! t0__t0_2a0 ! b_e_t04a0 ! t0__5t0
-x175 t0__8a0 ! t0__2t0_a0 ! b_e_5t0 ! t0__2t0_a0
-x176 t0__5a0 ! t0__4t0_a0 ! b_e_8t0 ! t0__t0_a0
-x177 t0__5a0 ! t0__4t0_a0 ! b_e_8t0a0 ! t0__t0_a0
-x178 t0__5a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__8t0_a0
-x179 t0__5a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__8t0_a0
-x180 t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
-x181 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
-x182 t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__2t0_a0
-x183 t0__9a0 ! t0__5t0 ! b_e_2t0a0 ! t0__2t0_a0
-x184 t0__5a0 ! t0__9t0 ! b_e_4t0 ! t0__t0_a0
-x185 t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
-x186 t0__32a0 ! t0__t0ma0 ! b_e_2t0 ! t0__3t0
-x187 t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__5t0
-x188 t0__9a0 ! t0__5t0 ! b_e_4t0 ! t0__t0_2a0
-x189 t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
-x190 t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__5t0
-x191 t0__64a0 ! t0__3t0 ! b_e_t0 ! t0__t0ma0
-x192 t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
-x193 t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
-x194 t0__8a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
-x195 t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
-x196 t0__8a0 ! t0__3t0 ! b_e_4t0 ! t0__2t0_a0
-x197 t0__8a0 ! t0__3t0 ! b_e_4t0a0 ! t0__2t0_a0
-x198 t0__64a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
-x199 t0__8a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
-x200 t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_8t0
-x201 t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__8t0_a0
-x202 t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__4t0_a0
-x203 t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__4t0_a0
-x204 t0__8a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
-x205 t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__5t0
-x206 t0__64a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__3t0
-x207 t0__8a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
-x208 t0__5a0 ! t0__5t0 ! b_e_8t0 ! t0__t0_a0
-x209 t0__5a0 ! t0__5t0 ! b_e_8t0a0 ! t0__t0_a0
-x210 t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__5t0
-x211 t0__5a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__5t0
-x212 t0__3a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__4t0_a0
-x213 t0__3a0 ! t0__4t0_a0 ! b_e_4t0a0 ! t0__4t0_a0
-x214 t0__9a0 ! t0__t0_4a0 ! b_e_2t04a0 ! t0__8t0_a0
-x215 t0__5a0 ! t0__4t0_a0 ! b_e_5t0 ! t0__2t0_a0
-x216 t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
-x217 t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
-x218 t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
-x219 t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
-x220 t0__3a0 ! t0__9t0 ! b_e_4t0 ! t0__2t0_a0
-x221 t0__3a0 ! t0__9t0 ! b_e_4t0a0 ! t0__2t0_a0
-x222 t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__3t0
-x223 t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
-x224 t0__9a0 ! t0__3t0 ! b_e_8t0 ! t0__t0_a0
-x225 t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
-x226 t0__3a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__32t0
-x227 t0__9a0 ! t0__5t0 ! b_e_t02a0 ! t0__5t0
-x228 t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
-x229 t0__9a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__3t0
-x230 t0__9a0 ! t0__5t0 ! b_e_5t0 ! t0__t0_a0
-x231 t0__9a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
-x232 t0__3a0 ! t0__2t0_a0 ! b_e_8t0 ! t0__4t0_a0
-x233 t0__3a0 ! t0__2t0_a0 ! b_e_8t0a0 ! t0__4t0_a0
-x234 t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__9t0
-x235 t0__3a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__9t0
-x236 t0__9a0 ! t0__2t0_a0 ! b_e_4t08a0 ! t0__3t0
-x237 t0__16a0 ! t0__5t0 ! b_e_3t0 ! t0__t0ma0
-x238 t0__3a0 ! t0__4t0_a0 ! b_e_2t04a0 ! t0__9t0
-x239 t0__16a0 ! t0__5t0 ! b_e_t0ma0 ! t0__3t0
-x240 t0__9a0 ! t0__t0_a0 ! b_e_8t0 ! t0__3t0
-x241 t0__9a0 ! t0__t0_a0 ! b_e_8t0a0 ! t0__3t0
-x242 t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__8t0_a0
-x243 t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__3t0
-x244 t0__5a0 ! t0__3t0 ! b_e_4t0 ! t0__4t0_a0
-x245 t0__8a0 ! t0__3t0 ! b_e_5t0 ! t0__2t0_a0
-x246 t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__3t0
-x247 t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__3t0
-x248 t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_8t0
-x249 t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__8t0_a0
-x250 t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__5t0
-x251 t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__5t0
-x252 t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
-x253 t0__64a0 ! t0__t0ma0 ! b_e_t0 ! t0__4t0_a0
-x254 t0__128a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
-x255 t0__256a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
-;1040 insts before this.
-ret_t0 MILLIRET
-e_t0 r__r_t0
-e_shift a1_ne_0_b_l2
- a0__256a0 ; a0 <<= 8 ***********
- MILLIRETN
-e_t0ma0 a1_ne_0_b_l0
- t0__t0ma0
- MILLIRET
- r__r_t0
-e_t0a0 a1_ne_0_b_l0
- t0__t0_a0
- MILLIRET
- r__r_t0
-e_t02a0 a1_ne_0_b_l0
- t0__t0_2a0
- MILLIRET
- r__r_t0
-e_t04a0 a1_ne_0_b_l0
- t0__t0_4a0
- MILLIRET
- r__r_t0
-e_2t0 a1_ne_0_b_l1
- r__r_2t0
- MILLIRETN
-e_2t0a0 a1_ne_0_b_l0
- t0__2t0_a0
- MILLIRET
- r__r_t0
-e2t04a0 t0__t0_2a0
- a1_ne_0_b_l1
- r__r_2t0
- MILLIRETN
-e_3t0 a1_ne_0_b_l0
- t0__3t0
- MILLIRET
- r__r_t0
-e_4t0 a1_ne_0_b_l1
- r__r_4t0
- MILLIRETN
-e_4t0a0 a1_ne_0_b_l0
- t0__4t0_a0
- MILLIRET
- r__r_t0
-e4t08a0 t0__t0_2a0
- a1_ne_0_b_l1
- r__r_4t0
- MILLIRETN
-e_5t0 a1_ne_0_b_l0
- t0__5t0
- MILLIRET
- r__r_t0
-e_8t0 a1_ne_0_b_l1
- r__r_8t0
- MILLIRETN
-e_8t0a0 a1_ne_0_b_l0
- t0__8t0_a0
- MILLIRET
- r__r_t0
-
- .procend
- .end
-
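Each xN entry in the deleted mulI.S (and in its expanded milli.S replacement above) composes the constant N from at most a few shNadd steps, each computing t = (t << k) + x in a single instruction. Taking the x154 recipe as an example, 154 decomposes as ((9*2+1)*4+1)*2; a C sketch under that reading (mul154_sketch is purely illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t shadd(uint32_t x, int k, uint32_t y) { return (x << k) + y; }

static uint32_t
mul154_sketch(uint32_t a)
{
	uint32_t t;

	t = shadd(a, 3, a);	/* sh3add: t = 9*a  */
	t = shadd(t, 1, a);	/* sh1add: t = 19*a */
	t = shadd(t, 2, a);	/* sh2add: t = 77*a */
	return t << 1;		/* e_2t0 epilogue: result += 2*t = 154*a */
}

int
main(void)
{
	printf("%u\n", mul154_sketch(3));	/* 462 */
	return 0;
}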
diff --git a/sys/lib/libkern/arch/hppa/mulU.S b/sys/lib/libkern/arch/hppa/mulU.S
deleted file mode 100644
index 10147a8bb42..00000000000
--- a/sys/lib/libkern/arch/hppa/mulU.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/* $OpenBSD: mulU.S,v 1.1 1998/06/23 18:56:54 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- *
- * Editor: Michael J. Eager
- *
- *
- * ROUTINE: $$mulU
- *
- *
- * DESCRIPTION:
- *
- * $$mulU multiplies two single word unsigned integers, giving a single
- * word result.
- *
- *
- * INPUT REGISTERS:
- *
- * arg0 = Operand 1
- * arg1 = Operand 2
- * r31 == return pc
- * sr0 == return space when called externally
- *
- *
- * OUTPUT REGISTERS:
- *
- * arg0 = undefined
- * arg1 = undefined
- * ret1 = result
- *
- * OTHER REGISTERS AFFECTED:
- *
- * r1 = undefined
- *
- * SIDE EFFECTS:
- *
- * Causes a trap under the following conditions: NONE
- * Changes memory at the following places: NONE
- *
- * PERMISSIBLE CONTEXT:
- *
- * Unwindable
- * Does not create a stack frame
- * Is usable for internal or external millicode
- *
- * DISCUSSION:
- *
- * Calls other millicode routines via r31: NONE
- * Calls other millicode routines: NONE
- */
-
-DEFINE(tmp,r1)
-DEFINE(retreg,ret1)
-
- .subspa $MILLICODE$
- .proc
- .callinfo millicode
-
- .entry
-$$mulU
- .export $$mulU,MILLICODE
-
- combt,<<= arg1,arg0,$loop
- add 0,0,retreg
- add 0,arg1,tmp
- add 0,arg0,arg1
- add 0,tmp,arg0
-$loop extru arg1,31,4,tmp
- blr tmp,0
- extru arg1,27,28,arg1
- comb,=,n arg1,0,$return
- b,n $shmcand
- movb,tr arg0,tmp,$addtmp
- comb,= arg1,0,$return
-$two b $endloop
- sh1add arg0,0,tmp
- b $endloop
- sh1add arg0,arg0,tmp
- b $endloop
- sh2add arg0,0,tmp
- b $endloop
- sh2add arg0,arg0,tmp
- b $fiftnx
- sh1add arg0,0,tmp
- movb,tr arg0,tmp,$fiftnx
- sh2add arg0,retreg,retreg
- b $endloop
- sh3add arg0,0,tmp
- b $endloop
- sh3add arg0,arg0,tmp
- b $two
- sh3add arg0,retreg,retreg
- movb,tr arg0,tmp,$fiftnx
- sh3add arg0,retreg,retreg
- b $fiftnx
- sh2add arg0,0,tmp
- sh2add arg0,arg0,tmp
- b $endloop
- sh3add arg0,retreg,retreg
- sh1add,tr arg0,0,tmp
- sh2add arg0,arg0,tmp
-$fiftnx sh1add tmp,tmp,tmp
-$endloop comb,= arg1,0,$return
-$addtmp add tmp,retreg,retreg
-$shmcand addb,tr arg0,arg0,$loop
- sh3add arg0,0,arg0
- .exit
-$return MILLIRET
- nop
- .procend
-#ifdef milliext
- .origin 0x00000100
-#endif
- .end
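$$mulU's loop consumes the multiplier four bits per pass: extru extracts a nibble, blr dispatches into the 16-entry table of shadd recipes, and the multiplicand is shifted left by four for the next round. A compact C rendering of that strategy (a sketch only; mulU_sketch is not part of the source):

#include <stdint.h>
#include <stdio.h>

static uint32_t
mulU_sketch(uint32_t a, uint32_t b)
{
	uint32_t r = 0;

	if (b > a) {			/* combt,<<=: consume the smaller operand */
		uint32_t t = a; a = b; b = t;
	}
	while (b != 0) {
		r += (b & 0xf) * a;	/* one of the 16 shadd recipes */
		b >>= 4;		/* extru: next nibble */
		a <<= 4;		/* $shmcand: *2 then *8, i.e. *16 */
	}
	return r;
}

int
main(void)
{
	printf("%u\n", mulU_sketch(123, 4567));	/* 561741 */
	return 0;
}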
diff --git a/sys/lib/libkern/arch/hppa/remI.S b/sys/lib/libkern/arch/hppa/remI.S
deleted file mode 100644
index cc7b92f779f..00000000000
--- a/sys/lib/libkern/arch/hppa/remI.S
+++ /dev/null
@@ -1,164 +0,0 @@
-/* $OpenBSD: remI.S,v 1.1 1998/06/23 18:56:54 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- *
- * Editor: Michael J. Eager
- *
- *
- * ROUTINE: $$remI
- *
- * DESCRIPTION:
- *
- * $$remI returns the remainder of the division of two signed 32-bit
- * integers. The sign of the remainder is the same as the sign of
- * the dividend.
- *
- *
- * INPUT REGISTERS:
- *
- * arg0 == dividend
- * arg1 == divisor
- * r31 == return pc
- * sr0 == return space when called externally
- *
- * OUTPUT REGISTERS:
- *
- * arg0 = destroyed
- * arg1 = destroyed
- * ret1 = remainder
- *
- * OTHER REGISTERS AFFECTED:
- *
- * r1 = undefined
- *
- * SIDE EFFECTS:
- *
- * Causes a trap under the following conditions: DIVIDE BY ZERO
- * Changes memory at the following places: NONE
- *
- * PERMISSIBLE CONTEXT:
- *
- * Unwindable
- * Does not create a stack frame
- * Is usable for internal or external millicode
- *
- * DISCUSSION:
- *
- * Calls other millicode routines via r31: NONE
- * Calls other millicode routines: NONE
- */
-
-DEFINE(tmp,r1)
-DEFINE(retreg,ret1)
-
- .subspa $MILLICODE$
- .proc
- .callinfo millicode
- .entry
-$$remI
- .export $$remI,MILLICODE
- addit,= 0,arg1,0 ; trap if div by zero
- add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
- sub 0,retreg,retreg ; make it positive
- sub 0,arg1, tmp ; clear carry,
- ; negate the divisor
- ds 0, tmp,0 ; set V-bit to the comple-
- ; ment of the divisor sign
- or 0,0, tmp ; clear tmp
- add retreg,retreg,retreg ; shift msb bit into carry
- ds tmp,arg1, tmp ; 1st divide step, if no carry
- ; out, msb of quotient = 0
- addc retreg,retreg,retreg ; shift retreg with/into carry
-tt1 ds tmp,arg1, tmp ; 2nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 3rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 4th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 5th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 6th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 7th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 8th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 9th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 10th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 11th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 12th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 13th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 14th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 15th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 16th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 17th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 18th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 19th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 20th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 21st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 22nd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 23rd divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 24th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 25th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 26th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 27th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 28th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 29th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 30th divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 31st divide step
- addc retreg,retreg,retreg ; shift retreg with/into carry
- ds tmp,arg1, tmp ; 32nd divide step,
- addc retreg,retreg,retreg ; shift last bit into retreg
- movb,>=,n tmp,retreg,finish ; branch if pos. tmp
- add,< arg1,0,0 ; if arg1 > 0, add arg1
- add,tr tmp,arg1,retreg ; for correcting remainder tmp
- sub tmp,arg1,retreg ; else add absolute value arg1
-finish add,>= arg0,0,0 ; set sign of remainder
- sub 0,retreg,retreg ; to sign of dividend
- .exit
- MILLIRET
- nop
- .procend
-#ifdef milliext
- .origin 0x00000200
-#endif
- .end
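The contract stated in remI.S's header — the remainder takes the sign of the dividend, so dividend == (dividend/divisor)*divisor + remainder holds under truncating division — can be checked against a small C model. remI_sketch is hypothetical: C's % operator stands in for the 32 ds steps and the movb/add correction, and the divide-by-zero trap is omitted.

#include <stdint.h>
#include <stdio.h>

static int32_t
remI_sketch(int32_t dividend, int32_t divisor)
{
	uint32_t n = dividend < 0 ? -(uint32_t)dividend : (uint32_t)dividend;
	uint32_t d = divisor  < 0 ? -(uint32_t)divisor  : (uint32_t)divisor;
	uint32_t r = n % d;	/* the asm derives this from the ds loop */

	/* finish: "set sign of remainder to sign of dividend" */
	return dividend < 0 ? -(int32_t)r : (int32_t)r;
}

int
main(void)
{
	printf("%d %d\n", remI_sketch(-100, 7), remI_sketch(100, -7)); /* -2 2 */
	return 0;
}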
diff --git a/sys/lib/libkern/arch/hppa/remU.S b/sys/lib/libkern/arch/hppa/remU.S
deleted file mode 100644
index b347a6d49f0..00000000000
--- a/sys/lib/libkern/arch/hppa/remU.S
+++ /dev/null
@@ -1,152 +0,0 @@
-/* $OpenBSD: remU.S,v 1.1 1998/06/23 18:56:54 mickey Exp $ */
-
-/*
- * (c) Copyright 1985 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-
-#include "prefix.h"
-
-/*
- * Author: Committee
- *
- *
- * ROUTINE: $$remU
- *
- * Single precision divide for remainder with unsigned binary integers.
- *
- * The remainder must be dividend-(dividend/divisor)*divisor.
- * Divide by zero is trapped.
- *
- * INPUT REGISTERS:
- * arg0 == dividend
- * arg1 == divisor
- * r31 == return pc
- * sr0 == return space when called externally
- *
- * OUTPUT REGISTERS:
- * arg0 = undefined
- * arg1 = undefined
- * ret1 = remainder
- *
- * OTHER REGISTERS AFFECTED:
- * r1 = undefined
- *
- * SIDE EFFECTS:
- * Causes a trap under the following conditions: DIVIDE BY ZERO
- * Changes memory at the following places: NONE
- *
- * PERMISSIBLE CONTEXT:
- * Unwindable.
- * Does not create a stack frame.
- * Suitable for internal or external millicode.
- * Assumes the special millicode register conventions.
- *
- * DISCUSSION:
- * Calls other millicode routines using r31: NONE
- * Calls other millicode routines: NONE
- */
-
-DEFINE(temp,r1)
-DEFINE(rmndr,ret1) ; r29
- .subspa $MILLICODE$
- .export $$remU,millicode
- .proc
- .callinfo millicode
-
- .entry
-$$remU
-
- comib,>=,n 0,arg1,special_case
- sub r0,arg1,rmndr ; clear carry, negate the divisor
- ds r0,rmndr,r0 ; set V-bit to 1
- add arg0,arg0,temp ; shift msb bit into carry
- ds r0,arg1,rmndr ; 1st divide step, if no carry
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 2nd divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 3rd divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 4th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 5th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 6th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 7th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 8th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 9th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 10th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 11th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 12th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 13th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 14th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 15th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 16th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 17th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 18th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 19th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 20th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 21st divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 22nd divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 23rd divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 24th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 25th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 26th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 27th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 28th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 29th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 30th divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 31st divide step
- addc temp,temp,temp ; shift temp with/into carry
- ds rmndr,arg1,rmndr ; 32nd divide step,
- comiclr,<= 0,rmndr,r0
- add rmndr,arg1,rmndr ; correction
- MILLIRETN
- nop
-; Putting >= on the last DS and deleting COMICLR does not work!
-
-
-special_case
- addit,= 0,arg1,r0 ; trap on div by zero
- sub,>>= arg0,arg1,rmndr
- copy arg0,rmndr
- .exit
- MILLIRETN
- nop
- .procend
- .end
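remU.S's special_case path is worth noting: it is taken when the divisor is zero or has its top bit set, cases the negate-and-ds loop above it cannot handle. After the zero trap, such a divisor exceeds half the unsigned range, so the quotient can only be 0 or 1 and a single conditional subtract yields the remainder. A one-function C sketch of that path (remU_special_sketch is illustrative, with the ADDIT trap omitted):

#include <stdint.h>
#include <stdio.h>

static uint32_t
remU_special_sketch(uint32_t dividend, uint32_t divisor)
{
	/* assumes divisor >= 0x80000000u, as on the special_case path */
	return dividend >= divisor ? dividend - divisor : dividend;
}

int
main(void)
{
	printf("%u\n", remU_special_sketch(0xfffffff0u, 0x80000000u));
	/* prints 2147483632 (0x7ffffff0) */
	return 0;
}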