summary refs log tree commit diff
path: root/sys/arch
diff options
context:
space:
mode:
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/vax/conf/Makefile.vax   |    8
-rw-r--r--  sys/arch/vax/vax/emulate.s       | 1214
-rw-r--r--  sys/arch/vax/vax/ka49.c          |    4
-rw-r--r--  sys/arch/vax/vax/ka53.c          |    4
-rw-r--r--  sys/arch/vax/vax/ka680.c         |    4
-rw-r--r--  sys/arch/vax/vax/locore.S        |  609
-rw-r--r--  sys/arch/vax/vax/machdep.c       |   10
-rw-r--r--  sys/arch/vax/vax/udiv.s          |   24
-rw-r--r--  sys/arch/vax/vax/unimpl_emul.s   |  596
-rw-r--r--  sys/arch/vax/vax/urem.s          |   26
10 files changed, 1248 insertions, 1251 deletions
diff --git a/sys/arch/vax/conf/Makefile.vax b/sys/arch/vax/conf/Makefile.vax
index 1194a66a61f..a20261c72c2 100644
--- a/sys/arch/vax/conf/Makefile.vax
+++ b/sys/arch/vax/conf/Makefile.vax
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile.vax,v 1.68 2013/06/23 20:33:52 miod Exp $
+# $OpenBSD: Makefile.vax,v 1.69 2013/07/05 21:11:57 miod Exp $
# For instructions on building kernels consult the config(8) and options(4)
# manual pages.
@@ -39,7 +39,7 @@ COPTS?= -O2
CFLAGS= ${DEBUG} ${CWARNFLAGS} ${CMACHFLAGS} ${COPTS} ${PIPE}
AFLAGS= -D_LOCORE -x assembler-with-cpp -traditional-cpp \
${CWARNFLAGS} ${CMACHFLAGS}
-LINKFLAGS= -N -Ttext 80000000 -e start
+LINKFLAGS= -N -Ttext 80000000 -e __start --warn-common
.if ${IDENT:M-DDDB_STRUCT}
DB_STRUCTINFO= db_structinfo.h
@@ -78,13 +78,13 @@ SYSTEM_LD_TAIL= @${SIZE} $@; chmod 755 $@
DEBUG?=
.if ${DEBUG} == "-g"
LINKFLAGS+= -X
-STRIPFLAGS= -g
+STRIPFLAGS= -g -x
SYSTEM_LD_TAIL+=; \
echo mv $@ $@.gdb; rm -f $@.gdb; mv $@ $@.gdb; \
echo ${STRIP} ${STRIPFLAGS} -o $@ $@.gdb; \
${STRIP} ${STRIPFLAGS} -o $@ $@.gdb
.else
-LINKFLAGS+= -S
+LINKFLAGS+= -S -x
.endif
%LOAD
diff --git a/sys/arch/vax/vax/emulate.s b/sys/arch/vax/vax/emulate.s
index 47b2b0e54b2..8626606bf8b 100644
--- a/sys/arch/vax/vax/emulate.s
+++ b/sys/arch/vax/vax/emulate.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: emulate.s,v 1.4 2003/06/02 23:27:58 millert Exp $ */
+/* $OpenBSD: emulate.s,v 1.5 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: emulate.s,v 1.2 1997/03/15 16:14:25 ragge Exp $ */
/*
* Copyright (c) 1986, 1987 The Regents of the University of California.
@@ -34,6 +34,8 @@
* @(#)emulate.s 7.5 (Berkeley) 6/28/90
*/
+#include <machine/asm.h>
+
/*
* String instruction emulation - MicroVAX only. These routines are called
* from locore.s when an "emulate" fault occurs on the MicroVAX. They are
@@ -62,270 +64,260 @@
*/
#define return rsb
-#define savepsl movpsl 4(sp)
-#define setpsl(reg) movl reg,4(sp)
-#define overflowpsl movl $2,4(sp)
-#define arg1 12(sp)
-#define arg2 16(sp)
-#define arg3 20(sp)
-#define arg4 24(sp)
-#define arg5 28(sp)
-#define arg6 32(sp)
-#define argub(num,reg) movzbl 8+4*num(sp),reg
-#define arguw(num,reg) movzwl 8+4*num(sp),reg
-#define argul(num,reg) movl 8+4*num(sp),reg
-#define argb(num,reg) cvtbl 8+4*num(sp),reg
-#define argw(num,reg) cvtwl 8+4*num(sp),reg
-#define argl(num,reg) movl 8+4*num(sp),reg
-#define toarg(reg,num) movl reg,8+4*num(sp)
+#define savepsl movpsl 4(%sp)
+#define setpsl(reg) movl reg,4(%sp)
+#define overflowpsl movl $2,4(%sp)
+#define arg1 12(%sp)
+#define arg2 16(%sp)
+#define arg3 20(%sp)
+#define arg4 24(%sp)
+#define arg5 28(%sp)
+#define arg6 32(%sp)
+#define argub(num,reg) movzbl 8+4*num(%sp),reg
+#define arguw(num,reg) movzwl 8+4*num(%sp),reg
+#define argul(num,reg) movl 8+4*num(%sp),reg
+#define argb(num,reg) cvtbl 8+4*num(%sp),reg
+#define argw(num,reg) cvtwl 8+4*num(%sp),reg
+#define argl(num,reg) movl 8+4*num(%sp),reg
+#define toarg(reg,num) movl reg,8+4*num(%sp)
.text
- .align 1
- .globl _EMcrc
-_EMcrc:
- argl(1,r11) # (1) table address == r11
- argl(2,r0) # (2) initial crc == r0
- argl(4,r3) # (4) source address == r3
- arguw(3,r2) # (3) source length == r2
+ _ALIGN_TEXT
+ALTENTRY(EMcrc)
+ argl(1,%r11) # (1) table address == r11
+ argl(2,%r0) # (2) initial crc == r0
+ argl(4,%r3) # (4) source address == r3
+ arguw(3,%r2) # (3) source length == r2
jeql Lcrc_out
Lcrc_loop:
- xorb2 (r3)+,r0
- extzv $0,$4,r0,r10
- extzv $4,$28,r0,r1
- xorl3 r1,(r11)[r10],r0
- extzv $0,$4,r0,r10
- extzv $4,$28,r0,r1
- xorl3 r1,(r11)[r10],r0
- sobgtr r2,Lcrc_loop
- tstl r0
+ xorb2 (%r3)+,%r0
+ extzv $0,$4,%r0,%r10
+ extzv $4,$28,%r0,%r1
+ xorl3 %r1,(%r11)[%r10],%r0
+ extzv $0,$4,%r0,%r10
+ extzv $4,$28,%r0,%r1
+ xorl3 %r1,(%r11)[%r10],%r0
+ sobgtr %r2,Lcrc_loop
+ tstl %r0
Lcrc_out:
savepsl
- clrl r1
+ clrl %r1
return
- .align 1
- .globl _EMmovtc
-_EMmovtc:
- arguw(1,r0) # (1) source length == r0
- argl(2,r1) # (2) source address == r1
- argub(3,r11) # (3) fill character == r11
- argl(4,r3) # (4) table address == r3
- argl(6,r5) # (6) destination address == r5
- arguw(5,r4) # (5) destination length == r4
+ _ALIGN_TEXT
+ALTENTRY(EMmovtc)
+ arguw(1,%r0) # (1) source length == r0
+ argl(2,%r1) # (2) source address == r1
+ argub(3,%r11) # (3) fill character == r11
+ argl(4,%r3) # (4) table address == r3
+ argl(6,%r5) # (6) destination address == r5
+ arguw(5,%r4) # (5) destination length == r4
jeql Lmovtc_out
Lmovtc_loop:
- tstl r0
+ tstl %r0
jeql Lmovtc_2loop
- movzbl (r1)+,r2
- movb (r3)[r2],(r5)+
- decl r0
- sobgtr r4,Lmovtc_loop
+ movzbl (%r1)+,%r2
+ movb (%r3)[%r2],(%r5)+
+ decl %r0
+ sobgtr %r4,Lmovtc_loop
jbr Lmovtc_out
Lmovtc_2loop:
- movb r11,(r5)+
- sobgtr r4,Lmovtc_2loop
+ movb %r11,(%r5)+
+ sobgtr %r4,Lmovtc_2loop
Lmovtc_out:
- cmpl r4,r0
+ cmpl %r4,%r0
savepsl
- clrl r2
+ clrl %r2
return
- .align 1
- .globl _EMmovtuc
-_EMmovtuc:
- arguw(1,r0) # (1) source length == r0
- argl(2,r1) # (2) source address == r1
- argub(3,r11) # (3) escape character == r11
- argl(4,r3) # (4) table address == r3
- argl(6,r5) # (6) destination address == r5
- arguw(5,r4) # (5) destination length == r4
+ _ALIGN_TEXT
+ALTENTRY(EMmovtuc)
+ arguw(1,%r0) # (1) source length == r0
+ argl(2,%r1) # (2) source address == r1
+ argub(3,%r11) # (3) escape character == r11
+ argl(4,%r3) # (4) table address == r3
+ argl(6,%r5) # (6) destination address == r5
+ arguw(5,%r4) # (5) destination length == r4
jeql Lmovtuc_out
Lmovtuc_loop:
- tstl r0
+ tstl %r0
jeql Lmovtuc_out
- movzbl (r1),r2
- movzbl (r3)[r2],r2
- cmpl r2,r11
+ movzbl (%r1),%r2
+ movzbl (%r3)[%r2],%r2
+ cmpl %r2,%r11
jeql Lmovtuc_out
- movzbl (r1)+,r2
- movb (r3)[r2],(r5)+
- decl r0
- sobgtr r4,Lmovtuc_loop
+ movzbl (%r1)+,%r2
+ movb (%r3)[%r2],(%r5)+
+ decl %r0
+ sobgtr %r4,Lmovtuc_loop
Lmovtuc_out:
- cmpl r4,r0
+ cmpl %r4,%r0
savepsl
- clrl r2
+ clrl %r2
return
- .align 1
- .globl _EMmatchc
-_EMmatchc:
- argl(2,r10) # (2) substring address == r10
- arguw(3,r2) # (3) source length == r2
- argl(4,r3) # (4) source address == r3
- arguw(1,r11) # (1) substring length == r11
+ _ALIGN_TEXT
+ALTENTRY(EMmatchc)
+ argl(2,%r10) # (2) substring address == r10
+ arguw(3,%r2) # (3) source length == r2
+ argl(4,%r3) # (4) source address == r3
+ arguw(1,%r11) # (1) substring length == r11
jeql Lmatchc_out # temp source address == r1
- addl2 r10,r11 # temp substring address == r0
- tstl r2
+ addl2 %r10,%r11 # temp substring address == r0
+ tstl %r2
jeql Lmatchc_out
Lmatchc_loop:
- cmpb (r10),(r3)
+ cmpb (%r10),(%r3)
jneq Lmatchc_fail
- movl r3,r1
- movl r10,r0
+ movl %r3,%r1
+ movl %r10,%r0
Lmatchc_2loop:
- cmpl r0,r11
+ cmpl %r0,%r11
jeql Lmatchc_succ
- cmpb (r0)+,(r1)+
+ cmpb (%r0)+,(%r1)+
jeql Lmatchc_2loop
Lmatchc_fail:
- incl r3
- sobgtr r2,Lmatchc_loop
- movl r10,r1
- subl3 r10,r11,r0
+ incl %r3
+ sobgtr %r2,Lmatchc_loop
+ movl %r10,%r1
+ subl3 %r10,%r11,%r0
jbr Lmatchc_out
Lmatchc_succ:
- movl r1,r3
- movl r11,r1
- clrl r0
+ movl %r1,%r3
+ movl %r11,%r1
+ clrl %r0
Lmatchc_out:
savepsl
return
- .align 1
- .globl _EMspanc
-_EMspanc:
- argl(2,r1) # (2) string address == r1
- argub(4,r2) # (4) character-mask == r2
- argl(3,r3) # (3) table address == r3
- arguw(1,r0) # (1) string length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMspanc)
+ argl(2,%r1) # (2) string address == r1
+ argub(4,%r2) # (4) character-mask == r2
+ argl(3,%r3) # (3) table address == r3
+ arguw(1,%r0) # (1) string length == r0
jeql Lspanc_out
Lspanc_loop:
- movzbl (r1),r11
- mcomb (r3)[r11],r11
- bicb3 r11,r2,r11
+ movzbl (%r1),%r11
+ mcomb (%r3)[%r11],%r11
+ bicb3 %r11,%r2,%r11
jeql Lspanc_out
- incl r1
- sobgtr r0,Lspanc_loop
+ incl %r1
+ sobgtr %r0,Lspanc_loop
Lspanc_out:
savepsl
- clrl r2
+ clrl %r2
return
- .align 1
- .globl _EMscanc
-_EMscanc:
- argl(2,r1) # (2) string address == r1
- argub(4,r2) # (4) character-mask == r2
- argl(3,r3) # (3) table address == r3
- arguw(1,r0) # (1) string length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMscanc)
+ argl(2,%r1) # (2) string address == r1
+ argub(4,%r2) # (4) character-mask == r2
+ argl(3,%r3) # (3) table address == r3
+ arguw(1,%r0) # (1) string length == r0
jeql Lscanc_out
Lscanc_loop:
- movzbl (r1),r11
- mcomb (r3)[r11],r11
- bicb3 r11,r2,r11
+ movzbl (%r1),%r11
+ mcomb (%r3)[%r11],%r11
+ bicb3 %r11,%r2,%r11
jneq Lscanc_out
- incl r1
- sobgtr r0,Lscanc_loop
+ incl %r1
+ sobgtr %r0,Lscanc_loop
Lscanc_out:
savepsl
- clrl r2
+ clrl %r2
return
- .align 1
- .globl _EMskpc
-_EMskpc:
- argub(1,r11) # (1) character == r11
- argl(3,r1) # (3) string address == r1
- arguw(2,r0) # (2) string length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMskpc)
+ argub(1,%r11) # (1) character == r11
+ argl(3,%r1) # (3) string address == r1
+ arguw(2,%r0) # (2) string length == r0
jeql Lskpc_out # forget zero length strings
Lskpc_loop:
- cmpb (r1),r11
+ cmpb (%r1),%r11
jneq Lskpc_out
- incl r1
- sobgtr r0,Lskpc_loop
+ incl %r1
+ sobgtr %r0,Lskpc_loop
Lskpc_out:
- tstl r0 # be sure of condition codes
+ tstl %r0 # be sure of condition codes
savepsl
return
- .align 1
- .globl _EMlocc
-_EMlocc:
- argub(1,r11) # (1) character == r11
- argl(3,r1) # (3) string address == r1
- arguw(2,r0) # (2) string length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMlocc)
+ argub(1,%r11) # (1) character == r11
+ argl(3,%r1) # (3) string address == r1
+ arguw(2,%r0) # (2) string length == r0
jeql Lskpc_out # forget zero length strings
Llocc_loop:
- cmpb (r1),r11
+ cmpb (%r1),%r11
jeql Llocc_out
- incl r1
- sobgtr r0,Llocc_loop
+ incl %r1
+ sobgtr %r0,Llocc_loop
Llocc_out:
- tstl r0 # be sure of condition codes
+ tstl %r0 # be sure of condition codes
savepsl
return
- .align 1
- .globl _EMcmpc3
-_EMcmpc3:
- argl(2,r1) # (2) string1 address == r1
- argl(3,r3) # (3) string2 address == r3
- arguw(1,r0) # (1) strings length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMcmpc3)
+ argl(2,%r1) # (2) string1 address == r1
+ argl(3,%r3) # (3) string2 address == r3
+ arguw(1,%r0) # (1) strings length == r0
jeql Lcmpc3_out
Lcmpc3_loop:
- cmpb (r1),(r3)
+ cmpb (%r1),(%r3)
jneq Lcmpc3_out
- incl r1
- incl r3
- sobgtr r0,Lcmpc3_loop
+ incl %r1
+ incl %r3
+ sobgtr %r0,Lcmpc3_loop
Lcmpc3_out:
savepsl
- movl r0,r2
+ movl %r0,%r2
return
- .align 1
- .globl _EMcmpc5
-_EMcmpc5:
- argl(2,r1) # (2) string1 address == r1
- argub(3,r11) # (1) fill character == r11
- arguw(4,r2) # (1) string2 length == r2
- argl(5,r3) # (3) string2 address == r3
- arguw(1,r0) # (1) string1 length == r0
+ _ALIGN_TEXT
+ALTENTRY(EMcmpc5)
+ argl(2,%r1) # (2) string1 address == r1
+ argub(3,%r11) # (1) fill character == r11
+ arguw(4,%r2) # (1) string2 length == r2
+ argl(5,%r3) # (3) string2 address == r3
+ arguw(1,%r0) # (1) string1 length == r0
jeql Lcmpc5_str2
Lcmpc5_loop:
- tstl r2
+ tstl %r2
jeql Lcmpc5_str1loop
- cmpb (r1),(r3)
+ cmpb (%r1),(%r3)
jneq Lcmpc5_out
- incl r1
- incl r3
- decl r2
- sobgtr r0,Lcmpc5_loop
+ incl %r1
+ incl %r3
+ decl %r2
+ sobgtr %r0,Lcmpc5_loop
Lcmpc5_str2:
- tstl r2
+ tstl %r2
jeql Lcmpc5_out
Lcmpc5_str2loop:
- cmpb r11,(r3)
+ cmpb %r11,(%r3)
jneq Lcmpc5_out
- incl r3
- sobgtr r2,Lcmpc5_str2loop
+ incl %r3
+ sobgtr %r2,Lcmpc5_str2loop
jbr Lcmpc5_out
Lcmpc5_str1loop:
- cmpb (r1),r11
+ cmpb (%r1),%r11
jneq Lcmpc5_out
- incl r1
- sobgtr r0,Lcmpc5_str1loop
+ incl %r1
+ sobgtr %r0,Lcmpc5_str1loop
Lcmpc5_out:
savepsl
return
@@ -340,283 +332,281 @@ Lcmpc5_out:
#define NEGATIVEalt $11
- .align 1
- .globl _EMaddp4
-_EMaddp4:
- toarg(r9,6) # save register r9 in arg6 spot
- arguw(1,r11) # (1) source length == r11
- argl(2,r10) # (2) source address == r10
- arguw(3,r9) # (3) destination length == r9
- argl(4,r3) # (4) destination address == r3
- ashl $-1,r11,r11
- addl2 r11,r10 # source address of LSNibble
- incl r11 # source length is in bytes
- ashl $-1,r9,r9
- addl2 r9,r3 # r3 = destination address of LSNibble
- incl r9 # destination length is in bytes
- toarg(r3,5)
- extzv $0,$4,(r3),r2 # set standard +/- indicators in destination
- cmpl r2,NEGATIVE
+ _ALIGN_TEXT
+ALTENTRY(EMaddp4)
+ toarg(%r9,6) # save register r9 in arg6 spot
+ arguw(1,%r11) # (1) source length == r11
+ argl(2,%r10) # (2) source address == r10
+ arguw(3,%r9) # (3) destination length == r9
+ argl(4,%r3) # (4) destination address == r3
+ ashl $-1,%r11,%r11
+ addl2 %r11,%r10 # source address of LSNibble
+ incl %r11 # source length is in bytes
+ ashl $-1,%r9,%r9
+ addl2 %r9,%r3 # r3 = destination address of LSNibble
+ incl %r9 # destination length is in bytes
+ toarg(%r3,5)
+ extzv $0,$4,(%r3),%r2 # set standard +/- indicators in destination
+ cmpl %r2,NEGATIVE
jeql L112
- cmpl r2,NEGATIVEalt
+ cmpl %r2,NEGATIVEalt
jeql L111
- insv POSITIVE,$0,$4,(r3)
+ insv POSITIVE,$0,$4,(%r3)
jbr L112
L111:
- insv NEGATIVE,$0,$4,(r3)
+ insv NEGATIVE,$0,$4,(%r3)
L112:
- extzv $0,$4,(r10),r2 # r2 = standard +/- of source
- cmpl r2,NEGATIVE
+ extzv $0,$4,(%r10),%r2 # r2 = standard +/- of source
+ cmpl %r2,NEGATIVE
jeql L114
- cmpl r2,NEGATIVEalt
+ cmpl %r2,NEGATIVEalt
jeql L113
- movl POSITIVE,r2
+ movl POSITIVE,%r2
jbr L114
L113:
- movl NEGATIVE,r2
+ movl NEGATIVE,%r2
L114:
- cmpl r11,r9 # if source is longer than destination
+ cmpl %r11,%r9 # if source is longer than destination
jleq L115
- movl r9,r11 # set source length == destination length
+ movl %r9,%r11 # set source length == destination length
L115:
- extzv $4,$4,(r3),r9 # r9 = LSDigit of destination
- extzv $4,$4,(r10),r1 # r1 = LSDigit of source
- extzv $0,$4,(r3),r0
- cmpl r0,r2 # if signs of operands are not equal
+ extzv $4,$4,(%r3),%r9 # r9 = LSDigit of destination
+ extzv $4,$4,(%r10),%r1 # r1 = LSDigit of source
+ extzv $0,$4,(%r3),%r0
+ cmpl %r0,%r2 # if signs of operands are not equal
jeql Laddp4_same # do a subtraction
- clrl r2 # r2 is non-zero if result is non-zero
- subl2 r1,r9 # r9 = "addition" of operands high nibble
+ clrl %r2 # r2 is non-zero if result is non-zero
+ subl2 %r1,%r9 # r9 = "addition" of operands high nibble
jbr L119 # jump into addition loop
Laddp4_diff_loop:
- decl r3
- extzv $0,$4,(r3),r0
- addl2 r0,r1 # r1 = carry + next (low) nibble of source
- decl r10
- extzv $0,$4,(r10),r0
- subl2 r0,r1 # r1 -= next (low) nibble of destination
+ decl %r3
+ extzv $0,$4,(%r3),%r0
+ addl2 %r0,%r1 # r1 = carry + next (low) nibble of source
+ decl %r10
+ extzv $0,$4,(%r10),%r0
+ subl2 %r0,%r1 # r1 -= next (low) nibble of destination
jgeq L121 # if negative result
- mnegl $1,r9 # r9 == carry = -1
- addl2 $10,r1 # r1 == result += 10
+ mnegl $1,%r9 # r9 == carry = -1
+ addl2 $10,%r1 # r1 == result += 10
jbr L122 # else
L121:
- clrl r9 # r9 == carry = 0
+ clrl %r9 # r9 == carry = 0
L122:
- insv r1,$0,$4,(r3) # store result low nibble
- bisl2 r1,r2
- extzv $4,$4,(r3),r0
- addl2 r0,r9 # r9 = carry + next (high) nibble of source
- extzv $4,$4,(r10),r0
- subl2 r0,r9 # r9 -= next (high) nibble of destination
+ insv %r1,$0,$4,(%r3) # store result low nibble
+ bisl2 %r1,%r2
+ extzv $4,$4,(%r3),%r0
+ addl2 %r0,%r9 # r9 = carry + next (high) nibble of source
+ extzv $4,$4,(%r10),%r0
+ subl2 %r0,%r9 # r9 -= next (high) nibble of destination
L119:
jgeq L117 # if negative result
- mnegl $1,r1 # r1 == carry = -1
- addl2 $10,r9 # r9 == result += 10
+ mnegl $1,%r1 # r1 == carry = -1
+ addl2 $10,%r9 # r9 == result += 10
jbr L118 # else
L117:
- clrl r1 # r1 == carry = 0
+ clrl %r1 # r1 == carry = 0
L118:
- insv r9,$4,$4,(r3) # store result high nibble
- bisl2 r9,r2 # r2 is non-zero if result is non-zero
- decl r11 # while (--source length)
+ insv %r9,$4,$4,(%r3) # store result high nibble
+ bisl2 %r9,%r2 # r2 is non-zero if result is non-zero
+ decl %r11 # while (--source length)
jneq Laddp4_diff_loop
- argl(4,r10) # r10 = address of destination MSNibble
+ argl(4,%r10) # r10 = address of destination MSNibble
jbr Laddp4_diff_carry
Laddp4_diff_carlop:
- decl r3
- extzv $0,$4,(r3),r0
- addl2 r0,r1 # r1 == carry += next (low) nibble
+ decl %r3
+ extzv $0,$4,(%r3),%r0
+ addl2 %r0,%r1 # r1 == carry += next (low) nibble
jgeq L127 # if less than zero
- movl r1,r9 # r9 == carry (must be -1)
- movl $9,r1 # r1 == result = 9
+ movl %r1,%r9 # r9 == carry (must be -1)
+ movl $9,%r1 # r1 == result = 9
jbr L128
L127: # else
- clrl r9 # r9 == carry = 0
+ clrl %r9 # r9 == carry = 0
L128:
- insv r1,$0,$4,(r3) # store result
- bisl2 r1,r2
- extzv $4,$4,(r3),r0
- addl2 r0,r9 # r9 == carry += next (high) nibble
+ insv %r1,$0,$4,(%r3) # store result
+ bisl2 %r1,%r2
+ extzv $4,$4,(%r3),%r0
+ addl2 %r0,%r9 # r9 == carry += next (high) nibble
jgeq L129 # if less than zero
- movl r9,r1 # r1 == carry (must be -1)
- movl $9,r9 # r9 == result = 9
+ movl %r9,%r1 # r1 == carry (must be -1)
+ movl $9,%r9 # r9 == result = 9
jbr L130
L129:
- clrl r1
+ clrl %r1
L130:
- insv r9,$4,$4,(r3) # store result
- bisl2 r9,r2
+ insv %r9,$4,$4,(%r3) # store result
+ bisl2 %r9,%r2
Laddp4_diff_carry:
- cmpl r3,r10
+ cmpl %r3,%r10
jneq Laddp4_diff_carlop
- tstl r1 # if carry out of MSN then fix up result
+ tstl %r1 # if carry out of MSN then fix up result
jeql Laddp4_add_done
- argl(5,r3) # r3 == address of LSN of destination
- extzv $0,$4,(r3),r0
- cmpl r0,NEGATIVE # switch sign of result
+ argl(5,%r3) # r3 == address of LSN of destination
+ extzv $0,$4,(%r3),%r0
+ cmpl %r0,NEGATIVE # switch sign of result
jneq L132
- insv POSITIVE,$0,$4,(r3)
+ insv POSITIVE,$0,$4,(%r3)
jbr L133
L132:
- insv NEGATIVE,$0,$4,(r3)
+ insv NEGATIVE,$0,$4,(%r3)
L133:
- extzv $4,$4,(r3),r0 # normalize result (carry out of MSN into LSN)
- subl3 r0,$10,r9 # r9 = 10 - destination LSNibble
+ extzv $4,$4,(%r3),%r0 # normalize result (carry out of MSN into LSN)
+ subl3 %r0,$10,%r9 # r9 = 10 - destination LSNibble
jbr L134
L137:
- movl $9,r1
+ movl $9,%r1
Laddp4_diff_norm:
- insv r9,$4,$4,(r3)
- cmpl r3,r10 # while (not at MSNibble)
+ insv %r9,$4,$4,(%r3)
+ cmpl %r3,%r10 # while (not at MSNibble)
jeql Laddp4_add_done
- decl r3
- extzv $0,$4,(r3),r0 # low nibble = (9 + carry) - low nibble
- subl2 r0,r1
- cmpl r1,$9
+ decl %r3
+ extzv $0,$4,(%r3),%r0 # low nibble = (9 + carry) - low nibble
+ subl2 %r0,%r1
+ cmpl %r1,$9
jleq L135
- clrl r1
- movl $10,r9
+ clrl %r1
+ movl $10,%r9
jbr L136
L135:
- movl $9,r9
+ movl $9,%r9
L136:
- insv r1,$0,$4,(r3)
- extzv $4,$4,(r3),r0 # high nibble = (9 + carry) - high nibble
- subl2 r0,r9
+ insv %r1,$0,$4,(%r3)
+ extzv $4,$4,(%r3),%r0 # high nibble = (9 + carry) - high nibble
+ subl2 %r0,%r9
L134:
- cmpl r9,$9
+ cmpl %r9,$9
jleq L137
- clrl r9
- movl $10,r1
+ clrl %r9
+ movl $10,%r1
jbr Laddp4_diff_norm
Laddp4_same: # operands are of the same sign
- clrl r2
- addl2 r1,r9
+ clrl %r2
+ addl2 %r1,%r9
jbr L139
Laddp4_same_loop:
- decl r3
- extzv $0,$4,(r3),r0
- addl2 r0,r1 # r1 == carry += next (low) nibble of dest
- decl r10
- extzv $0,$4,(r10),r0
- addl2 r0,r1 # r1 += next (low) nibble of source
- cmpl r1,$9 # if result > 9
+ decl %r3
+ extzv $0,$4,(%r3),%r0
+ addl2 %r0,%r1 # r1 == carry += next (low) nibble of dest
+ decl %r10
+ extzv $0,$4,(%r10),%r0
+ addl2 %r0,%r1 # r1 += next (low) nibble of source
+ cmpl %r1,$9 # if result > 9
jleq L141
- movl $1,r9 # r9 == carry = 1
- subl2 $10,r1 # r1 == result -= 10
+ movl $1,%r9 # r9 == carry = 1
+ subl2 $10,%r1 # r1 == result -= 10
jbr L142
L141: # else
- clrl r9 # r9 == carry = 0
+ clrl %r9 # r9 == carry = 0
L142:
- insv r1,$0,$4,(r3) # store result
- bisl2 r1,r2
- extzv $4,$4,(r10),r0
- addl2 r0,r9 # ditto for high nibble
- extzv $4,$4,(r3),r0
- addl2 r0,r9
+ insv %r1,$0,$4,(%r3) # store result
+ bisl2 %r1,%r2
+ extzv $4,$4,(%r10),%r0
+ addl2 %r0,%r9 # ditto for high nibble
+ extzv $4,$4,(%r3),%r0
+ addl2 %r0,%r9
L139:
- cmpl r9,$9
+ cmpl %r9,$9
jleq L143
- movl $1,r1
- subl2 $10,r9
+ movl $1,%r1
+ subl2 $10,%r9
jbr L144
L143:
- clrl r1
+ clrl %r1
L144:
- insv r9,$4,$4,(r3)
- bisl2 r9,r2
- sobgtr r11,Laddp4_same_loop # while (--source length)
- argl(4,r10) # r10 = destination address of MSNibble
+ insv %r9,$4,$4,(%r3)
+ bisl2 %r9,%r2
+ sobgtr %r11,Laddp4_same_loop # while (--source length)
+ argl(4,%r10) # r10 = destination address of MSNibble
jbr Laddp4_same_carry
Laddp4_same_cloop:
- decl r3
- extzv $0,$4,(r3),r0 # propagate carry up to MSNibble of destination
- addl2 r0,r1
- cmpl r1,$10
+ decl %r3
+ extzv $0,$4,(%r3),%r0 # propagate carry up to MSNibble of destination
+ addl2 %r0,%r1
+ cmpl %r1,$10
jneq L147
- movl $1,r9
- clrl r1
+ movl $1,%r9
+ clrl %r1
jbr L148
L147:
- clrl r9
+ clrl %r9
L148:
- insv r1,$0,$4,(r3)
- bisl2 r1,r2
- extzv $4,$4,(r3),r0
- addl2 r0,r9
- cmpl r9,$10
+ insv %r1,$0,$4,(%r3)
+ bisl2 %r1,%r2
+ extzv $4,$4,(%r3),%r0
+ addl2 %r0,%r9
+ cmpl %r9,$10
jneq L149
- movl $1,r1
- clrl r9
+ movl $1,%r1
+ clrl %r9
jbr L150
L149:
- clrl r1
+ clrl %r1
L150:
- insv r9,$4,$4,(r3)
- bisl2 r9,r2
+ insv %r9,$4,$4,(%r3)
+ bisl2 %r9,%r2
Laddp4_same_carry:
- cmpl r3,r10
+ cmpl %r3,%r10
jneq Laddp4_same_cloop
Laddp4_add_done:
- argl(5,r3) # r3 = destination address of LSNibble
- tstl r2 # if zero result
+ argl(5,%r3) # r3 = destination address of LSNibble
+ tstl %r2 # if zero result
jneq L151
savepsl # remember that for condition codes
- insv POSITIVE,$0,$4,(r3) # make sure sign of result is positive
+ insv POSITIVE,$0,$4,(%r3) # make sure sign of result is positive
jbr Laddp4_out
L151: # else
- extzv $0,$4,(r3),r0
- cmpl r0,NEGATIVE # if result is negative
+ extzv $0,$4,(%r3),%r0
+ cmpl %r0,NEGATIVE # if result is negative
jneq Laddp4_out
- mnegl r2,r2 # remember THAT in Cond Codes
+ mnegl %r2,%r2 # remember THAT in Cond Codes
savepsl
Laddp4_out:
- argl(4,r3)
- argl(2,r1)
- clrl r0
- clrl r2
- argl(6,r9) # restore r9 from stack
+ argl(4,%r3)
+ argl(2,%r1)
+ clrl %r0
+ clrl %r2
+ argl(6,%r9) # restore r9 from stack
return
- .align 1
- .globl _EMmovp
-_EMmovp:
- arguw(1,r11) # (1) string length == r11
- argl(2,r10) # (1) source address == r10
- argl(3,r3) # (1) destination address == r3
+ _ALIGN_TEXT
+ALTENTRY(EMmovp)
+ arguw(1,%r11) # (1) string length == r11
+ argl(2,%r10) # (1) source address == r10
+ argl(3,%r3) # (1) destination address == r3
# we will need arg2 and arg3 later
- clrl r2 # r2 == non-zero if source is non-zero
- ashl $-1,r11,r11 # length is number of bytes, not nibbles
+ clrl %r2 # r2 == non-zero if source is non-zero
+ ashl $-1,%r11,%r11 # length is number of bytes, not nibbles
jeql Lmovp_zlen
Lmovp_copy:
- bisb2 (r10),r2 # keep track of non-zero source
- movb (r10)+,(r3)+ # move two nibbles
- sobgtr r11,Lmovp_copy # loop for length of source
+ bisb2 (%r10),%r2 # keep track of non-zero source
+ movb (%r10)+,(%r3)+ # move two nibbles
+ sobgtr %r11,Lmovp_copy # loop for length of source
Lmovp_zlen:
- extzv $4,$4,(r10),r0 # look at least significant nibble
- bisl2 r0,r2
- extzv $0,$4,(r10),r0 # check sign nibble
- cmpl r0,NEGATIVEalt
+ extzv $4,$4,(%r10),%r0 # look at least significant nibble
+ bisl2 %r0,%r2
+ extzv $0,$4,(%r10),%r0 # check sign nibble
+ cmpl %r0,NEGATIVEalt
jeql Lmovp_neg
- cmpl r0,NEGATIVE
+ cmpl %r0,NEGATIVE
jneq Lmovp_pos
Lmovp_neg: # source was negative
- mnegl r2,r2
+ mnegl %r2,%r2
Lmovp_pos:
- tstl r2 # set condition codes
+ tstl %r2 # set condition codes
savepsl
jeql Lmovp_zero
- movb (r10),(r3) # move last byte if non-zero result
+ movb (%r10),(%r3) # move last byte if non-zero result
jbr Lmovp_out
Lmovp_zero:
- movb POSITIVE,(r3) # otherwise, make result zero and positive
+ movb POSITIVE,(%r3) # otherwise, make result zero and positive
Lmovp_out:
- clrl r0
- argl(2,r1)
- clrl r2
- argl(3,r3)
+ clrl %r0
+ argl(2,%r1)
+ clrl %r2
+ argl(3,%r3)
return
@@ -654,52 +644,51 @@ Lmovp_out:
*/
#define SIGNIFBIT $0
-#define setsignif bisl2 $1,r4
-#define clsignif bicl2 $1,r4
+#define setsignif bisl2 $1,%r4
+#define clsignif bicl2 $1,%r4
#define OVERFLOWBIT $1
-#define setoverflow bisl2 $2,r4
-#define cloverflow bicl2 $2,r4
+#define setoverflow bisl2 $2,%r4
+#define cloverflow bicl2 $2,%r4
#define ZEROBIT $2
-#define setzero bisl2 $4,r4
-#define clzero bicl2 $4,r4
+#define setzero bisl2 $4,%r4
+#define clzero bicl2 $4,%r4
#define NEGATIVEBIT $3
-#define setnegative bisl2 $8,r4
-#define clnegative bicl2 $8,r4
-#define putfill movb arg5,(r5)+
+#define setnegative bisl2 $8,%r4
+#define clnegative bicl2 $8,%r4
+#define putfill movb arg5,(%r5)+
#define setfill(reg) movb reg,arg5
-#define putsign movb arg6,(r5)+
+#define putsign movb arg6,(%r5)+
#define setsign(reg) movb reg,arg6
- .align 1
- .globl _EMeditpc
-_EMeditpc:
- arguw(1,r11) # (1) source length == r11
- argl(2,r10) # (2) source address == r10
- argl(3,r3) # (3) pattern address == r3
- argl(4,r5) # (4) destination address == r5
+ _ALIGN_TEXT
+ALTENTRY(EMeditpc)
+ arguw(1,%r11) # (1) source length == r11
+ argl(2,%r10) # (2) source address == r10
+ argl(3,%r3) # (3) pattern address == r3
+ argl(4,%r5) # (4) destination address == r5
/* # we will need arg1 and arg2 later */
/* # arg5 and arg6 are used for fill and sign - r0 is free */
setfill($32) # fill character is ' '
setsign($32) # sign character is ' '
- clrl r4 # clear flags
- ashl $-1,r11,r11 # source length / 2
- addl3 r11,r10,r2
- extzv $4,$4,(r2),r1 # r1 == least significant nibble of source
+ clrl %r4 # clear flags
+ ashl $-1,%r11,%r11 # source length / 2
+ addl3 %r11,%r10,%r2
+ extzv $4,$4,(%r2),%r1 # r1 == least significant nibble of source
L169:
- cmpl r2,r10
+ cmpl %r2,%r10
jeql L170
- tstb -(r2) # loop over source packed decimal number
+ tstb -(%r2) # loop over source packed decimal number
jeql L169
- incl r1 # r1 is non-zero if source is non-zero
+ incl %r1 # r1 is non-zero if source is non-zero
L170:
- addl3 r11,r10,r2
- tstl r1
+ addl3 %r11,%r10,%r2
+ tstl %r1
jeql L172 # source is zero - set flags
- extzv $0,$4,(r2),r11
- cmpl r11,NEGATIVEalt
+ extzv $0,$4,(%r2),%r11
+ cmpl %r11,NEGATIVEalt
jeql L9998 # source is negative - set sign and flags
- cmpl r11,NEGATIVE
+ cmpl %r11,NEGATIVE
jneq L175
L9998:
setnegative
@@ -708,18 +697,18 @@ L9998:
L172:
setzero
L175:
- arguw(1,r2) # (1) source length == r2
+ arguw(1,%r2) # (1) source length == r2
Ledit_case:
- movzbl (r3)+,r11 # get next edit command (pattern)
- cmpl r11,$128
+ movzbl (%r3)+,%r11 # get next edit command (pattern)
+ cmpl %r11,$128
jlss L180
- extzv $0,$4,r11,r1 # command has a "count" arg - into r1
- ashl $-4,r11,r11 # and shift over
+ extzv $0,$4,%r11,%r1 # command has a "count" arg - into r1
+ ashl $-4,%r11,%r11 # and shift over
L180:
- jbc $6,r11,L181 # "shift" those commands > 64 to 16 and up
- subl2 $48,r11
+ jbc $6,%r11,L181 # "shift" those commands > 64 to 16 and up
+ subl2 $48,%r11
L181:
- caseb r11,$0,$0x18 # "do" the command
+ caseb %r11,$0,$0x18 # "do" the command
# r11 is available for use, r1 has "count" in it
Lcaseb_label:
.word Le_end - Lcaseb_label # 00
@@ -747,16 +736,16 @@ Lcaseb_label:
.word Le_replace_sign - Lcaseb_label # 46
.word Le_adjust_input - Lcaseb_label # 47
Le_end:
- arguw(1,r0)
- argl(2,r1)
- clrl r2
- decl r3
- setpsl(r4)
- clrl r4
+ arguw(1,%r0)
+ argl(2,%r1)
+ clrl %r2
+ decl %r3
+ setpsl(%r4)
+ clrl %r4
return
Le_end_float:
- jbs SIGNIFBIT,r4,Ledit_case # if significance not set
+ jbs SIGNIFBIT,%r4,Ledit_case # if significance not set
putsign # drop in the sign
# fall into...
Le_set_signif:
@@ -772,139 +761,138 @@ Le_store_sign:
jbr Ledit_case
Le_load_fill:
- setfill((r3)+)
+ setfill((%r3)+)
jbr Ledit_case
Le_load_plus:
- jbs NEGATIVEBIT,r4,Lpattern_inc # if non-negative
+ jbs NEGATIVEBIT,%r4,Lpattern_inc # if non-negative
# fall into...
Le_load_sign:
- setsign((r3)+)
+ setsign((%r3)+)
jbr Ledit_case
Le_load_minus:
- jbs NEGATIVEBIT,r4,Le_load_sign # if negative load the sign
- incl r3 # else increment pattern
+ jbs NEGATIVEBIT,%r4,Le_load_sign # if negative load the sign
+ incl %r3 # else increment pattern
jbr Ledit_case
Le_insert:
- jbc SIGNIFBIT,r4,L196 # if significance set, put next byte
- movb (r3)+,(r5)+
+ jbc SIGNIFBIT,%r4,L196 # if significance set, put next byte
+ movb (%r3)+,(%r5)+
jbr Ledit_case
L196: # else put in fill character
putfill
# and throw away character in pattern
Le_replace_sign: # we dont do anything with
Lpattern_inc: # replace sign cause we dont
- incl r3 # get negative zero
+ incl %r3 # get negative zero
jbr Ledit_case
Le_blank_zero:
- jbc ZEROBIT,r4,Lpattern_inc # if zero
- movzbl (r3)+,r11 # next byte is a count
+ jbc ZEROBIT,%r4,Lpattern_inc # if zero
+ movzbl (%r3)+,%r11 # next byte is a count
jeql Ledit_case
- subl2 r11,r5 # to back up over output and replace
+ subl2 %r11,%r5 # to back up over output and replace
L200:
putfill # with fill character
- sobgtr r11,L200
+ sobgtr %r11,L200
jbr Ledit_case
Le_adjust_input:
- movzbl (r3)+,r0 # get count of nibbles from pattern
- subl3 r2,r0,r11
+ movzbl (%r3)+,%r0 # get count of nibbles from pattern
+ subl3 %r2,%r0,%r11
jgeq Ledit_case # if length of source is > this number
L204: # discard digits in source
- jlbc r2,L206 # use low bit of length to choose nibble
- bitb $0xf0,(r10) # high nibble
+ jlbc %r2,L206 # use low bit of length to choose nibble
+ bitb $0xf0,(%r10) # high nibble
jeql L208
setsignif # set significance and overflow if
setoverflow # wasted digit is non-zero
jbr L208
L206:
- bitb $0xf,(r10) # low nibble
+ bitb $0xf,(%r10) # low nibble
jeql L209
setsignif
setoverflow
L209:
- incl r10 # increment to next byte
+ incl %r10 # increment to next byte
L208:
- decl r2 # decrement source length
- incl r11 # continue till were out of excess
+ decl %r2 # decrement source length
+ incl %r11 # continue till were out of excess
jlss L204
jbr Ledit_case
Le_fill:
- tstl r1 # put (count in r1) fill characters
+ tstl %r1 # put (count in r1) fill characters
jeql Ledit_case
Le_fill_loop:
putfill
- sobgtr r1,Le_fill_loop
+ sobgtr %r1,Le_fill_loop
jbr Ledit_case
Le_move:
- tstl r1 # move (count in r1) characters
+ tstl %r1 # move (count in r1) characters
jeql Ledit_case # from source to destination
L214:
- jlbc r2,L215 # read a nibble
- extzv $4,$4,(r10),r11
+ jlbc %r2,L215 # read a nibble
+ extzv $4,$4,(%r10),%r11
jbr L216
L215:
- extzv $0,$4,(r10),r11
- incl r10
+ extzv $0,$4,(%r10),%r11
+ incl %r10
L216:
- decl r2 # source length CAN go negative here...
- tstl r11
+ decl %r2 # source length CAN go negative here...
+ tstl %r11
jeql L218 # if non-zero
setsignif # set significance
L218:
- jbc SIGNIFBIT,r4,L219 # if significance set
- addb3 $48,r11,(r5)+ # put 0 + digit into destination
+ jbc SIGNIFBIT,%r4,L219 # if significance set
+ addb3 $48,%r11,(%r5)+ # put 0 + digit into destination
jbr L220
L219: # else put fill character
putfill
L220:
- sobgtr r1,L214
+ sobgtr %r1,L214
jbr Ledit_case
Le_float: # move with floating sign character
- tstl r1
+ tstl %r1
jeql Ledit_case
L221:
- jlbc r2,L222
- extzv $4,$4,(r10),r11
+ jlbc %r2,L222
+ extzv $4,$4,(%r10),%r11
jbr L223
L222:
- extzv $0,$4,(r10),r11
- incl r10
+ extzv $0,$4,(%r10),%r11
+ incl %r10
L223:
- decl r2 # source length CAN go negative here...
- tstl r11
+ decl %r2 # source length CAN go negative here...
+ tstl %r11
jeql L225
- jbs SIGNIFBIT,r4,L226
+ jbs SIGNIFBIT,%r4,L226
putsign
L226:
setsignif
L225:
- jbc SIGNIFBIT,r4,L227
- addb3 $48,r11,(r5)+
+ jbc SIGNIFBIT,%r4,L227
+ addb3 $48,%r11,(%r5)+
jbr L228
L227:
putfill
L228:
- sobgtr r1,L221
+ sobgtr %r1,L221
jbr Ledit_case
- .align 1
- .globl _EMashp
-_EMashp:
- argb(1,r11) # (1) scale (number to shift) == r11
- arguw(2,r10) # (2) source length == r10
- argl(3,r1) # (3) source address == r1
- argub(4,r2) # (4) rounding factor == r2
- arguw(5,r3) # (5) destination length == r3
- toarg(r6,3)/* # arg3 holds register 6 from caller */
- argl(6,r6) # (6) destination address == r6
+ _ALIGN_TEXT
+ALTENTRY(EMashp)
+ argb(1,%r11) # (1) scale (number to shift) == r11
+ arguw(2,%r10) # (2) source length == r10
+ argl(3,%r1) # (3) source address == r1
+ argub(4,%r2) # (4) rounding factor == r2
+ arguw(5,%r3) # (5) destination length == r3
+ toarg(%r6,3)/* # arg3 holds register 6 from caller */
+ argl(6,%r6) # (6) destination address == r6
/*
# we need arg6 for later
# arg1 is used for temporary storage
@@ -912,296 +900,283 @@ _EMashp:
# arg4 is used as general storage
# arg5 is used as general storage
*/
- ashl $-1,r3,r0 # destination length is number of bytes
- addl2 r0,r6 # destination address == least sig nibble
- toarg(r6,1) # save in arg1 spot for later
- ashl $-1,r10,r0
- addl2 r0,r1 # source address == least sig nibble
- extzv $0,$4,(r1),r0 # determine sign of source
- cmpl r0,NEGATIVEalt
+ ashl $-1,%r3,%r0 # destination length is number of bytes
+ addl2 %r0,%r6 # destination address == least sig nibble
+ toarg(%r6,1) # save in arg1 spot for later
+ ashl $-1,%r10,%r0
+ addl2 %r0,%r1 # source address == least sig nibble
+ extzv $0,$4,(%r1),%r0 # determine sign of source
+ cmpl %r0,NEGATIVEalt
jeql Lashp_neg
- cmpl r0,NEGATIVE
+ cmpl %r0,NEGATIVE
jeql Lashp_neg
- movb POSITIVE,(r6)
+ movb POSITIVE,(%r6)
jbr L245
Lashp_neg:
- movb NEGATIVE,(r6)
+ movb NEGATIVE,(%r6)
L245:
clrl arg2 # arg2 is 1 if dstlen is even, 0 if odd
- blbs r3,L246
+ blbs %r3,L246
incl arg2
- bisl2 $1,r3 # r3<0> counts digits going into destination
+ bisl2 $1,%r3 # r3<0> counts digits going into destination
L246: # and is flip-flop for which nibble to
- tstl r11 # write in destination (1 = high, 0 = low)
+ tstl %r11 # write in destination (1 = high, 0 = low)
jgeq Lashp_left # (it must start out odd)
- addl2 r11,r10 # scale is negative (right shift)
+ addl2 %r11,%r10 # scale is negative (right shift)
jgeq Lashp_right
- clrl r10 # test for shifting whole number out
+ clrl %r10 # test for shifting whole number out
jbr Lashp_setround
Lashp_right:
- divl3 $2,r11,r0
- addl2 r0,r1 # source address == MSNibble to be shifted off
- jlbc r11,L249
- extzv $4,$4,(r1),r0
- addl2 r0,r2 # round = last nibble to be shifted off + round
+ divl3 $2,%r11,%r0
+ addl2 %r0,%r1 # source address == MSNibble to be shifted off
+ jlbc %r11,L249
+ extzv $4,$4,(%r1),%r0
+ addl2 %r0,%r2 # round = last nibble to be shifted off + round
jbr Lashp_setround
L249:
- extzv $0,$4,(r1),r0
- addl2 r0,r2 # round = last nibble to be shifted off + round
+ extzv $0,$4,(%r1),%r0
+ addl2 %r0,%r2 # round = last nibble to be shifted off + round
Lashp_setround: # r11<0> now is flip-flop for which nibble to
- incl r11 # read from source (1 == high, 0 == low)
- cmpl r2,$9 # set rounding factor to one if nibble shifted
+ incl %r11 # read from source (1 == high, 0 == low)
+ cmpl %r2,$9 # set rounding factor to one if nibble shifted
jleq Lashp_noround # off + round argument was 10 or greater
- movl $1,r2
+ movl $1,%r2
jbr Lashp_shift
Lashp_zloop:
- jlbs r3,L257 # dont need to clear high nibble twice
- clrb -(r6) # clear low (and high) nib of next byte in dest
+ jlbs %r3,L257 # dont need to clear high nibble twice
+ clrb -(%r6) # clear low (and high) nib of next byte in dest
L257:
- sobgtr r3,L258 # move to next nibble in destination, but
- incl r3 # dont go beyond the end.
+ sobgtr %r3,L258 # move to next nibble in destination, but
+ incl %r3 # dont go beyond the end.
L258:
- decl r11
+ decl %r11
Lashp_left: # while scale is positive
jneq Lashp_zloop
- incl r11 # r11<0> is flip-plop ... (incl sets it to one)
+ incl %r11 # r11<0> is flip-plop ... (incl sets it to one)
Lashp_noround:
- clrl r2 # no more rounding
+ clrl %r2 # no more rounding
Lashp_shift:
clrl arg4 # arg4 will be used for result condition codes
- tstl r10
+ tstl %r10
jeql Lashp_round
Lashp_shloop:
- jlbc r11,L260
- extzv $4,$4,(r1),r0
+ jlbc %r11,L260
+ extzv $4,$4,(%r1),%r0
jbr L261
L260:
- decl r1
- extzv $0,$4,(r1),r0
+ decl %r1
+ extzv $0,$4,(%r1),%r0
L261:
- incl r11 # flip the source nibble flip/flop
- addl2 r0,r2 # round += next nibble
- cmpl r2,$10 # if round == 10
+ incl %r11 # flip the source nibble flip/flop
+ addl2 %r0,%r2 # round += next nibble
+ cmpl %r2,$10 # if round == 10
jneq L262
clrl arg5 # then result = 0 and round = 1
- movl $1,r2
+ movl $1,%r2
jbr L263
L262: # else
- movl r2,arg5 # store result and round = 0
- clrl r2
+ movl %r2,arg5 # store result and round = 0
+ clrl %r2
L263:
bisl2 arg5,arg4 # remember if result was nonzero in arg4
- decl r3 # move to next nibble early to check
- cmpl r3,arg2 # if weve moved passed destination limits
+ decl %r3 # move to next nibble early to check
+ cmpl %r3,arg2 # if weve moved passed destination limits
jgeq Lashp_noovfl # test the result for possible overflow
- movl arg2,r3 # ignore zero nibbles
+ movl arg2,%r3 # ignore zero nibbles
tstl arg5 # if the nibble was non-zero, overflow
jeql L265
jbr Lashp_overfl
Lashp_noovfl: # else
- jlbs r3,L264
- insv arg5,$4,$4,(r6) # put the result into destination (high or low)
+ jlbs %r3,L264
+ insv arg5,$4,$4,(%r6) # put the result into destination (high or low)
jbr L265
L264:
- movb arg5,-(r6)
+ movb arg5,-(%r6)
L265:
- sobgtr r10,Lashp_shloop # loop for length of source
+ sobgtr %r10,Lashp_shloop # loop for length of source
Lashp_round:
- tstl r2 # take care of round out of high nibble
+ tstl %r2 # take care of round out of high nibble
jeql Lashp_zeroround
- decl r3
- cmpl r3,arg2 # if weve moved passed destination limits
+ decl %r3
+ cmpl %r3,arg2 # if weve moved passed destination limits
jlss Lashp_overfl # then overflow
- jlbs r3,L266
- insv arg5,$4,$4,(r6) # put the round into destination (high or low)
+ jlbs %r3,L266
+ insv arg5,$4,$4,(%r6) # put the round into destination (high or low)
jbr Lashp_zeroround
L266:
- movb arg5,-(r6)
+ movb arg5,-(%r6)
Lashp_zeroround:
- argl(1,r10) # r10 = address of destination LSNibble
- argl(6,r3) # r3 = address of destination MSNibble
- movl arg4,r11 # r11 = non-zero if destination == non-zero
+ argl(1,%r10) # r10 = address of destination LSNibble
+ argl(6,%r3) # r3 = address of destination MSNibble
+ movl arg4,%r11 # r11 = non-zero if destination == non-zero
savepsl
jbr L267
Lashp_zerofill:
- clrb -(r6) # fill up MSNs of destination with zeros
+ clrb -(%r6) # fill up MSNs of destination with zeros
L267:
- cmpl r3,r6
+ cmpl %r3,%r6
jneq Lashp_zerofill
- extzv $0,$4,(r10),r0 # test for negative result
- cmpl r0,NEGATIVE
+ extzv $0,$4,(%r10),%r0 # test for negative result
+ cmpl %r0,NEGATIVE
jneq Lashp_out
- mnegl r11,r11
+ mnegl %r11,%r11
savepsl
jneq Lashp_out # turn -0 into 0
- insv POSITIVE,$0,$4,(r10)
+ insv POSITIVE,$0,$4,(%r10)
Lashp_out:
- clrl r0
- argl(3,r6) # restore r6 from stack
+ clrl %r0
+ argl(3,%r6) # restore r6 from stack
return
Lashp_overfl: # do overflow
- clrl r2
+ clrl %r2
overflowpsl
jbr Lashp_out
- .align 1
- .globl _EMcvtlp
-_EMcvtlp:
- arguw(2,r10) # (2) destination length == r10
- argl(3,r3) # (3) destination address == r3
- ashl $-1,r10,r10
- addl2 r10,r3 # destination address points to Least Sig byte
- incl r10 # length is # of bytes, not nibbles
- argl(1,r11) # (1) source == r11
+ _ALIGN_TEXT
+ALTENTRY(EMcvtlp)
+ arguw(2,%r10) # (2) destination length == r10
+ argl(3,%r3) # (3) destination address == r3
+ ashl $-1,%r10,%r10
+ addl2 %r10,%r3 # destination address points to Least Sig byte
+ incl %r10 # length is # of bytes, not nibbles
+ argl(1,%r11) # (1) source == r11
savepsl
jgeq Lcvtlp_pos
- movb NEGATIVE,(r3) # source is negative
- divl3 $10,r11,r0
- mull3 $10,r0,r1
- subl3 r11,r1,r2 # r2 = source mod 10
- mnegl r0,r11 # source = -(source / 10)
+ movb NEGATIVE,(%r3) # source is negative
+ divl3 $10,%r11,%r0
+ mull3 $10,%r0,%r1
+ subl3 %r11,%r1,%r2 # r2 = source mod 10
+ mnegl %r0,%r11 # source = -(source / 10)
jbr Lcvtlp_cvt
Lcvtlp_pos:
- movb POSITIVE,(r3) # source is non-negative
- divl3 $10,r11,r0
- mull3 $10,r0,r1
- subl3 r1,r11,r2 # r2 = source mod 10
- movl r0,r11 # source = source / 10
+ movb POSITIVE,(%r3) # source is non-negative
+ divl3 $10,%r11,%r0
+ mull3 $10,%r0,%r1
+ subl3 %r1,%r11,%r2 # r2 = source mod 10
+ movl %r0,%r11 # source = source / 10
Lcvtlp_cvt:
- insv r2,$4,$4,(r3) # store least significant digit
- tstl r11
+ insv %r2,$4,$4,(%r3) # store least significant digit
+ tstl %r11
jeql Lcvtlp_zloop
Lcvtlp_loop: # while source is non-zero
- decl r10 # and for length of destination ...
+ decl %r10 # and for length of destination ...
jeql Lcvtlp_over
- divl3 $10,r11,r1 # r1 = source / 10
- mull3 $10,r1,r0
- subl2 r0,r11 # source = source mod 10
- movb r11,-(r3) # store low "nibble" in next significant byte
- divl3 $10,r1,r11 # source = r1 / 10
- mull3 $10,r11,r0
- subl2 r0,r1 # r1 = source mod 10
- insv r1,$4,$4,(r3) # store high nibble
- tstl r11
+ divl3 $10,%r11,%r1 # r1 = source / 10
+ mull3 $10,%r1,%r0
+ subl2 %r0,%r11 # source = source mod 10
+ movb %r11,-(%r3) # store low "nibble" in next significant byte
+ divl3 $10,%r1,%r11 # source = r1 / 10
+ mull3 $10,%r11,%r0
+ subl2 %r0,%r1 # r1 = source mod 10
+ insv %r1,$4,$4,(%r3) # store high nibble
+ tstl %r11
jneq Lcvtlp_loop # quit if source becomes zero
Lcvtlp_zloop: # fill any remaining bytes with zeros
- decl r10
+ decl %r10
jeql Lcvtlp_out
- clrb -(r3)
+ clrb -(%r3)
jbr Lcvtlp_zloop
Lcvtlp_over:
overflowpsl
Lcvtlp_out:
- clrl r1 # r0 is already zero
- clrl r2
+ clrl %r1 # r0 is already zero
+ clrl %r2
return
- .align 1
- .globl _EMcvtpl
-_EMcvtpl:
- arguw(1,r11) # (1) source length == r11
- argl(2,r10) # (2) source address == r10
- clrl r3 # r3 == destination
- movl r10,r1 # r1 set up now for return
- ashl $-1,r11,r11 # source length is number of bytes
+ _ALIGN_TEXT
+ALTENTRY(EMcvtpl)
+ arguw(1,%r11) # (1) source length == r11
+ argl(2,%r10) # (2) source address == r10
+ clrl %r3 # r3 == destination
+ movl %r10,%r1 # r1 set up now for return
+ ashl $-1,%r11,%r11 # source length is number of bytes
jeql Lcvtpl_zero
Lcvtpl_loop: # for source length
- mull2 $10,r3 # destination *= 10
- extzv $4,$4,(r10),r0
- addl2 r0,r3 # destination += high nibble
- mull2 $10,r3 # destination *= 10
- extzv $0,$4,(r10),r0
- addl2 r0,r3 # destination += low nibble
- incl r10
- sobgtr r11,Lcvtpl_loop
+ mull2 $10,%r3 # destination *= 10
+ extzv $4,$4,(%r10),%r0
+ addl2 %r0,%r3 # destination += high nibble
+ mull2 $10,%r3 # destination *= 10
+ extzv $0,$4,(%r10),%r0
+ addl2 %r0,%r3 # destination += low nibble
+ incl %r10
+ sobgtr %r11,Lcvtpl_loop
Lcvtpl_zero: # least significant byte
- mull2 $10,r3
- extzv $4,$4,(r10),r0
- addl2 r0,r3 # dest = 10 * dest + high nibble
+ mull2 $10,%r3
+ extzv $4,$4,(%r10),%r0
+ addl2 %r0,%r3 # dest = 10 * dest + high nibble
savepsl
- extzv $0,$4,(r10),r2 # test sign nibble
- cmpl r2,NEGATIVE
+ extzv $0,$4,(%r10),%r2 # test sign nibble
+ cmpl %r2,NEGATIVE
jeql Lcvtpl_neg
- cmpl r2,NEGATIVEalt
+ cmpl %r2,NEGATIVEalt
jneq Lcvtpl_out
Lcvtpl_neg: # source was negative - negate destination
- mnegl r3,r3
+ mnegl %r3,%r3
savepsl
Lcvtpl_out:
- toarg(r3,3)
- clrl r0
- clrl r2
- clrl r3
+ toarg(%r3,3)
+ clrl %r0
+ clrl %r2
+ clrl %r3
return
- .align 1
- .globl _EMcvtps
-_EMcvtps:
+ _ALIGN_TEXT
+ALTENTRY(EMcvtps)
return
- .align 1
- .globl _EMcvtsp
-_EMcvtsp:
+ _ALIGN_TEXT
+ALTENTRY(EMcvtsp)
return
- .align 1
- .globl _EMaddp6
-_EMaddp6:
+ _ALIGN_TEXT
+ALTENTRY(EMaddp6)
return
- .align 1
- .globl _EMsubp4
-_EMsubp4:
+ _ALIGN_TEXT
+ALTENTRY(EMsubp4)
return
- .align 1
- .globl _EMsubp6
-_EMsubp6:
+ _ALIGN_TEXT
+ALTENTRY(EMsubp6)
return
- .align 1
- .globl _EMcvtpt
-_EMcvtpt:
+ _ALIGN_TEXT
+ALTENTRY(EMcvtpt)
return
- .align 1
- .globl _EMmulp
-_EMmulp:
+ _ALIGN_TEXT
+ALTENTRY(EMmulp)
return
- .align 1
- .globl _EMcvttp
-_EMcvttp:
+ _ALIGN_TEXT
+ALTENTRY(EMcvttp)
return
- .align 1
- .globl _EMdivp
-_EMdivp:
+ _ALIGN_TEXT
+ALTENTRY(EMdivp)
return
- .align 1
- .globl _EMcmpp3
-_EMcmpp3:
+ _ALIGN_TEXT
+ALTENTRY(EMcmpp3)
return
- .align 1
- .globl _EMcmpp4
-_EMcmpp4:
+ _ALIGN_TEXT
+ALTENTRY(EMcmpp4)
return
@@ -1213,9 +1188,9 @@ _EMcmpp4:
*/
#define EMUTABLE 0x43
#define NOEMULATE .long noemulate
-#define EMULATE(a) .long _EM/**/a
- .globl _emJUMPtable
-_emJUMPtable:
+#define EMULATE(a) .long _C_LABEL(__CONCAT(EM,a))
+ .globl _C_LABEL(emJUMPtable)
+_C_LABEL(emJUMPtable):
/* f8 */ EMULATE(ashp); EMULATE(cvtlp); NOEMULATE; NOEMULATE
/* fc */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
/* 00 */ NOEMULATE; NOEMULATE; NOEMULATE; NOEMULATE
@@ -1270,21 +1245,22 @@ _emJUMPtable:
*/
SCBVEC(emulate):
- movl r11,32(sp) # save register r11 in unused operand
- movl r10,36(sp) # save register r10 in unused operand
- cvtbl (sp),r10 # get opcode
- addl2 $8,r10 # shift negative opcodes
- subl3 r10,$EMUTABLE,r11 # forget it if opcode is out of range
+ movl %r11,32(%sp) # save register r11 in unused operand
+ movl %r10,36(%sp) # save register r10 in unused operand
+ cvtbl (%sp),%r10 # get opcode
+ addl2 $8,%r10 # shift negative opcodes
+ subl3 %r10,$EMUTABLE,%r11 # forget it if opcode is out of range
bcs noemulate
- movl _emJUMPtable[r10],r10 # call appropriate emulation routine
- jsb (r10) # routines put return values into regs 0-5
- movl 32(sp),r11 # restore register r11
- movl 36(sp),r10 # restore register r10
- insv (sp),$0,$4,44(sp) # and condition codes in Opcode spot
- addl2 $40,sp # adjust stack for return
+ movl _C_LABEL(emJUMPtable)[%r10],%r10
+ # call appropriate emulation routine
+ jsb (%r10) # routines put return values into regs 0-5
+ movl 32(%sp),%r11 # restore register r11
+ movl 36(%sp),%r10 # restore register r10
+ insv (%sp),$0,$4,44(%sp) # and condition codes in Opcode spot
+ addl2 $40,%sp # adjust stack for return
rei
noemulate:
- addl2 $48,sp # adjust stack for
+ addl2 $48,%sp # adjust stack for
.word 0xffff # "reserved instruction fault"
SCBVEC(emulateFPD):
.word 0xffff # "reserved instruction fault"
diff --git a/sys/arch/vax/vax/ka49.c b/sys/arch/vax/vax/ka49.c
index fdc3784da54..87e66c1344f 100644
--- a/sys/arch/vax/vax/ka49.c
+++ b/sys/arch/vax/vax/ka49.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ka49.c,v 1.11 2011/09/15 00:48:24 miod Exp $ */
+/* $OpenBSD: ka49.c,v 1.12 2013/07/05 21:11:57 miod Exp $ */
/*
* Copyright (c) 1999 Ludd, University of Lule}, Sweden.
* All rights reserved.
@@ -181,7 +181,7 @@ ka49_cache_enable()
mtpr(0, start);
/* Flush the pipes (via REI) */
- asm("movpsl -(sp); movab 1f,-(sp); rei; 1:;");
+ asm("movpsl -(%sp); movab 1f,-(%sp); rei; 1:;");
/* Enable primary cache */
mtpr(PCCTL_P_EN|PCCTL_I_EN|PCCTL_D_EN, PR_PCCTL);
diff --git a/sys/arch/vax/vax/ka53.c b/sys/arch/vax/vax/ka53.c
index dae90c4774a..b7ff8169377 100644
--- a/sys/arch/vax/vax/ka53.c
+++ b/sys/arch/vax/vax/ka53.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ka53.c,v 1.10 2011/09/19 21:53:02 miod Exp $ */
+/* $OpenBSD: ka53.c,v 1.11 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: ka53.c,v 1.2 2000/06/04 02:19:27 matt Exp $ */
/*
* Copyright (c) 2002 Hugh Graham.
@@ -193,7 +193,7 @@ ka53_cache_enable()
mtpr(0, start);
/* Flush the pipes (via REI) */
- asm("movpsl -(sp); movab 1f,-(sp); rei; 1:;");
+ asm("movpsl -(%sp); movab 1f,-(%sp); rei; 1:;");
/* Enable primary cache */
mtpr(PCCTL_P_EN|PCCTL_I_EN|PCCTL_D_EN, PR_PCCTL);
diff --git a/sys/arch/vax/vax/ka680.c b/sys/arch/vax/vax/ka680.c
index 37086955341..1ba28b59e44 100644
--- a/sys/arch/vax/vax/ka680.c
+++ b/sys/arch/vax/vax/ka680.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ka680.c,v 1.15 2011/09/19 21:53:02 miod Exp $ */
+/* $OpenBSD: ka680.c,v 1.16 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: ka680.c,v 1.3 2001/01/28 21:01:53 ragge Exp $ */
/*
* Copyright (c) 2002 Hugh Graham.
@@ -225,7 +225,7 @@ ka680_cache_enable()
mtpr(0, start);
/* Flush the pipes (via REI) */
- asm("movpsl -(sp); movab 1f,-(sp); rei; 1:;");
+ asm("movpsl -(%sp); movab 1f,-(%sp); rei; 1:;");
/* Enable primary cache */
mtpr(PCCTL_P_EN|PCCTL_I_EN|PCCTL_D_EN, PR_PCCTL);
diff --git a/sys/arch/vax/vax/locore.S b/sys/arch/vax/vax/locore.S
index 683e3c32514..b2c951ec725 100644
--- a/sys/arch/vax/vax/locore.S
+++ b/sys/arch/vax/vax/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.6 2011/09/27 15:15:35 miod Exp $ */
+/* $OpenBSD: locore.S,v 1.7 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: intvec.s,v 1.39 1999/06/28 08:20:48 itojun Exp $ */
/*
@@ -36,24 +36,30 @@
#include <machine/asm.h>
-#define JSBENTRY(x) \
- .text; _ALIGN_TEXT; .globl x; x:
+#define JSBENTRY(x) \
+ .text; \
+ _ALIGN_TEXT; \
+ .globl x; \
+ .type x,@function; \
+x:
+
+#define SCBENTRY(x) JSBENTRY(__CONCAT(X,x))
#define TRAPCALL(namn, typ) \
-JSBENTRY(namn) ; \
+SCBENTRY(namn) ; \
pushl $0 ; \
pushl $typ ; \
jbr trap
#define TRAPARGC(namn, typ) \
-JSBENTRY(namn) ; \
+SCBENTRY(namn) ; \
pushl $typ ; \
jbr trap
#define FASTINTR(namn, rutin) \
-JSBENTRY(namn) ; \
+SCBENTRY(namn) ; \
pushr $0x3f ; \
- calls $0,_/**/rutin ; \
+ calls $0,_C_LABEL(rutin); \
popr $0x3f ; \
rei
@@ -64,13 +70,14 @@ JSBENTRY(namn) ; \
#define ISTACK 1
#define NOVEC .long 0
#define INTVEC(label,stack) \
- .long label+stack;
- .text
+ .long __CONCAT(X,label) + stack;
+
+ .text
- .globl _kernbase, _rpb, _kernel_text
- .set _kernel_text,KERNBASE
-_kernbase:
-_rpb:
+ .globl _C_LABEL(kernbase), _C_LABEL(rpb), _C_LABEL(kernel_text)
+ .set _C_LABEL(kernel_text),KERNBASE
+_C_LABEL(kernbase):
+_C_LABEL(rpb):
/*
* First page in memory we have rpb; so that we know where
* (must be on a 64k page boundary, easiest here). We use it
@@ -138,41 +145,41 @@ _rpb:
NOVEC; # Unused, E4
NOVEC; # Unused, E8
NOVEC; # Unused, EC
- NOVEC;
- NOVEC;
- NOVEC;
- NOVEC;
+ NOVEC;
+ NOVEC;
+ NOVEC;
+ NOVEC;
/* space for adapter vectors */
.space 0x100
- .align 2
+ .align 2
#
# mcheck is the badaddress trap, also called when referencing
# a invalid address (busserror)
# memtest holds the address to continue execution at when returning
# from a intentional test.
#
-mcheck: .globl mcheck
+SCBENTRY(mcheck)
tstl _ASM_LABEL(memtest) # Are we expecting a machine check?
bneq L4 # Yes.
pushr $0x7f
- pushab 24(sp)
- movl _dep_call,r6 # CPU dependent mchk handling
- calls $1,*MCHK(r6)
- tstl r0 # If not machine check, try memory error
+ pushab 24(%sp)
+ movl _C_LABEL(dep_call),%r6 # CPU dependent mchk handling
+ calls $1,*MCHK(%r6)
+ tstl %r0 # If not machine check, try memory error
beql 1f
- calls $0,*MEMERR(r6)
+ calls $0,*MEMERR(%r6)
pushab 2f
- calls $1,_panic
+ calls $1,_C_LABEL(panic)
2: .asciz "mchk"
1: popr $0x7f
- addl2 (sp)+,sp
+ addl2 (%sp)+,%sp
rei
-L4: addl2 (sp)+,sp # remove info pushed on stack
+L4: addl2 (%sp)+,%sp # remove info pushed on stack
# Clear the machine check condition by writing to the
# MCESR register if available.
cmpl $VAX_TYP_UV2, _C_LABEL(vax_cputype)
@@ -180,12 +187,12 @@ L4: addl2 (sp)+,sp # remove info pushed on stack
cmpl $VAX_TYP_SOC, _C_LABEL(vax_cputype)
beql 2f
mtpr $0,$PR_MCESR # clear the bus error bit
-2: movl _ASM_LABEL(memtest),(sp) # REI to new address
+2: movl _ASM_LABEL(memtest),(%sp) # REI to new address
rei
TRAPCALL(invkstk, T_KSPNOTVAL)
-JSBENTRY(privinflt) # Privileged/unimplemented instruction
+SCBENTRY(privinflt) # Privileged/unimplemented instruction
#ifdef INSN_EMULATE
jsb unimemu # do not return if insn emulated
#endif
@@ -204,69 +211,65 @@ TRAPCALL(resadflt, T_RESADFLT)
* put in a need for an extra check when the fault is gotten during
* PTE reference. Handled in pmap.c.
*/
- .align 2
- .globl transl_v # 20: Translation violation
-transl_v:
+SCBENTRY(transl_v) # 20: Translation violation
PUSHR
- pushl 28(sp)
- pushl 28(sp)
- calls $2,_pmap_simulref
- tstl r0
+ pushl 28(%sp)
+ pushl 28(%sp)
+ calls $2,_C_LABEL(pmap_simulref)
+ tstl %r0
bneq 1f
POPR
- addl2 $8,sp
+ addl2 $8,%sp
rei
1: POPR
brb access_v
- .align 2
- .globl access_v # 24: Access cntrl viol fault
+SCBENTRY(access_v) # 24: Access cntrl viol fault
access_v:
- blbs (sp), ptelen
+ blbs (%sp), ptelen
pushl $T_ACCFLT
- bbc $1,4(sp),1f
- bisl2 $T_PTEFETCH,(sp)
-1: bbc $2,4(sp),2f
- bisl2 $T_WRITE,(sp)
-2: movl (sp), 4(sp)
- addl2 $4, sp
+ bbc $1,4(%sp),1f
+ bisl2 $T_PTEFETCH,(%sp)
+1: bbc $2,4(%sp),2f
+ bisl2 $T_WRITE,(%sp)
+2: movl (%sp), 4(%sp)
+ addl2 $4, %sp
jbr trap
-ptelen: movl $T_PTELEN, (sp) # PTE must expand (or send segv)
- jbr trap;
+ptelen: movl $T_PTELEN, (%sp) # PTE must expand (or send segv)
+ jbr trap
TRAPCALL(tracep, T_TRCTRAP)
TRAPCALL(breakp, T_BPTFLT)
TRAPARGC(arithflt, T_ARITHFLT)
-JSBENTRY(syscall) # Main system call
+SCBENTRY(syscall) # Main system call
pushl $T_SYSCALL
pushr $0xfff
- mfpr $PR_USP, -(sp)
- pushl ap
- pushl fp
- pushl sp # pointer to syscall frame; defined in trap.h
- calls $1, _syscall
- movl (sp)+, fp
- movl (sp)+, ap
- mtpr (sp)+, $PR_USP
+ mfpr $PR_USP, -(%sp)
+ pushl %ap
+ pushl %fp
+ pushl %sp # pointer to syscall frame; defined in trap.h
+ calls $1, _C_LABEL(syscall)
+ movl (%sp)+, %fp
+ movl (%sp)+, %ap
+ mtpr (%sp)+, $PR_USP
popr $0xfff
- addl2 $8, sp
+ addl2 $8, %sp
mtpr $0x1f, $PR_IPL # Be sure we can REI
rei
-
-JSBENTRY(cmrerr)
+SCBENTRY(cmrerr)
PUSHR
- movl _dep_call,r0
- calls $0,*MEMERR(r0)
+ movl _C_LABEL(dep_call),%r0
+ calls $0,*MEMERR(%r0)
POPR
rei
-JSBENTRY(sbiflt)
+SCBENTRY(sbiflt)
pushab sbifltmsg
- calls $1, _panic
+ calls $1,_C_LABEL(panic)
TRAPCALL(astintr, T_ASTFLT)
@@ -274,37 +277,39 @@ FASTINTR(softintr,softintr_dispatch)
TRAPCALL(ddbtrap, T_KDBTRAP)
-JSBENTRY(hardclock)
+SCBENTRY(hardclock)
mtpr $0xc1,$PR_ICCS # Reset interrupt flag
PUSHR
- pushl sp
- addl2 $24,(sp)
- movl _dep_call,r0
- calls $1,*HARDCLOCK(r0)
- incl _clock_intrcnt+EC_COUNT # increment low longword
- adwc $0,_clock_intrcnt+EC_COUNT+4 # add any carry to hi longword
+ pushl %sp
+ addl2 $24,(%sp)
+ movl _C_LABEL(dep_call),%r0
+ calls $1,*HARDCLOCK(%r0)
+ incl _C_LABEL(clock_intrcnt)+EC_COUNT # increment low longword
+ adwc $0,_C_LABEL(clock_intrcnt)+EC_COUNT+4 # add any carry to hi
+ # longword
POPR
rei
/*
* Main routine for traps; all go through this.
* Note that we put USP on the frame here, which sometimes should
- * be KSP to be correct, but because we only alters it when we are
+ * be KSP to be correct, but because we only alters it when we are
* called from user space it doesn't care.
* _sret is used in cpu_set_kpc to jump out to user space first time.
*/
- .globl _sret
trap: pushr $0xfff
- mfpr $PR_USP, -(sp)
- pushl ap
- pushl fp
- pushl sp
- calls $1, _arithflt
-_sret: movl (sp)+, fp
- movl (sp)+, ap
- mtpr (sp)+, $PR_USP
+ mfpr $PR_USP, -(%sp)
+ pushl %ap
+ pushl %fp
+ pushl %sp
+ calls $1, _C_LABEL(arithflt)
+ .globl _C_LABEL(sret)
+_C_LABEL(sret):
+ movl (%sp)+, %fp
+ movl (%sp)+, %ap
+ mtpr (%sp)+, $PR_USP
popr $0xfff
- addl2 $8, sp
+ addl2 $8, %sp
mtpr $0x1f, $PR_IPL # Be sure we can REI
rei
@@ -316,25 +321,42 @@ sbifltmsg:
* Table of emulated Microvax instructions supported by emulate.s.
* Use noemulate to convert unimplemented ones to reserved instruction faults.
*/
- .globl _emtable
-_emtable:
-/* f8 */ .long _EMashp; .long _EMcvtlp; .long noemulate; .long noemulate
-/* fc */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 00 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 04 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 08 */ .long _EMcvtps; .long _EMcvtsp; .long noemulate; .long _EMcrc
-/* 0c */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 10 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 14 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 18 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 1c */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 20 */ .long _EMaddp4; .long _EMaddp6; .long _EMsubp4; .long _EMsubp6
-/* 24 */ .long _EMcvtpt; .long _EMmulp; .long _EMcvttp; .long _EMdivp
-/* 28 */ .long noemulate; .long _EMcmpc3; .long _EMscanc; .long _EMspanc
-/* 2c */ .long noemulate; .long _EMcmpc5; .long _EMmovtc; .long _EMmovtuc
-/* 30 */ .long noemulate; .long noemulate; .long noemulate; .long noemulate
-/* 34 */ .long _EMmovp; .long _EMcmpp3; .long _EMcvtpl; .long _EMcmpp4
-/* 38 */ .long _EMeditpc; .long _EMmatchc; .long _EMlocc; .long _EMskpc
+ .globl _C_LABEL(emtable)
+_C_LABEL(emtable):
+/* f8 */ .long _C_LABEL(EMashp); .long _C_LABEL(EMcvtlp)
+ .long noemulate; .long noemulate
+/* fc */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 00 */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 04 */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 08 */ .long _C_LABEL(EMcvtps); .long _C_LABEL(EMcvtsp)
+ .long noemulate; .long _C_LABEL(EMcrc)
+/* 0c */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 10 */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 14 */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 18 */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 1c */ .long noemulate; .long noemulate
+ .long noemulate; .long noemulate
+/* 20 */ .long _C_LABEL(EMaddp4); .long _C_LABEL(EMaddp6)
+ .long _C_LABEL(EMsubp4); .long _C_LABEL(EMsubp6)
+/* 24 */ .long _C_LABEL(EMcvtpt); .long _C_LABEL(EMmulp)
+ .long _C_LABEL(EMcvttp); .long _C_LABEL(EMdivp)
+/* 28 */ .long noemulate; .long _C_LABEL(EMcmpc3)
+ .long _C_LABEL(EMscanc); .long _C_LABEL(EMspanc)
+/* 2c */ .long noemulate; .long _C_LABEL(EMcmpc5)
+ .long _C_LABEL(EMmovtc); .long _C_LABEL(EMmovtuc)
+/* 30 */ .long noemulate; .long noemulate;
+ .long noemulate; .long noemulate
+/* 34 */ .long _C_LABEL(EMmovp); .long _C_LABEL(EMcmpp3)
+ .long _C_LABEL(EMcvtpl); .long _C_LABEL(EMcmpp4)
+/* 38 */ .long _C_LABEL(EMeditpc); .long _C_LABEL(EMmatchc);
+ .long _C_LABEL(EMlocc); .long _C_LABEL(EMskpc)
#endif
/*
* The following is called with the stack set up as follows:
@@ -373,83 +395,77 @@ _emtable:
* information.
*/
- .align 2
- .globl emulate
-emulate:
+SCBENTRY(emulate)
#if INSN_EMULATE
- movl r11,32(sp) # save register r11 in unused operand
- movl r10,36(sp) # save register r10 in unused operand
- cvtbl (sp),r10 # get opcode
- addl2 $8,r10 # shift negative opcodes
- subl3 r10,$0x43,r11 # forget it if opcode is out of range
+ movl %r11,32(%sp) # save register r11 in unused operand
+ movl %r10,36(%sp) # save register r10 in unused operand
+ cvtbl (%sp),%r10 # get opcode
+ addl2 $8,%r10 # shift negative opcodes
+ subl3 %r10,$0x43,%r11 # forget it if opcode is out of range
bcs noemulate
- movl _emtable[r10],r10 # call appropriate emulation routine
- jsb (r10) # routines put return values into regs 0-5
- movl 32(sp),r11 # restore register r11
- movl 36(sp),r10 # restore register r10
- insv (sp),$0,$4,44(sp) # and condition codes in Opcode spot
- addl2 $40,sp # adjust stack for return
+ movl _C_LABEL(emtable)[%r10],%r10 # call appropriate emulation routine
+ jsb (%r10) # routines put return values into regs 0-5
+ movl 32(%sp),%r11 # restore register r11
+ movl 36(%sp),%r10 # restore register r10
+ insv (%sp),$0,$4,44(%sp) # and condition codes in Opcode spot
+ addl2 $40,%sp # adjust stack for return
rei
noemulate:
- addl2 $48,sp # adjust stack for
+ addl2 $48,%sp # adjust stack for
#endif
.word 0xffff # "reserved instruction fault"
- .data
-_scb: .long 0
- .globl _scb
-
.text
/*
* First entry routine from boot. This should be in a file called locore.
*/
-ASENTRY_NOPROFILE(start, 0)
- bisl3 $0x80000000,r9,_esym # End of loaded code
+ASENTRY_NOPROFILE(__start, 0)
+ bisl3 $0x80000000,%r9,_C_LABEL(esym) # End of loaded code
pushl $0x1f0000 # Push a nice PSL
pushl $to # Address to jump to
rei # change to kernel stack
-to: movw $0xfff,_panic # Save all regs in panic
- moval _end, r0 # Get kernel end address
- addl2 $0x3ff, r0 # Round it up
- cmpl _esym, r0 # Compare with symbol table end
+to: movw $0xfff,_C_LABEL(panic) # Save all regs in panic
+ moval _C_LABEL(end), %r0 # Get kernel end address
+ addl2 $0x3ff, %r0 # Round it up
+ cmpl _C_LABEL(esym), %r0 # Compare with symbol table end
bleq eskip # Symbol table not present
- addl3 _esym, $0x3ff, r0 # Use symbol end and round
+ addl3 _C_LABEL(esym), $0x3ff, %r0 # Use symbol end and round
eskip:
- bicl3 $0x3ff,r0,_proc0paddr # save proc0 uarea pointer
- bicl3 $0x80000000,_proc0paddr,r0 # get phys proc0 uarea addr
- mtpr r0,$PR_PCBB # Save in IPR PCBB
- addl3 $USPACE,_proc0paddr,r0 # Get kernel stack top
- mtpr r0,$PR_KSP # put in IPR KSP
- movl r0,_Sysmap # SPT start addr after KSP
+ bicl3 $0x3ff,%r0,_C_LABEL(proc0paddr) # save proc0 uarea pointer
+ bicl3 $0x80000000,_C_LABEL(proc0paddr),%r0 # get phys proc0 uarea addr
+ mtpr %r0,$PR_PCBB # Save in IPR PCBB
+ addl3 $USPACE,_C_LABEL(proc0paddr),%r0 # Get kernel stack top
+ mtpr %r0,$PR_KSP # put in IPR KSP
+ movl %r0,_C_LABEL(Sysmap) # SPT start addr after KSP
# Set some registers in known state
- movl _proc0paddr,r0
- clrl P0LR(r0)
- clrl P1LR(r0)
+ movl _C_LABEL(proc0paddr),%r0
+ clrl P0LR(%r0)
+ clrl P1LR(%r0)
mtpr $0,$PR_P0LR
mtpr $0,$PR_P1LR
- movl $0x80000000,r1
- movl r1,P0BR(r0)
- movl r1,P1BR(r0)
- mtpr r1,$PR_P0BR
- mtpr r1,$PR_P1BR
- clrl IFTRAP(r0)
+ movl $0x80000000,%r1
+ movl %r1,P0BR(%r0)
+ movl %r1,P1BR(%r0)
+ mtpr %r1,$PR_P0BR
+ mtpr %r1,$PR_P1BR
+ clrl IFTRAP(%r0)
mtpr $0,$PR_SCBB
# Copy the RPB to its new position
#if 1 /* compat with old bootblocks */
- tstl (ap) # Any arguments?
+ tstl (%ap) # Any arguments?
bneq 1f # Yes, called from new boot
- movl r11,_boothowto # Howto boot (single etc...)
-# movl r10,_bootdev # uninteresting, will complain
- movl r8,_avail_end # Usable memory (from VMB)
- clrl -(sp) # Have no RPB
+ movl %r11,_C_LABEL(boothowto) # Howto boot (single etc...)
+# movl %r10,_C_LABEL(bootdev) # uninteresting, will complain
+ movl %r8,_C_LABEL(avail_end) # Usable memory (from VMB)
+ clrl -(%sp) # Have no RPB
brb 2f
#endif
-1: pushl 4(ap) # Address of old rpb
-2: calls $1,_start # Jump away.
+1: pushl 4(%ap) # Address of old rpb
+2: calls $1,_C_LABEL(_start) # Jump away.
/* NOTREACHED */
@@ -457,114 +473,115 @@ eskip:
* Signal handler code.
*/
- .globl _sigcode,_esigcode
-_sigcode:
- movl 0x0c(sp),r0 /* get signal handler */
- calls $3,(r0) /* and call it */
- chmk $SYS_sigreturn /* sigreturn frame set up by sendsig */
- chmk $SYS_exit
- halt
- .align 2
-_esigcode:
-
- .globl _idsptch, _eidsptch
-_idsptch: pushr $0x3f
- .word 0x9f16 # jsb to absolute address
- .long _cmn_idsptch # the absolute address
- .long 0 # the callback interrupt routine
- .long 0 # its argument
- .long 0 # ptr to correspond evcount struct
-_eidsptch:
+ .globl _C_LABEL(sigcode),_C_LABEL(esigcode)
+_C_LABEL(sigcode):
+ movl 0x0c(%sp),%r0 /* get signal handler */
+ calls $3,(%r0) /* and call it */
+ chmk $SYS_sigreturn /* sigreturn frame set up by sendsig */
+ chmk $SYS_exit
+ halt
+ _ALIGN_TEXT
+_C_LABEL(esigcode):
+
+ .globl _C_LABEL(idsptch), _C_LABEL(eidsptch)
+_C_LABEL(idsptch):
+ pushr $0x3f
+ .word 0x9f16 # jsb to absolute address
+ .long _cmn_idsptch # the absolute address
+ .long 0 # the callback interrupt routine
+ .long 0 # its argument
+ .long 0 # ptr to correspond evcount struct
+_C_LABEL(eidsptch):
_cmn_idsptch:
- movl (sp)+,r0 # get pointer to idspvec
- movl 8(r0),r1 # get evcount pointer
- beql 1f # no ptr, skip increment
- incl EC_COUNT(r1) # increment low longword
- adwc $0,EC_COUNT+4(r1) # add any carry to hi longword
-1: pushl 4(r0) # push argument
- calls $1,*(r0) # call interrupt routine
- popr $0x3f # pop registers
- rei # return from interrut
+ movl (%sp)+,%r0 # get pointer to idspvec
+ movl 8(%r0),%r1 # get evcount pointer
+ beql 1f # no ptr, skip increment
+ incl EC_COUNT(%r1) # increment low longword
+ adwc $0,EC_COUNT+4(%r1) # add any carry to hi longword
+1: pushl 4(%r0) # push argument
+ calls $1,*(%r0) # call interrupt routine
+ popr $0x3f # pop registers
+ rei # return from interrut
ENTRY_NOPROFILE(badaddr,R2|R3) # Called with addr,b/w/l
- mfpr $0x12,r0 # splhigh()
- mtpr $0x1f,$0x12
- movl 4(ap),r2 # First argument, the address
- movl 8(ap),r1 # Sec arg, b,w,l
- pushl r0 # Save old IPL
- clrl r3
- movab 4f,_ASM_LABEL(memtest) # Set the return address
-
- caseb r1,$1,$4 # What is the size
-1: .word 1f-1b
- .word 2f-1b
- .word 3f-1b # This is unused
- .word 3f-1b
-
-1: movb (r2),r1 # Test a byte
- brb 5f
-
-2: movw (r2),r1 # Test a word
- brb 5f
-
-3: movl (r2),r1 # Test a long
- brb 5f
-
-4: incl r3 # Got machine chk => addr bad
-5: clrl _ASM_LABEL(memtest) # do not ignore further mchk
- mtpr (sp)+,$0x12
- movl r3,r0
- ret
+ mfpr $0x12,%r0 # splhigh()
+ mtpr $0x1f,$0x12
+ movl 4(%ap),%r2 # First argument, the address
+ movl 8(%ap),%r1 # Sec arg, b,w,l
+ pushl %r0 # Save old IPL
+ clrl %r3
+ movab 4f,_ASM_LABEL(memtest) # Set the return address
+
+ caseb %r1,$1,$4 # What is the size
+1: .word 1f-1b
+ .word 2f-1b
+ .word 3f-1b # This is unused
+ .word 3f-1b
+
+1: movb (%r2),%r1 # Test a byte
+ brb 5f
+
+2: movw (%r2),%r1 # Test a word
+ brb 5f
+
+3: movl (%r2),%r1 # Test a long
+ brb 5f
+
+4: incl %r3 # Got machine chk => addr bad
+5: clrl _ASM_LABEL(memtest) # do not ignore further mchk
+ mtpr (%sp)+,$0x12
+ movl %r3,%r0
+ ret
#ifdef DDB
/*
* DDB is the only routine that uses setjmp/longjmp.
*/
ENTRY_NOPROFILE(setjmp, 0)
- movl 4(ap), r0
- movl 8(fp), (r0)
- movl 12(fp), 4(r0)
- movl 16(fp), 8(r0)
- addl3 fp,$28,12(r0)
- clrl r0
+ movl 4(%ap), %r0
+ movl 8(%fp), (%r0)
+ movl 12(%fp), 4(%r0)
+ movl 16(%fp), 8(%r0)
+ addl3 %fp,$28,12(%r0)
+ clrl %r0
ret
ENTRY_NOPROFILE(longjmp, 0)
- movl 4(ap), r1
- movl $1, r0
- movl (r1), ap
- movl 4(r1), fp
- movl 12(r1), sp
- jmp *8(r1)
-#endif
+ movl 4(%ap), %r1
+ movl $1, %r0
+ movl (%r1), %ap
+ movl 4(%r1), %fp
+ movl 12(%r1), %sp
+ jmp *8(%r1)
+#endif
#
# void
# cpu_switchto(struct proc *oldproc = r0, struct proc *newproc = r1);
#
-#define CURPROC _cpu_info_store + CI_CURPROC
+#define CURPROC _C_LABEL(cpu_info_store) + CI_CURPROC
JSBENTRY(__cpu_switchto)
svpctx
- movb $SONPROC,P_STAT(r1) # p->p_stat = SONPROC
- movl r1, CURPROC # set new process running
+ movb $SONPROC,P_STAT(%r1) # p->p_stat = SONPROC
+ movl %r1, CURPROC # set new process running
- movl P_ADDR(r1),r0 # Get pointer to new pcb.
- addl3 r0,$IFTRAP,pcbtrap # Save for copy* functions.
+ movl P_ADDR(%r1),%r0 # Get pointer to new pcb.
+ addl3 %r0,$IFTRAP,pcbtrap # Save for copy* functions.
# inline kvtophys
- extzv $9,$21,r0,r1 # extract offset
- movl *_Sysmap[r1],r2 # get pte
- ashl $9,r2,r3 # shift to get phys address.
+ extzv $9,$21,%r0,%r1 # extract offset
+ movl *_C_LABEL(Sysmap)[%r1],%r2 # get pte
+ ashl $9,%r2,%r3 # shift to get phys address.
#
# Do the actual process switch. pc + psl are already on stack, from
# the beginning of this routine.
#
- mtpr r3,$PR_PCBB
+ mtpr %r3,$PR_PCBB
pushl CURPROC
calls $1, _C_LABEL(pmap_activate)
@@ -573,85 +590,85 @@ JSBENTRY(__cpu_switchto)
rei
#
-# copy/fetch/store routines.
+# copy/fetch/store routines.
#
- .align 2,1
+ .align 2
ENTRY_NOPROFILE(copyin, R2|R3|R4|R5|R6)
- movl 4(ap), r0
+ movl 4(%ap), %r0
blss 3f # kernel space
- movl 8(ap), r1
+ movl 8(%ap), %r1
brb 2f
ENTRY_NOPROFILE(copyout, R2|R3|R4|R5|R6)
- movl 8(ap), r1
+ movl 8(%ap), %r1
blss 3f # kernel space
- movl 4(ap), r0
+ movl 4(%ap), %r0
2: movab 1f,*pcbtrap
- movzwl 12(ap), r2
- movzwl 14(ap), r6
+ movzwl 12(%ap), %r2
+ movzwl 14(%ap), %r6
+
+ movc3 %r2, (%r0), (%r1)
- movc3 r2, (r0), (r1)
-
- tstl r6
+ tstl %r6
bleq 1f
-0: movb (r1)+, (r3)+
- movc3 $0xffff, (r1), (r3)
- sobgtr r6,0b
-
+0: movb (%r1)+, (%r3)+
+ movc3 $0xffff, (%r1), (%r3)
+ sobgtr %r6,0b
+
1: clrl *pcbtrap
ret
-3: movl $EFAULT, r0
+3: movl $EFAULT, %r0
ret
-/* kcopy: just like bcopy, except return EFAULT upon failure */
+/* kcopy: just like bcopy, except return EFAULT upon failure */
ENTRY_NOPROFILE(kcopy,R2|R3|R4|R5|R6)
- movl *pcbtrap,-(sp)
+ movl *pcbtrap,-(%sp)
movab 1f,*pcbtrap
- movl 4(ap), r0
- movl 8(ap), r1
- movzwl 12(ap), r2
- movzwl 14(ap), r6
-
- movc3 r2, (r0), (r1)
-
- tstl r6
+ movl 4(%ap), %r0
+ movl 8(%ap), %r1
+ movzwl 12(%ap), %r2
+ movzwl 14(%ap), %r6
+
+ movc3 %r2, (%r0), (%r1)
+
+ tstl %r6
bleq 1f
-0: movb (r1)+, (r3)+
- movc3 $0xffff, (r1), (r3)
- sobgtr r6, 0b
-
- /*
+0: movb (%r1)+, (%r3)+
+ movc3 $0xffff, (%r1), (%r3)
+ sobgtr %r6, 0b
+
+ /*
* If there is a failure, trap.c will set r0 to EFAULT, and jump
* to the following 1. If not, we return 0 (movc3 sets r0 to 0).
*/
1:
- movl (sp)+,*pcbtrap
+ movl (%sp)+,*pcbtrap
ret
ENTRY_NOPROFILE(copyinstr,0)
- tstl 4(ap) # is from a kernel address?
+ tstl 4(%ap) # is from a kernel address?
bgeq 8f # no, continue
-6: movl $EFAULT,r0
+6: movl $EFAULT,%r0
ret
ENTRY_NOPROFILE(copyoutstr,0)
- tstl 8(ap) # is to a kernel address?
+ tstl 8(%ap) # is to a kernel address?
bgeq 8f # no, continue
brb 6b
ENTRY_NOPROFILE(copystr,0)
-8: movl 4(ap),r4 # from
- movl 8(ap),r5 # to
- movl 16(ap),r3 # copied
- movl 12(ap),r2 # len
+8: movl 4(%ap),%r4 # from
+ movl 8(%ap),%r5 # to
+ movl 16(%ap),%r3 # copied
+ movl 12(%ap),%r2 # len
bneq 1f # nothing to copy?
- movl $ENAMETOOLONG,r0
- tstl r3
+ movl $ENAMETOOLONG,%r0
+ tstl %r3
beql 0f
- movl $0,(r3)
+ movl $0,(%r3)
0: ret
1: movab 2f,*pcbtrap
@@ -662,52 +679,52 @@ ENTRY_NOPROFILE(copystr,0)
* locc only handles <64k strings, we default to the slow version if the
* string is longer.
*/
- cmpl _vax_cputype,$VAX_TYP_UV2
+ cmpl _C_LABEL(vax_cputype),$VAX_TYP_UV2
bneq 4f # Check if locc emulated
-9: movl r2,r0
-7: movb (r4)+,(r5)+
+9: movl %r2,%r0
+7: movb (%r4)+,(%r5)+
beql 6f # end of string
- sobgtr r0,7b # no null byte in the len first bytes?
+ sobgtr %r0,7b # no null byte in the len first bytes?
brb 1f
-6: tstl r3
+6: tstl %r3
beql 5f
- incl r2
- subl3 r0,r2,(r3)
-5: clrl r0
+ incl %r2
+ subl3 %r0,%r2,(%r3)
+5: clrl %r0
clrl *pcbtrap
ret
-4: cmpl r2,$65535 # maxlen < 64k?
+4: cmpl %r2,$65535 # maxlen < 64k?
blss 8f # then use fast code.
- locc $0,$65535,(r4) # is strlen < 64k?
+ locc $0,$65535,(%r4) # is strlen < 64k?
beql 9b # No, use slow code
- subl3 r0,$65535,r1 # Get string len
+ subl3 %r0,$65535,%r1 # Get string len
brb 0f # do the copy
-8: locc $0,r2,(r4) # check for null byte
+8: locc $0,%r2,(%r4) # check for null byte
beql 1f
- subl3 r0,r2,r1 # Calculate len to copy
-0: incl r1 # Copy null byte also
- tstl r3
+ subl3 %r0,%r2,%r1 # Calculate len to copy
+0: incl %r1 # Copy null byte also
+ tstl %r3
beql 3f
- movl r1,(r3) # save len copied
-3: movc3 r1,(r4),(r5)
+ movl %r1,(%r3) # save len copied
+3: movc3 %r1,(%r4),(%r5)
brb 4f
-1: movl $ENAMETOOLONG,r0
+1: movl $ENAMETOOLONG,%r0
2: movab 4f,*pcbtrap # if we fault again, don't resume there
- subl3 8(ap),r5,r1 # did we write to the string?
+ subl3 8(%ap),%r5,%r1 # did we write to the string?
beql 3f
- decl r5
-3: movb $0,(r5) # null terminate the output string
- tstl r3
+ decl %r5
+3: movb $0,(%r5) # null terminate the output string
+ tstl %r3
beql 4f
- incl r1 # null byte accounts for outlen...
- movl r1,(r3) # save len copied
+ incl %r1 # null byte accounts for outlen...
+ movl %r1,(%r3) # save len copied
4: clrl *pcbtrap
ret
@@ -718,5 +735,9 @@ ENTRY_NOPROFILE(copystr,0)
_ASM_LABEL(memtest): # badaddr() in progress
.long 0
-pcbtrap: .long 0x800001fc; .globl pcbtrap # Safe place
-_bootdev: .long 0; .globl _bootdev
+pcbtrap:
+ .long 0x800001fc # Safe place
+
+ .globl _C_LABEL(bootdev)
+_C_LABEL(bootdev):
+ .long 0
diff --git a/sys/arch/vax/vax/machdep.c b/sys/arch/vax/vax/machdep.c
index e1a269e4cc0..128697fa138 100644
--- a/sys/arch/vax/vax/machdep.c
+++ b/sys/arch/vax/vax/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.125 2013/06/29 13:00:35 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.126 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: machdep.c,v 1.108 2000/09/13 15:00:23 thorpej Exp $ */
/*
@@ -592,8 +592,8 @@ haltsys:
mtpr(GC_CONS|GC_BTFL, PR_TXDB);
}
- asm("movl %0, r5":: "g" (showto)); /* How to boot */
- asm("movl %0, r11":: "r"(showto)); /* ??? */
+ asm("movl %0, %%r5":: "g" (showto)); /* How to boot */
+ asm("movl %0, %%r11":: "r"(showto)); /* ??? */
asm("halt");
for (;;) ;
/* NOTREACHED */
@@ -1048,7 +1048,7 @@ splassert_check(int wantipl, const char *func)
}
#endif
-void start(struct rpb *);
+void _start(struct rpb *);
void main(void);
extern paddr_t avail_end;
@@ -1082,7 +1082,7 @@ extern struct cpu_dep vxt_calls;
* management is disabled, and no interrupt system is active.
*/
void
-start(struct rpb *prpb)
+_start(struct rpb *prpb)
{
extern vaddr_t scratch;
int preserve_cca = 0;
diff --git a/sys/arch/vax/vax/udiv.s b/sys/arch/vax/vax/udiv.s
index cbc084754a0..28a5db508ec 100644
--- a/sys/arch/vax/vax/udiv.s
+++ b/sys/arch/vax/vax/udiv.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: udiv.s,v 1.4 2005/05/06 18:55:02 miod Exp $ */
+/* $OpenBSD: udiv.s,v 1.5 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: udiv.s,v 1.2 1994/10/26 08:03:34 cgd Exp $ */
/*-
@@ -44,25 +44,25 @@
*/
-#define DIVIDEND 4(ap)
-#define DIVISOR 8(ap)
+#define DIVIDEND 4(%ap)
+#define DIVISOR 8(%ap)
-ASENTRY(udiv, 0)
- movl DIVISOR,r2
+ASENTRY(__udiv, 0)
+ movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
- movl DIVIDEND,r0
+ movl DIVIDEND,%r0
jlss Lhard # big dividend: extended division
- divl2 r2,r0 # small divisor and dividend: signed division
+ divl2 %r2,%r0 # small divisor and dividend: signed division
ret
Lhard:
- clrl r1
- ediv r2,r0,r0,r1
+ clrl %r1
+ ediv %r2,%r0,%r0,%r1
ret
Leasy:
- cmpl DIVIDEND,r2
+ cmpl DIVIDEND,%r2
jgequ Lone # if dividend is as big or bigger, return 1
- clrl r0 # else return 0
+ clrl %r0 # else return 0
ret
Lone:
- movl $1,r0
+ movl $1,%r0
ret
diff --git a/sys/arch/vax/vax/unimpl_emul.s b/sys/arch/vax/vax/unimpl_emul.s
index 869376953a4..c479b27a60d 100644
--- a/sys/arch/vax/vax/unimpl_emul.s
+++ b/sys/arch/vax/vax/unimpl_emul.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: unimpl_emul.s,v 1.8 2005/05/06 18:55:02 miod Exp $ */
+/* $OpenBSD: unimpl_emul.s,v 1.9 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: unimpl_emul.s,v 1.2 2000/08/14 11:16:52 ragge Exp $ */
/*
@@ -41,23 +41,23 @@
#undef EMULATE_INKERNEL
# Defines to fetch register operands
-#define S_R0 (fp)
-#define S_R1 4(fp)
-#define S_R2 8(fp)
-#define S_R3 12(fp)
-#define S_R4 16(fp)
-#define S_R5 20(fp)
-#define S_R6 24(fp)
-#define S_R7 28(fp)
-#define S_R8 32(fp)
-#define S_R9 36(fp)
-#define S_R10 40(fp)
-#define S_R11 44(fp)
-#define S_AP 48(fp)
-#define S_FP 52(fp)
-#define S_SP 56(fp)
-#define S_PC 60(fp)
-#define S_PSL 64(fp)
+#define S_R0 (%fp)
+#define S_R1 4(%fp)
+#define S_R2 8(%fp)
+#define S_R3 12(%fp)
+#define S_R4 16(%fp)
+#define S_R5 20(%fp)
+#define S_R6 24(%fp)
+#define S_R7 28(%fp)
+#define S_R8 32(%fp)
+#define S_R9 36(%fp)
+#define S_R10 40(%fp)
+#define S_R11 44(%fp)
+#define S_AP 48(%fp)
+#define S_FP 52(%fp)
+#define S_SP 56(%fp)
+#define S_PC 60(%fp)
+#define S_PSL 64(%fp)
#define PSL_Q (PSL_N | PSL_Z | PSL_V | PSL_C)
@@ -65,14 +65,14 @@
# Emulation of instruction trapped via SCB vector 0x18. (reserved op)
#
.globl unimemu; unimemu:
- pushl r0
- movl 8(sp),r0 # get trap address
- movzbl (r0),r0 # fetch insn generating trap
- caseb r0,$0x74,$1 # case to jump to address
+ pushl %r0
+ movl 8(%sp),%r0 # get trap address
+ movzbl (%r0),%r0 # fetch insn generating trap
+ caseb %r0,$0x74,$1 # case to jump to address
0: .word emodd-0b
.word polyd-0b
-1: movl (sp)+,r0 # restore reg
+1: movl (%sp)+,%r0 # restore reg
rsb # continue fault
#
@@ -80,26 +80,26 @@
# puts the psl + pc (+ jsb return address) on top of user stack.
#
#ifdef EMULATE_INKERNEL
-touser: movl (sp),-52(sp) # save rsb address on top of new stack
- movl 4(sp),r0 # restore saved reg
- addl2 $12,sp # pop junk from stack
+touser: movl (%sp),-52(%sp) # save rsb address on top of new stack
+ movl 4(%sp),%r0 # restore saved reg
+ addl2 $12,%sp # pop junk from stack
pushr $0x7fff # save all regs
- movl sp,fp # new frame pointer
- tstl -(sp) # remember old rsb address
+ movl %sp,%fp # new frame pointer
+ tstl -(%sp) # remember old rsb address
incl S_PC # skip matching insn
rsb
#else
-touser: mfpr $PR_USP,r0 # get user stack pointer
- movl 4(sp),-68(r0) # move already saved r0
- movl (sp),-72(r0) # move return address
- movq 12(sp),-8(r0) # move pc + psl
- addl2 $12,sp # remove moved fields from stack
- movl $1f,(sp) # change return address
+touser: mfpr $PR_USP,%r0 # get user stack pointer
+ movl 4(%sp),-68(%r0) # move already saved r0
+ movl (%sp),-72(%r0) # move return address
+ movq 12(%sp),-8(%r0) # move pc + psl
+ addl2 $12,%sp # remove moved fields from stack
+ movl $1f,(%sp) # change return address
rei
-1: subl2 $8,sp # trapaddr + psl already on stack
+1: subl2 $8,%sp # trapaddr + psl already on stack
pushr $0x7ffe # r0 already saved
- subl2 $8,sp # do not trash r0 + retaddr
- movab 4(sp),fp
+ subl2 $8,%sp # do not trash r0 + retaddr
+ movab 4(%sp),%fp
incl S_PC # skip matching insn
rsb
#endif
@@ -107,7 +107,7 @@ touser: mfpr $PR_USP,r0 # get user stack pointer
#
# Restore registers, cleanup and continue
#
-goback: movl fp,sp # be sure
+goback: movl %fp,%sp # be sure
popr $0x7fff # restore all regs
rei
@@ -116,11 +116,11 @@ goback: movl fp,sp # be sure
* current operand specifier pointed to by S_PC. It also increments S_PC.
*/
getval:
- clrq r0
+ clrq %r0
pushr $(R2+R3+R4+R5+R6)
- movl S_PC,r3 # argument address
- extzv $4,$4,(r3),r2 # get mode
- caseb r2,$0,$0xf
+ movl S_PC,%r3 # argument address
+ extzv $4,$4,(%r3),%r2 # get mode
+ caseb %r2,$0,$0xf
0: .word getval_literal-0b # 0-3 literal
.word getval_literal-0b
.word getval_literal-0b
@@ -138,8 +138,8 @@ getval:
.word getval_longdis-0b # E longword displacement
.word 2f-0b # F longword displacement deferred
#ifdef EMULATE_INKERNEL
-2: movab 0f,r0
- movl r2,r1
+2: movab 0f,%r0
+ movl %r2,%r1
brw die
0: .asciz "getval: missing address mode %d\n"
#else
@@ -153,7 +153,7 @@ getval:
* for details).
*/
getval_literal:
- movzbl (r3)+,r0 # correct operand
+ movzbl (%r3)+,%r0 # correct operand
brw 4f
/*
@@ -161,10 +161,10 @@ getval_literal:
* Register mode. Grab the register number, yank the value out.
*/
getval_reg:
- extzv $0,$4,(r3),r2 # Get reg number
- incl r3
- ashl $2,r2,r2
- addl3 fp,r2,r5
+ extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
+ ashl $2,%r2,%r2
+ addl3 %fp,%r2,%r5
bsbw emul_extract
brw 4f
@@ -174,11 +174,11 @@ getval_reg:
* use that as the address to get the real value.
*/
getval_regdefer:
- extzv $0,$4,(r3),r2 # Get reg number
- incl r3
- ashl $2,r2,r2
- addl2 fp,r2
- movl (r2),r5
+ extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
+ ashl $2,%r2,%r2
+ addl2 %fp,%r2
+ movl (%r2),%r5
bsbw emul_extract
brw 4f
@@ -188,23 +188,23 @@ getval_regdefer:
* then increment the register.
*/
getval_ai:
- extzv $0,$4,(r3),r2 # Get reg number
- incl r3
+ extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
/*
* In the case of the register being PC (0xf), this is called immediate mode;
* we can treat it the same as any other register, as long as we keep r3
* and S_PC in sync. We do that here.
*/
- movl r3,S_PC
+ movl %r3,S_PC
- ashl $2,r2,r2
- addl2 fp,r2
- movl (r2),r5
+ ashl $2,%r2,%r2
+ addl2 %fp,%r2
+ movl (%r2),%r5
bsbw emul_extract
- addl2 r6,(r2)
+ addl2 %r6,(%r2)
- movl S_PC,r3 /* if PC did change, S_PC was changed too */
+ movl S_PC,%r3 /* if PC did change, S_PC was changed too */
brw 4f
/*
@@ -212,14 +212,14 @@ getval_ai:
* Byte displacement mode.
*/
getval_bytedis:
- extzv $0, $4, (r3), r2 # get register
- incl r3
- ashl $2,r2,r2
- addl2 fp,r2
- movl (r2),r5
- movzbl (r3),r4
- incl r3
- addl2 r4, r5
+ extzv $0, $4, (%r3), %r2 # get register
+ incl %r3
+ ashl $2,%r2,%r2
+ addl2 %fp,%r2
+ movl (%r2),%r5
+ movzbl (%r3),%r4
+ incl %r3
+ addl2 %r4, %r5
bsbw emul_extract
brw 4f
@@ -228,16 +228,16 @@ getval_bytedis:
* Longword displacement mode.
*/
getval_longdis:
- extzv $0, $4, (r3), r2 # get register
- incl r3
- ashl $2,r2,r2
- addl2 fp,r2
- movl (r2),r5
- movl (r3)+,r4
- addl2 r4, r5
+ extzv $0, $4, (%r3), %r2 # get register
+ incl %r3
+ ashl $2,%r2,%r2
+ addl2 %fp,%r2
+ movl (%r2),%r5
+ movl (%r3)+,%r4
+ addl2 %r4, %r5
bsbw emul_extract
-4: movl r3,S_PC
+4: movl %r3,S_PC
popr $(R2+R3+R4+R5+R6)
rsb
@@ -247,11 +247,11 @@ getval_longdis:
* 8 is the current maximum length.
*/
emul_extract:
- cmpl $0x8, r6
+ cmpl $0x8, %r6
bgeq 1f
.word 0xffff # reserved operand
1:
- caseb r6, $0x1, $0x7
+ caseb %r6, $0x1, $0x7
0: .word 1f-0b # 1: byte
.word 2f-0b # 2: word
.word 9f-0b # unknown
@@ -261,16 +261,16 @@ emul_extract:
.word 9f-0b # unknown
.word 8f-0b # 8: quadword
-1: movzbl (r5), r0
+1: movzbl (%r5), %r0
rsb
-2: movzwl (r5), r0
+2: movzwl (%r5), %r0
rsb
-4: movl (r5), r0
+4: movl (%r5), %r0
rsb
-8: movq (r5), r0
+8: movq (%r5), %r0
rsb
9:
@@ -278,50 +278,50 @@ emul_extract:
rsb
getval_dfloat:
- clrq r0
+ clrq %r0
pushr $(R2+R3+R6) # use r2+r3 as scratch reg
- movl S_PC,r3 # argument address
- extzv $4,$4,(r3),r2 # get mode
- caseb r2,$0,$0x3
+ movl S_PC,%r3 # argument address
+ extzv $4,$4,(%r3),%r2 # get mode
+ caseb %r2,$0,$0x3
0: .word 1f-0b # 0-3 literal
.word 1f-0b
.word 1f-0b
.word 1f-0b
- movl $0x8, r6
+ movl $0x8, %r6
bsbw getval
brw 4f
-1: insv (r3),$0,$3,r0 # insert fraction
- extzv $3,$3,(r3),r2 # get exponent
- addl2 $128,r2 # bias the exponent
- insv r2,$7,$8,r0 # insert exponent
- tstb (r3)+
- movl r3,S_PC
+1: insv (%r3),$0,$3,%r0 # insert fraction
+ extzv $3,$3,(%r3),%r2 # get exponent
+ addl2 $128,%r2 # bias the exponent
+ insv %r2,$7,$8,%r0 # insert exponent
+ tstb (%r3)+
+ movl %r3,S_PC
4:
popr $(R2+R3+R6)
rsb
getval_long:
- clrl r0
+ clrl %r0
pushr $(R6+R1)
- movl $0x4, r6
+ movl $0x4, %r6
bsbw getval
popr $(R6+R1)
rsb
getval_word:
- clrl r0
+ clrl %r0
pushr $(R6+R1)
- movl $0x2, r6
+ movl $0x2, %r6
bsbw getval
popr $(R6+R1)
rsb
getval_byte:
- clrl r0
+ clrl %r0
pushr $(R6+R1) # use r2+r3 as scratch reg
- movl $0x1, r6
+ movl $0x1, %r6
bsbw getval
popr $(R6+R1)
rsb
@@ -330,11 +330,11 @@ getval_byte:
# getaddr_byte get 4 bytes and stores them in r0. Increases PC.
#
getaddr_byte:
- clrl r0
+ clrl %r0
pushr $(R2+R3) # use r2+r3 as scratch reg
- movl S_PC,r3 # argument address
- extzv $4,$4,(r3),r2 # get mode
- caseb r2,$0,$0xf
+ movl S_PC,%r3 # argument address
+ extzv $4,$4,(%r3),%r2 # get mode
+ caseb %r2,$0,$0xf
0: .word 2f-0b # 0-3 literal
.word 2f-0b
.word 2f-0b
@@ -352,62 +352,62 @@ getaddr_byte:
.word 1f-0b # 14 long disp
.word 2f-0b # 15 long disp deferred (missing)
#ifdef EMULATE_INKERNEL
-2: movab 3f,r0
- movl r2,r1
+2: movab 3f,%r0
+ movl %r2,%r1
brw die # reserved operand
3: .asciz "getaddr_byte: missing address mode %d\n"
#else
2: .word 0xffff # reserved operand
#endif
-1: extzv $0,$4,(r3),r2 # Get reg number
- incl r3
- movl (fp)[r2],r0 # Register contents
- addl2 (r3),r0 # add displacement
- cmpl r2,$15 # pc?
+1: extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
+ movl (%fp)[%r2],%r0 # Register contents
+ addl2 (%r3),%r0 # add displacement
+ cmpl %r2,$15 # pc?
bneq 0f # no, skip
- addl2 $5,r0 # compensate for displacement size
-0: addl2 $4,r3 # increase pc
+ addl2 $5,%r0 # compensate for displacement size
+0: addl2 $4,%r3 # increase pc
brw 4f
-5: extzv $0,$4,(r3),r2 # Get reg number
- incl r3
- movl (fp)[r2],r0
+5: extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
+ movl (%fp)[%r2],%r0
brw 4f
7:
- extzv $0, $4, (r3), r2 # get register
- incl r3
- movl (fp)[r2],r0 # Register contents
- pushl r4
- cvtbl (r3),r4
- addl2 r4,r0 # add displacement
- movl (sp)+,r4
- cmpl r2,$15 # pc?
+ extzv $0, $4, (%r3), %r2 # get register
+ incl %r3
+ movl (%fp)[%r2],%r0 # Register contents
+ pushl %r4
+ cvtbl (%r3),%r4
+ addl2 %r4,%r0 # add displacement
+ movl (%sp)+,%r4
+ cmpl %r2,$15 # pc?
bneq 0f # no, skip
- addl2 $2,r0 # compensate for displacement size
-0: incl r3 # increase pc
+ addl2 $2,%r0 # compensate for displacement size
+0: incl %r3 # increase pc
brw 4f
8:
- extzv $0, $4, (r3), r2 # get register
- incl r3
- movl (fp)[r2],r0 # Register contents
- pushl r4
- cvtwl (r3),r4
- addl2 r4,r0 # add displacement
- movl (sp)+,r4
- cmpl r2,$15 # pc?
+ extzv $0, $4, (%r3), %r2 # get register
+ incl %r3
+ movl (%fp)[%r2],%r0 # Register contents
+ pushl %r4
+ cvtwl (%r3),%r4
+ addl2 %r4,%r0 # add displacement
+ movl (%sp)+,%r4
+ cmpl %r2,$15 # pc?
bneq 0f # no, skip
- addl2 $3,r0 # compensate for displacement size
-0: addl2 $2,r3 # increase pc
+ addl2 $3,%r0 # compensate for displacement size
+0: addl2 $2,%r3 # increase pc
brw 4f
-6: extzv $0,$4,(r3),r2 # Get reg number
- incl r3
- moval (fp)[r2],r0
+6: extzv $0,$4,(%r3),%r2 # Get reg number
+ incl %r3
+ moval (%fp)[%r2],%r0
-4: movl r3,S_PC
+4: movl %r3,S_PC
popr $(R2+R3)
rsb
@@ -422,42 +422,42 @@ getaddr_byte:
#
polyd: bsbw touser # go back to user mode
bsbw getval_dfloat # fetches argument to r0/r1
- movq r0,r6
+ movq %r0,%r6
bsbw getval_word
- movl r0,r4
+ movl %r0,%r4
bsbw getaddr_byte
- movl r0,r3
- clrq r0
+ movl %r0,%r3
+ clrq %r0
# Ok, do the real calculation (Horner's method)
-0: addd2 (r3)+,r0 # add constant
- tstl r4 # more?
+0: addd2 (%r3)+,%r0 # add constant
+ tstl %r4 # more?
beql 1f # no, exit
- muld2 r6,r0 # multiply with arg
- decl r4 # lower degree
+ muld2 %r6,%r0 # multiply with arg
+ decl %r4 # lower degree
brb 0b
-1: movq r0,(fp)
+1: movq %r0,(%fp)
clrl S_R2
- movl r3,S_R3
+ movl %r3,S_R3
clrq S_R4
brw goback
#ifdef EMULATE_INKERNEL
# When we end up somewhere we don't want.
-die: pushl r1
- pushl r0
+die: pushl %r1
+ pushl %r0
calls $2,_printf
- movl fp,sp
+ movl %fp,%sp
brw goback # anything may happen
#endif
# these emodd-related
#define TMPSIZE 0x20 /* temp bytes -- be careful with this! */
#define PRECIS 0x7
-#define TMPFRAC1 (ap)
-#define TMPFRAC2 32(ap)
-#define TMPFRACTGT 64(ap)
+#define TMPFRAC1 (%ap)
+#define TMPFRAC2 32(%ap)
+#define TMPFRACTGT 64(%ap)
#
# Extended multiply/modulus
# XXX just EMODD for now
@@ -469,20 +469,20 @@ emodd: bsbw touser
/*
* We temporarily appropriate ap for the use of TMPFRAC*.
*/
- pushl ap
- subl2 $(3*TMPSIZE), sp
- movl sp, ap
+ pushl %ap
+ subl2 $(3*TMPSIZE), %sp
+ movl %sp, %ap
movc5 $0x0, TMPFRAC1, $0x0, $TMPSIZE, TMPFRAC1
movc5 $0x0, TMPFRAC2, $0x0, $TMPSIZE, TMPFRAC2
movc5 $0x0, TMPFRACTGT, $0x0, $TMPSIZE, TMPFRACTGT
- clrl -(sp)
- movl sp, r3 /* r3 = addr of exp space (1) */
- clrl -(sp)
- movl sp, r5 /* r5 = addr of exp space (2) */
- subl2 $0x10, sp
- movl sp, r6 /* r6 = addr of allocated target space */
+ clrl -(%sp)
+ movl %sp, %r3 /* r3 = addr of exp space (1) */
+ clrl -(%sp)
+ movl %sp, %r5 /* r5 = addr of exp space (2) */
+ subl2 $0x10, %sp
+ movl %sp, %r6 /* r6 = addr of allocated target space */
/*
* Now we package both numbers up and call fltext_De, which
@@ -493,8 +493,8 @@ emodd: bsbw touser
bsbw getval_dfloat # get operand into r0 and r1
/* Check for sign = 0 and exp = 0; if it is, zeroexit. */
- bicl3 $0x7f, r0, r4
- cmpl r4, $0x0
+ bicl3 $0x7f, %r0, %r4
+ cmpl %r4, $0x0
bneq 1f
bsbw getval_byte # get multiplier extension operand
bsbw getval_dfloat # get target operand
@@ -502,64 +502,64 @@ emodd: bsbw touser
1:
/* Check for sign = 1 and exp = 0; if it is, do a resopflt. */
- cmpw r0, $0x8000
+ cmpw %r0, $0x8000
bneq 1f
bsbw getval_byte # get multiplier extension operand
bsbw getval_dfloat # get operand into r0 and r1
- extzv $0, $0xff, r0, r0 # generate a resopflt -- XXX is this ok?
+ extzv $0, $0xff, %r0, %r0 # generate a resopflt -- XXX is this ok?
1:
- movd r0, TMPFRACTGT
- bicl3 $0xffff7fff, r0, r6 # Extract the sign while we're here.
+ movd %r0, TMPFRACTGT
+ bicl3 $0xffff7fff, %r0, %r6 # Extract the sign while we're here.
bsbw getval_byte # get multiplier extension operand
- movzbl r0, -(sp)
- movd r9, r0
- pushl r3
+ movzbl %r0, -(%sp)
+ movd %r9, %r0
+ pushl %r3
pushab TMPFRAC1
- movab TMPFRACTGT, -(sp)
+ movab TMPFRACTGT, -(%sp)
calls $0x4, fltext_De
bsbw getval_dfloat # get operand into r0 and r1
/* Check for sign = 0 and exp = 0; if it is, zeroexit. */
- bicl3 $0x7f, r0, r4
- cmpl r4, $0x0
+ bicl3 $0x7f, %r0, %r4
+ cmpl %r4, $0x0
bneq 1f
bsbw getval_byte # get multiplier extension operand
bsbw getval_dfloat # get target operand
jmp zeroexit
1:
/* Check for sign = 1 and exp = 0; if it is, do a resopflt. */
- cmpw r0, $0x8000
+ cmpw %r0, $0x8000
bneq 1f
bsbw getval_byte # get multiplier extension operand
bsbw getval_dfloat # get operand into r0 and r1
- extzv $0, $0xff, r0, r0 # generate a resopflt -- XXX is this ok?
+ extzv $0, $0xff, %r0, %r0 # generate a resopflt -- XXX is this ok?
1:
- movd r0, TMPFRACTGT
- bicl3 $0xffff7fff, r0, r7 # Extract the sign while we're here.
- movzbl $0x0, -(sp) # no multiplier extension here
- pushl r5
+ movd %r0, TMPFRACTGT
+ bicl3 $0xffff7fff, %r0, %r7 # Extract the sign while we're here.
+ movzbl $0x0, -(%sp) # no multiplier extension here
+ pushl %r5
pushab TMPFRAC2
- movab TMPFRACTGT, -(sp)
+ movab TMPFRACTGT, -(%sp)
calls $0x4, fltext_De
/* first, add exponents */
- addl3 (r5), (r3), r9 /* r9 = exponent (used later) */
- subl2 $0x80, r9 /* we are excess-128 */
+ addl3 (%r5), (%r3), %r9 /* r9 = exponent (used later) */
+ subl2 $0x80, %r9 /* we are excess-128 */
/*
* Let's calculate the target sign. Signs from multipliers are in r6 and
* r7, and both the fraction and integer parts have the same sign.
*/
- xorl2 r7, r6
+ xorl2 %r7, %r6
pushab TMPFRAC1
calls $0x1, bitcnt
- movl r0, r1 /* r1 = bitcount of TMPFRAC1 */
+ movl %r0, %r1 /* r1 = bitcount of TMPFRAC1 */
pushab TMPFRAC2
calls $0x1, bitcnt
- movl r0, r2 /* r2 = bitcount of TMPFRAC2 */
+ movl %r0, %r2 /* r2 = bitcount of TMPFRAC2 */
/*
* Now we get ready to multiply. This multiplies a byte at a time,
@@ -568,60 +568,60 @@ emodd: bsbw touser
*/
clrd TMPFRACTGT
pushr $0x7fc
- subl2 $0x8, sp /* make some temporary space */
- movl sp, r1
- subl2 $0x8, sp
- movl sp, r2
+ subl2 $0x8, %sp /* make some temporary space */
+ movl %sp, %r1
+ subl2 $0x8, %sp
+ movl %sp, %r2
- movl $PRECIS, r5 /* r5 = TMPFRAC1 byte count */
- movl $PRECIS, r6 /* r6 = TMPFRAC2 byte count */
- clrl r7
+ movl $PRECIS, %r5 /* r5 = TMPFRAC1 byte count */
+ movl $PRECIS, %r6 /* r6 = TMPFRAC2 byte count */
+ clrl %r7
1:
-# addl3 r5, $TMPFRAC1, r3 /* r3 - current byte in tmpfrac1 */
- movab TMPFRAC1, r7
- addl3 r5, r7, r3
-# addl3 r6, $TMPFRAC2, r4 /* r4 - current byte in tmpfrac2 */
- movab TMPFRAC2, r7
- addl3 r6, r7, r4
-
- movzbl (r3), r10
- movzbl (r4), r11
- mull3 r10, r11, r7
- movl r7, r3
- cvtld r7, (r2)
-
- subl3 r5, $0x8, r8
- subl3 r6, $0x8, r9
- addl2 r8, r9
- mull2 $0x8, r9
- subl2 $0x40, r9
+# addl3 %r5, $TMPFRAC1, %r3 /* r3 - current byte in tmpfrac1 */
+ movab TMPFRAC1, %r7
+ addl3 %r5, %r7, %r3
+# addl3 %r6, $TMPFRAC2, %r4 /* r4 - current byte in tmpfrac2 */
+ movab TMPFRAC2, %r7
+ addl3 %r6, %r7, %r4
+
+ movzbl (%r3), %r10
+ movzbl (%r4), %r11
+ mull3 %r10, %r11, %r7
+ movl %r7, %r3
+ cvtld %r7, (%r2)
+
+ subl3 %r5, $0x8, %r8
+ subl3 %r6, $0x8, %r9
+ addl2 %r8, %r9
+ mull2 $0x8, %r9
+ subl2 $0x40, %r9
blss 9f
/* This may be bigger than a longword. Break it up. */
-5: cmpl r9, $0x1e
+5: cmpl %r9, $0x1e
bleq 6f
- subl2 $0x1e, r9
+ subl2 $0x1e, %r9
- ashl $0x1e, $0x1, r8
- cvtld r8, (r1)
- muld2 (r1), (r2)
+ ashl $0x1e, $0x1, %r8
+ cvtld %r8, (%r1)
+ muld2 (%r1), (%r2)
jmp 5b
6:
- ashl r9, $0x1, r8
- cvtld r8, (r1)
- muld2 (r1), (r2)
- addd2 (r2), TMPFRACTGT
+ ashl %r9, $0x1, %r8
+ cvtld %r8, (%r1)
+ muld2 (%r1), (%r2)
+ addd2 (%r2), TMPFRACTGT
9:
- cmpl r5, $0x0
+ cmpl %r5, $0x0
beql 2f
- decl r5
+ decl %r5
jmp 1b
-2: cmpl r6, $0x0
+2: cmpl %r6, $0x0
beql 3f
- decl r6
- movl $PRECIS, r5
+ decl %r6
+ movl $PRECIS, %r5
jmp 1b
3:
@@ -632,18 +632,18 @@ emodd: bsbw touser
* we calculate how many bits it will take to shift the value in r7
* so that bit 15 = 1.
*/
- addl2 $0x10, sp
- movl r7, 0x14(sp) /* move r7 onto the frame we're about to pop off */
+ addl2 $0x10, %sp
+ movl %r7, 0x14(%sp) /* move r7 onto the frame we're about to pop off */
popr $0x7fc
- clrl r3 /* r3 = counter */
- movl r7, r8 /* r8 = temp */
+ clrl %r3 /* r3 = counter */
+ movl %r7, %r8 /* r8 = temp */
1:
- bicl3 $0xffff7fff, r8, r5
+ bicl3 $0xffff7fff, %r8, %r5
bneq 2f
- incl r3
- ashl $0x1, r8, r5
- movl r5, r8
+ incl %r3
+ ashl $0x1, %r8, %r5
+ movl %r5, %r8
jmp 1b
2:
@@ -651,32 +651,32 @@ emodd: bsbw touser
* Now we do post-normalization (by subtracting r3) and
* put the exponent (in r9) into TMPFRACTGT.
*/
- subl2 r3, r9
- insv r9, $0x7, $0x8, TMPFRACTGT
+ subl2 %r3, %r9
+ insv %r9, $0x7, $0x8, TMPFRACTGT
- bisl2 r6, TMPFRACTGT # set the sign
+ bisl2 %r6, TMPFRACTGT # set the sign
/*
* Now we need to separate. CVT* won't work in the case of a
* >32-bit integer, so we count the integer bits and use ASHQ to
* shift them away.
*/
- cmpl $0x80, r9
+ cmpl $0x80, %r9
blss 7f /* if we are less than 1.0, we can avoid this */
brw 8f
7:
- subl3 $0x80, r9, r8
+ subl3 $0x80, %r9, %r8
movq TMPFRACTGT, TMPFRAC1
/*
* Check for integer overflow by comparing the integer bit count.
* If this is the case, set V in PSL.
*/
- cmpl r8, $0x20
+ cmpl %r8, $0x20
blss 3f
bisl2 $PSL_V, S_PSL
3:
- cmpl r8, $0x38
+ cmpl %r8, $0x38
blss 1f
/*
* In the case where we have more than 55 bits in the integer,
@@ -692,31 +692,31 @@ emodd: bsbw touser
* that the significance increases from start to finish.
*/
- movab TMPFRACTGT, r0
- movab TMPFRAC1, r1
- movb (r0), 7(r1)
- bisb2 $0x80, 7(r1)
- movw 2(r0), 5(r1)
- movw 4(r0), 3(r1)
- movb 7(r0), 2(r1)
- movb 6(r0), 1(r1)
+ movab TMPFRACTGT, %r0
+ movab TMPFRAC1, %r1
+ movb (%r0), 7(%r1)
+ bisb2 $0x80, 7(%r1)
+ movw 2(%r0), 5(%r1)
+ movw 4(%r0), 3(%r1)
+ movb 7(%r0), 2(%r1)
+ movb 6(%r0), 1(%r1)
/* Calculate exactly how many bits to shift. */
- subl3 r8, $0x40, r7
- mnegl r7, r6
- ashq r6, TMPFRAC1, r0 # shift right
- ashq r7, r0, TMPFRAC2 # shift left
+ subl3 %r8, $0x40, %r7
+ mnegl %r7, %r6
+ ashq %r6, TMPFRAC1, %r0 # shift right
+ ashq %r7, %r0, TMPFRAC2 # shift left
/* Now put it back into a D_. */
- movab TMPFRAC2, r0
- movab TMPFRAC1, r1
- extv $0x18, $0x7, 4(r0), (r1)
- extzv $0x7, $0x9, TMPFRACTGT, r2
- insv r2, $0x7, $0x9, (r1)
+ movab TMPFRAC2, %r0
+ movab TMPFRAC1, %r1
+ extv $0x18, $0x7, 4(%r0), (%r1)
+ extzv $0x7, $0x9, TMPFRACTGT, %r2
+ insv %r2, $0x7, $0x9, (%r1)
- movw 5(r0), 2(r1)
- movw 3(r0), 4(r1)
- movw 1(r0), 6(r1)
+ movw 5(%r0), 2(%r1)
+ movw 3(%r0), 4(%r1)
+ movw 1(%r0), 6(%r1)
# we have the integer in TMPFRAC1, now get the fraction in TMPFRAC2
subd3 TMPFRAC1, TMPFRACTGT, TMPFRAC2
@@ -734,25 +734,25 @@ emodd: bsbw touser
* We're done. We can use CVTDL here, since EMODD is supposed to
* truncate.
*/
- cvtdl TMPFRAC1, r4
+ cvtdl TMPFRAC1, %r4
bsbw getaddr_byte
- movl r4, (r0)
+ movl %r4, (%r0)
bsbw getaddr_byte
- movq TMPFRAC2, (r0)
- movd TMPFRAC2, r0 /* move this here so we can test it later */
+ movq TMPFRAC2, (%r0)
+ movd TMPFRAC2, %r0 /* move this here so we can test it later */
/* Clean up sp. */
- addl2 $0x74, sp
- movl (sp)+, ap
+ addl2 $0x74, %sp
+ movl (%sp)+, %ap
/*
* Now set condition codes. We know Z == 0; C is always 0; and V
* is set above as necessary. Check to see if TMPFRAC2 is
* negative; if it is, set N.
*/
- tstd r0
+ tstd %r0
bgeq 1f /* branch if N == 0 */
bisl2 $PSL_N, S_PSL
1:
@@ -761,9 +761,9 @@ zeroexit:
/* Z == 1, everything else has been cleared already */
bisl2 $PSL_Z, S_PSL
bsbw getaddr_byte
- movl $0x0, (r0)
+ movl $0x0, (%r0)
bsbw getaddr_byte
- movd $0f0, (r0)
+ movd $0f0, (%r0)
brw goback
@@ -782,14 +782,14 @@ ASENTRY(bitcnt, R1|R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* of each (the *least* significant bit at this point!) and doing
* FFSes until we find a bit set.
*/
- movl 4(ap), r0
- movl $0x8, r1
-1: decl r1
- addl3 r1, r0, r4
- movzbl (r4), r2
- ffs $0, $0x20, r2, r3
+ movl 4(%ap), %r0
+ movl $0x8, %r1
+1: decl %r1
+ addl3 %r1, %r0, %r4
+ movzbl (%r4), %r2
+ ffs $0, $0x20, %r2, %r3
bneq 2f /* if we found a bit, Z == 0, continue */
- cmpl r1, $0x0
+ cmpl %r1, $0x0
jeql 3f /* if r1 is zero and there's no bit set, qw is 0 */
jmp 1b /* else continue with the loop */
@@ -797,13 +797,13 @@ ASENTRY(bitcnt, R1|R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* We found a bit; its position in the byte is in r3, and r1 is the
* position of the byte in the quadword.
*/
- subl3 r3, $0x8, r0
- ashl $0x5, r1, r2
- addl2 r2, r0
+ subl3 %r3, $0x8, %r0
+ ashl $0x5, %r1, %r2
+ addl2 %r2, %r0
ret
3: /* this quadword is 0 */
- movl $0xffffffff, r0
+ movl $0xffffffff, %r0
ret
@@ -823,26 +823,26 @@ ASENTRY(bitcnt, R1|R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* in EMOD*, et al. If these bits are not in use, specify 0.
*/
ASENTRY(fltext_De, R0|R1|R2)
- movl 0x4(ap), r0 # r0 - addr of source
- movl 0x8(ap), r1 # r1 - addr of fraction destination
+ movl 0x4(%ap), %r0 # r0 - addr of source
+ movl 0x8(%ap), %r1 # r1 - addr of fraction destination
- movb (r0), (r1)
- bisb2 $0x80, (r1)+ # This is the hidden bit.
+ movb (%r0), (%r1)
+ bisb2 $0x80, (%r1)+ # This is the hidden bit.
- movb 3(r0), (r1)+
- movb 2(r0), (r1)+
- movb 5(r0), (r1)+
- movb 4(r0), (r1)+
- movb 7(r0), (r1)+
- movb 6(r0), (r1)+
+ movb 3(%r0), (%r1)+
+ movb 2(%r0), (%r1)+
+ movb 5(%r0), (%r1)+
+ movb 4(%r0), (%r1)+
+ movb 7(%r0), (%r1)+
+ movb 6(%r0), (%r1)+
/*
* if there are extension bits (EMOD EDIV etc.) they are
* low-order
*/
- movb 0x10(ap), (r1)
+ movb 0x10(%ap), (%r1)
- movl 0x4(ap), r0 # r0 - addr of source
- movl 0xc(ap), r2 # r2 - addr of exponent destination
- extzv $0x7, $0x8, (r0), (r2) # get exponent out
+ movl 0x4(%ap), %r0 # r0 - addr of source
+ movl 0xc(%ap), %r2 # r2 - addr of exponent destination
+ extzv $0x7, $0x8, (%r0), (%r2) # get exponent out
ret
diff --git a/sys/arch/vax/vax/urem.s b/sys/arch/vax/vax/urem.s
index 18b05b9e08d..c4730195964 100644
--- a/sys/arch/vax/vax/urem.s
+++ b/sys/arch/vax/vax/urem.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: urem.s,v 1.4 2005/05/06 18:55:02 miod Exp $ */
+/* $OpenBSD: urem.s,v 1.5 2013/07/05 21:11:57 miod Exp $ */
/* $NetBSD: urem.s,v 1.2 1994/10/26 08:03:37 cgd Exp $ */
/*-
@@ -42,25 +42,25 @@
* urem() takes an ordinary dividend/divisor pair;
*/
-#define DIVIDEND 4(ap)
-#define DIVISOR 8(ap)
+#define DIVIDEND 4(%ap)
+#define DIVISOR 8(%ap)
-ASENTRY(urem, 0)
- movl 8(ap),r2
+ASENTRY(__urem, 0)
+ movl 8(%ap),%r2
jlss Leasy # big divisor: settle by comparison
- movl 4(ap),r0
+ movl 4(%ap),%r0
jlss Lhard # big dividend: need extended division
- divl3 r2,r0,r1 # small divisor and dividend: signed modulus
- mull2 r2,r1
- subl2 r1,r0
+ divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
+ mull2 %r2,%r1
+ subl2 %r1,%r0
ret
Lhard:
- clrl r1
- ediv r2,r0,r1,r0
+ clrl %r1
+ ediv %r2,%r0,%r1,%r0
ret
Leasy:
- subl3 r2,DIVIDEND,r0
+ subl3 %r2,DIVIDEND,%r0
jcc Ldifference # if divisor goes in once, return difference
- movl DIVIDEND,r0 # if divisor is bigger, return dividend
+ movl DIVIDEND,%r0 # if divisor is bigger, return dividend
Ldifference:
ret