author	Miod Vallat <miod@cvs.openbsd.org>	2013-07-05 21:10:51 +0000
committer	Miod Vallat <miod@cvs.openbsd.org>	2013-07-05 21:10:51 +0000
commit	a4ad12e3d1a512b89d9d85f6febc3f1c1bf7ea22 (patch)
tree	081554f6ab8a597fdf4025181d92fd919da94a3e /lib
parent	50508fdc39c1c58bc7a03b6cad331ab7b3917975 (diff)
VAX ELF userland bits. Consists mostly of register prefix additions.
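In practical terms, the "register prefix additions" are a mechanical spelling change: the ELF toolchain expects VAX register operands to be written with a leading % sign. A representative before/after pair, illustrating the pattern repeated throughout the diff below (a generic sketch, not an excerpt from any single file):

	# old (pre-ELF) spelling              # ELF spelling
	movl    4(ap),r0              ->      movl    4(%ap),%r0
	clrl    r0                    ->      clrl    %r0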
Diffstat (limited to 'lib')
-rw-r--r--  lib/csu/vax/Makefile  65
-rw-r--r--  lib/csu/vax/crt0.c  143
-rw-r--r--  lib/csu/vax/md_init.h  36
-rw-r--r--  lib/libc/arch/vax/SYS.h  8
-rw-r--r--  lib/libc/arch/vax/gen/_setjmp.S  42
-rw-r--r--  lib/libc/arch/vax/gen/fabs.S  6
-rw-r--r--  lib/libc/arch/vax/gen/ldexp.S  20
-rw-r--r--  lib/libc/arch/vax/gen/modf.S  14
-rw-r--r--  lib/libc/arch/vax/gen/setjmp.S  50
-rw-r--r--  lib/libc/arch/vax/gen/sigsetjmp.S  12
-rw-r--r--  lib/libc/arch/vax/gen/udiv.S  56
-rw-r--r--  lib/libc/arch/vax/gen/urem.S  58
-rw-r--r--  lib/libc/arch/vax/net/htonl.S  6
-rw-r--r--  lib/libc/arch/vax/net/htons.S  6
-rw-r--r--  lib/libc/arch/vax/net/ntohl.S  6
-rw-r--r--  lib/libc/arch/vax/net/ntohs.S  6
-rw-r--r--  lib/libc/arch/vax/stdlib/insque.S  4
-rw-r--r--  lib/libc/arch/vax/stdlib/remque.S  4
-rw-r--r--  lib/libc/arch/vax/string/bcmp.S  20
-rw-r--r--  lib/libc/arch/vax/string/bcopy.S  48
-rw-r--r--  lib/libc/arch/vax/string/bzero.S  14
-rw-r--r--  lib/libc/arch/vax/string/ffs.S  8
-rw-r--r--  lib/libc/arch/vax/string/index.S  16
-rw-r--r--  lib/libc/arch/vax/string/memcmp.S  32
-rw-r--r--  lib/libc/arch/vax/string/memcpy.S  58
-rw-r--r--  lib/libc/arch/vax/string/memmove.S  58
-rw-r--r--  lib/libc/arch/vax/string/memset.S  18
-rw-r--r--  lib/libc/arch/vax/sys/Ovfork.S  18
-rw-r--r--  lib/libc/arch/vax/sys/brk.S  12
-rw-r--r--  lib/libc/arch/vax/sys/cerror.S  10
-rw-r--r--  lib/libc/arch/vax/sys/exect.S  4
-rw-r--r--  lib/libc/arch/vax/sys/fork.S  6
-rw-r--r--  lib/libc/arch/vax/sys/sbrk.S  14
-rw-r--r--  lib/libc/arch/vax/sys/sigpending.S  6
-rw-r--r--  lib/libc/arch/vax/sys/sigprocmask.S  18
-rw-r--r--  lib/libc/arch/vax/sys/sigsuspend.S  8
-rw-r--r--  lib/libc/arch/vax/sys/syscall.S  10
-rw-r--r--  lib/libc/arch/vax/sys/tfork_thread.S  16
-rw-r--r--  lib/libm/arch/vax/n_argred.S  374
-rw-r--r--  lib/libm/arch/vax/n_atan2.S  176
-rw-r--r--  lib/libm/arch/vax/n_cbrt.S  62
-rw-r--r--  lib/libm/arch/vax/n_hypot.S  76
-rw-r--r--  lib/libm/arch/vax/n_infnan.S  6
-rw-r--r--  lib/libm/arch/vax/n_sincos.S  28
-rw-r--r--  lib/libm/arch/vax/n_sqrt.S  56
-rw-r--r--  lib/libm/arch/vax/n_support.S  174
-rw-r--r--  lib/libm/arch/vax/n_tan.S  28
-rw-r--r--  lib/librthread/arch/vax/cerror.S  10
-rw-r--r--  lib/librthread/rthread.c  4
-rw-r--r--  lib/libssl/crypto/arch/vax/bn_asm_vax.S  302
50 files changed, 1158 insertions, 1074 deletions
diff --git a/lib/csu/vax/Makefile b/lib/csu/vax/Makefile
index 06f01f3acd9..bc0a4d57364 100644
--- a/lib/csu/vax/Makefile
+++ b/lib/csu/vax/Makefile
@@ -1,31 +1,62 @@
-# $OpenBSD: Makefile,v 1.8 2011/11/08 10:37:09 guenther Exp $
+# $OpenBSD: Makefile,v 1.9 2013/07/05 21:10:50 miod Exp $
# from: @(#)Makefile 5.6 (Berkeley) 5/22/91
-CFLAGS+= -I${.CURDIR}/..
-OBJS= crt0.o gcrt0.o scrt0.o
-CLEANFILES+= core crt0.out gcrt0.out scrt0.out
+OBJS= crt0.o gcrt0.o crtbegin.o crtend.o crtbeginS.o crtendS.o
+SRCS= crt0.c crtbegin.c crtbeginS.c crtend.c crtendS.c
+
+ELFDIR= ${.CURDIR}/../common_elf
+.PATH: ${ELFDIR}
+CFLAGS+= -I${ELFDIR} -I${.CURDIR}
+
+#PICFLAG?=-fpic
+PICFLAG=
all: ${OBJS}
crt0.o: crt0.c
- ${COMPILE.c} -DCRT0 -UDYNAMIC ${.ALLSRC}
- ${LD} -x -r ${.TARGET} -o ${.TARGET}ut
- mv ${.TARGET}ut ${.TARGET}
+ @echo ${COMPILE.c} -DCRT0 ${.CURDIR}/crt0.c -o ${.TARGET}
+ @${COMPILE.c} -DCRT0 ${.CURDIR}/crt0.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
gcrt0.o: crt0.c
- ${COMPILE.c} -DMCRT0 ${.ALLSRC} -o ${.TARGET}
- ${LD} -x -r ${.TARGET} -o ${.TARGET}ut
- mv ${.TARGET}ut ${.TARGET}
-
-scrt0.o: crt0.c
- ${COMPILE.c} -DSCRT0 ${.ALLSRC} -o ${.TARGET}
- ${LD} -x -r ${.TARGET} -o ${.TARGET}ut
- mv ${.TARGET}ut ${.TARGET}
+ @echo ${COMPILE.c} -DMCRT0 ${.CURDIR}/crt0.c -o ${.TARGET}
+ @${COMPILE.c} -DMCRT0 ${.CURDIR}/crt0.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
+
+crtbegin.o: crtbegin.c
+ @echo ${COMPILE.c} ${ELFDIR}/crtbegin.c -o ${.TARGET}
+ @${COMPILE.c} ${ELFDIR}/crtbegin.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
+
+crtbeginS.o: crtbeginS.c
+ @echo ${COMPILE.c} ${PICFLAG} ${ELFDIR}/crtbeginS.c -o ${.TARGET}
+ @${COMPILE.c} ${PICFLAG} ${ELFDIR}/crtbeginS.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
+
+crtend.o: crtend.c
+ @echo ${COMPILE.c} ${ELFDIR}/crtend.c -o ${.TARGET}
+ @${COMPILE.c} ${ELFDIR}/crtend.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
+
+crtendS.o: crtendS.c
+ @echo ${COMPILE.c} ${PICFLAG} ${ELFDIR}/crtendS.c -o ${.TARGET}
+ @${COMPILE.c} ${PICFLAG} ${ELFDIR}/crtendS.c -o ${.TARGET}.o
+ @${LD} -x -r -o ${.TARGET} ${.TARGET}.o
+ @rm -f ${.TARGET}.o
realinstall:
- ${INSTALL} ${INSTALL_COPY} -S -o ${BINOWN} -g ${BINGRP} -m 444 ${OBJS} \
+ ${INSTALL} ${INSTALL_COPY} -o ${BINOWN} -g ${BINGRP} -m 444 ${OBJS} \
${DESTDIR}/usr/lib
-depend lint tags:
+afterdepend: .depend
+ @(TMP=/tmp/_depend$$$$; \
+ sed -e 's/^\([^\.]*\).o[ ]*:/\1.o g\1.o:/' \
+ < .depend > $$TMP; \
+ mv $$TMP .depend)
.include <bsd.prog.mk>
diff --git a/lib/csu/vax/crt0.c b/lib/csu/vax/crt0.c
index a0132be6f5d..b30622053e1 100644
--- a/lib/csu/vax/crt0.c
+++ b/lib/csu/vax/crt0.c
@@ -1,9 +1,13 @@
-/* $OpenBSD: crt0.c,v 1.9 2005/08/04 16:33:05 espie Exp $ */
-/* $NetBSD: crt0.c,v 1.1.2.1 1995/10/15 19:40:04 ragge Exp $ */
-/*
- * Copyright (c) 1993 Paul Kranenburg
+/* $OpenBSD: crt0.c,v 1.10 2013/07/05 21:10:50 miod Exp $ */
+/* $NetBSD: crt0.c,v 1.14 2002/05/16 19:38:21 wiz Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -14,62 +18,71 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Paul Kranenburg.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
-
-#include <sys/param.h>
#include <stdlib.h>
+#include <limits.h>
+
+#ifdef MCRT0
+extern void monstartup(u_long, u_long);
+extern void _mcleanup(void);
+extern unsigned char _etext, _eprol;
+#endif
+
+char **environ;
+char *__progname = "";
+char __progname_storage[1 + NAME_MAX];
+
+static char *_strrchr(const char *, char);
-#include "common.h"
+struct kframe {
+ int kargc;
+ char *kargv[1]; /* size depends on kargc */
+ char kargstr[1]; /* size varies */
+ char kenvstr[1]; /* size varies */
+};
-extern void start(void) asm("start");
+ asm(" .text");
+ asm(" .align 2");
+ asm(" .globl _start");
+ asm(" .type _start,@function");
+ asm(" _start:");
+ asm(" .word 0x0101"); /* two nops just in case */
+ asm(" pushl %sp"); /* no registers to save */
+ asm(" calls $1,__start"); /* do the real start */
+ asm(" halt");
void
-start()
+__start(struct kframe *kfp)
{
- struct kframe {
- int kargc;
- char *kargv[1]; /* size depends on kargc */
- char kargstr[1]; /* size varies */
- char kenvstr[1]; /* size varies */
- };
- /*
- * ALL REGISTER VARIABLES!!!
- */
- register struct kframe *kfp;
- register char **argv, *ap;
+ char **argv, *ap;
char *s;
-#ifdef lint
- kfp = 0;
- initcode = initcode = 0;
-#else /* not lint */
- /* make kfp point to the arguments on stack */
- asm ("movl sp, %0" : "=r" (kfp));
-#endif /* not lint */
argv = &kfp->kargv[0];
environ = argv + kfp->kargc + 1;
- if (ap = argv[0]) {
- if ((__progname = _strrchr(ap, '/')) == NULL)
- __progname = ap;
+ if ((__progname = argv[0]) != NULL) {
+ if ((__progname = _strrchr(__progname, '/')) == NULL)
+ __progname = argv[0];
else
- ++__progname;
+ __progname++;
for (s = __progname_storage; *__progname &&
s < &__progname_storage[sizeof __progname_storage - 1]; )
*s++ = *__progname++;
@@ -77,41 +90,29 @@ start()
__progname = __progname_storage;
}
-#ifdef DYNAMIC
- /* ld(1) convention: if DYNAMIC = 0 then statically linked */
-#ifdef stupid_gcc
- if (&_DYNAMIC)
-#else
- if ( ({volatile caddr_t x = (caddr_t)&_DYNAMIC; x; }) )
-#endif
- __load_rtld(&_DYNAMIC);
-#endif /* DYNAMIC */
-
-asm("eprol:");
-
#ifdef MCRT0
atexit(_mcleanup);
- monstartup((u_long)&eprol, (u_long)&etext);
+ monstartup((u_long)&_eprol, (u_long)&_etext);
#endif /* MCRT0 */
+ __init();
+
asm ("__callmain:"); /* Defined for the benefit of debuggers */
exit(main(kfp->kargc, argv, environ));
}
-#ifdef DYNAMIC
- asm(" ___syscall:");
- asm(" .word 0"); /* no registers to save */
- asm(" movl 4(ap), r0"); /* get syscall number */
- asm(" subl3 $1,(ap)+,(ap)"); /* n-1 args to syscall */
- asm(" chmk r0"); /* do system call */
- asm(" jcs 1f"); /* check error */
- asm(" ret"); /* return */
- asm(" 1: movl $-1, r0");
- asm(" ret");
-
-#endif /* DYNAMIC */
+static char *
+_strrchr(const char *p, char ch)
+{
+ char *save;
-#include "common.c"
+ for (save = NULL; ; ++p) {
+ if (*p == ch)
+ save = (char *)p;
+ if (*p == '\0')
+ return (save);
+ }
+}
#ifdef MCRT0
asm (" .text");
diff --git a/lib/csu/vax/md_init.h b/lib/csu/vax/md_init.h
new file mode 100644
index 00000000000..335c51fb5f5
--- /dev/null
+++ b/lib/csu/vax/md_init.h
@@ -0,0 +1,36 @@
+/* $OpenBSD: md_init.h,v 1.1 2013/07/05 21:10:50 miod Exp $ */
+
+/*
+ * Copyright (c) 2008 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define MD_SECT_CALL_FUNC(section, func) __asm ( \
+ "\t.section\t" #section ",\"ax\",@progbits\n" \
+ "\tcalls\t$0," #func "\n" \
+ "\t.previous")
+
+#define MD_SECTION_PROLOGUE(section, entry) __asm ( \
+ "\t.section\t" #section ",\"ax\",@progbits\n" \
+ "\t.globl\t" #entry "\n" \
+ "\t.type\t" #entry ",@function\n" \
+ "\t.align\t1\n" \
+ #entry ":\n" \
+ "\t.word 0x0000\n" /* entry mask */ \
+ "\t.previous")
+
+#define MD_SECTION_EPILOGUE(section) __asm( \
+ "\t.section\t" #section ",\"ax\",@progbits\n" \
+ "\tret\n" \
+ "\t.previous")
diff --git a/lib/libc/arch/vax/SYS.h b/lib/libc/arch/vax/SYS.h
index 73fc812200b..32404db4134 100644
--- a/lib/libc/arch/vax/SYS.h
+++ b/lib/libc/arch/vax/SYS.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: SYS.h,v 1.14 2008/05/21 20:39:30 miod Exp $ */
+/* $OpenBSD: SYS.h,v 1.15 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: SYS.h,v 1.4 1997/05/02 18:15:32 kleink Exp $ */
/*
@@ -44,13 +44,13 @@
#endif
#define __SYSCALL(p,x,y) \
- err: jmp _C_LABEL(_cerror); \
+ err: jmp _C_LABEL(__cerror); \
__ENTRY(p,x); \
__DO_SYSCALL(y); \
jcs err
#define __PSEUDO(p,x,y) \
- err: jmp _C_LABEL(_cerror); \
+ err: jmp _C_LABEL(__cerror); \
__ENTRY(p,x); \
__DO_SYSCALL(y); \
jcs err; \
@@ -85,4 +85,4 @@
__ENTRY(_thread_sys_,x)
#define SYSNAME(x) _CAT(__thread_sys_,x)
- .globl _C_LABEL(_cerror)
+ .globl _C_LABEL(__cerror)
diff --git a/lib/libc/arch/vax/gen/_setjmp.S b/lib/libc/arch/vax/gen/_setjmp.S
index a66d561f9b9..0b61bee4307 100644
--- a/lib/libc/arch/vax/gen/_setjmp.S
+++ b/lib/libc/arch/vax/gen/_setjmp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: _setjmp.S,v 1.5 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: _setjmp.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1980, 1993
* The Regents of the University of California. All rights reserved.
@@ -42,49 +42,49 @@
#include "DEFS.h"
ENTRY(_setjmp, 0)
- movl 4(ap),r0
- movl 12(fp),(r0) # save frame pointer of caller
- movl 16(fp),4(r0) # save pc of caller
- clrl r0
+ movl 4(%ap),%r0
+ movl 12(%fp),(%r0) # save frame pointer of caller
+ movl 16(%fp),4(%r0) # save pc of caller
+ clrl %r0
ret
ENTRY(_longjmp, 0)
- movl 8(ap),r0 # return(v)
- movl 4(ap),r1 # fetch buffer
- tstl (r1)
+ movl 8(%ap),%r0 # return(v)
+ movl 4(%ap),%r1 # fetch buffer
+ tstl (%r1)
beql botch
loop:
- bitw $1,6(fp) # r0 saved?
+ bitw $1,6(%fp) # r0 saved?
beql 1f
- movl r0,20(fp)
- bitw $2,6(fp) # was r1 saved?
+ movl %r0,20(%fp)
+ bitw $2,6(%fp) # was r1 saved?
beql 2f
- movl r1,24(fp)
+ movl %r1,24(%fp)
brb 2f
1:
- bitw $2,6(fp) # was r1 saved?
+ bitw $2,6(%fp) # was r1 saved?
beql 2f
- movl r1,20(fp)
+ movl %r1,20(%fp)
2:
- cmpl (r1),12(fp)
+ cmpl (%r1),12(%fp)
beql done
blssu botch
- movl $loop,16(fp)
+ movl $loop,16(%fp)
ret # pop another frame
done:
- cmpb *16(fp),reiins # returning to an "rei"?
+ cmpb *16(%fp),reiins # returning to an "rei"?
bneq 1f
- movab 3f,16(fp) # do return w/ psl-pc pop
+ movab 3f,16(%fp) # do return w/ psl-pc pop
brw 2f
1:
- movab 4f,16(fp) # do standard return
+ movab 4f,16(%fp) # do standard return
2:
ret # unwind stack before signals enabled
3:
- addl2 $8,sp # compensate for PSL-PC push
+ addl2 $8,%sp # compensate for PSL-PC push
4:
- jmp *4(r1) # done, return....
+ jmp *4(%r1) # done, return....
botch:
calls $0,_C_LABEL(longjmperror)
diff --git a/lib/libc/arch/vax/gen/fabs.S b/lib/libc/arch/vax/gen/fabs.S
index 6002cac61d7..3d654bd39cb 100644
--- a/lib/libc/arch/vax/gen/fabs.S
+++ b/lib/libc/arch/vax/gen/fabs.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: fabs.S,v 1.9 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: fabs.S,v 1.10 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -34,8 +34,8 @@
STRONG_ALIAS(fabsl,fabs)
ENTRY(fabs, 0)
- movd 4(ap),r0
+ movd 4(%ap),%r0
bgeq 1f
- mnegd r0,r0
+ mnegd %r0,%r0
1:
ret
diff --git a/lib/libc/arch/vax/gen/ldexp.S b/lib/libc/arch/vax/gen/ldexp.S
index 622e5f5385e..b55d2083dda 100644
--- a/lib/libc/arch/vax/gen/ldexp.S
+++ b/lib/libc/arch/vax/gen/ldexp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: ldexp.S,v 1.9 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: ldexp.S,v 1.10 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -54,24 +54,24 @@
STRONG_ALIAS(ldexpl,ldexp)
ENTRY(ldexp, R2)
- movd 4(ap),r0 /* fetch "value" */
- extzv $7,$8,r0,r2 /* r2 := biased exponent */
+ movd 4(%ap),%r0 /* fetch "value" */
+ extzv $7,$8,%r0,%r2 /* r2 := biased exponent */
jeql 1f /* if zero, done */
- addl2 12(ap),r2 /* r2 := new biased exponent */
+ addl2 12(%ap),%r2 /* r2 := new biased exponent */
jleq 2f /* if <= 0, underflow */
- cmpl r2,$256 /* otherwise check if too big */
+ cmpl %r2,$256 /* otherwise check if too big */
jgeq 3f /* jump if overflow */
- insv r2,$7,$8,r0 /* put exponent back in result */
+ insv %r2,$7,$8,%r0 /* put exponent back in result */
1:
ret
2:
- clrd r0
+ clrd %r0
jbr 1f
3:
- movd huge,r0 /* largest possible floating magnitude */
- jbc $15,4(ap),1f /* jump if argument was positive */
- mnegd r0,r0 /* if arg < 0, make result negative */
+ movd huge,%r0 /* largest possible floating magnitude */
+ jbc $15,4(%ap),1f /* jump if argument was positive */
+ mnegd %r0,%r0 /* if arg < 0, make result negative */
1:
movl $ ERANGE,_C_LABEL(errno)
ret
diff --git a/lib/libc/arch/vax/gen/modf.S b/lib/libc/arch/vax/gen/modf.S
index 1659059831f..5ec88d1eab1 100644
--- a/lib/libc/arch/vax/gen/modf.S
+++ b/lib/libc/arch/vax/gen/modf.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: modf.S,v 1.8 2011/07/08 22:28:33 martynas Exp $ */
+/* $OpenBSD: modf.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -39,16 +39,16 @@
#include "DEFS.h"
ENTRY(modf, R2)
- cvtdl 4(ap),r2
+ cvtdl 4(%ap),%r2
bvs 0f
- cvtld r2,*12(ap)
- subd3 *12(ap),4(ap),r0
+ cvtld %r2,*12(%ap)
+ subd3 *12(%ap),4(%ap),%r0
ret
0:
- emodd 4(ap),$0,$0f1.0,r2,r0
+ emodd 4(%ap),$0,$0f1.0,%r2,%r0
bvs 1f # integer overflow
- cvtld r2,*12(ap)
+ cvtld %r2,*12(%ap)
ret
1:
- subd3 r0,4(ap),*12(ap)
+ subd3 %r0,4(%ap),*12(%ap)
ret
diff --git a/lib/libc/arch/vax/gen/setjmp.S b/lib/libc/arch/vax/gen/setjmp.S
index 2903134f530..e390c1d07cd 100644
--- a/lib/libc/arch/vax/gen/setjmp.S
+++ b/lib/libc/arch/vax/gen/setjmp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: setjmp.S,v 1.8 2011/11/22 21:13:30 guenther Exp $ */
+/* $OpenBSD: setjmp.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -42,46 +42,46 @@
#include "SYS.h"
ENTRY(setjmp, R6)
- movl 4(ap),r6 # construct sigcontext
- subl2 $12,sp # space for current struct sigaltstack
- pushl sp # get current values
+ movl 4(%ap),%r6 # construct sigcontext
+ subl2 $12,%sp # space for current struct sigaltstack
+ pushl %sp # get current values
pushl $0 # no new values
calls $4,_C_LABEL(sigaltstack)# pop args plus signal stack value
- movl (sp)+,(r6)+ # save onsigstack status of caller
+ movl (%sp)+,(%r6)+ # save onsigstack status of caller
pushl $0
calls $1,_C_LABEL(sigblock) # get signal mask
- movl r0,(r6)+ # save signal mask of caller
- movl (ap),r0
- moval 4(ap)[r0],(r6)+ # save sp of caller
- movl 12(fp),(r6)+ # save frame pointer of caller
- movl 8(fp),(r6)+ # save argument pointer of caller
- movl 16(fp),(r6)+ # save pc of caller
- movpsl (r6) # save psl of caller
- movw 4(fp),(r6)
- clrl r0
+ movl %r0,(%r6)+ # save signal mask of caller
+ movl (%ap),%r0
+ moval 4(%ap)[%r0],(%r6)+ # save sp of caller
+ movl 12(%fp),(%r6)+ # save frame pointer of caller
+ movl 8(%fp),(%r6)+ # save argument pointer of caller
+ movl 16(%fp),(%r6)+ # save pc of caller
+ movpsl (%r6) # save psl of caller
+ movw 4(%fp),(%r6)
+ clrl %r0
ret
ENTRY(longjmp, R2)
- movl 8(ap),r0 # return(v)
- movl 4(ap),r1 # fetch buffer
- tstl 12(r1)
+ movl 8(%ap),%r0 # return(v)
+ movl 4(%ap),%r1 # fetch buffer
+ tstl 12(%r1)
beql botch
loop:
- cmpl 12(r1),fp # are we there yet?
+ cmpl 12(%r1),%fp # are we there yet?
beql done
blssu botch
- moval 20(fp),r2
- blbc 6(fp),1f # was r0 saved?
- movl r0,(r2)+
+ moval 20(%fp),%r2
+ blbc 6(%fp),1f # was r0 saved?
+ movl %r0,(%r2)+
1:
- bbc $1,6(fp),2f # was r1 saved?
- movl r1,(r2)
+ bbc $1,6(%fp),2f # was r1 saved?
+ movl %r1,(%r2)
2:
- movl $loop,16(fp)
+ movl $loop,16(%fp)
ret # pop another frame
done:
- pushl r1 # pointer to sigcontext
+ pushl %r1 # pointer to sigcontext
calls $1,_C_LABEL(sigreturn) # restore previous context
# we should never return
botch:
diff --git a/lib/libc/arch/vax/gen/sigsetjmp.S b/lib/libc/arch/vax/gen/sigsetjmp.S
index cf0d0dbcdba..e235df90067 100644
--- a/lib/libc/arch/vax/gen/sigsetjmp.S
+++ b/lib/libc/arch/vax/gen/sigsetjmp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: sigsetjmp.S,v 1.5 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: sigsetjmp.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993, 1995
* The Regents of the University of California. All rights reserved.
@@ -44,16 +44,16 @@
#include <machine/setjmp.h>
ENTRY(sigsetjmp, R6)
- movl 4(ap),r0 # get env pointer
- movl 8(ap),(_JBLEN*4)(r0) # save "savemask"
- tstl 8(ap) # do saving of signal mask?
+ movl 4(%ap),%r0 # get env pointer
+ movl 8(%ap),(_JBLEN*4)(%r0) # save "savemask"
+ tstl 8(%ap) # do saving of signal mask?
beql L1
jmp _C_LABEL(setjmp)+2 # yep, do full setjmp
L1: jmp _C_LABEL(_setjmp)+2 # nope, skip to _setjmp
ENTRY(siglongjmp, 0)
- movl 4(ap),r0 # get env pointer
- tstl (_JBLEN*4)(r0) # test if "savemask" was set
+ movl 4(%ap),%r0 # get env pointer
+ tstl (_JBLEN*4)(%r0) # test if "savemask" was set
beql L2
jmp _C_LABEL(longjmp)+2 # yep, do full longjmp
L2: jmp _C_LABEL(_longjmp)+2 # nope, skip to _longjmp
diff --git a/lib/libc/arch/vax/gen/udiv.S b/lib/libc/arch/vax/gen/udiv.S
index 337a205ce1a..d469016a734 100644
--- a/lib/libc/arch/vax/gen/udiv.S
+++ b/lib/libc/arch/vax/gen/udiv.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: udiv.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: udiv.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
@@ -39,50 +39,58 @@
#include "DEFS.h"
-#define DIVIDEND 4(ap)
-#define DIVISOR 8(ap)
+#define DIVIDEND 4(%ap)
+#define DIVISOR 8(%ap)
+#ifdef __ELF__
+ASENTRY(__udiv, R2)
+#else
ASENTRY(udiv,R2)
- movl DIVISOR,r2
+#endif
+ movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
- movl DIVIDEND,r0
+ movl DIVIDEND,%r0
jlss Lhard # big dividend: extended division
- divl2 r2,r0 # small divisor and dividend: signed division
+ divl2 %r2,%r0 # small divisor and dividend: signed division
ret
Lhard:
- clrl r1
- ediv r2,r0,r0,r1
+ clrl %r1
+ ediv %r2,%r0,%r0,%r1
ret
Leasy:
- cmpl DIVIDEND,r2
+ cmpl DIVIDEND,%r2
jgequ Lone # if dividend is as big or bigger, return 1
- clrl r0 # else return 0
+ clrl %r0 # else return 0
ret
Lone:
- movl $1,r0
+ movl $1,%r0
ret
+#ifdef __ELF__
+ASENTRY(__audiv, R2|R3)
+#else
ASENTRY(audiv,R2|R3)
- movl DIVIDEND,r3
- movl DIVISOR,r2
+#endif
+ movl DIVIDEND,%r3
+ movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
- movl (r3),r0
+ movl (%r3),%r0
jlss La_hard # big dividend: extended division
- divl2 r2,r0 # small divisor and dividend: signed division
- movl r0,(r3) # leave the value of the assignment in r0
+ divl2 %r2,%r0 # small divisor and dividend: signed division
+ movl %r0,(%r3) # leave the value of the assignment in r0
ret
La_hard:
- clrl r1
- ediv r2,r0,r0,r1
- movl r0,(r3)
+ clrl %r1
+ ediv %r2,%r0,%r0,%r1
+ movl %r0,(%r3)
ret
La_easy:
- cmpl (r3),r2
+ cmpl (%r3),%r2
jgequ La_one # if dividend is as big or bigger, return 1
- clrl r0 # else return 0
- clrl (r3)
+ clrl %r0 # else return 0
+ clrl (%r3)
ret
La_one:
- movl $1,r0
- movl r0,(r3)
+ movl $1,%r0
+ movl %r0,(%r3)
ret
diff --git a/lib/libc/arch/vax/gen/urem.S b/lib/libc/arch/vax/gen/urem.S
index 6b7f3834d2f..608564ad128 100644
--- a/lib/libc/arch/vax/gen/urem.S
+++ b/lib/libc/arch/vax/gen/urem.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: urem.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: urem.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
@@ -39,50 +39,58 @@
* aurem() takes a pointer to a dividend and an ordinary divisor.
*/
-#define DIVIDEND 4(ap)
-#define DIVISOR 8(ap)
+#define DIVIDEND 4(%ap)
+#define DIVISOR 8(%ap)
+#ifdef __ELF__
+ASENTRY(__urem, R2)
+#else
ASENTRY(urem,R2)
- movl DIVISOR,r2
+#endif
+ movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
- movl DIVIDEND,r0
+ movl DIVIDEND,%r0
jlss Lhard # big dividend: need extended division
- divl3 r2,r0,r1 # small divisor and dividend: signed modulus
- mull2 r2,r1
- subl2 r1,r0
+ divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
+ mull2 %r2,%r1
+ subl2 %r1,%r0
ret
Lhard:
- clrl r1
- ediv r2,r0,r1,r0
+ clrl %r1
+ ediv %r2,%r0,%r1,%r0
ret
Leasy:
- subl3 r2,DIVIDEND,r0
+ subl3 %r2,DIVIDEND,%r0
jcc Ldifference # if divisor goes in once, return difference
- movl DIVIDEND,r0 # if divisor is bigger, return dividend
+ movl DIVIDEND,%r0 # if divisor is bigger, return dividend
Ldifference:
ret
+#ifdef __ELF__
+ASENTRY(__aurem, R2|R3)
+#else
ASENTRY(aurem,R2|R3)
- movl DIVIDEND,r3
- movl DIVISOR,r2
+#endif
+ movl DIVIDEND,%r3
+ movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
- movl (r3),r0
+ movl (%r3),%r0
jlss La_hard # big dividend: need extended division
- divl3 r2,r0,r1 # small divisor and dividend: signed modulus
- mull2 r2,r1
- subl2 r1,r0
- movl r0,(r3) # leave the value of the assignment in r0
+ divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
+ mull2 %r2,%r1
+ subl2 %r1,%r0
+ movl %r0,(%r3) # leave the value of the assignment in r0
ret
La_hard:
- clrl r1
- ediv r2,r0,r1,r0
- movl r0,(r3)
+ clrl %r1
+ ediv %r2,%r0,%r1,%r0
+ movl %r0,(%r3)
ret
La_easy:
- subl3 r2,(r3),r0
+ subl3 %r2,(%r3),%r0
jcs La_dividend # if divisor is bigger, leave dividend alone
- movl r0,(r3) # if divisor goes in once, store difference
+ movl %r0,(%r3) # if divisor goes in once, store difference
ret
La_dividend:
- movl (r3),r0
+ movl (%r3),%r0
ret
diff --git a/lib/libc/arch/vax/net/htonl.S b/lib/libc/arch/vax/net/htonl.S
index 4ec561802b7..71ca9fcc3d9 100644
--- a/lib/libc/arch/vax/net/htonl.S
+++ b/lib/libc/arch/vax/net/htonl.S
@@ -32,7 +32,7 @@
#include "DEFS.h"
ENTRY(htonl, 0)
- rotl $-8,4(ap),r0
- insv r0,$16,$8,r0
- movb 7(ap),r0
+ rotl $-8,4(%ap),%r0
+ insv %r0,$16,$8,%r0
+ movb 7(%ap),%r0
ret
diff --git a/lib/libc/arch/vax/net/htons.S b/lib/libc/arch/vax/net/htons.S
index a37513d2872..c5b453d0c9f 100644
--- a/lib/libc/arch/vax/net/htons.S
+++ b/lib/libc/arch/vax/net/htons.S
@@ -32,7 +32,7 @@
#include "DEFS.h"
ENTRY(htons, 0)
- rotl $8,4(ap),r0
- movb 5(ap),r0
- movzwl r0,r0
+ rotl $8,4(%ap),%r0
+ movb 5(%ap),%r0
+ movzwl %r0,%r0
ret
diff --git a/lib/libc/arch/vax/net/ntohl.S b/lib/libc/arch/vax/net/ntohl.S
index b41d70b2421..1e862b35ded 100644
--- a/lib/libc/arch/vax/net/ntohl.S
+++ b/lib/libc/arch/vax/net/ntohl.S
@@ -32,7 +32,7 @@
#include "DEFS.h"
ENTRY(ntohl, 0)
- rotl $-8,4(ap),r0
- insv r0,$16,$8,r0
- movb 7(ap),r0
+ rotl $-8,4(%ap),%r0
+ insv %r0,$16,$8,%r0
+ movb 7(%ap),%r0
ret
diff --git a/lib/libc/arch/vax/net/ntohs.S b/lib/libc/arch/vax/net/ntohs.S
index 52230bb5ef9..b9554dfa819 100644
--- a/lib/libc/arch/vax/net/ntohs.S
+++ b/lib/libc/arch/vax/net/ntohs.S
@@ -32,7 +32,7 @@
#include "DEFS.h"
ENTRY(ntohs, 0)
- rotl $8,4(ap),r0
- movb 5(ap),r0
- movzwl r0,r0
+ rotl $8,4(%ap),%r0
+ movb 5(%ap),%r0
+ movzwl %r0,%r0
ret
diff --git a/lib/libc/arch/vax/stdlib/insque.S b/lib/libc/arch/vax/stdlib/insque.S
index e25c69b2ed9..ba606ad5287 100644
--- a/lib/libc/arch/vax/stdlib/insque.S
+++ b/lib/libc/arch/vax/stdlib/insque.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: insque.S,v 1.3 2009/10/28 06:49:55 deraadt Exp $ */
+/* $OpenBSD: insque.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
@@ -36,5 +36,5 @@
#include "DEFS.h"
ENTRY(insque, 0)
- insque *4(ap), *8(ap)
+ insque *4(%ap), *8(%ap)
ret
diff --git a/lib/libc/arch/vax/stdlib/remque.S b/lib/libc/arch/vax/stdlib/remque.S
index a98ba3a4802..e22acfdf807 100644
--- a/lib/libc/arch/vax/stdlib/remque.S
+++ b/lib/libc/arch/vax/stdlib/remque.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: remque.S,v 1.3 2009/10/28 06:49:55 deraadt Exp $ */
+/* $OpenBSD: remque.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
@@ -36,5 +36,5 @@
#include "DEFS.h"
ENTRY(remque, 0)
- remque *4(ap),r0
+ remque *4(%ap),%r0
ret
diff --git a/lib/libc/arch/vax/string/bcmp.S b/lib/libc/arch/vax/string/bcmp.S
index 14711f417f5..ccb63da20f2 100644
--- a/lib/libc/arch/vax/string/bcmp.S
+++ b/lib/libc/arch/vax/string/bcmp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: bcmp.S,v 1.4 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: bcmp.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -34,23 +34,23 @@
/* still, this is four times faster than the generic C version on a uvax2 */
ENTRY(bcmp, 0)
- movl 12(ap),r0 # r0 = n
+ movl 12(%ap),%r0 # r0 = n
jeql 9f
- movq 4(ap),r1 # r1 = s1, r2 = s2
- ashl $-2,r0,r3 # convert len to # of long words
+ movq 4(%ap),%r1 # r1 = s1, r2 = s2
+ ashl $-2,%r0,%r3 # convert len to # of long words
jeql 2f
1:
- cmpl (r1)+,(r2)+ # no "cmpq" alas, so four bytes at a time
+ cmpl (%r1)+,(%r2)+ # no "cmpq" alas, so four bytes at a time
jneq 9f
- sobgtr r3,1b
+ sobgtr %r3,1b
2:
- bicl3 $-4,r0,r3 # handle at most 3 extra bytes
+ bicl3 $-4,%r0,%r3 # handle at most 3 extra bytes
jeql 8f
3:
- cmpb (r1)+,(r2)+
+ cmpb (%r1)+,(%r2)+
jneq 9f
- sobgtr r3,3b
+ sobgtr %r3,3b
8:
- clrl r0 # we have a match!
+ clrl %r0 # we have a match!
9:
ret
diff --git a/lib/libc/arch/vax/string/bcopy.S b/lib/libc/arch/vax/string/bcopy.S
index dfcae2a03e2..5af2edea373 100644
--- a/lib/libc/arch/vax/string/bcopy.S
+++ b/lib/libc/arch/vax/string/bcopy.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: bcopy.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: bcopy.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,39 +33,39 @@
#include "DEFS.h"
ENTRY(bcopy, R2|R3|R4|R5|R6)
- movl 4(ap),r1
- movl 8(ap),r3
- movl 12(ap),r6
- cmpl r1,r3
+ movl 4(%ap),%r1
+ movl 8(%ap),%r3
+ movl 12(%ap),%r6
+ cmpl %r1,%r3
bgtr 2f # normal forward case
blss 3f # overlapping, must do backwards
ret # equal, nothing to do
1:
- subl2 r0,r6
- movc3 r0,(r1),(r3)
+ subl2 %r0,%r6
+ movc3 %r0,(%r1),(%r3)
2:
- movzwl $65535,r0
- cmpl r6,r0
+ movzwl $65535,%r0
+ cmpl %r6,%r0
jgtr 1b
- movc3 r6,(r1),(r3)
+ movc3 %r6,(%r1),(%r3)
ret
3:
- addl2 r6,r1
- addl2 r6,r3
- movzwl $65535,r0
+ addl2 %r6,%r1
+ addl2 %r6,%r3
+ movzwl $65535,%r0
jbr 5f
4:
- subl2 r0,r6
- subl2 r0,r1
- subl2 r0,r3
- movc3 r0,(r1),(r3)
- movzwl $65535,r0
- subl2 r0,r1
- subl2 r0,r3
+ subl2 %r0,%r6
+ subl2 %r0,%r1
+ subl2 %r0,%r3
+ movc3 %r0,(%r1),(%r3)
+ movzwl $65535,%r0
+ subl2 %r0,%r1
+ subl2 %r0,%r3
5:
- cmpl r6,r0
+ cmpl %r6,%r0
jgtr 4b
- subl2 r6,r1
- subl2 r6,r3
- movc3 r6,(r1),(r3)
+ subl2 %r6,%r1
+ subl2 %r6,%r3
+ movc3 %r6,(%r1),(%r3)
ret
diff --git a/lib/libc/arch/vax/string/bzero.S b/lib/libc/arch/vax/string/bzero.S
index 73cfd89537d..55febbe11b5 100644
--- a/lib/libc/arch/vax/string/bzero.S
+++ b/lib/libc/arch/vax/string/bzero.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: bzero.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: bzero.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,14 +33,14 @@
#include "DEFS.h"
ENTRY(bzero, R2|R3|R4|R5)
- movl 4(ap),r3
+ movl 4(%ap),%r3
jbr 2f
1:
- subl2 r0,8(ap)
- movc5 $0,(r3),$0,r0,(r3)
+ subl2 %r0,8(%ap)
+ movc5 $0,(%r3),$0,%r0,(%r3)
2:
- movzwl $65535,r0
- cmpl 8(ap),r0
+ movzwl $65535,%r0
+ cmpl 8(%ap),%r0
jgtr 1b
- movc5 $0,(r3),$0,8(ap),(r3)
+ movc5 $0,(%r3),$0,8(%ap),(%r3)
ret
diff --git a/lib/libc/arch/vax/string/ffs.S b/lib/libc/arch/vax/string/ffs.S
index 50b4669a6ad..e2caa68c607 100644
--- a/lib/libc/arch/vax/string/ffs.S
+++ b/lib/libc/arch/vax/string/ffs.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: ffs.S,v 1.4 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: ffs.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,9 +33,9 @@
#include "DEFS.h"
ENTRY(ffs, 0)
- ffs $0,$32,4(ap),r0
+ ffs $0,$32,4(%ap),%r0
bneq 1f
- mnegl $1,r0
+ mnegl $1,%r0
1:
- incl r0
+ incl %r0
ret
diff --git a/lib/libc/arch/vax/string/index.S b/lib/libc/arch/vax/string/index.S
index 0d4a64724c5..1e578da4cc5 100644
--- a/lib/libc/arch/vax/string/index.S
+++ b/lib/libc/arch/vax/string/index.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: index.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: index.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1980, 1993
* The Regents of the University of California. All rights reserved.
@@ -41,19 +41,19 @@
/* Alas not quite twice as fast as the generic C version on a uvax2 */
ENTRY(index, 0)
- movq 4(ap),r0 # r0 = cp; r1 = c
- tstb r1 # special case, looking for '\0'
+ movq 4(%ap),%r0 # r0 = cp; r1 = c
+ tstb %r1 # special case, looking for '\0'
jeql 3f
1:
- cmpb (r0),r1
+ cmpb (%r0),%r1
jeql 2f
- tstb (r0)+
+ tstb (%r0)+
jneq 1b
- clrl r0 # return NULL if no match
+ clrl %r0 # return NULL if no match
2:
ret
3:
- tstb (r0)+
+ tstb (%r0)+
jneq 3b
- decl r0
+ decl %r0
jbr 2b
diff --git a/lib/libc/arch/vax/string/memcmp.S b/lib/libc/arch/vax/string/memcmp.S
index c863315884f..6a794547d5d 100644
--- a/lib/libc/arch/vax/string/memcmp.S
+++ b/lib/libc/arch/vax/string/memcmp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: memcmp.S,v 1.4 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: memcmp.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,33 +33,33 @@
#include "DEFS.h"
ENTRY(memcmp, 0)
- movl 12(ap),r0
+ movl 12(%ap),%r0
jeql 9f
- movq 4(ap),r1
- ashl $-2,r0,r3 # convert len to long words
+ movq 4(%ap),%r1
+ ashl $-2,%r0,%r3 # convert len to long words
jeql 2f
1:
- cmpl (r1)+,(r2)+ # no "cmpq" alas
+ cmpl (%r1)+,(%r2)+ # no "cmpq" alas
jneq 7f
- sobgtr r3,1b
+ sobgtr %r3,1b
2:
- bicl3 $-4,r0,r3 # handle at most 3 extra bytes
+ bicl3 $-4,%r0,%r3 # handle at most 3 extra bytes
jeql 4f
3:
- cmpb (r1)+,(r2)+
+ cmpb (%r1)+,(%r2)+
jneq 8f
- sobgtr r3,3b
+ sobgtr %r3,3b
4:
- clrl r0 # we had a match
+ clrl %r0 # we had a match
ret
7: # backup, and do a byte compare
- tstl -(r1)
- tstl -(r2)
- movl $4,r3
+ tstl -(%r1)
+ tstl -(%r2)
+ movl $4,%r3
jbr 3b
8:
- movzbl -(r1),r3
- movzbl -(r2),r4
- subl3 r4,r3,r0
+ movzbl -(%r1),%r3
+ movzbl -(%r2),%r4
+ subl3 %r4,%r3,%r0
9:
ret
diff --git a/lib/libc/arch/vax/string/memcpy.S b/lib/libc/arch/vax/string/memcpy.S
index 80794a49349..7fa0fa30cc6 100644
--- a/lib/libc/arch/vax/string/memcpy.S
+++ b/lib/libc/arch/vax/string/memcpy.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: memcpy.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: memcpy.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@@ -39,48 +39,48 @@
#include "DEFS.h"
ENTRY(memcpy, R2|R3|R4|R5)
- movzwl $65535,r0 /* r0 = 64K (needed below) */
- movq 8(ap),r1 /* r1 = src, r2 = length */
- movl 4(ap),r3 /* r3 = dst */
- cmpl r1,r3
+ movzwl $65535,%r0 /* r0 = 64K (needed below) */
+ movq 8(%ap),%r1 /* r1 = src, r2 = length */
+ movl 4(%ap),%r3 /* r3 = dst */
+ cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
- addl2 r2,r1 /* overlaps iff src<dst but src+len>dst */
- cmpl r1,r3
+ addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
+ cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
- subl2 r2,r1
+ subl2 %r2,%r1
1: /* move forward */
- cmpl r2,r0
+ cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
- movc3 r2,(r1),(r3) /* move it all */
+ movc3 %r2,(%r1),(%r3) /* move it all */
2:
- movl 4(ap),r0 /* return original dst */
+ movl 4(%ap),%r0 /* return original dst */
ret
3:
- subl2 r0,12(ap) /* adjust length by 64K */
- movc3 r0,(r1),(r3) /* move 64K */
- movl 12(ap),r2
- decw r0 /* from 0 to 65535 */
+ subl2 %r0,12(%ap) /* adjust length by 64K */
+ movc3 %r0,(%r1),(%r3) /* move 64K */
+ movl 12(%ap),%r2
+ decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
- addl2 r2,r3
+ addl2 %r2,%r3
5:
- cmpl r2,r0
+ cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
- subl2 r2,r1
- subl2 r2,r3
- movc3 r2,(r1),(r3) /* move it all */
- movl 4(ap),r0 /* return original dst */
+ subl2 %r2,%r1
+ subl2 %r2,%r3
+ movc3 %r2,(%r1),(%r3) /* move it all */
+ movl 4(%ap),%r0 /* return original dst */
ret
6:
- subl2 r0,12(ap) /* adjust length by 64K */
- subl2 r0,r1
- subl2 r0,r3
- movc3 r0,(r1),(r3) /* move 64K */
- movl 12(ap),r2
- decw r0
- subl2 r0,r1
- subl2 r0,r3
+ subl2 %r0,12(%ap) /* adjust length by 64K */
+ subl2 %r0,%r1
+ subl2 %r0,%r3
+ movc3 %r0,(%r1),(%r3) /* move 64K */
+ movl 12(%ap),%r2
+ decw %r0
+ subl2 %r0,%r1
+ subl2 %r0,%r3
brb 5b
diff --git a/lib/libc/arch/vax/string/memmove.S b/lib/libc/arch/vax/string/memmove.S
index af138f3b6e5..86513fdbba9 100644
--- a/lib/libc/arch/vax/string/memmove.S
+++ b/lib/libc/arch/vax/string/memmove.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: memmove.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: memmove.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@@ -39,48 +39,48 @@
#include "DEFS.h"
ENTRY(memmove, R2|R3|R4|R5)
- movzwl $65535,r0 /* r0 = 64K (needed below) */
- movq 8(ap),r1 /* r1 = src, r2 = length */
- movl 4(ap),r3 /* r3 = dst */
- cmpl r1,r3
+ movzwl $65535,%r0 /* r0 = 64K (needed below) */
+ movq 8(%ap),%r1 /* r1 = src, r2 = length */
+ movl 4(%ap),%r3 /* r3 = dst */
+ cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
- addl2 r2,r1 /* overlaps iff src<dst but src+len>dst */
- cmpl r1,r3
+ addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
+ cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
- subl2 r2,r1
+ subl2 %r2,%r1
1: /* move forward */
- cmpl r2,r0
+ cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
- movc3 r2,(r1),(r3) /* move it all */
+ movc3 %r2,(%r1),(%r3) /* move it all */
2:
- movl 4(ap),r0 /* return original dst */
+ movl 4(%ap),%r0 /* return original dst */
ret
3:
- subl2 r0,12(ap) /* adjust length by 64K */
- movc3 r0,(r1),(r3) /* move 64K */
- movl 12(ap),r2
- decw r0 /* from 0 to 65535 */
+ subl2 %r0,12(%ap) /* adjust length by 64K */
+ movc3 %r0,(%r1),(%r3) /* move 64K */
+ movl 12(%ap),%r2
+ decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
- addl2 r2,r3
+ addl2 %r2,%r3
5:
- cmpl r2,r0
+ cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
- subl2 r2,r1
- subl2 r2,r3
- movc3 r2,(r1),(r3) /* move it all */
- movl 4(ap),r0 /* return original dst */
+ subl2 %r2,%r1
+ subl2 %r2,%r3
+ movc3 %r2,(%r1),(%r3) /* move it all */
+ movl 4(%ap),%r0 /* return original dst */
ret
6:
- subl2 r0,12(ap) /* adjust length by 64K */
- subl2 r0,r1
- subl2 r0,r3
- movc3 r0,(r1),(r3) /* move 64K */
- movl 12(ap),r2
- decw r0
- subl2 r0,r1
- subl2 r0,r3
+ subl2 %r0,12(%ap) /* adjust length by 64K */
+ subl2 %r0,%r1
+ subl2 %r0,%r3
+ movc3 %r0,(%r1),(%r3) /* move 64K */
+ movl 12(%ap),%r2
+ decw %r0
+ subl2 %r0,%r1
+ subl2 %r0,%r3
brb 5b
diff --git a/lib/libc/arch/vax/string/memset.S b/lib/libc/arch/vax/string/memset.S
index a7e73377070..b861477884f 100644
--- a/lib/libc/arch/vax/string/memset.S
+++ b/lib/libc/arch/vax/string/memset.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: memset.S,v 1.5 2005/08/07 16:40:15 espie Exp $ */
+/* $OpenBSD: memset.S,v 1.6 2013/07/05 21:10:50 miod Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,16 +33,16 @@
#include "DEFS.h"
ENTRY(memset, R2|R3|R4|R5)
- movl 4(ap),r3
+ movl 4(%ap),%r3
1:
- movzwl $65535,r0
- movq 8(ap),r1
- cmpl r2,r0
+ movzwl $65535,%r0
+ movq 8(%ap),%r1
+ cmpl %r2,%r0
jgtru 2f
- movc5 $0,(r3),r1,r2,(r3)
- movl r1,r0
+ movc5 $0,(%r3),%r1,%r2,(%r3)
+ movl %r1,%r0
ret
2:
- subl2 r0,12(ap)
- movc5 $0,(r3),r1,r0,(r3)
+ subl2 %r0,12(%ap)
+ movc5 $0,(%r3),%r1,%r0,(%r3)
jbr 1b
diff --git a/lib/libc/arch/vax/sys/Ovfork.S b/lib/libc/arch/vax/sys/Ovfork.S
index f2ab5a01f8a..cadbff6cb76 100644
--- a/lib/libc/arch/vax/sys/Ovfork.S
+++ b/lib/libc/arch/vax/sys/Ovfork.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: Ovfork.S,v 1.7 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: Ovfork.S,v 1.8 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -47,18 +47,18 @@
*/
SYSENTRY(vfork)
- movl 16(fp),r2 # save return address before we smash it
- movab here,16(fp)
+ movl 16(%fp),%r2 # save return address before we smash it
+ movab here,16(%fp)
ret
here:
chmk $ SYS_vfork
bcs err # if failed, set errno and return -1
/* this next trick is Chris Torek's fault */
- mnegl r1,r1 # r1 = 0xffffffff if child, 0 if parent
- bicl2 r1,r0 # r0 &= ~r1, i.e., 0 if child, else unchanged
- jmp (r2)
+ mnegl %r1,%r1 # r1 = 0xffffffff if child, 0 if parent
+ bicl2 %r1,%r0 # r0 &= ~r1, i.e., 0 if child, else unchanged
+ jmp (%r2)
err:
- movl r0,_C_LABEL(errno)
- mnegl $1,r0
- jmp (r2)
+ movl %r0,_C_LABEL(errno)
+ mnegl $1,%r0
+ jmp (%r2)
diff --git a/lib/libc/arch/vax/sys/brk.S b/lib/libc/arch/vax/sys/brk.S
index 33d5849dbb8..39a708d254c 100644
--- a/lib/libc/arch/vax/sys/brk.S
+++ b/lib/libc/arch/vax/sys/brk.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: brk.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: brk.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -33,14 +33,14 @@
.globl __curbrk
.globl _C_LABEL(minbrk)
ENTRY(brk, 0)
- cmpl _C_LABEL(minbrk),4(ap)
+ cmpl _C_LABEL(minbrk),4(%ap)
blequ ok
- movl _C_LABEL(minbrk),4(ap)
+ movl _C_LABEL(minbrk),4(%ap)
ok:
chmk $ SYS_break
jcs err
- movl 4(ap),__curbrk
- clrl r0
+ movl 4(%ap),__curbrk
+ clrl %r0
ret
err:
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
diff --git a/lib/libc/arch/vax/sys/cerror.S b/lib/libc/arch/vax/sys/cerror.S
index fea12994e2e..e838aec2b0c 100644
--- a/lib/libc/arch/vax/sys/cerror.S
+++ b/lib/libc/arch/vax/sys/cerror.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cerror.S,v 1.9 2011/04/09 15:45:17 deraadt Exp $ */
+/* $OpenBSD: cerror.S,v 1.10 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -30,11 +30,11 @@
#include "SYS.h"
-WEAK_ALIAS(_cerror,__cerror)
+WEAK_ALIAS(__cerror,___cerror)
.globl _C_LABEL(errno)
_C_LABEL(__cerror):
- movl r0,_C_LABEL(errno)
- mnegl $1,r0
- mnegl $1,r1
+ movl %r0,_C_LABEL(errno)
+ mnegl $1,%r0
+ mnegl $1,%r1
ret
diff --git a/lib/libc/arch/vax/sys/exect.S b/lib/libc/arch/vax/sys/exect.S
index c4776b87c6d..97832482a96 100644
--- a/lib/libc/arch/vax/sys/exect.S
+++ b/lib/libc/arch/vax/sys/exect.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: exect.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: exect.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -34,4 +34,4 @@
SYSENTRY(exect)
bispsw $PSL_T
chmk $ SYS_execve
- jmp _C_LABEL(_cerror) # exect(file, argv, env)
+ jmp _C_LABEL(__cerror) # exect(file, argv, env)
diff --git a/lib/libc/arch/vax/sys/fork.S b/lib/libc/arch/vax/sys/fork.S
index 7f0a08e100a..449f185f73d 100644
--- a/lib/libc/arch/vax/sys/fork.S
+++ b/lib/libc/arch/vax/sys/fork.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: fork.S,v 1.4 2005/08/07 16:40:16 espie Exp $ */
+/* $OpenBSD: fork.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -31,7 +31,7 @@
#include "SYS.h"
SYSCALL(fork)
- jlbc r1,1f # parent, since r1 == 0 in parent, 1 in child
- clrl r0
+ jlbc %r1,1f # parent, since r1 == 0 in parent, 1 in child
+ clrl %r0
1:
ret # pid = fork()
diff --git a/lib/libc/arch/vax/sys/sbrk.S b/lib/libc/arch/vax/sys/sbrk.S
index ded7c12f83f..dc6c2901ea0 100644
--- a/lib/libc/arch/vax/sys/sbrk.S
+++ b/lib/libc/arch/vax/sys/sbrk.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: sbrk.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: sbrk.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -43,14 +43,14 @@ __curbrk:
.text
ENTRY(sbrk, R3)
- addl3 __curbrk,4(ap),-(sp)
+ addl3 __curbrk,4(%ap),-(%sp)
pushl $1
- movl ap,r3
- movl sp,ap
+ movl %ap,%r3
+ movl %sp,%ap
chmk $ SYS_break
jcs err
- movl __curbrk,r0
- addl2 4(r3),__curbrk
+ movl __curbrk,%r0
+ addl2 4(%r3),__curbrk
ret
err:
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
diff --git a/lib/libc/arch/vax/sys/sigpending.S b/lib/libc/arch/vax/sys/sigpending.S
index 41f468d82e4..b066d0918a7 100644
--- a/lib/libc/arch/vax/sys/sigpending.S
+++ b/lib/libc/arch/vax/sys/sigpending.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: sigpending.S,v 1.4 2005/08/07 16:40:16 espie Exp $ */
+/* $OpenBSD: sigpending.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
@@ -31,6 +31,6 @@
#include "SYS.h"
SYSCALL(sigpending)
- movl r0,*4(ap) # store old mask
- clrl r0
+ movl %r0,*4(%ap) # store old mask
+ clrl %r0
ret
diff --git a/lib/libc/arch/vax/sys/sigprocmask.S b/lib/libc/arch/vax/sys/sigprocmask.S
index 388bb98fcb2..40a6827cf6b 100644
--- a/lib/libc/arch/vax/sys/sigprocmask.S
+++ b/lib/libc/arch/vax/sys/sigprocmask.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: sigprocmask.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: sigprocmask.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
@@ -31,20 +31,20 @@
#include "SYS.h"
err:
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
SYSENTRY(sigprocmask)
- tstl 8(ap) # check new sigset pointer
+ tstl 8(%ap) # check new sigset pointer
bneq 1f # if not null, indirect
-/* movl $0,8(ap) # null mask pointer: block empty set */
- movl $1,4(ap) # SIG_BLOCK
+/* movl $0,8(%ap) # null mask pointer: block empty set */
+ movl $1,4(%ap) # SIG_BLOCK
jbr 2f
-1: movl *8(ap),8(ap) # indirect to new mask arg
+1: movl *8(%ap),8(%ap) # indirect to new mask arg
2: chmk $ SYS_sigprocmask
jcs err
- tstl 12(ap) # test if old mask requested
+ tstl 12(%ap) # test if old mask requested
beql out
- movl r0,*12(ap) # store old mask
+ movl %r0,*12(%ap) # store old mask
out:
- clrl r0
+ clrl %r0
ret
diff --git a/lib/libc/arch/vax/sys/sigsuspend.S b/lib/libc/arch/vax/sys/sigsuspend.S
index 7f2bd086459..9ebd1c76e54 100644
--- a/lib/libc/arch/vax/sys/sigsuspend.S
+++ b/lib/libc/arch/vax/sys/sigsuspend.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: sigsuspend.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: sigsuspend.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
@@ -31,11 +31,11 @@
#include "SYS.h"
err:
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
SYSENTRY(sigsuspend)
- movl *4(ap),4(ap) # indirect to mask arg
+ movl *4(%ap),4(%ap) # indirect to mask arg
chmk $ SYS_sigsuspend
jcs err
- clrl r0 # shouldnt happen
+ clrl %r0 # shouldnt happen
ret
diff --git a/lib/libc/arch/vax/sys/syscall.S b/lib/libc/arch/vax/sys/syscall.S
index c9a2a6d23c7..94859f0e39c 100644
--- a/lib/libc/arch/vax/sys/syscall.S
+++ b/lib/libc/arch/vax/sys/syscall.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: syscall.S,v 1.8 2008/05/21 20:39:32 miod Exp $ */
+/* $OpenBSD: syscall.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -36,10 +36,10 @@
#include "SYS.h"
SYSENTRY(syscall)
- movl 4(ap),r0 # syscall number
- subl3 $1,(ap)+,(ap) # one fewer arguments
- chmk r0
+ movl 4(%ap),%r0 # syscall number
+ subl3 $1,(%ap)+,(%ap) # one fewer arguments
+ chmk %r0
jcs 1f
ret
1:
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
diff --git a/lib/libc/arch/vax/sys/tfork_thread.S b/lib/libc/arch/vax/sys/tfork_thread.S
index 123c4a83641..d14fd84cfc6 100644
--- a/lib/libc/arch/vax/sys/tfork_thread.S
+++ b/lib/libc/arch/vax/sys/tfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: tfork_thread.S,v 1.2 2012/06/21 00:56:59 guenther Exp $ */
+/* $OpenBSD: tfork_thread.S,v 1.3 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -34,13 +34,13 @@ ENTRY(__tfork_thread, R2|R3)
/*
* Save thread creation arguments into registers.
*/
- movl 12(ap), r2 /* func */
- movl 16(ap), r3 /* arg */
+ movl 12(%ap), %r2 /* func */
+ movl 16(%ap), %r3 /* arg */
__DO_SYSCALL(__tfork)
jcs 9f
- cmpl r0, $0
+ cmpl %r0, $0
beql 1f
/*
@@ -54,9 +54,9 @@ ENTRY(__tfork_thread, R2|R3)
* Note that since we can not pass a register to calls, we need
* to waste 4 bytes of stack in every thread.
*/
- pushl r2 /* func */
- pushl r3 /* arg */
- calls $1, *4(sp) /* func */
+ pushl %r2 /* func */
+ pushl %r3 /* arg */
+ calls $1, *4(%sp) /* func */
__DO_SYSCALL(__threxit)
@@ -64,4 +64,4 @@ ENTRY(__tfork_thread, R2|R3)
/*
* system call failed.
*/
- jmp _C_LABEL(_cerror)
+ jmp _C_LABEL(__cerror)
diff --git a/lib/libm/arch/vax/n_argred.S b/lib/libm/arch/vax/n_argred.S
index 82010515b08..822486cbaa6 100644
--- a/lib/libm/arch/vax/n_argred.S
+++ b/lib/libm/arch/vax/n_argred.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_argred.S,v 1.3 2008/05/21 20:37:10 miod Exp $ */
+/* $OpenBSD: n_argred.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_argred.S,v 1.1 1995/10/10 23:40:21 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -50,11 +50,11 @@ _ALTENTRY(__libm_argred)
* Compare the argument with the largest possible that can
* be reduced by table lookup. r3 := |x| will be used in table_lookup .
*/
- movd r0,r3
+ movd %r0,%r3
bgeq abs1
- mnegd r3,r3
+ mnegd %r3,%r3
abs1:
- cmpd r3,$0d+4.55530934770520019583e+01
+ cmpd %r3,$0d+4.55530934770520019583e+01
blss small_arg
jsb trigred
rsb
@@ -72,51 +72,51 @@ _ALTENTRY(__libm_sincos)
/*
* Compensate for a cosine entry by adding one to the quadrant number.
*/
- addl2 r4,r0
+ addl2 %r4,%r0
/*
* Polyd clobbers r5-r0 ; save X in r7/r6 .
* This can be avoided by rewriting trigred .
*/
- movd r1,r6
+ movd %r1,%r6
/*
* Likewise, save alpha in r8 .
* This can be avoided by rewriting trigred .
*/
- movf r3,r8
+ movf %r3,%r8
/*
* Odd or even quadrant? cosine if odd, sine otherwise.
* Save floor(quadrant/2) in r9 ; it determines the final sign.
*/
- rotl $-1,r0,r9
+ rotl $-1,%r0,%r9
blss cosine
sine:
- muld2 r1,r1 # Xsq = X * X
- cmpw $0x2480,r1 # [zl] Xsq > 2^-56?
+ muld2 %r1,%r1 # Xsq = X * X
+ cmpw $0x2480,%r1 # [zl] Xsq > 2^-56?
blss 1f # [zl] yes, go ahead and do polyd
- clrq r1 # [zl] work around 11/780 FPA polyd bug
+ clrq %r1 # [zl] work around 11/780 FPA polyd bug
1:
- polyd r1,$7,sin_coef # Q = P(Xsq) , of deg 7
- mulf3 $0f3.0,r8,r4 # beta = 3 * alpha
- mulf2 r0,r4 # beta = Q * beta
- addf2 r8,r4 # beta = alpha + beta
- muld2 r6,r0 # S(X) = X * Q
-/* cvtfd r4,r4 ... r5 = 0 after a polyd. */
- addd2 r4,r0 # S(X) = beta + S(X)
- addd2 r6,r0 # S(X) = X + S(X)
+ polyd %r1,$7,sin_coef # Q = P(Xsq) , of deg 7
+ mulf3 $0f3.0,%r8,%r4 # beta = 3 * alpha
+ mulf2 %r0,%r4 # beta = Q * beta
+ addf2 %r8,%r4 # beta = alpha + beta
+ muld2 %r6,%r0 # S(X) = X * Q
+/* cvtfd %r4,%r4 ... r5 = 0 after a polyd. */
+ addd2 %r4,%r0 # S(X) = beta + S(X)
+ addd2 %r6,%r0 # S(X) = X + S(X)
brb done
cosine:
- muld2 r6,r6 # Xsq = X * X
+ muld2 %r6,%r6 # Xsq = X * X
beql zero_arg
- mulf2 r1,r8 # beta = X * alpha
- polyd r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
- subd3 r0,r8,r0 # beta = beta - Q
- subw2 $0x80,r6 # Xsq = Xsq / 2
- addd2 r0,r6 # Xsq = Xsq + beta
+ mulf2 %r1,%r8 # beta = X * alpha
+ polyd %r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
+ subd3 %r0,%r8,%r0 # beta = beta - Q
+ subw2 $0x80,%r6 # Xsq = Xsq / 2
+ addd2 %r0,%r6 # Xsq = Xsq + beta
zero_arg:
- subd3 r6,$0d1.0,r0 # C(X) = 1 - Xsq
+ subd3 %r6,$0d1.0,%r0 # C(X) = 1 - Xsq
done:
- blbc r9,even
- mnegd r0,r0
+ blbc %r9,even
+ mnegd %r0,%r0
even:
rsb
@@ -263,31 +263,31 @@ twoOverPi:
_ALIGN_TEXT
table_lookup:
- muld3 r3,twoOverPi,r0
- cvtrdl r0,r0 # n = nearest int to ((2/pi)*|x|) rnded
- mull3 $8,r0,r5
- subd2 leading(r5),r3 # p = (|x| - leading n*pi/2) exactly
- subd3 middle(r5),r3,r1 # q = (p - middle n*pi/2) rounded
- subd2 r1,r3 # r = (p - q)
- subd2 middle(r5),r3 # r = r - middle n*pi/2
- subd2 trailing(r5),r3 # r = r - trailing n*pi/2 rounded
+ muld3 %r3,twoOverPi,%r0
+ cvtrdl %r0,%r0 # n = nearest int to ((2/pi)*|x|) rnded
+ mull3 $8,%r0,%r5
+ subd2 leading(%r5),%r3 # p = (|x| - leading n*pi/2) exactly
+ subd3 middle(%r5),%r3,%r1 # q = (p - middle n*pi/2) rounded
+ subd2 %r1,%r3 # r = (p - q)
+ subd2 middle(%r5),%r3 # r = r - middle n*pi/2
+ subd2 trailing(%r5),%r3 # r = r - trailing n*pi/2 rounded
/*
* If the original argument was negative,
* negate the reduce argument and
* adjust the octant/quadrant number.
*/
- tstw 4(ap)
+ tstw 4(%ap)
bgeq abs2
- mnegf r1,r1
- mnegf r3,r3
-/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
- subb3 r0,$4,r0
+ mnegf %r1,%r1
+ mnegf %r3,%r3
+/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
+ subb3 %r0,$4,%r0
abs2:
/*
* Clear all unneeded octant/quadrant bits.
*/
-/* bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
- bicb2 $0xfc,r0
+/* bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
+ bicb2 $0xfc,%r0
rsb
/*
* p.0
@@ -342,19 +342,19 @@ trigred:
/*
* Save the sign of the input argument.
*/
- movw r0,-(sp)
+ movw %r0,-(%sp)
/*
* Extract the exponent field.
*/
- extzv $7,$7,r0,r2
+ extzv $7,$7,%r0,%r2
/*
* Convert the fraction part of the input
* argument into a quadword integer.
*/
- bicw2 $0xff80,r0
- bisb2 $0x80,r0 # -S.McD
- rotl $16,r0,r0
- rotl $16,r1,r1
+ bicw2 $0xff80,%r0
+ bisb2 $0x80,%r0 # -S.McD
+ rotl $16,%r0,%r0
+ rotl $16,%r1,%r1
/*
* If r1 is negative, add 1 to r0 . This
* adjustment is made so that the two's
@@ -362,7 +362,7 @@ trigred:
* will produce unsigned results.
*/
bgeq posmid
- incl r0
+ incl %r0
posmid:
/* p.3
*
@@ -371,54 +371,54 @@ posmid:
* The address is longword aligned to ensure
* efficient access.
*/
- ashl $-3,r2,r3
- bicb2 $3,r3
- subl3 r3,$bits2opi,r3
+ ashl $-3,%r2,%r3
+ bicb2 $3,%r3
+ subl3 %r3,$bits2opi,%r3
/*
* Set r2 to the size of the shift needed to
* obtain the correct portion of 2/pi .
*/
- bicb2 $0xe0,r2
+ bicb2 $0xe0,%r2
/* p.4
*
* Move the needed 128 bits of 2/pi into
* r11 - r8 . Adjust the numbers to allow
* for unsigned multiplication.
*/
- ashq r2,(r3),r10
+ ashq %r2,(%r3),%r10
- subl2 $4,r3
- ashq r2,(r3),r9
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r9
bgeq signoff1
- incl r11
+ incl %r11
signoff1:
- subl2 $4,r3
- ashq r2,(r3),r8
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r8
bgeq signoff2
- incl r10
+ incl %r10
signoff2:
- subl2 $4,r3
- ashq r2,(r3),r7
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r7
bgeq signoff3
- incl r9
+ incl %r9
signoff3:
/* p.5
*
* Multiply the contents of r0/r1 by the
* slice of 2/pi in r11 - r8 .
*/
- emul r0,r8,$0,r4
- emul r0,r9,r5,r5
- emul r0,r10,r6,r6
+ emul %r0,%r8,$0,%r4
+ emul %r0,%r9,%r5,%r5
+ emul %r0,%r10,%r6,%r6
- emul r1,r8,$0,r7
- emul r1,r9,r8,r8
- emul r1,r10,r9,r9
- emul r1,r11,r10,r10
+ emul %r1,%r8,$0,%r7
+ emul %r1,%r9,%r8,%r8
+ emul %r1,%r10,%r9,%r9
+ emul %r1,%r11,%r10,%r10
- addl2 r4,r8
- adwc r5,r9
- adwc r6,r10
+ addl2 %r4,%r8
+ adwc %r5,%r9
+ adwc %r6,%r10
/* p.6
*
* If there are more than five leading zeros
@@ -427,42 +427,42 @@ signoff3:
* two quotient bits, generate more fraction bits.
* Otherwise, branch to code to produce the result.
*/
- bicl3 $0xc1ffffff,r10,r4
+ bicl3 $0xc1ffffff,%r10,%r4
beql more1
- cmpl $0x3e000000,r4
+ cmpl $0x3e000000,%r4
bneq result
more1:
/* p.7
*
* generate another 32 result bits.
*/
- subl2 $4,r3
- ashq r2,(r3),r5
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r5
bgeq signoff4
- emul r1,r6,$0,r4
- addl2 r1,r5
- emul r0,r6,r5,r5
- addl2 r0,r6
+ emul %r1,%r6,$0,%r4
+ addl2 %r1,%r5
+ emul %r0,%r6,%r5,%r5
+ addl2 %r0,%r6
brb addbits1
signoff4:
- emul r1,r6,$0,r4
- emul r0,r6,r5,r5
+ emul %r1,%r6,$0,%r4
+ emul %r0,%r6,%r5,%r5
addbits1:
- addl2 r5,r7
- adwc r6,r8
- adwc $0,r9
- adwc $0,r10
+ addl2 %r5,%r7
+ adwc %r6,%r8
+ adwc $0,%r9
+ adwc $0,%r10
/* p.8
*
* Check for massive cancellation.
*/
- bicl3 $0xc0000000,r10,r6
+ bicl3 $0xc0000000,%r10,%r6
/* bneq more2 -S.McD Test was backwards */
beql more2
- cmpl $0x3fffffff,r6
+ cmpl $0x3fffffff,%r6
bneq result
more2:
/* p.9
@@ -472,22 +472,22 @@ more2:
* Testing has shown there will always be
* enough bits after this point.
*/
- subl2 $4,r3
- ashq r2,(r3),r5
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r5
bgeq signoff5
- emul r0,r6,r4,r5
- addl2 r0,r6
+ emul %r0,%r6,%r4,%r5
+ addl2 %r0,%r6
brb addbits2
signoff5:
- emul r0,r6,r4,r5
+ emul %r0,%r6,%r4,%r5
addbits2:
- addl2 r6,r7
- adwc $0,r8
- adwc $0,r9
- adwc $0,r10
+ addl2 %r6,%r7
+ adwc $0,%r8
+ adwc $0,%r9
+ adwc $0,%r10
/* p.10
*
* The following code produces the reduced
@@ -498,17 +498,17 @@ result:
/*
* Extract the octant number from r10 .
*/
-/* extzv $29,$3,r10,r0 ...used for pi/4 reduction -S.McD */
- extzv $30,$2,r10,r0
+/* extzv $29,$3,%r10,%r0 ...used for pi/4 reduction -S.McD */
+ extzv $30,$2,%r10,%r0
/*
* Clear the octant bits in r10 .
*/
-/* bicl2 $0xe0000000,r10 ...used for pi/4 reduction -S.McD */
- bicl2 $0xc0000000,r10
+/* bicl2 $0xe0000000,%r10 ...used for pi/4 reduction -S.McD */
+ bicl2 $0xc0000000,%r10
/*
* Zero the sign flag.
*/
- clrl r5
+ clrl %r5
/* p.11
*
* Check to see if the fraction is greater than
@@ -517,16 +517,16 @@ result:
* on, and replace the fraction with 1 minus
* the fraction.
*/
-/* bitl $0x10000000,r10 ...used for pi/4 reduction -S.McD */
- bitl $0x20000000,r10
+/* bitl $0x10000000,%r10 ...used for pi/4 reduction -S.McD */
+ bitl $0x20000000,%r10
beql small
- incl r0
- incl r5
-/* subl3 r10,$0x1fffffff,r10 ...used for pi/4 reduction -S.McD */
- subl3 r10,$0x3fffffff,r10
- mcoml r9,r9
- mcoml r8,r8
- mcoml r7,r7
+ incl %r0
+ incl %r5
+/* subl3 %r10,$0x1fffffff,%r10 ...used for pi/4 reduction -S.McD */
+ subl3 %r10,$0x3fffffff,%r10
+ mcoml %r9,%r9
+ mcoml %r8,%r8
+ mcoml %r7,%r7
small:
/* p.12
*
@@ -534,37 +534,37 @@ small:
* Test whether the first 30 bits of the
* fraction are zero.
*/
- tstl r10
+ tstl %r10
beql tiny
/*
* Find the position of the first one bit in r10 .
*/
- cvtld r10,r1
- extzv $7,$7,r1,r1
+ cvtld %r10,%r1
+ extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
- subl3 r1,$32,r6
+ subl3 %r1,$32,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
- ashq r6,r9,r10
- ashq r6,r8,r9
+ ashq %r6,%r9,%r10
+ ashq %r6,%r8,%r9
brb mult
/* p.13
*
* Test to see if the sign bit of r9 is on.
*/
tiny:
- tstl r9
+ tstl %r9
bgeq tinier
/*
* If it is, shift the product bits up 32 bits.
*/
- movl $32,r6
- movq r8,r10
- tstl r10
+ movl $32,%r6
+ movq %r8,%r10
+ tstl %r10
brb mult
/* p.14
*
@@ -578,19 +578,19 @@ tinier:
/*
* Find the position of the first one bit in r9 .
*/
- cvtld r9,r1
- extzv $7,$7,r1,r1
+ cvtld %r9,%r1
+ extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
- subl3 r1,$32,r1
- addl3 $32,r1,r6
+ subl3 %r1,$32,%r1
+ addl3 $32,%r1,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
- ashq r1,r8,r10
- ashq r1,r7,r9
+ ashq %r1,%r8,%r10
+ ashq %r1,%r7,%r9
brb mult
/* p.15
*
@@ -598,9 +598,9 @@ tinier:
* argument to zero.
*/
zero:
- clrl r1
- clrl r2
- clrl r3
+ clrl %r1
+ clrl %r2
+ clrl %r3
brw return
/* p.16
*
@@ -617,65 +617,65 @@ mult:
/*
* Save r11/r10 in r4/r1 . -S.McD
*/
- movl r11,r4
- movl r10,r1
+ movl %r11,%r4
+ movl %r10,%r1
/*
* If the sign bit of r10 is on, add 1 to r11 .
*/
bgeq signoff6
- incl r11
+ incl %r11
signoff6:
/* p.17
*
* Move pi/2 into r3/r2 .
*/
- movq $0xc90fdaa22168c235,r2
+ movq $0xc90fdaa22168c235,%r2
/*
* Multiply the fraction by the portion of pi/2
* in r2 .
*/
- emul r2,r10,$0,r7
- emul r2,r11,r8,r7
+ emul %r2,%r10,$0,%r7
+ emul %r2,%r11,%r8,%r7
/*
* Multiply the fraction by the portion of pi/2
* in r3 .
*/
- emul r3,r10,$0,r9
- emul r3,r11,r10,r10
+ emul %r3,%r10,$0,%r9
+ emul %r3,%r11,%r10,%r10
/*
* Add the product bits together.
*/
- addl2 r7,r9
- adwc r8,r10
- adwc $0,r11
+ addl2 %r7,%r9
+ adwc %r8,%r10
+ adwc $0,%r11
/*
* Compensate for not sign extending r8 above.-S.McD
*/
- tstl r8
+ tstl %r8
bgeq signoff6a
- decl r11
+ decl %r11
signoff6a:
/*
* Compensate for r11/r10 being unsigned. -S.McD
*/
- addl2 r2,r10
- adwc r3,r11
+ addl2 %r2,%r10
+ adwc %r3,%r11
/*
* Compensate for r3/r2 being unsigned. -S.McD
*/
- addl2 r1,r10
- adwc r4,r11
+ addl2 %r1,%r10
+ adwc %r4,%r11
/* p.18
*
* If the sign bit of r11 is zero, shift the
* product bits up one bit and increment r6 .
*/
blss signon
- incl r6
- ashq $1,r10,r10
- tstl r9
+ incl %r6
+ ashq $1,%r10,%r10
+ tstl %r9
bgeq signoff7
- incl r10
+ incl %r10
signoff7:
signon:
/* p.19
@@ -684,19 +684,19 @@ signon:
* bits into r9/r8 . The sign extension
* will be handled later.
*/
- ashq $-8,r10,r8
+ ashq $-8,%r10,%r8
/*
* Convert the low order 8 bits of r10
* into an F-format number.
*/
- cvtbf r10,r3
+ cvtbf %r10,%r3
/*
* If the result of the conversion was
* negative, add 1 to r9/r8 .
*/
bgeq chop
- incl r8
- adwc $0,r9
+ incl %r8
+ adwc $0,%r9
/*
* If r9 is now zero, branch to special
* code to handle that possibility.
@@ -708,27 +708,27 @@ chop:
* Convert the number in r9/r8 into
* D-format number in r2/r1 .
*/
- rotl $16,r8,r2
- rotl $16,r9,r1
+ rotl $16,%r8,%r2
+ rotl $16,%r9,%r1
/*
* Set the exponent field to the appropriate
* value. Note that the extra bits created by
* sign extension are now eliminated.
*/
- subw3 r6,$131,r6
- insv r6,$7,$9,r1
+ subw3 %r6,$131,%r6
+ insv %r6,$7,$9,%r1
/*
* Set the exponent field of the F-format
* number in r3 to the appropriate value.
*/
- tstf r3
+ tstf %r3
beql return
-/* extzv $7,$8,r3,r4 -S.McD */
- extzv $7,$7,r3,r4
- addw2 r4,r6
-/* subw2 $217,r6 -S.McD */
- subw2 $64,r6
- insv r6,$7,$8,r3
+/* extzv $7,$8,%r3,%r4 -S.McD */
+ extzv $7,$7,%r3,%r4
+ addw2 %r4,%r6
+/* subw2 $217,%r6 -S.McD */
+ subw2 $64,%r6
+ insv %r6,$7,$8,%r3
brb return
/* p.21
*
@@ -738,16 +738,16 @@ chop:
* a carry out.
*/
carryout:
- clrl r1
- clrl r2
- subw3 r6,$132,r6
- insv r6,$7,$9,r1
- tstf r3
+ clrl %r1
+ clrl %r2
+ subw3 %r6,$132,%r6
+ insv %r6,$7,$9,%r1
+ tstf %r3
beql return
- extzv $7,$8,r3,r4
- addw2 r4,r6
- subw2 $218,r6
- insv r6,$7,$8,r3
+ extzv $7,$8,%r3,%r4
+ addw2 %r4,%r6
+ subw2 $218,%r6
+ insv %r6,$7,$8,%r3
/* p.22
*
* The following code makes a needed
@@ -761,9 +761,9 @@ return:
* equal to 1/2 . If so, negate the reduced
* argument.
*/
- blbc r5,signoff8
- mnegf r1,r1
- mnegf r3,r3
+ blbc %r5,signoff8
+ mnegf %r1,%r1
+ mnegf %r3,%r3
signoff8:
/* p.23
*
@@ -771,18 +771,18 @@ signoff8:
* negate the reduced argument and
* adjust the octant number.
*/
- tstw (sp)+
+ tstw (%sp)+
bgeq signoff9
- mnegf r1,r1
- mnegf r3,r3
-/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
- subb3 r0,$4,r0
+ mnegf %r1,%r1
+ mnegf %r3,%r3
+/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
+ subb3 %r0,$4,%r0
signoff9:
/*
* Clear all unneeded octant bits.
*
- * bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
- bicb2 $0xfc,r0
+ * bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
+ bicb2 $0xfc,%r0
/*
* Return.
*/
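A rough C model of what the trigred/__libm_argred path computes: the argument is
split into a value reduced to [-pi/4, pi/4] plus an octant/quadrant number.  The
sketch below leans on remquo() as a stand-in; the assembly instead multiplies the
fraction by a 128-bit slice of 2/pi so the reduction stays accurate for huge
arguments.  The function name and the use of remquo() are illustrative only.

    #include <math.h>

    /* x = q * (pi/2) + r with |r| <= pi/4; return r and q mod 4 */
    static double argred_model(double x, int *quadrant)
    {
            int q;
            double r = remquo(x, M_PI_2, &q);

            *quadrant = q & 3;
            return r;
    }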
diff --git a/lib/libm/arch/vax/n_atan2.S b/lib/libm/arch/vax/n_atan2.S
index 3a281d700ec..287848aa5a1 100644
--- a/lib/libm/arch/vax/n_atan2.S
+++ b/lib/libm/arch/vax/n_atan2.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_atan2.S,v 1.7 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_atan2.S,v 1.8 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_atan2.S,v 1.1 1995/10/10 23:40:25 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -75,128 +75,128 @@
STRONG_ALIAS(atan2l,atan2)
ENTRY(atan2, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r2 # r2 = y
- movq 12(ap),r4 # r4 = x
- bicw3 $0x7f,r2,r0
- bicw3 $0x7f,r4,r1
- cmpw r0,$0x8000 # y is the reserved operand
+ movq 4(%ap),%r2 # r2 = y
+ movq 12(%ap),%r4 # r4 = x
+ bicw3 $0x7f,%r2,%r0
+ bicw3 $0x7f,%r4,%r1
+ cmpw %r0,$0x8000 # y is the reserved operand
jeql resop
- cmpw r1,$0x8000 # x is the reserved operand
+ cmpw %r1,$0x8000 # x is the reserved operand
jeql resop
- subl2 $8,sp
- bicw3 $0x7fff,r2,-4(fp) # copy y sign bit to -4(fp)
- bicw3 $0x7fff,r4,-8(fp) # copy x sign bit to -8(fp)
- cmpd r4,$0x4080 # x = 1.0 ?
+ subl2 $8,%sp
+ bicw3 $0x7fff,%r2,-4(%fp) # copy y sign bit to -4(fp)
+ bicw3 $0x7fff,%r4,-8(%fp) # copy x sign bit to -8(fp)
+ cmpd %r4,$0x4080 # x = 1.0 ?
bneq xnot1
- movq r2,r0
- bicw2 $0x8000,r0 # t = |y|
- movq r0,r2 # y = |y|
+ movq %r2,%r0
+ bicw2 $0x8000,%r0 # t = |y|
+ movq %r0,%r2 # y = |y|
brb begin
xnot1:
- bicw3 $0x807f,r2,r11 # yexp
+ bicw3 $0x807f,%r2,%r11 # yexp
jeql yeq0 # if y=0 goto yeq0
- bicw3 $0x807f,r4,r10 # xexp
+ bicw3 $0x807f,%r4,%r10 # xexp
jeql pio2 # if x=0 goto pio2
- subw2 r10,r11 # k = yexp - xexp
- cmpw r11,$0x2000 # k >= 64 (exp) ?
+ subw2 %r10,%r11 # k = yexp - xexp
+ cmpw %r11,$0x2000 # k >= 64 (exp) ?
jgeq pio2 # atan2 = +-pi/2
- divd3 r4,r2,r0 # t = y/x never overflow
- bicw2 $0x8000,r0 # t > 0
- bicw2 $0xff80,r2 # clear the exponent of y
- bicw2 $0xff80,r4 # clear the exponent of x
- bisw2 $0x4080,r2 # normalize y to [1,2)
- bisw2 $0x4080,r4 # normalize x to [1,2)
- subw2 r11,r4 # scale x so that yexp-xexp=k
+ divd3 %r4,%r2,%r0 # t = y/x never overflow
+ bicw2 $0x8000,%r0 # t > 0
+ bicw2 $0xff80,%r2 # clear the exponent of y
+ bicw2 $0xff80,%r4 # clear the exponent of x
+ bisw2 $0x4080,%r2 # normalize y to [1,2)
+ bisw2 $0x4080,%r4 # normalize x to [1,2)
+ subw2 %r11,%r4 # scale x so that yexp-xexp=k
begin:
- cmpw r0,$0x411c # t : 39/16
+ cmpw %r0,$0x411c # t : 39/16
jgeq L50
- addl3 $0x180,r0,r10 # 8*t
- cvtrfl r10,r10 # [8*t] rounded to int
- ashl $-1,r10,r10 # [8*t]/2
- casel r10,$0,$4
-L1:
+ addl3 $0x180,%r0,%r10 # 8*t
+ cvtrfl %r10,%r10 # [8*t] rounded to int
+ ashl $-1,%r10,%r10 # [8*t]/2
+ casel %r10,$0,$4
+L1:
.word L20-L1
.word L20-L1
.word L30-L1
.word L40-L1
.word L40-L1
-L10:
- movq $0xb4d9940f985e407b,r6 # Hi=.98279372324732906796d0
- movq $0x21b1879a3bc2a2fc,r8 # Lo=-.17092002525602665777d-17
- subd3 r4,r2,r0 # y-x
- addw2 $0x80,r0 # 2(y-x)
- subd2 r4,r0 # 2(y-x)-x
- addw2 $0x80,r4 # 2x
- movq r2,r10
- addw2 $0x80,r10 # 2y
- addd2 r10,r2 # 3y
- addd2 r4,r2 # 3y+2x
- divd2 r2,r0 # (2y-3x)/(2x+3y)
+L10:
+ movq $0xb4d9940f985e407b,%r6 # Hi=.98279372324732906796d0
+ movq $0x21b1879a3bc2a2fc,%r8 # Lo=-.17092002525602665777d-17
+ subd3 %r4,%r2,%r0 # y-x
+ addw2 $0x80,%r0 # 2(y-x)
+ subd2 %r4,%r0 # 2(y-x)-x
+ addw2 $0x80,%r4 # 2x
+ movq %r2,%r10
+ addw2 $0x80,%r10 # 2y
+ addd2 %r10,%r2 # 3y
+ addd2 %r4,%r2 # 3y+2x
+ divd2 %r2,%r0 # (2y-3x)/(2x+3y)
brw L60
-L20:
- cmpw r0,$0x3280 # t : 2**(-28)
+L20:
+ cmpw %r0,$0x3280 # t : 2**(-28)
jlss L80
- clrq r6 # Hi=r6=0, Lo=r8=0
- clrq r8
+ clrq %r6 # Hi=r6=0, Lo=r8=0
+ clrq %r8
brw L60
-L30:
- movq $0xda7b2b0d63383fed,r6 # Hi=.46364760900080611433d0
- movq $0xf0ea17b2bf912295,r8 # Lo=.10147340032515978826d-17
- movq r2,r0
- addw2 $0x80,r0 # 2y
- subd2 r4,r0 # 2y-x
- addw2 $0x80,r4 # 2x
- addd2 r2,r4 # 2x+y
- divd2 r4,r0 # (2y-x)/(2x+y)
+L30:
+ movq $0xda7b2b0d63383fed,%r6 # Hi=.46364760900080611433d0
+ movq $0xf0ea17b2bf912295,%r8 # Lo=.10147340032515978826d-17
+ movq %r2,%r0
+ addw2 $0x80,%r0 # 2y
+ subd2 %r4,%r0 # 2y-x
+ addw2 $0x80,%r4 # 2x
+ addd2 %r2,%r4 # 2x+y
+ divd2 %r4,%r0 # (2y-x)/(2x+y)
brb L60
-L50:
- movq $0x68c2a2210fda40c9,r6 # Hi=1.5707963267948966135d1
- movq $0x06e0145c26332326,r8 # Lo=.22517417741562176079d-17
- cmpw r0,$0x5100 # y : 2**57
+L50:
+ movq $0x68c2a2210fda40c9,%r6 # Hi=1.5707963267948966135d1
+ movq $0x06e0145c26332326,%r8 # Lo=.22517417741562176079d-17
+ cmpw %r0,$0x5100 # y : 2**57
bgeq L90
- divd3 r2,r4,r0
- bisw2 $0x8000,r0 # -x/y
+ divd3 %r2,%r4,%r0
+ bisw2 $0x8000,%r0 # -x/y
brb L60
-L40:
- movq $0x68c2a2210fda4049,r6 # Hi=.78539816339744830676d0
- movq $0x06e0145c263322a6,r8 # Lo=.11258708870781088040d-17
- subd3 r4,r2,r0 # y-x
- addd2 r4,r2 # y+x
- divd2 r2,r0 # (y-x)/(y+x)
-L60:
- movq r0,r10
- muld2 r0,r0
- polyd r0,$12,ptable
- muld2 r10,r0
- subd2 r0,r8
- addd3 r8,r10,r0
- addd2 r6,r0
-L80:
- movw -8(fp),r2
+L40:
+ movq $0x68c2a2210fda4049,%r6 # Hi=.78539816339744830676d0
+ movq $0x06e0145c263322a6,%r8 # Lo=.11258708870781088040d-17
+ subd3 %r4,%r2,%r0 # y-x
+ addd2 %r4,%r2 # y+x
+ divd2 %r2,%r0 # (y-x)/(y+x)
+L60:
+ movq %r0,%r10
+ muld2 %r0,%r0
+ polyd %r0,$12,ptable
+ muld2 %r10,%r0
+ subd2 %r0,%r8
+ addd3 %r8,%r10,%r0
+ addd2 %r6,%r0
+L80:
+ movw -8(%fp),%r2
bneq pim
- bisw2 -4(fp),r0 # return sign(y)*r0
+ bisw2 -4(%fp),%r0 # return sign(y)*r0
ret
L90: # x >= 2**25
- movq r6,r0
+ movq %r6,%r0
brb L80
pim:
- subd3 r0,$0x68c2a2210fda4149,r0 # pi-t
- bisw2 -4(fp),r0
+ subd3 %r0,$0x68c2a2210fda4149,%r0 # pi-t
+ bisw2 -4(%fp),%r0
ret
yeq0:
- movw -8(fp),r2
+ movw -8(%fp),%r2
beql zero # if sign(x)=1 return pi
- movq $0x68c2a2210fda4149,r0 # pi=3.1415926535897932270d1
+ movq $0x68c2a2210fda4149,%r0 # pi=3.1415926535897932270d1
ret
zero:
- clrq r0 # return 0
+ clrq %r0 # return 0
ret
pio2:
- movq $0x68c2a2210fda40c9,r0 # pi/2=1.5707963267948966135d1
- bisw2 -4(fp),r0 # return sign(y)*pi/2
+ movq $0x68c2a2210fda40c9,%r0 # pi/2=1.5707963267948966135d1
+ bisw2 -4(%fp),%r0 # return sign(y)*pi/2
ret
resop:
- movq $0x8000,r0 # propagate the reserved operand
+ movq $0x8000,%r0 # propagate the reserved operand
ret
.align 2
ptable:
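The atan2 code above picks one of several intervals for t = y/x and evaluates a
polynomial around a tabulated base point (the Hi/Lo constant pairs).  Setting
that precision work aside, the quadrant handling it implements is the textbook
one; a hedged C sketch (naive, and less accurate near the axes than the real
routine):

    #include <math.h>

    static double atan2_model(double y, double x)
    {
            if (x > 0.0)
                    return atan(y / x);
            if (x < 0.0)
                    return y >= 0.0 ? atan(y / x) + M_PI : atan(y / x) - M_PI;
            /* x == 0: the assembly returns sign(y)*pi/2, or 0/pi when y is 0 too */
            if (y != 0.0)
                    return y > 0.0 ? M_PI_2 : -M_PI_2;
            return 0.0;
    }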
diff --git a/lib/libm/arch/vax/n_cbrt.S b/lib/libm/arch/vax/n_cbrt.S
index dd03a8fa229..d5669952618 100644
--- a/lib/libm/arch/vax/n_cbrt.S
+++ b/lib/libm/arch/vax/n_cbrt.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_cbrt.S,v 1.4 2008/09/16 22:13:12 martynas Exp $ */
+/* $OpenBSD: n_cbrt.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_cbrt.S,v 1.1 1995/10/10 23:40:26 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -43,37 +43,37 @@
.text
_ALIGN_TEXT
ENTRY(cbrt, R2|R3|R4|R5|R6|R7)
- movq 4(ap),r0 # r0 = argument x
- bicw3 $0x807f,r0,r2 # biased exponent of x
+ movq 4(%ap),%r0 # r0 = argument x
+ bicw3 $0x807f,%r0,%r2 # biased exponent of x
jeql return # cbrt(0)=0 cbrt(res)=res. operand
- bicw3 $0x7fff,r0,ap # ap has sign(x)
- xorw2 ap,r0 # r0 is abs(x)
- movl r0,r2 # r2 has abs(x)
- rotl $16,r2,r2 # r2 = |x| with bits unscrambled
- divl2 $3,r2 # rough cbrt with bias/3
- addl2 B,r2 # restore bias, diminish fraction
- rotl $16,r2,r2 # r2=|q|=|cbrt| to 5 bits
- mulf3 r2,r2,r3 # r3 =qq
- divf2 r0,r3 # r3 = qq/x
- mulf2 r2,r3
- addf2 C,r3 # r3 = s = C + qqq/x
- divf3 r3,D,r4 # r4 = D/s
- addf2 E,r4
- addf2 r4,r3 # r3 = s + E + D/s
- divf3 r3,F,r3 # r3 = F / (s + E + D/s)
- addf2 G,r3 # r3 = G + F / (s + E + D/s)
- mulf2 r3,r2 # r2 = qr3 = new q to 23 bits
- clrl r3 # r2:r3 = q as double float
- muld3 r2,r2,r4 # r4:r5 = qq exactly
- divd2 r4,r0 # r0:r1 = x/(q*q) rounded
- subd3 r2,r0,r6 # r6:r7 = x/(q*q) - q exactly
- movq r2,r4 # r4:r5 = q
- addw2 $0x80,r4 # r4:r5 = 2 * q
- addd2 r0,r4 # r4:r5 = 2*q + x/(q*q)
- divd2 r4,r6 # r6:r7 = (x/(q*q)-q)/(2*q+x/(q*q))
- muld2 r2,r6 # r6:r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
- addd3 r6,r2,r0 # r0:r1 = q + r6:r7
- bisw2 ap,r0 # restore the sign bit
+ bicw3 $0x7fff,%r0,%ap # ap has sign(x)
+ xorw2 %ap,%r0 # r0 is abs(x)
+ movl %r0,%r2 # r2 has abs(x)
+ rotl $16,%r2,%r2 # r2 = |x| with bits unscrambled
+ divl2 $3,%r2 # rough cbrt with bias/3
+ addl2 B,%r2 # restore bias, diminish fraction
+ rotl $16,%r2,%r2 # r2=|q|=|cbrt| to 5 bits
+ mulf3 %r2,%r2,%r3 # r3 =qq
+ divf2 %r0,%r3 # r3 = qq/x
+ mulf2 %r2,%r3
+ addf2 C,%r3 # r3 = s = C + qqq/x
+ divf3 %r3,D,%r4 # r4 = D/s
+ addf2 E,%r4
+ addf2 %r4,%r3 # r3 = s + E + D/s
+ divf3 %r3,F,%r3 # r3 = F / (s + E + D/s)
+ addf2 G,%r3 # r3 = G + F / (s + E + D/s)
+ mulf2 %r3,%r2 # r2 = qr3 = new q to 23 bits
+ clrl %r3 # r2:r3 = q as double float
+ muld3 %r2,%r2,%r4 # r4:r5 = qq exactly
+ divd2 %r4,%r0 # r0:r1 = x/(q*q) rounded
+ subd3 %r2,%r0,%r6 # r6:r7 = x/(q*q) - q exactly
+ movq %r2,%r4 # r4:r5 = q
+ addw2 $0x80,%r4 # r4:r5 = 2 * q
+ addd2 %r0,%r4 # r4:r5 = 2*q + x/(q*q)
+ divd2 %r4,%r6 # r6:r7 = (x/(q*q)-q)/(2*q+x/(q*q))
+ muld2 %r2,%r6 # r6:r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
+ addd3 %r6,%r2,%r0 # r0:r1 = q + r6:r7
+ bisw2 %ap,%r0 # restore the sign bit
return:
ret # error less than 0.667 ulps
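The final correction in n_cbrt.S, q + q*(x/(q*q) - q)/(2*q + x/(q*q)), is one
Halley iteration for q^3 = x.  A C sketch of the same idea, with a crude exp/log
first guess standing in for the exponent-division trick (names illustrative):

    #include <math.h>

    static double cbrt_model(double x)
    {
            if (x == 0.0)
                    return x;
            double a = fabs(x);
            double q = exp(log(a) / 3.0);           /* rough initial guess */
            for (int i = 0; i < 2; i++)             /* Halley step, cubic convergence */
                    q += q * (a - q * q * q) / (a + 2.0 * q * q * q);
            return x < 0.0 ? -q : q;
    }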
diff --git a/lib/libm/arch/vax/n_hypot.S b/lib/libm/arch/vax/n_hypot.S
index bd5cbf49df1..72f5d773de5 100644
--- a/lib/libm/arch/vax/n_hypot.S
+++ b/lib/libm/arch/vax/n_hypot.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_hypot.S,v 1.1 2008/10/07 22:25:53 martynas Exp $ */
+/* $OpenBSD: n_hypot.S,v 1.2 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_cabs.S,v 1.1 1995/10/10 23:40:26 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -46,23 +46,23 @@
.text
_ALIGN_TEXT
ENTRY(hypot, 0x8000|R2|R3|R4|R5|R6) # enable floating overflow
- movq 4(ap),r0 # r0:1 = x
- movq 12(ap),r2 # r2:3 = y
- bicw3 $0x7f,r0,r4 # r4 has signed biased exp of x
- cmpw $0x8000,r4
+ movq 4(%ap),%r0 # r0:1 = x
+ movq 12(%ap),%r2 # r2:3 = y
+ bicw3 $0x7f,%r0,%r4 # r4 has signed biased exp of x
+ cmpw $0x8000,%r4
jeql return # x is a reserved operand, so return it
- bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r2,%r5 # r5 has signed biased exp of y
+ cmpw $0x8000,%r5
jneq cont /* y isn't a reserved operand */
- movq r2,r0 /* return y if it's reserved */
+ movq %r2,%r0 /* return y if it's reserved */
ret
cont:
bsbb regs_set # r0:1 = dsqrt(x^2+y^2)/2^r6
- addw2 r6,r0 # unscaled cdabs in r0:1
+ addw2 %r6,%r0 # unscaled cdabs in r0:1
jvc return # unless it overflows
- subw2 $0x80,r0 # halve r0 to get meaningful overflow
- addd2 r0,r0 # overflow; r0 is half of true abs value
+ subw2 $0x80,%r0 # halve r0 to get meaningful overflow
+ addd2 %r0,%r0 # overflow; r0 is half of true abs value
return:
ret
@@ -71,47 +71,47 @@ _ALTENTRY(__libm_cdabs_r6) # ENTRY POINT for cdsqrt
# calculates a scaled (factor in r6)
# complex absolute value
- movq (r4)+,r0 # r0:r1 = x via indirect addressing
- movq (r4),r2 # r2:r3 = y via indirect addressing
+ movq (%r4)+,%r0 # r0:r1 = x via indirect addressing
+ movq (%r4),%r2 # r2:r3 = y via indirect addressing
- bicw3 $0x7f,r0,r5 # r5 has signed biased exp of x
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r0,%r5 # r5 has signed biased exp of x
+ cmpw $0x8000,%r5
jeql cdreserved # x is a reserved operand
- bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r2,%r5 # r5 has signed biased exp of y
+ cmpw $0x8000,%r5
jneq regs_set /* y isn't a reserved operand either? */
cdreserved:
- movl *4(ap),r4 # r4 -> (u,v), if x or y is reserved
- movq r0,(r4)+ # copy u and v as is and return
- movq r2,(r4) # (again addressing is indirect)
+ movl *4(%ap),%r4 # r4 -> (u,v), if x or y is reserved
+ movq %r0,(%r4)+ # copy u and v as is and return
+ movq %r2,(%r4) # (again addressing is indirect)
ret
#endif
regs_set:
- bicw2 $0x8000,r0 # r0:r1 = dabs(x)
- bicw2 $0x8000,r2 # r2:r3 = dabs(y)
- cmpw r0,r2
+ bicw2 $0x8000,%r0 # r0:r1 = dabs(x)
+ bicw2 $0x8000,%r2 # r2:r3 = dabs(y)
+ cmpw %r0,%r2
jgeq ordered
- movq r0,r4
- movq r2,r0
- movq r4,r2 # force y's exp <= x's exp
+ movq %r0,%r4
+ movq %r2,%r0
+ movq %r4,%r2 # force y's exp <= x's exp
ordered:
- bicw3 $0x7f,r0,r6 # r6 = exponent(x) + bias(129)
+ bicw3 $0x7f,%r0,%r6 # r6 = exponent(x) + bias(129)
jeql retsb # if x = y = 0 then cdabs(x,y) = 0
- subw2 $0x4780,r6 # r6 = exponent(x) - 14
- subw2 r6,r0 # 2^14 <= scaled x < 2^15
- bitw $0xff80,r2
+ subw2 $0x4780,%r6 # r6 = exponent(x) - 14
+ subw2 %r6,%r0 # 2^14 <= scaled x < 2^15
+ bitw $0xff80,%r2
jeql retsb # if y = 0 return dabs(x)
- subw2 r6,r2
- cmpw $0x3780,r2 # if scaled y < 2^-18
+ subw2 %r6,%r2
+ cmpw $0x3780,%r2 # if scaled y < 2^-18
jgtr retsb # return dabs(x)
- emodd r0,$0,r0,r4,r0 # r4 + r0:1 = scaled x^2
- emodd r2,$0,r2,r5,r2 # r5 + r2:3 = scaled y^2
- addd2 r2,r0
- addl2 r5,r4
- cvtld r4,r2
- addd2 r2,r0 # r0:1 = scaled x^2 + y^2
+ emodd %r0,$0,%r0,%r4,%r0 # r4 + r0:1 = scaled x^2
+ emodd %r2,$0,%r2,%r5,%r2 # r5 + r2:3 = scaled y^2
+ addd2 %r2,%r0
+ addl2 %r5,%r4
+ cvtld %r4,%r2
+ addd2 %r2,%r0 # r0:1 = scaled x^2 + y^2
jmp __libm_dsqrt_r5 # r0:1 = dsqrt(x^2+y^2)/2^r6
retsb:
rsb # error < 0.86 ulp
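The hypot/cdabs code scales both operands by a power of two kept in r6 so the
squares can neither overflow nor lose all their bits, then undoes the scaling
after the square root.  The usual C way to get the same protection is to factor
out the larger magnitude (sketch only; the real routine also propagates reserved
operands and signals overflow):

    #include <math.h>

    static double hypot_model(double x, double y)
    {
            double ax = fabs(x), ay = fabs(y), t;

            if (ax < ay) {                  /* force ax >= ay, like the swap at "ordered" */
                    t = ax; ax = ay; ay = t;
            }
            if (ax == 0.0)
                    return 0.0;
            t = ay / ax;                    /* t <= 1, so t*t cannot overflow */
            return ax * sqrt(1.0 + t * t);
    }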
diff --git a/lib/libm/arch/vax/n_infnan.S b/lib/libm/arch/vax/n_infnan.S
index 6edf8d7c9c9..aff89c9fd46 100644
--- a/lib/libm/arch/vax/n_infnan.S
+++ b/lib/libm/arch/vax/n_infnan.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_infnan.S,v 1.3 2008/05/21 20:37:10 miod Exp $ */
+/* $OpenBSD: n_infnan.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_infnan.S,v 1.1 1995/10/10 23:40:27 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -43,10 +43,10 @@
* The Reserved Operand Fault is generated inside of this routine.
*/
ENTRY(infnan,0)
- cmpl 4(ap),$ERANGE
+ cmpl 4(%ap),$ERANGE
bneq 1f
movl $ERANGE,_C_LABEL(errno)
brb 2f
1: movl $EDOM,_C_LABEL(errno)
-2: emodd $0,$0,$0x8000,r0,r0 # generates the reserved operand fault
+2: emodd $0,$0,$0x8000,%r0,%r0 # generates the reserved operand fault
ret
diff --git a/lib/libm/arch/vax/n_sincos.S b/lib/libm/arch/vax/n_sincos.S
index ececb38d0d2..00e2831e86d 100644
--- a/lib/libm/arch/vax/n_sincos.S
+++ b/lib/libm/arch/vax/n_sincos.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_sincos.S,v 1.8 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_sincos.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_sincos.S,v 1.1 1995/10/10 23:40:28 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -50,14 +50,14 @@
STRONG_ALIAS(sinl,sin)
ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x807f,r0,r2
+ movq 4(%ap),%r0
+ bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -66,9 +66,9 @@ ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* Entered by sine ; save 0 in r4 .
*/
jsb __libm_argred
- movl $0,r4
+ movl $0,%r4
jsb __libm_sincos
- bispsw (sp)+
+ bispsw (%sp)+
1: ret
/*
@@ -80,15 +80,15 @@ ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
STRONG_ALIAS(cosl,cos)
ENTRY(cos, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x7f,r0,r2
- cmpw $0x8000,r2
+ movq 4(%ap),%r0
+ bicw3 $0x7f,%r0,%r2
+ cmpw $0x8000,%r2
beql 1f # if x is reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -97,7 +97,7 @@ ENTRY(cos, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* Entered by cosine ; save 1 in r4 .
*/
jsb __libm_argred
- movl $1,r4
+ movl $1,%r4
jsb __libm_sincos
- bispsw (sp)+
+ bispsw (%sp)+
1: ret
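Both entry points above reduce the argument and jump to the shared __libm_sincos
kernel; the only difference is the flag left in r4 (0 for sine, 1 for cosine),
which in effect shifts the quadrant by one since cos(x) = sin(x + pi/2).  A C
model of that dispatch (kernel and function names are made up, and remquo()
stands in for the table-driven reduction):

    #include <math.h>

    static double sin_kernel(double r, int quadrant)
    {
            switch (quadrant & 3) {
            case 0:  return  sin(r);
            case 1:  return  cos(r);
            case 2:  return -sin(r);
            default: return -cos(r);
            }
    }

    static double sin_model(double x)
    {
            int q;
            double r = remquo(x, M_PI_2, &q);

            return sin_kernel(r, q);        /* the r4 = 0 case */
    }

    static double cos_model(double x)
    {
            int q;
            double r = remquo(x, M_PI_2, &q);

            return sin_kernel(r, q + 1);    /* the r4 = 1 case */
    }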
diff --git a/lib/libm/arch/vax/n_sqrt.S b/lib/libm/arch/vax/n_sqrt.S
index 163ec9245b9..60f666da190 100644
--- a/lib/libm/arch/vax/n_sqrt.S
+++ b/lib/libm/arch/vax/n_sqrt.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_sqrt.S,v 1.9 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_sqrt.S,v 1.10 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_sqrt.S,v 1.1 1995/10/10 23:40:29 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -47,8 +47,8 @@
STRONG_ALIAS(sqrtl,sqrt)
ENTRY(sqrt, R2|R3|R4|R5)
- movq 4(ap),r0
-dsqrt2: bicw3 $0x807f,r0,r2 # check exponent of input
+ movq 4(%ap),%r0
+dsqrt2: bicw3 $0x807f,%r0,%r2 # check exponent of input
jeql noexp # biased exponent is zero -> 0.0 or reserved
bsbb __libm_dsqrt_r5
noexp: ret
@@ -59,30 +59,30 @@ _ALTENTRY(__libm_dsqrt_r5) /* ENTRY POINT FOR cdabs and cdsqrt */
/* returns double square root scaled by */
/* 2^r6 */
- movd r0,r4
+ movd %r0,%r4
jleq nonpos # argument is not positive
- movzwl r4,r2
- ashl $-1,r2,r0
- addw2 $0x203c,r0 # r0 has magic initial approximation
+ movzwl %r4,%r2
+ ashl $-1,%r2,%r0
+ addw2 $0x203c,%r0 # r0 has magic initial approximation
/*
* Do two steps of Heron's rule
* ((arg/guess) + guess) / 2 = better guess
*/
- divf3 r0,r4,r2
- addf2 r2,r0
- subw2 $0x80,r0 # divide by two
+ divf3 %r0,%r4,%r2
+ addf2 %r2,%r0
+ subw2 $0x80,%r0 # divide by two
- divf3 r0,r4,r2
- addf2 r2,r0
- subw2 $0x80,r0 # divide by two
+ divf3 %r0,%r4,%r2
+ addf2 %r2,%r0
+ subw2 $0x80,%r0 # divide by two
/* Scale argument and approximation to prevent over/underflow */
- bicw3 $0x807f,r4,r1
- subw2 $0x4080,r1 # r1 contains scaling factor
- subw2 r1,r4
- movl r0,r2
- subw2 r1,r2
+ bicw3 $0x807f,%r4,%r1
+ subw2 $0x4080,%r1 # r1 contains scaling factor
+ subw2 %r1,%r4
+ movl %r0,%r2
+ subw2 %r1,%r2
/* Cubic step
*
@@ -90,16 +90,16 @@ _ALTENTRY(__libm_dsqrt_r5) /* ENTRY POINT FOR cdabs and cdsqrt */
* a is approximation, and n is the original argument.
* (let s be scale factor in the following comments)
*/
- clrl r1
- clrl r3
- muld2 r0,r2 # r2:r3 = a*a/s
- subd2 r2,r4 # r4:r5 = n/s - a*a/s
- addw2 $0x100,r2 # r2:r3 = 4*a*a/s
- addd2 r4,r2 # r2:r3 = n/s + 3*a*a/s
- muld2 r0,r4 # r4:r5 = a*n/s - a*a*a/s
- divd2 r2,r4 # r4:r5 = a*(n-a*a)/(n+3*a*a)
- addw2 $0x80,r4 # r4:r5 = 2*a*(n-a*a)/(n+3*a*a)
- addd2 r4,r0 # r0:r1 = a + 2*a*(n-a*a)/(n+3*a*a)
+ clrl %r1
+ clrl %r3
+ muld2 %r0,%r2 # r2:r3 = a*a/s
+ subd2 %r2,%r4 # r4:r5 = n/s - a*a/s
+ addw2 $0x100,%r2 # r2:r3 = 4*a*a/s
+ addd2 %r4,%r2 # r2:r3 = n/s + 3*a*a/s
+ muld2 %r0,%r4 # r4:r5 = a*n/s - a*a*a/s
+ divd2 %r2,%r4 # r4:r5 = a*(n-a*a)/(n+3*a*a)
+ addw2 $0x80,%r4 # r4:r5 = 2*a*(n-a*a)/(n+3*a*a)
+ addd2 %r4,%r0 # r0:r1 = a + 2*a*(n-a*a)/(n+3*a*a)
rsb # DONE!
nonpos:
jneq negarg
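The comments in n_sqrt.S spell out the method: an initial guess whose exponent is
half of the argument's, two Heron steps in F-floating, then one cubic step in
D-floating.  The same structure in C, using frexp()/ldexp() for the exponent
halving (a sketch; constants and iteration count differ from the assembly):

    #include <math.h>

    static double sqrt_model(double n)
    {
            int e, i;
            double m, g;

            if (n <= 0.0)
                    return 0.0;             /* the real code handles 0/negative separately */
            m = frexp(n, &e);               /* n = m * 2^e, 0.5 <= m < 1 */
            g = ldexp(m, (e + 1) / 2);      /* guess with roughly half the exponent */
            for (i = 0; i < 6; i++)
                    g = (n / g + g) / 2.0;  /* Heron's rule: (arg/guess + guess) / 2 */
            return g;
    }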
diff --git a/lib/libm/arch/vax/n_support.S b/lib/libm/arch/vax/n_support.S
index c8fdad7e203..3216dc1ab06 100644
--- a/lib/libm/arch/vax/n_support.S
+++ b/lib/libm/arch/vax/n_support.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_support.S,v 1.17 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_support.S,v 1.18 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_support.S,v 1.1 1995/10/10 23:40:30 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -53,12 +53,12 @@
STRONG_ALIAS(copysignl,copysign)
ENTRY(copysign, R2)
- movq 4(ap),r0 # load x into r0
- bicw3 $0x807f,r0,r2 # mask off the exponent of x
+ movq 4(%ap),%r0 # load x into r0
+ bicw3 $0x807f,%r0,%r2 # mask off the exponent of x
beql Lz # if zero or reserved op then return x
- bicw3 $0x7fff,12(ap),r2 # copy the sign bit of y into r2
- bicw2 $0x8000,r0 # replace x by |x|
- bisw2 r2,r0 # copy the sign bit of y to x
+ bicw3 $0x7fff,12(%ap),%r2 # copy the sign bit of y into r2
+ bicw2 $0x8000,%r0 # replace x by |x|
+ bisw2 %r2,%r0 # copy the sign bit of y to x
Lz: ret
/*
@@ -67,12 +67,12 @@ Lz: ret
*/
ENTRY(copysignf, R2)
- movl 4(ap),r0 # load x into r0
- bicw3 $0x807f,r0,r2 # mask off the exponent of x
+ movl 4(%ap),%r0 # load x into r0
+ bicw3 $0x807f,%r0,%r2 # mask off the exponent of x
beql Fz # if zero or reserved op then return x
- bicw3 $0x7fff,8(ap),r2 # copy the sign bit of y into r2
- bicw2 $0x8000,r0 # replace x by |x|
- bisw2 r2,r0 # copy the sign bit of y to x
+ bicw3 $0x7fff,8(%ap),%r2 # copy the sign bit of y into r2
+ bicw2 $0x8000,%r0 # replace x by |x|
+ bisw2 %r2,%r0 # copy the sign bit of y to x
Fz: ret
/*
@@ -82,15 +82,15 @@ Fz: ret
STRONG_ALIAS(logbl,logb)
ENTRY(logb, 0)
- bicl3 $0xffff807f,4(ap),r0 # mask off the exponent of x
+ bicl3 $0xffff807f,4(%ap),%r0 # mask off the exponent of x
beql Ln
- ashl $-7,r0,r0 # get the biased exponent
- subl2 $129,r0 # get the unbiased exponent
- cvtld r0,r0 # return the answer in double
+ ashl $-7,%r0,%r0 # get the biased exponent
+ subl2 $129,%r0 # get the unbiased exponent
+ cvtld %r0,%r0 # return the answer in double
ret
-Ln: movq 4(ap),r0 # r0:1 = x (zero or reserved op)
+Ln: movq 4(%ap),%r0 # r0:1 = x (zero or reserved op)
bneq 1f # simply return if reserved op
- movq $0x0000fe00ffffcfff,r0 # -2147483647.0
+ movq $0x0000fe00ffffcfff,%r0 # -2147483647.0
1: ret
/*
@@ -99,15 +99,15 @@ Ln: movq 4(ap),r0 # r0:1 = x (zero or reserved op)
*/
ENTRY(logbf, 0)
- bicl3 $0xffff807f,4(ap),r0 # mask off the exponent of x
+ bicl3 $0xffff807f,4(%ap),%r0 # mask off the exponent of x
beql Fn
- ashl $-7,r0,r0 # get the biased exponent
- subl2 $129,r0 # get the unbiased exponent
- cvtlf r0,r0 # return the answer in float
+ ashl $-7,%r0,%r0 # get the biased exponent
+ subl2 $129,%r0 # get the unbiased exponent
+ cvtlf %r0,%r0 # return the answer in float
ret
-Fn: movl 4(ap),r0 # r0:1 = x (zero or reserved op)
+Fn: movl 4(%ap),%r0 # r0:1 = x (zero or reserved op)
bneq 1f # simply return if reserved op
- movl $0x0000d000,r0 # -2147483647.0
+ movl $0x0000d000,%r0 # -2147483647.0
1: ret
/*
@@ -117,27 +117,27 @@ Fn: movl 4(ap),r0 # r0:1 = x (zero or reserved op)
STRONG_ALIAS(scalbnl,scalbn)
ENTRY(scalbn, R2|R3)
- movq 4(ap),r0
- bicl3 $0xffff807f,r0,r3
+ movq 4(%ap),%r0
+ bicl3 $0xffff807f,%r0,%r3
beql ret1 # 0 or reserved operand
- movl 12(ap),r2
- cmpl r2,$0x12c
+ movl 12(%ap),%r2
+ cmpl %r2,$0x12c
bgeq ovfl
- cmpl r2,$-0x12c
+ cmpl %r2,$-0x12c
bleq unfl
- ashl $7,r2,r2
- addl2 r2,r3
+ ashl $7,%r2,%r2
+ addl2 %r2,%r3
bleq unfl
- cmpl r3,$0x8000
+ cmpl %r3,$0x8000
bgeq ovfl
- addl2 r2,r0
+ addl2 %r2,%r0
ret
ovfl: pushl $ERANGE
calls $1,_C_LABEL(infnan) # if it returns
- bicw3 $0x7fff,4(ap),r2 # get the sign of input arg
- bisw2 r2,r0 # re-attach the sign to r0/1
+ bicw3 $0x7fff,4(%ap),%r2 # get the sign of input arg
+ bisw2 %r2,%r0 # re-attach the sign to r0/1
ret
-unfl: movq $0,r0
+unfl: movq $0,%r0
ret1: ret
/*
@@ -149,83 +149,83 @@ ret1: ret
ALTENTRY(drem)
ENTRY(remainder, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- subl2 $12,sp
- movq 4(ap),r0 #r0=x
- movq 12(ap),r2 #r2=y
+ subl2 $12,%sp
+ movq 4(%ap),%r0 #r0=x
+ movq 12(%ap),%r2 #r2=y
jeql Rop #if y=0 then generate reserved op fault
- bicw3 $0x007f,r0,r4 #check if x is Rop
- cmpw r4,$0x8000
+ bicw3 $0x007f,%r0,%r4 #check if x is Rop
+ cmpw %r4,$0x8000
jeql Ret #if x is Rop then return Rop
- bicl3 $0x007f,r2,r4 #check if y is Rop
- cmpw r4,$0x8000
+ bicl3 $0x007f,%r2,%r4 #check if y is Rop
+ cmpw %r4,$0x8000
jeql Ret #if y is Rop then return Rop
- bicw2 $0x8000,r2 #y := |y|
- movw $0,-4(fp) #-4(fp) = nx := 0
- cmpw r2,$0x1c80 #yexp ? 57
+ bicw2 $0x8000,%r2 #y := |y|
+ movw $0,-4(%fp) #-4(fp) = nx := 0
+ cmpw %r2,$0x1c80 #yexp ? 57
bgtr C1 #if yexp > 57 goto C1
- addw2 $0x1c80,r2 #scale up y by 2**57
- movw $0x1c80,-4(fp) #nx := 57 (exponent field)
+ addw2 $0x1c80,%r2 #scale up y by 2**57
+ movw $0x1c80,-4(%fp) #nx := 57 (exponent field)
C1:
- movw -4(fp),-8(fp) #-8(fp) = nf := nx
- bicw3 $0x7fff,r0,-12(fp) #-12(fp) = sign of x
- bicw2 $0x8000,r0 #x := |x|
- movq r2,r10 #y1 := y
- bicl2 $0xffff07ff,r11 #clear the last 27 bits of y1
+ movw -4(%fp),-8(%fp) #-8(fp) = nf := nx
+ bicw3 $0x7fff,%r0,-12(%fp) #-12(fp) = sign of x
+ bicw2 $0x8000,%r0 #x := |x|
+ movq %r2,%r10 #y1 := y
+ bicl2 $0xffff07ff,%r11 #clear the last 27 bits of y1
loop:
- cmpd r0,r2 #x ? y
+ cmpd %r0,%r2 #x ? y
bleq E1 #if x <= y goto E1
/* begin argument reduction */
- movq r2,r4 #t =y
- movq r10,r6 #t1=y1
- bicw3 $0x807f,r0,r8 #xexp= exponent of x
- bicw3 $0x807f,r2,r9 #yexp= exponent of y
- subw2 r9,r8 #xexp-yexp
- subw2 $0x0c80,r8 #k=xexp-yexp-25(exponent bit field)
+ movq %r2,%r4 #t =y
+ movq %r10,%r6 #t1=y1
+ bicw3 $0x807f,%r0,%r8 #xexp= exponent of x
+ bicw3 $0x807f,%r2,%r9 #yexp= exponent of y
+ subw2 %r9,%r8 #xexp-yexp
+ subw2 $0x0c80,%r8 #k=xexp-yexp-25(exponent bit field)
blss C2 #if k<0 goto C2
- addw2 r8,r4 #t +=k
- addw2 r8,r6 #t1+=k, scale up t and t1
+ addw2 %r8,%r4 #t +=k
+ addw2 %r8,%r6 #t1+=k, scale up t and t1
C2:
- divd3 r4,r0,r8 #x/t
- cvtdl r8,r8 #n=[x/t] truncated
- cvtld r8,r8 #float(n)
- subd2 r6,r4 #t:=t-t1
- muld2 r8,r4 #n*(t-t1)
- muld2 r8,r6 #n*t1
- subd2 r6,r0 #x-n*t1
- subd2 r4,r0 #(x-n*t1)-n*(t-t1)
+ divd3 %r4,%r0,%r8 #x/t
+ cvtdl %r8,%r8 #n=[x/t] truncated
+ cvtld %r8,%r8 #float(n)
+ subd2 %r6,%r4 #t:=t-t1
+ muld2 %r8,%r4 #n*(t-t1)
+ muld2 %r8,%r6 #n*t1
+ subd2 %r6,%r0 #x-n*t1
+ subd2 %r4,%r0 #(x-n*t1)-n*(t-t1)
brb loop
E1:
- movw -4(fp),r6 #r6=nx
+ movw -4(%fp),%r6 #r6=nx
beql C3 #if nx=0 goto C3
- addw2 r6,r0 #x:=x*2**57 scale up x by nx
- movw $0,-4(fp) #clear nx
+ addw2 %r6,%r0 #x:=x*2**57 scale up x by nx
+ movw $0,-4(%fp) #clear nx
brb loop
C3:
- movq r2,r4 #r4 = y
- subw2 $0x80,r4 #r4 = y/2
- cmpd r0,r4 #x:y/2
+ movq %r2,%r4 #r4 = y
+ subw2 $0x80,%r4 #r4 = y/2
+ cmpd %r0,%r4 #x:y/2
blss E2 #if x < y/2 goto E2
bgtr C4 #if x > y/2 goto C4
- cvtdl r8,r8 #ifix(float(n))
- blbc r8,E2 #if the last bit is zero, goto E2
+ cvtdl %r8,%r8 #ifix(float(n))
+ blbc %r8,E2 #if the last bit is zero, goto E2
C4:
- subd2 r2,r0 #x-y
+ subd2 %r2,%r0 #x-y
E2:
- xorw2 -12(fp),r0 #x^sign (exclusive or)
- movw -8(fp),r6 #r6=nf
- bicw3 $0x807f,r0,r8 #r8=exponent of x
- bicw2 $0x7f80,r0 #clear the exponent of x
- subw2 r6,r8 #r8=xexp-nf
+ xorw2 -12(%fp),%r0 #x^sign (exclusive or)
+ movw -8(%fp),%r6 #r6=nf
+ bicw3 $0x807f,%r0,%r8 #r8=exponent of x
+ bicw2 $0x7f80,%r0 #clear the exponent of x
+ subw2 %r6,%r8 #r8=xexp-nf
bgtr C5 #if xexp-nf is positive goto C5
- movw $0,r8 #clear r8
- movq $0,r0 #x underflow to zero
+ movw $0,%r8 #clear r8
+ movq $0,%r0 #x underflow to zero
C5:
- bisw2 r8,r0 #put r8 into exponent field of x
+ bisw2 %r8,%r0 #put r8 into exponent field of x
ret
Rop: #Reserved operand
pushl $EDOM
calls $1,_C_LABEL(infnan) #generate reserved op fault
ret
Ret:
- movq $0x8000,r0 #propagate reserved op
+ movq $0x8000,%r0 #propagate reserved op
ret
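copysign() above is pure bit surgery: clear the sign bit of x, then OR in the
sign bit of y.  On the VAX D_floating format that bit sits in the first 16-bit
word, hence the bicw3/bisw2 pair; the equivalent for IEEE 754 doubles (an
assumption of this sketch, not what the assembly operates on) looks like:

    #include <stdint.h>
    #include <string.h>

    static double copysign_model(double x, double y)
    {
            uint64_t ux, uy;

            memcpy(&ux, &x, sizeof ux);
            memcpy(&uy, &y, sizeof uy);
            ux = (ux & ~(1ULL << 63)) | (uy & (1ULL << 63)); /* clear x's sign, copy y's */
            memcpy(&x, &ux, sizeof x);
            return x;
    }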
diff --git a/lib/libm/arch/vax/n_tan.S b/lib/libm/arch/vax/n_tan.S
index 0077694d64d..aedb143b0b6 100644
--- a/lib/libm/arch/vax/n_tan.S
+++ b/lib/libm/arch/vax/n_tan.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_tan.S,v 1.8 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_tan.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_tan.S,v 1.1 1995/10/10 23:40:31 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -48,14 +48,14 @@
STRONG_ALIAS(tanl,tan)
ENTRY(tan, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x807f,r0,r2
+ movq 4(%ap),%r0
+ bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -69,24 +69,24 @@ ENTRY(tan, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
*
* Save r3/r0 so that we can call cosine after calling sine.
*/
- movq r2,-(sp)
- movq r0,-(sp)
+ movq %r2,-(%sp)
+ movq %r0,-(%sp)
/*
* Call sine. r4 = 0 implies sine.
*/
- movl $0,r4
+ movl $0,%r4
jsb __libm_sincos
/*
* Save sin(x) in r11/r10 .
*/
- movd r0,r10
+ movd %r0,%r10
/*
* Call cosine. r4 = 1 implies cosine.
*/
- movq (sp)+,r0
- movq (sp)+,r2
- movl $1,r4
+ movq (%sp)+,%r0
+ movq (%sp)+,%r2
+ movl $1,%r4
jsb __libm_sincos
- divd3 r0,r10,r0
- bispsw (sp)+
+ divd3 %r0,%r10,%r0
+ bispsw (%sp)+
1: ret
diff --git a/lib/librthread/arch/vax/cerror.S b/lib/librthread/arch/vax/cerror.S
index b5c468deeac..e860dadbf70 100644
--- a/lib/librthread/arch/vax/cerror.S
+++ b/lib/librthread/arch/vax/cerror.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cerror.S,v 1.1 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: cerror.S,v 1.2 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 1983, 1993
* The Regents of the University of California. All rights reserved.
@@ -32,9 +32,9 @@
.globl _C_LABEL(__errno)
_C_LABEL(_cerror):
- pushl r0
+ pushl %r0
calls $0, _C_LABEL(__errno)
- movl (sp)+, (r0)
- mnegl $1,r0
- movl r0,r1
+ movl (%sp)+, (%r0)
+ mnegl $1,%r0
+ movl %r0,%r1
ret
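The librthread _cerror stub does in assembly what this hypothetical C fragment
does: store the error number the syscall stub pushed through the per-thread
__errno() location, then return -1 (in both r0 and r1, so 64-bit returns are
covered as well):

    #include <errno.h>

    static long cerror_model(int code)
    {
            errno = code;   /* *__errno() = pushed error number */
            return -1;
    }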
diff --git a/lib/librthread/rthread.c b/lib/librthread/rthread.c
index bf9eff0dd4d..030fbfd6454 100644
--- a/lib/librthread/rthread.c
+++ b/lib/librthread/rthread.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.c,v 1.71 2013/06/01 23:06:26 tedu Exp $ */
+/* $OpenBSD: rthread.c,v 1.72 2013/07/05 21:10:50 miod Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -184,7 +184,7 @@ _rthread_init(void)
_rthread_debug(1, "rthread init\n");
-#if defined(__ELF__)
+#if defined(__ELF__) && !defined(__vax__)
if (_DYNAMIC) {
/*
* To avoid recursion problems in ld.so, we need to trigger the
diff --git a/lib/libssl/crypto/arch/vax/bn_asm_vax.S b/lib/libssl/crypto/arch/vax/bn_asm_vax.S
index f1e46b2010e..efa9b6ebd65 100644
--- a/lib/libssl/crypto/arch/vax/bn_asm_vax.S
+++ b/lib/libssl/crypto/arch/vax/bn_asm_vax.S
@@ -1,4 +1,4 @@
-# $OpenBSD: bn_asm_vax.S,v 1.2 2012/10/13 21:31:56 djm Exp $
+# $OpenBSD: bn_asm_vax.S,v 1.3 2013/07/05 21:10:50 miod Exp $
# $NetBSD: bn_asm_vax.S,v 1.1 2003/11/03 10:22:28 ragge Exp $
#include <machine/asm.h>
@@ -15,38 +15,38 @@
# }
ENTRY(bn_mul_add_words,R6)
- movl 4(ap),r2 # *r
- movl 8(ap),r3 # *a
- movl 12(ap),r4 # n
- movl 16(ap),r5 # w
- clrl r6 # return value ("carry")
+ movl 4(%ap),%r2 # *r
+ movl 8(%ap),%r3 # *a
+ movl 12(%ap),%r4 # n
+ movl 16(%ap),%r5 # w
+ clrl %r6 # return value ("carry")
-0: emul r5,(r3),(r2),r0 # w * a[0] + r[0] -> r0
+0: emul %r5,(%r3),(%r2),%r0 # w * a[0] + r[0] -> r0
# fixup for "negative" r[]
- tstl (r2)
+ tstl (%r2)
bgeq 1f
- incl r1 # add 1 to highword
+ incl %r1 # add 1 to highword
1: # add saved carry to result
- addl2 r6,r0
- adwc $0,r1
+ addl2 %r6,%r0
+ adwc $0,%r1
# combined fixup for "negative" w, a[]
- tstl r5 # if w is negative...
+ tstl %r5 # if w is negative...
bgeq 1f
- addl2 (r3),r1 # ...add a[0] again to highword
-1: tstl (r3) # if a[0] is negative...
+ addl2 (%r3),%r1 # ...add a[0] again to highword
+1: tstl (%r3) # if a[0] is negative...
bgeq 1f
- addl2 r5,r1 # ...add w again to highword
+ addl2 %r5,%r1 # ...add w again to highword
1:
- movl r0,(r2)+ # save low word in dest & advance *r
- addl2 $4,r3 # advance *a
- movl r1,r6 # high word in r6 for return value
+ movl %r0,(%r2)+ # save low word in dest & advance *r
+ addl2 $4,%r3 # advance *a
+ movl %r1,%r6 # high word in r6 for return value
- sobgtr r4,0b # loop?
+ sobgtr %r4,0b # loop?
- movl r6,r0
+ movl %r6,%r0
ret
# .title vax_bn_mul_words unsigned multiply & add, 32*32+32=>64
@@ -64,34 +64,34 @@ ENTRY(bn_mul_add_words,R6)
#
ENTRY(bn_mul_words,R6)
- movl 4(ap),r2 # *r
- movl 8(ap),r3 # *a
- movl 12(ap),r4 # n
- movl 16(ap),r5 # w
- clrl r6 # carry
+ movl 4(%ap),%r2 # *r
+ movl 8(%ap),%r3 # *a
+ movl 12(%ap),%r4 # n
+ movl 16(%ap),%r5 # w
+ clrl %r6 # carry
-0: emul r5,(r3),r6,r0 # w * a[0] + carry -> r0
+0: emul %r5,(%r3),%r6,%r0 # w * a[0] + carry -> r0
# fixup for "negative" carry
- tstl r6
+ tstl %r6
bgeq 1f
- incl r1
+ incl %r1
1: # combined fixup for "negative" w, a[]
- tstl r5
+ tstl %r5
bgeq 1f
- addl2 (r3),r1
-1: tstl (r3)
+ addl2 (%r3),%r1
+1: tstl (%r3)
bgeq 1f
- addl2 r5,r1
+ addl2 %r5,%r1
-1: movl r0,(r2)+
- addl2 $4,r3
- movl r1,r6
+1: movl %r0,(%r2)+
+ addl2 $4,%r3
+ movl %r1,%r6
- sobgtr r4,0b
+ sobgtr %r4,0b
- movl r6,r0
+ movl %r6,%r0
ret
@@ -109,23 +109,23 @@ ENTRY(bn_mul_words,R6)
#
ENTRY(bn_sqr_words,0)
- movl 4(ap),r2 # r
- movl 8(ap),r3 # a
- movl 12(ap),r4 # n
+ movl 4(%ap),%r2 # r
+ movl 8(%ap),%r3 # a
+ movl 12(%ap),%r4 # n
-0: movl (r3)+,r5 # r5 = a[] & advance
+0: movl (%r3)+,%r5 # r5 = a[] & advance
- emul r5,r5,$0,r0 # a[0] * a[0] + 0 -> r0
+ emul %r5,%r5,$0,%r0 # a[0] * a[0] + 0 -> r0
# fixup for "negative" a[]
- tstl r5
+ tstl %r5
bgeq 1f
- addl2 r5,r1
- addl2 r5,r1
+ addl2 %r5,%r1
+ addl2 %r5,%r1
-1: movq r0,(r2)+ # store 64-bit result
+1: movq %r0,(%r2)+ # store 64-bit result
- sobgtr r4,0b # loop
+ sobgtr %r4,0b # loop
ret
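All of the multiply loops above have to work around the fact that emul is a
signed 32x32+32 -> 64 multiply-add while the bignum limbs are unsigned: whenever
a multiplicand has its sign bit set, the other multiplicand is added back into
the high word (the "fixup for negative ..." branches; the addend term gets its
own, simpler fixup, the incl on the high word).  A C model of the multiplicand
correction (function name illustrative):

    #include <stdint.h>

    static uint64_t umul32_model(uint32_t a, uint32_t w)
    {
            int64_t p = (int64_t)(int32_t)a * (int32_t)w;   /* what emul computes */
            uint64_t u = (uint64_t)p;

            if ((int32_t)w < 0)
                    u += (uint64_t)a << 32;  /* "...add a[0] again to highword" */
            if ((int32_t)a < 0)
                    u += (uint64_t)w << 32;  /* "...add w again to highword" */
            return u;                        /* now equals (uint64_t)a * w */
    }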
@@ -219,90 +219,90 @@ ENTRY(bn_sqr_words,0)
#
ENTRY(bn_div_words,R6|R7|R8)
- movl 4(ap),r3 # h
- movl 8(ap),r2 # l
- movl 12(ap),r4 # d
+ movl 4(%ap),%r3 # h
+ movl 8(%ap),%r2 # l
+ movl 12(%ap),%r4 # d
- bicl3 $-8,r2,r5 # l' = l & 7
- bicl3 $7,r2,r2
+ bicl3 $-8,%r2,%r5 # l' = l & 7
+ bicl3 $7,%r2,%r2
- bicl3 $-8,r3,r6
- bicl3 $7,r3,r3
+ bicl3 $-8,%r3,%r6
+ bicl3 $7,%r3,%r3
- addl2 r6,r2
+ addl2 %r6,%r2
- rotl $-3,r2,r2 # l = l >> 3
- rotl $-3,r3,r3 # h = h >> 3
+ rotl $-3,%r2,%r2 # l = l >> 3
+ rotl $-3,%r3,%r3 # h = h >> 3
- movl r4,r7 # d' = d
+ movl %r4,%r7 # d' = d
- clrl r6 # r' = 0
- clrl r8 # q' = 0
+ clrl %r6 # r' = 0
+ clrl %r8 # q' = 0
- tstl r4
+ tstl %r4
beql 0f # Uh-oh, the divisor is 0...
bgtr 1f
- rotl $-1,r4,r4 # If d is negative, shift it right.
- bicl2 $0x80000000,r4 # Since d is then a large number, the
+ rotl $-1,%r4,%r4 # If d is negative, shift it right.
+ bicl2 $0x80000000,%r4 # Since d is then a large number, the
# lowest bit is insignificant
# (contradict that, and I'll fix the problem!)
1:
- ediv r4,r2,r2,r3 # Do the actual division
+ ediv %r4,%r2,%r2,%r3 # Do the actual division
- tstl r2
+ tstl %r2
bgeq 1f
- mnegl r2,r2 # if q < 0, negate it
+ mnegl %r2,%r2 # if q < 0, negate it
1:
- tstl r7
+ tstl %r7
blss 1f
- rotl $3,r2,r2 # q = q << 3
- bicl3 $-8,r2,r8 # q' gets the high bits from q
- bicl3 $7,r2,r2
+ rotl $3,%r2,%r2 # q = q << 3
+ bicl3 $-8,%r2,%r8 # q' gets the high bits from q
+ bicl3 $7,%r2,%r2
brb 2f
1: # else
- rotl $2,r2,r2 # q = q << 2
- bicl3 $-4,r2,r8 # q' gets the high bits from q
- bicl3 $3,r2,r2
+ rotl $2,%r2,%r2 # q = q << 2
+ bicl3 $-4,%r2,%r8 # q' gets the high bits from q
+ bicl3 $3,%r2,%r2
2:
- rotl $3,r3,r3 # r = r << 3
- bicl3 $-8,r3,r6 # r' gets the high bits from r
- bicl3 $7,r3,r3
- addl2 r5,r3 # r = r + l'
+ rotl $3,%r3,%r3 # r = r << 3
+ bicl3 $-8,%r3,%r6 # r' gets the high bits from r
+ bicl3 $7,%r3,%r3
+ addl2 %r5,%r3 # r = r + l'
- tstl r7
+ tstl %r7
bgeq 5f
- bitl $1,r7
+ bitl $1,%r7
beql 5f # if d' < 0 && d' & 1
- subl2 r2,r3 # [r',r] = [r',r] - [q',q]
- sbwc r8,r6
+ subl2 %r2,%r3 # [r',r] = [r',r] - [q',q]
+ sbwc %r8,%r6
3:
bgeq 5f # while r < 0
- decl r2 # [q',q] = [q',q] - 1
- sbwc $0,r8
- addl2 r7,r3 # [r',r] = [r',r] + d'
- adwc $0,r6
+ decl %r2 # [q',q] = [q',q] - 1
+ sbwc $0,%r8
+ addl2 %r7,%r3 # [r',r] = [r',r] + d'
+ adwc $0,%r6
brb 3b
# The return points are placed in the middle to keep a short distance from
# all the branch points
1:
-# movl r3,r1
- movl r2,r0
+# movl %r3,%r1
+ movl %r2,%r0
ret
0:
- movl $-1,r0
+ movl $-1,%r0
ret
5:
- tstl r6
+ tstl %r6
bneq 6f
- cmpl r3,r7
+ cmpl %r3,%r7
blssu 1b # while [r',r] >= d'
6:
- subl2 r7,r3 # [r',r] = [r',r] - d'
- sbwc $0,r6
- incl r2 # [q',q] = [q',q] + 1
- adwc $0,r8
+ subl2 %r7,%r3 # [r',r] = [r',r] - d'
+ sbwc $0,%r6
+ incl %r2 # [q',q] = [q',q] + 1
+ adwc $0,%r8
brb 5b
@@ -320,21 +320,21 @@ ENTRY(bn_div_words,R6|R7|R8)
#
ENTRY(bn_add_words,0)
- movl 4(ap),r2 # r
- movl 8(ap),r3 # a
- movl 12(ap),r4 # b
- movl 16(ap),r5 # n
- clrl r0
+ movl 4(%ap),%r2 # r
+ movl 8(%ap),%r3 # a
+ movl 12(%ap),%r4 # b
+ movl 16(%ap),%r5 # n
+ clrl %r0
- tstl r5
+ tstl %r5
bleq 1f
-0: movl (r3)+,r1 # carry untouched
- adwc (r4)+,r1 # carry used and touched
- movl r1,(r2)+ # carry untouched
- sobgtr r5,0b # carry untouched
+0: movl (%r3)+,%r1 # carry untouched
+ adwc (%r4)+,%r1 # carry used and touched
+ movl %r1,(%r2)+ # carry untouched
+ sobgtr %r5,0b # carry untouched
- adwc $0,r0
+ adwc $0,%r0
1: ret
#;
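The "carry untouched" comments matter because the whole loop body sits between
one adwc and the next: movl and sobgtr do not modify the PSW carry bit, so the
carry produced by each add-with-carry survives into the following iteration.  In
C the same word-by-word add with a propagated carry looks like this (a sketch;
the limb type is assumed to be 32 bits, as on VAX):

    #include <stdint.h>

    typedef uint32_t BN_ULONG;

    static BN_ULONG bn_add_words_model(BN_ULONG *r, const BN_ULONG *a,
        const BN_ULONG *b, int n)
    {
            uint64_t carry = 0, s;
            int i;

            for (i = 0; i < n; i++) {
                    s = (uint64_t)a[i] + b[i] + carry;
                    r[i] = (BN_ULONG)s;
                    carry = s >> 32;        /* what adwc carries to the next word */
            }
            return (BN_ULONG)carry;
    }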
@@ -349,21 +349,21 @@ ENTRY(bn_add_words,0)
#
ENTRY(bn_sub_words,R6)
- movl 4(ap),r2 # r
- movl 8(ap),r3 # a
- movl 12(ap),r4 # b
- movl 16(ap),r5 # n
- clrl r0
+ movl 4(%ap),%r2 # r
+ movl 8(%ap),%r3 # a
+ movl 12(%ap),%r4 # b
+ movl 16(%ap),%r5 # n
+ clrl %r0
- tstl r5
+ tstl %r5
bleq 1f
-0: movl (r3)+,r6 # carry untouched
- sbwc (r4)+,r6 # carry used and touched
- movl r6,(r2)+ # carry untouched
- sobgtr r5,0b # carry untouched
+0: movl (%r3)+,%r6 # carry untouched
+ sbwc (%r4)+,%r6 # carry used and touched
+ movl %r6,(%r2)+ # carry untouched
+ sobgtr %r5,0b # carry untouched
-1: adwc $0,r0
+1: adwc $0,%r0
ret
#
@@ -374,63 +374,63 @@ ENTRY(bn_sub_words,R6)
#
ENTRY(bn_mul_comba4,R6|R7|R8|R9)
- movl $4,r9 # 4*4
+ movl $4,%r9 # 4*4
brb 6f
ENTRY(bn_mul_comba8,R6|R7|R8|R9)
- movl $8,r9 # 8*8
+ movl $8,%r9 # 8*8
-6: movl 8(ap),r3 # a[]
- movl 12(ap),r7 # b[]
+6: movl 8(%ap),%r3 # a[]
+ movl 12(%ap),%r7 # b[]
brb 5f
ENTRY(bn_sqr_comba4,R6|R7|R8|R9)
- movl $4,r9 # 4*4
+ movl $4,%r9 # 4*4
brb 0f
ENTRY(bn_sqr_comba8,R6|R7|R8|R9)
- movl $8,r9 # 8*8
+ movl $8,%r9 # 8*8
0:
- movl 8(ap),r3 # a[]
- movl r3,r7 # a[]
+ movl 8(%ap),%r3 # a[]
+ movl %r3,%r7 # a[]
-5: movl 4(ap),r5 # r[]
- movl r9,r8
+5: movl 4(%ap),%r5 # r[]
+ movl %r9,%r8
- clrq (r5) # clear destination, for add.
- clrq 8(r5)
- clrq 16(r5) # these only needed for comba8
- clrq 24(r5)
+ clrq (%r5) # clear destination, for add.
+ clrq 8(%r5)
+ clrq 16(%r5) # these only needed for comba8
+ clrq 24(%r5)
-2: clrl r4 # carry
- movl r9,r6 # inner loop count
- movl (r7)+,r2 # value to multiply with
+2: clrl %r4 # carry
+ movl %r9,%r6 # inner loop count
+ movl (%r7)+,%r2 # value to multiply with
-1: emul r2,(r3),r4,r0
- tstl r4
+1: emul %r2,(%r3),%r4,%r0
+ tstl %r4
bgeq 3f
- incl r1
-3: tstl r2
+ incl %r1
+3: tstl %r2
bgeq 3f
- addl2 (r3),r1
-3: tstl (r3)
+ addl2 (%r3),%r1
+3: tstl (%r3)
bgeq 3f
- addl2 r2,r1
+ addl2 %r2,%r1
-3: addl2 r0,(r5)+ # add to destination
- adwc $0,r1 # remember carry
- movl r1,r4 # add carry in next emul
- addl2 $4,r3
- sobgtr r6,1b
+3: addl2 %r0,(%r5)+ # add to destination
+ adwc $0,%r1 # remember carry
+ movl %r1,%r4 # add carry in next emul
+ addl2 $4,%r3
+ sobgtr %r6,1b
- movl r4,(r5) # save highest add result
+ movl %r4,(%r5) # save highest add result
- ashl $2,r9,r4
- subl2 r4,r3
- subl2 $4,r4
- subl2 r4,r5
+ ashl $2,%r9,%r4
+ subl2 %r4,%r3
+ subl2 $4,%r4
+ subl2 %r4,%r5
- sobgtr r8,2b
+ sobgtr %r8,2b
ret