Diffstat (limited to 'lib/libm/arch/vax')
-rw-r--r--  lib/libm/arch/vax/n_argred.S   374
-rw-r--r--  lib/libm/arch/vax/n_atan2.S    176
-rw-r--r--  lib/libm/arch/vax/n_cbrt.S      62
-rw-r--r--  lib/libm/arch/vax/n_hypot.S     76
-rw-r--r--  lib/libm/arch/vax/n_infnan.S     6
-rw-r--r--  lib/libm/arch/vax/n_sincos.S    28
-rw-r--r--  lib/libm/arch/vax/n_sqrt.S      56
-rw-r--r--  lib/libm/arch/vax/n_support.S  174
-rw-r--r--  lib/libm/arch/vax/n_tan.S       28
9 files changed, 490 insertions, 490 deletions
diff --git a/lib/libm/arch/vax/n_argred.S b/lib/libm/arch/vax/n_argred.S
index 82010515b08..822486cbaa6 100644
--- a/lib/libm/arch/vax/n_argred.S
+++ b/lib/libm/arch/vax/n_argred.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_argred.S,v 1.3 2008/05/21 20:37:10 miod Exp $ */
+/* $OpenBSD: n_argred.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_argred.S,v 1.1 1995/10/10 23:40:21 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -50,11 +50,11 @@ _ALTENTRY(__libm_argred)
* Compare the argument with the largest possible that can
* be reduced by table lookup. r3 := |x| will be used in table_lookup .
*/
- movd r0,r3
+ movd %r0,%r3
bgeq abs1
- mnegd r3,r3
+ mnegd %r3,%r3
abs1:
- cmpd r3,$0d+4.55530934770520019583e+01
+ cmpd %r3,$0d+4.55530934770520019583e+01
blss small_arg
jsb trigred
rsb
@@ -72,51 +72,51 @@ _ALTENTRY(__libm_sincos)
/*
* Compensate for a cosine entry by adding one to the quadrant number.
*/
- addl2 r4,r0
+ addl2 %r4,%r0
/*
* Polyd clobbers r5-r0 ; save X in r7/r6 .
* This can be avoided by rewriting trigred .
*/
- movd r1,r6
+ movd %r1,%r6
/*
* Likewise, save alpha in r8 .
* This can be avoided by rewriting trigred .
*/
- movf r3,r8
+ movf %r3,%r8
/*
* Odd or even quadrant? cosine if odd, sine otherwise.
* Save floor(quadrant/2) in r9 ; it determines the final sign.
*/
- rotl $-1,r0,r9
+ rotl $-1,%r0,%r9
blss cosine
sine:
- muld2 r1,r1 # Xsq = X * X
- cmpw $0x2480,r1 # [zl] Xsq > 2^-56?
+ muld2 %r1,%r1 # Xsq = X * X
+ cmpw $0x2480,%r1 # [zl] Xsq > 2^-56?
blss 1f # [zl] yes, go ahead and do polyd
- clrq r1 # [zl] work around 11/780 FPA polyd bug
+ clrq %r1 # [zl] work around 11/780 FPA polyd bug
1:
- polyd r1,$7,sin_coef # Q = P(Xsq) , of deg 7
- mulf3 $0f3.0,r8,r4 # beta = 3 * alpha
- mulf2 r0,r4 # beta = Q * beta
- addf2 r8,r4 # beta = alpha + beta
- muld2 r6,r0 # S(X) = X * Q
-/* cvtfd r4,r4 ... r5 = 0 after a polyd. */
- addd2 r4,r0 # S(X) = beta + S(X)
- addd2 r6,r0 # S(X) = X + S(X)
+ polyd %r1,$7,sin_coef # Q = P(Xsq) , of deg 7
+ mulf3 $0f3.0,%r8,%r4 # beta = 3 * alpha
+ mulf2 %r0,%r4 # beta = Q * beta
+ addf2 %r8,%r4 # beta = alpha + beta
+ muld2 %r6,%r0 # S(X) = X * Q
+/* cvtfd %r4,%r4 ... r5 = 0 after a polyd. */
+ addd2 %r4,%r0 # S(X) = beta + S(X)
+ addd2 %r6,%r0 # S(X) = X + S(X)
brb done
cosine:
- muld2 r6,r6 # Xsq = X * X
+ muld2 %r6,%r6 # Xsq = X * X
beql zero_arg
- mulf2 r1,r8 # beta = X * alpha
- polyd r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
- subd3 r0,r8,r0 # beta = beta - Q
- subw2 $0x80,r6 # Xsq = Xsq / 2
- addd2 r0,r6 # Xsq = Xsq + beta
+ mulf2 %r1,%r8 # beta = X * alpha
+ polyd %r6,$7,cos_coef /* Q = P'(Xsq) , of deg 7 */
+ subd3 %r0,%r8,%r0 # beta = beta - Q
+ subw2 $0x80,%r6 # Xsq = Xsq / 2
+ addd2 %r0,%r6 # Xsq = Xsq + beta
zero_arg:
- subd3 r6,$0d1.0,r0 # C(X) = 1 - Xsq
+ subd3 %r6,$0d1.0,%r0 # C(X) = 1 - Xsq
done:
- blbc r9,even
- mnegd r0,r0
+ blbc %r9,even
+ mnegd %r0,%r0
even:
rsb
@@ -263,31 +263,31 @@ twoOverPi:
_ALIGN_TEXT
table_lookup:
- muld3 r3,twoOverPi,r0
- cvtrdl r0,r0 # n = nearest int to ((2/pi)*|x|) rnded
- mull3 $8,r0,r5
- subd2 leading(r5),r3 # p = (|x| - leading n*pi/2) exactly
- subd3 middle(r5),r3,r1 # q = (p - middle n*pi/2) rounded
- subd2 r1,r3 # r = (p - q)
- subd2 middle(r5),r3 # r = r - middle n*pi/2
- subd2 trailing(r5),r3 # r = r - trailing n*pi/2 rounded
+ muld3 %r3,twoOverPi,%r0
+ cvtrdl %r0,%r0 # n = nearest int to ((2/pi)*|x|) rnded
+ mull3 $8,%r0,%r5
+ subd2 leading(%r5),%r3 # p = (|x| - leading n*pi/2) exactly
+ subd3 middle(%r5),%r3,%r1 # q = (p - middle n*pi/2) rounded
+ subd2 %r1,%r3 # r = (p - q)
+ subd2 middle(%r5),%r3 # r = r - middle n*pi/2
+ subd2 trailing(%r5),%r3 # r = r - trailing n*pi/2 rounded
/*
* If the original argument was negative,
 * negate the reduced argument and
* adjust the octant/quadrant number.
*/
- tstw 4(ap)
+ tstw 4(%ap)
bgeq abs2
- mnegf r1,r1
- mnegf r3,r3
-/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
- subb3 r0,$4,r0
+ mnegf %r1,%r1
+ mnegf %r3,%r3
+/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
+ subb3 %r0,$4,%r0
abs2:
/*
* Clear all unneeded octant/quadrant bits.
*/
-/* bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
- bicb2 $0xfc,r0
+/* bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
+ bicb2 $0xfc,%r0
rsb
/*
* p.0
@@ -342,19 +342,19 @@ trigred:
/*
* Save the sign of the input argument.
*/
- movw r0,-(sp)
+ movw %r0,-(%sp)
/*
* Extract the exponent field.
*/
- extzv $7,$7,r0,r2
+ extzv $7,$7,%r0,%r2
/*
* Convert the fraction part of the input
* argument into a quadword integer.
*/
- bicw2 $0xff80,r0
- bisb2 $0x80,r0 # -S.McD
- rotl $16,r0,r0
- rotl $16,r1,r1
+ bicw2 $0xff80,%r0
+ bisb2 $0x80,%r0 # -S.McD
+ rotl $16,%r0,%r0
+ rotl $16,%r1,%r1
/*
* If r1 is negative, add 1 to r0 . This
* adjustment is made so that the two's
@@ -362,7 +362,7 @@ trigred:
* will produce unsigned results.
*/
bgeq posmid
- incl r0
+ incl %r0
posmid:
/* p.3
*
@@ -371,54 +371,54 @@ posmid:
* The address is longword aligned to ensure
* efficient access.
*/
- ashl $-3,r2,r3
- bicb2 $3,r3
- subl3 r3,$bits2opi,r3
+ ashl $-3,%r2,%r3
+ bicb2 $3,%r3
+ subl3 %r3,$bits2opi,%r3
/*
* Set r2 to the size of the shift needed to
* obtain the correct portion of 2/pi .
*/
- bicb2 $0xe0,r2
+ bicb2 $0xe0,%r2
/* p.4
*
* Move the needed 128 bits of 2/pi into
* r11 - r8 . Adjust the numbers to allow
* for unsigned multiplication.
*/
- ashq r2,(r3),r10
+ ashq %r2,(%r3),%r10
- subl2 $4,r3
- ashq r2,(r3),r9
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r9
bgeq signoff1
- incl r11
+ incl %r11
signoff1:
- subl2 $4,r3
- ashq r2,(r3),r8
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r8
bgeq signoff2
- incl r10
+ incl %r10
signoff2:
- subl2 $4,r3
- ashq r2,(r3),r7
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r7
bgeq signoff3
- incl r9
+ incl %r9
signoff3:
/* p.5
*
* Multiply the contents of r0/r1 by the
* slice of 2/pi in r11 - r8 .
*/
- emul r0,r8,$0,r4
- emul r0,r9,r5,r5
- emul r0,r10,r6,r6
+ emul %r0,%r8,$0,%r4
+ emul %r0,%r9,%r5,%r5
+ emul %r0,%r10,%r6,%r6
- emul r1,r8,$0,r7
- emul r1,r9,r8,r8
- emul r1,r10,r9,r9
- emul r1,r11,r10,r10
+ emul %r1,%r8,$0,%r7
+ emul %r1,%r9,%r8,%r8
+ emul %r1,%r10,%r9,%r9
+ emul %r1,%r11,%r10,%r10
- addl2 r4,r8
- adwc r5,r9
- adwc r6,r10
+ addl2 %r4,%r8
+ adwc %r5,%r9
+ adwc %r6,%r10
/* p.6
*
* If there are more than five leading zeros
@@ -427,42 +427,42 @@ signoff3:
* two quotient bits, generate more fraction bits.
* Otherwise, branch to code to produce the result.
*/
- bicl3 $0xc1ffffff,r10,r4
+ bicl3 $0xc1ffffff,%r10,%r4
beql more1
- cmpl $0x3e000000,r4
+ cmpl $0x3e000000,%r4
bneq result
more1:
/* p.7
*
* generate another 32 result bits.
*/
- subl2 $4,r3
- ashq r2,(r3),r5
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r5
bgeq signoff4
- emul r1,r6,$0,r4
- addl2 r1,r5
- emul r0,r6,r5,r5
- addl2 r0,r6
+ emul %r1,%r6,$0,%r4
+ addl2 %r1,%r5
+ emul %r0,%r6,%r5,%r5
+ addl2 %r0,%r6
brb addbits1
signoff4:
- emul r1,r6,$0,r4
- emul r0,r6,r5,r5
+ emul %r1,%r6,$0,%r4
+ emul %r0,%r6,%r5,%r5
addbits1:
- addl2 r5,r7
- adwc r6,r8
- adwc $0,r9
- adwc $0,r10
+ addl2 %r5,%r7
+ adwc %r6,%r8
+ adwc $0,%r9
+ adwc $0,%r10
/* p.8
*
* Check for massive cancellation.
*/
- bicl3 $0xc0000000,r10,r6
+ bicl3 $0xc0000000,%r10,%r6
/* bneq more2 -S.McD Test was backwards */
beql more2
- cmpl $0x3fffffff,r6
+ cmpl $0x3fffffff,%r6
bneq result
more2:
/* p.9
@@ -472,22 +472,22 @@ more2:
* Testing has shown there will always be
* enough bits after this point.
*/
- subl2 $4,r3
- ashq r2,(r3),r5
+ subl2 $4,%r3
+ ashq %r2,(%r3),%r5
bgeq signoff5
- emul r0,r6,r4,r5
- addl2 r0,r6
+ emul %r0,%r6,%r4,%r5
+ addl2 %r0,%r6
brb addbits2
signoff5:
- emul r0,r6,r4,r5
+ emul %r0,%r6,%r4,%r5
addbits2:
- addl2 r6,r7
- adwc $0,r8
- adwc $0,r9
- adwc $0,r10
+ addl2 %r6,%r7
+ adwc $0,%r8
+ adwc $0,%r9
+ adwc $0,%r10
/* p.10
*
* The following code produces the reduced
@@ -498,17 +498,17 @@ result:
/*
* Extract the octant number from r10 .
*/
-/* extzv $29,$3,r10,r0 ...used for pi/4 reduction -S.McD */
- extzv $30,$2,r10,r0
+/* extzv $29,$3,%r10,%r0 ...used for pi/4 reduction -S.McD */
+ extzv $30,$2,%r10,%r0
/*
* Clear the octant bits in r10 .
*/
-/* bicl2 $0xe0000000,r10 ...used for pi/4 reduction -S.McD */
- bicl2 $0xc0000000,r10
+/* bicl2 $0xe0000000,%r10 ...used for pi/4 reduction -S.McD */
+ bicl2 $0xc0000000,%r10
/*
* Zero the sign flag.
*/
- clrl r5
+ clrl %r5
/* p.11
*
* Check to see if the fraction is greater than
@@ -517,16 +517,16 @@ result:
* on, and replace the fraction with 1 minus
* the fraction.
*/
-/* bitl $0x10000000,r10 ...used for pi/4 reduction -S.McD */
- bitl $0x20000000,r10
+/* bitl $0x10000000,%r10 ...used for pi/4 reduction -S.McD */
+ bitl $0x20000000,%r10
beql small
- incl r0
- incl r5
-/* subl3 r10,$0x1fffffff,r10 ...used for pi/4 reduction -S.McD */
- subl3 r10,$0x3fffffff,r10
- mcoml r9,r9
- mcoml r8,r8
- mcoml r7,r7
+ incl %r0
+ incl %r5
+/* subl3 %r10,$0x1fffffff,%r10 ...used for pi/4 reduction -S.McD */
+ subl3 %r10,$0x3fffffff,%r10
+ mcoml %r9,%r9
+ mcoml %r8,%r8
+ mcoml %r7,%r7
small:
/* p.12
*
@@ -534,37 +534,37 @@ small:
* Test whether the first 30 bits of the
* fraction are zero.
*/
- tstl r10
+ tstl %r10
beql tiny
/*
* Find the position of the first one bit in r10 .
*/
- cvtld r10,r1
- extzv $7,$7,r1,r1
+ cvtld %r10,%r1
+ extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
- subl3 r1,$32,r6
+ subl3 %r1,$32,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
- ashq r6,r9,r10
- ashq r6,r8,r9
+ ashq %r6,%r9,%r10
+ ashq %r6,%r8,%r9
brb mult
/* p.13
*
* Test to see if the sign bit of r9 is on.
*/
tiny:
- tstl r9
+ tstl %r9
bgeq tinier
/*
* If it is, shift the product bits up 32 bits.
*/
- movl $32,r6
- movq r8,r10
- tstl r10
+ movl $32,%r6
+ movq %r8,%r10
+ tstl %r10
brb mult
/* p.14
*
@@ -578,19 +578,19 @@ tinier:
/*
* Find the position of the first one bit in r9 .
*/
- cvtld r9,r1
- extzv $7,$7,r1,r1
+ cvtld %r9,%r1
+ extzv $7,$7,%r1,%r1
/*
* Compute the size of the shift needed.
*/
- subl3 r1,$32,r1
- addl3 $32,r1,r6
+ subl3 %r1,$32,%r1
+ addl3 $32,%r1,%r6
/*
* Shift up the high order 64 bits of the
* product.
*/
- ashq r1,r8,r10
- ashq r1,r7,r9
+ ashq %r1,%r8,%r10
+ ashq %r1,%r7,%r9
brb mult
/* p.15
*
@@ -598,9 +598,9 @@ tinier:
* argument to zero.
*/
zero:
- clrl r1
- clrl r2
- clrl r3
+ clrl %r1
+ clrl %r2
+ clrl %r3
brw return
/* p.16
*
@@ -617,65 +617,65 @@ mult:
/*
* Save r11/r10 in r4/r1 . -S.McD
*/
- movl r11,r4
- movl r10,r1
+ movl %r11,%r4
+ movl %r10,%r1
/*
* If the sign bit of r10 is on, add 1 to r11 .
*/
bgeq signoff6
- incl r11
+ incl %r11
signoff6:
/* p.17
*
* Move pi/2 into r3/r2 .
*/
- movq $0xc90fdaa22168c235,r2
+ movq $0xc90fdaa22168c235,%r2
/*
* Multiply the fraction by the portion of pi/2
* in r2 .
*/
- emul r2,r10,$0,r7
- emul r2,r11,r8,r7
+ emul %r2,%r10,$0,%r7
+ emul %r2,%r11,%r8,%r7
/*
* Multiply the fraction by the portion of pi/2
* in r3 .
*/
- emul r3,r10,$0,r9
- emul r3,r11,r10,r10
+ emul %r3,%r10,$0,%r9
+ emul %r3,%r11,%r10,%r10
/*
* Add the product bits together.
*/
- addl2 r7,r9
- adwc r8,r10
- adwc $0,r11
+ addl2 %r7,%r9
+ adwc %r8,%r10
+ adwc $0,%r11
/*
* Compensate for not sign extending r8 above.-S.McD
*/
- tstl r8
+ tstl %r8
bgeq signoff6a
- decl r11
+ decl %r11
signoff6a:
/*
* Compensate for r11/r10 being unsigned. -S.McD
*/
- addl2 r2,r10
- adwc r3,r11
+ addl2 %r2,%r10
+ adwc %r3,%r11
/*
* Compensate for r3/r2 being unsigned. -S.McD
*/
- addl2 r1,r10
- adwc r4,r11
+ addl2 %r1,%r10
+ adwc %r4,%r11
/* p.18
*
* If the sign bit of r11 is zero, shift the
* product bits up one bit and increment r6 .
*/
blss signon
- incl r6
- ashq $1,r10,r10
- tstl r9
+ incl %r6
+ ashq $1,%r10,%r10
+ tstl %r9
bgeq signoff7
- incl r10
+ incl %r10
signoff7:
signon:
/* p.19
@@ -684,19 +684,19 @@ signon:
* bits into r9/r8 . The sign extension
* will be handled later.
*/
- ashq $-8,r10,r8
+ ashq $-8,%r10,%r8
/*
* Convert the low order 8 bits of r10
* into an F-format number.
*/
- cvtbf r10,r3
+ cvtbf %r10,%r3
/*
* If the result of the conversion was
* negative, add 1 to r9/r8 .
*/
bgeq chop
- incl r8
- adwc $0,r9
+ incl %r8
+ adwc $0,%r9
/*
* If r9 is now zero, branch to special
* code to handle that possibility.
@@ -708,27 +708,27 @@ chop:
* Convert the number in r9/r8 into
* D-format number in r2/r1 .
*/
- rotl $16,r8,r2
- rotl $16,r9,r1
+ rotl $16,%r8,%r2
+ rotl $16,%r9,%r1
/*
* Set the exponent field to the appropriate
* value. Note that the extra bits created by
* sign extension are now eliminated.
*/
- subw3 r6,$131,r6
- insv r6,$7,$9,r1
+ subw3 %r6,$131,%r6
+ insv %r6,$7,$9,%r1
/*
* Set the exponent field of the F-format
* number in r3 to the appropriate value.
*/
- tstf r3
+ tstf %r3
beql return
-/* extzv $7,$8,r3,r4 -S.McD */
- extzv $7,$7,r3,r4
- addw2 r4,r6
-/* subw2 $217,r6 -S.McD */
- subw2 $64,r6
- insv r6,$7,$8,r3
+/* extzv $7,$8,%r3,%r4 -S.McD */
+ extzv $7,$7,%r3,%r4
+ addw2 %r4,%r6
+/* subw2 $217,%r6 -S.McD */
+ subw2 $64,%r6
+ insv %r6,$7,$8,%r3
brb return
/* p.21
*
@@ -738,16 +738,16 @@ chop:
* a carry out.
*/
carryout:
- clrl r1
- clrl r2
- subw3 r6,$132,r6
- insv r6,$7,$9,r1
- tstf r3
+ clrl %r1
+ clrl %r2
+ subw3 %r6,$132,%r6
+ insv %r6,$7,$9,%r1
+ tstf %r3
beql return
- extzv $7,$8,r3,r4
- addw2 r4,r6
- subw2 $218,r6
- insv r6,$7,$8,r3
+ extzv $7,$8,%r3,%r4
+ addw2 %r4,%r6
+ subw2 $218,%r6
+ insv %r6,$7,$8,%r3
/* p.22
*
 * The following code makes a needed
@@ -761,9 +761,9 @@ return:
* equal to 1/2 . If so, negate the reduced
* argument.
*/
- blbc r5,signoff8
- mnegf r1,r1
- mnegf r3,r3
+ blbc %r5,signoff8
+ mnegf %r1,%r1
+ mnegf %r3,%r3
signoff8:
/* p.23
*
@@ -771,18 +771,18 @@ signoff8:
 * negate the reduced argument and
* adjust the octant number.
*/
- tstw (sp)+
+ tstw (%sp)+
bgeq signoff9
- mnegf r1,r1
- mnegf r3,r3
-/* subb3 r0,$8,r0 ...used for pi/4 reduction -S.McD */
- subb3 r0,$4,r0
+ mnegf %r1,%r1
+ mnegf %r3,%r3
+/* subb3 %r0,$8,%r0 ...used for pi/4 reduction -S.McD */
+ subb3 %r0,$4,%r0
signoff9:
/*
* Clear all unneeded octant bits.
*
- * bicb2 $0xf8,r0 ...used for pi/4 reduction -S.McD */
- bicb2 $0xfc,r0
+ * bicb2 $0xf8,%r0 ...used for pi/4 reduction -S.McD */
+ bicb2 $0xfc,%r0
/*
* Return.
*/
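
For readers who do not speak VAX assembly, a rough C sketch of what the small-argument path of __libm_argred computes: pick n, the nearest integer to x*(2/pi), subtract n*pi/2 in two pieces so the leading subtraction is exact, and hand back the quadrant (n mod 4) alongside the reduced argument. The split constants below are fdlibm-style values and the function name is illustrative; the VAX code instead uses a leading/middle/trailing table plus the extended-precision trigred path for huge arguments.

#include <math.h>

static double argred_sketch(double x, int *quadrant)
{
	/* pi/2 split so n * pio2_hi subtracts exactly for moderate n */
	static const double pio2_hi = 1.57079632673412561417e+00;
	static const double pio2_lo = 6.07710050650619224932e-11;

	double n = nearbyint(x * M_2_PI);	/* nearest multiple of pi/2 */
	double r = (x - n * pio2_hi) - n * pio2_lo;

	*quadrant = (int)n & 3;			/* mod 4; two's complement assumed */
	return r;				/* roughly |r| <= pi/4 */
}
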
diff --git a/lib/libm/arch/vax/n_atan2.S b/lib/libm/arch/vax/n_atan2.S
index 3a281d700ec..287848aa5a1 100644
--- a/lib/libm/arch/vax/n_atan2.S
+++ b/lib/libm/arch/vax/n_atan2.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_atan2.S,v 1.7 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_atan2.S,v 1.8 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_atan2.S,v 1.1 1995/10/10 23:40:25 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -75,128 +75,128 @@
STRONG_ALIAS(atan2l,atan2)
ENTRY(atan2, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r2 # r2 = y
- movq 12(ap),r4 # r4 = x
- bicw3 $0x7f,r2,r0
- bicw3 $0x7f,r4,r1
- cmpw r0,$0x8000 # y is the reserved operand
+ movq 4(%ap),%r2 # r2 = y
+ movq 12(%ap),%r4 # r4 = x
+ bicw3 $0x7f,%r2,%r0
+ bicw3 $0x7f,%r4,%r1
+ cmpw %r0,$0x8000 # y is the reserved operand
jeql resop
- cmpw r1,$0x8000 # x is the reserved operand
+ cmpw %r1,$0x8000 # x is the reserved operand
jeql resop
- subl2 $8,sp
- bicw3 $0x7fff,r2,-4(fp) # copy y sign bit to -4(fp)
- bicw3 $0x7fff,r4,-8(fp) # copy x sign bit to -8(fp)
- cmpd r4,$0x4080 # x = 1.0 ?
+ subl2 $8,%sp
+ bicw3 $0x7fff,%r2,-4(%fp) # copy y sign bit to -4(fp)
+ bicw3 $0x7fff,%r4,-8(%fp) # copy x sign bit to -8(fp)
+ cmpd %r4,$0x4080 # x = 1.0 ?
bneq xnot1
- movq r2,r0
- bicw2 $0x8000,r0 # t = |y|
- movq r0,r2 # y = |y|
+ movq %r2,%r0
+ bicw2 $0x8000,%r0 # t = |y|
+ movq %r0,%r2 # y = |y|
brb begin
xnot1:
- bicw3 $0x807f,r2,r11 # yexp
+ bicw3 $0x807f,%r2,%r11 # yexp
jeql yeq0 # if y=0 goto yeq0
- bicw3 $0x807f,r4,r10 # xexp
+ bicw3 $0x807f,%r4,%r10 # xexp
jeql pio2 # if x=0 goto pio2
- subw2 r10,r11 # k = yexp - xexp
- cmpw r11,$0x2000 # k >= 64 (exp) ?
+ subw2 %r10,%r11 # k = yexp - xexp
+ cmpw %r11,$0x2000 # k >= 64 (exp) ?
jgeq pio2 # atan2 = +-pi/2
- divd3 r4,r2,r0 # t = y/x never overflow
- bicw2 $0x8000,r0 # t > 0
- bicw2 $0xff80,r2 # clear the exponent of y
- bicw2 $0xff80,r4 # clear the exponent of x
- bisw2 $0x4080,r2 # normalize y to [1,2)
- bisw2 $0x4080,r4 # normalize x to [1,2)
- subw2 r11,r4 # scale x so that yexp-xexp=k
+ divd3 %r4,%r2,%r0 # t = y/x never overflow
+ bicw2 $0x8000,%r0 # t > 0
+ bicw2 $0xff80,%r2 # clear the exponent of y
+ bicw2 $0xff80,%r4 # clear the exponent of x
+ bisw2 $0x4080,%r2 # normalize y to [1,2)
+ bisw2 $0x4080,%r4 # normalize x to [1,2)
+ subw2 %r11,%r4 # scale x so that yexp-xexp=k
begin:
- cmpw r0,$0x411c # t : 39/16
+ cmpw %r0,$0x411c # t : 39/16
jgeq L50
- addl3 $0x180,r0,r10 # 8*t
- cvtrfl r10,r10 # [8*t] rounded to int
- ashl $-1,r10,r10 # [8*t]/2
- casel r10,$0,$4
-L1:
+ addl3 $0x180,%r0,%r10 # 8*t
+ cvtrfl %r10,%r10 # [8*t] rounded to int
+ ashl $-1,%r10,%r10 # [8*t]/2
+ casel %r10,$0,$4
+L1:
.word L20-L1
.word L20-L1
.word L30-L1
.word L40-L1
.word L40-L1
-L10:
- movq $0xb4d9940f985e407b,r6 # Hi=.98279372324732906796d0
- movq $0x21b1879a3bc2a2fc,r8 # Lo=-.17092002525602665777d-17
- subd3 r4,r2,r0 # y-x
- addw2 $0x80,r0 # 2(y-x)
- subd2 r4,r0 # 2(y-x)-x
- addw2 $0x80,r4 # 2x
- movq r2,r10
- addw2 $0x80,r10 # 2y
- addd2 r10,r2 # 3y
- addd2 r4,r2 # 3y+2x
- divd2 r2,r0 # (2y-3x)/(2x+3y)
+L10:
+ movq $0xb4d9940f985e407b,%r6 # Hi=.98279372324732906796d0
+ movq $0x21b1879a3bc2a2fc,%r8 # Lo=-.17092002525602665777d-17
+ subd3 %r4,%r2,%r0 # y-x
+ addw2 $0x80,%r0 # 2(y-x)
+ subd2 %r4,%r0 # 2(y-x)-x
+ addw2 $0x80,%r4 # 2x
+ movq %r2,%r10
+ addw2 $0x80,%r10 # 2y
+ addd2 %r10,%r2 # 3y
+ addd2 %r4,%r2 # 3y+2x
+ divd2 %r2,%r0 # (2y-3x)/(2x+3y)
brw L60
-L20:
- cmpw r0,$0x3280 # t : 2**(-28)
+L20:
+ cmpw %r0,$0x3280 # t : 2**(-28)
jlss L80
- clrq r6 # Hi=r6=0, Lo=r8=0
- clrq r8
+ clrq %r6 # Hi=r6=0, Lo=r8=0
+ clrq %r8
brw L60
-L30:
- movq $0xda7b2b0d63383fed,r6 # Hi=.46364760900080611433d0
- movq $0xf0ea17b2bf912295,r8 # Lo=.10147340032515978826d-17
- movq r2,r0
- addw2 $0x80,r0 # 2y
- subd2 r4,r0 # 2y-x
- addw2 $0x80,r4 # 2x
- addd2 r2,r4 # 2x+y
- divd2 r4,r0 # (2y-x)/(2x+y)
+L30:
+ movq $0xda7b2b0d63383fed,%r6 # Hi=.46364760900080611433d0
+ movq $0xf0ea17b2bf912295,%r8 # Lo=.10147340032515978826d-17
+ movq %r2,%r0
+ addw2 $0x80,%r0 # 2y
+ subd2 %r4,%r0 # 2y-x
+ addw2 $0x80,%r4 # 2x
+ addd2 %r2,%r4 # 2x+y
+ divd2 %r4,%r0 # (2y-x)/(2x+y)
brb L60
-L50:
- movq $0x68c2a2210fda40c9,r6 # Hi=1.5707963267948966135d1
- movq $0x06e0145c26332326,r8 # Lo=.22517417741562176079d-17
- cmpw r0,$0x5100 # y : 2**57
+L50:
+ movq $0x68c2a2210fda40c9,%r6 # Hi=1.5707963267948966135d1
+ movq $0x06e0145c26332326,%r8 # Lo=.22517417741562176079d-17
+ cmpw %r0,$0x5100 # y : 2**57
bgeq L90
- divd3 r2,r4,r0
- bisw2 $0x8000,r0 # -x/y
+ divd3 %r2,%r4,%r0
+ bisw2 $0x8000,%r0 # -x/y
brb L60
-L40:
- movq $0x68c2a2210fda4049,r6 # Hi=.78539816339744830676d0
- movq $0x06e0145c263322a6,r8 # Lo=.11258708870781088040d-17
- subd3 r4,r2,r0 # y-x
- addd2 r4,r2 # y+x
- divd2 r2,r0 # (y-x)/(y+x)
-L60:
- movq r0,r10
- muld2 r0,r0
- polyd r0,$12,ptable
- muld2 r10,r0
- subd2 r0,r8
- addd3 r8,r10,r0
- addd2 r6,r0
-L80:
- movw -8(fp),r2
+L40:
+ movq $0x68c2a2210fda4049,%r6 # Hi=.78539816339744830676d0
+ movq $0x06e0145c263322a6,%r8 # Lo=.11258708870781088040d-17
+ subd3 %r4,%r2,%r0 # y-x
+ addd2 %r4,%r2 # y+x
+ divd2 %r2,%r0 # (y-x)/(y+x)
+L60:
+ movq %r0,%r10
+ muld2 %r0,%r0
+ polyd %r0,$12,ptable
+ muld2 %r10,%r0
+ subd2 %r0,%r8
+ addd3 %r8,%r10,%r0
+ addd2 %r6,%r0
+L80:
+ movw -8(%fp),%r2
bneq pim
- bisw2 -4(fp),r0 # return sign(y)*r0
+ bisw2 -4(%fp),%r0 # return sign(y)*r0
ret
L90: # x >= 2**25
- movq r6,r0
+ movq %r6,%r0
brb L80
pim:
- subd3 r0,$0x68c2a2210fda4149,r0 # pi-t
- bisw2 -4(fp),r0
+ subd3 %r0,$0x68c2a2210fda4149,%r0 # pi-t
+ bisw2 -4(%fp),%r0
ret
yeq0:
- movw -8(fp),r2
+ movw -8(%fp),%r2
beql zero # if sign(x)=1 return pi
- movq $0x68c2a2210fda4149,r0 # pi=3.1415926535897932270d1
+ movq $0x68c2a2210fda4149,%r0 # pi=3.1415926535897932270d1
ret
zero:
- clrq r0 # return 0
+ clrq %r0 # return 0
ret
pio2:
- movq $0x68c2a2210fda40c9,r0 # pi/2=1.5707963267948966135d1
- bisw2 -4(fp),r0 # return sign(y)*pi/2
+ movq $0x68c2a2210fda40c9,%r0 # pi/2=1.5707963267948966135d1
+ bisw2 -4(%fp),%r0 # return sign(y)*pi/2
ret
resop:
- movq $0x8000,r0 # propagate the reserved operand
+ movq $0x8000,%r0 # propagate the reserved operand
ret
.align 2
ptable:
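
The interval dispatch above (casel on [8t]/2) selects a precomputed atan value Hi/Lo and a per-interval reduced ratio; stripped of that refinement, the quadrant and sign bookkeeping around it amounts to the following C sketch. It assumes a correct atan(), ignores the reserved-operand and zero cases handled above, and atan2_sketch is an illustrative name.

#include <math.h>

static double atan2_sketch(double y, double x)
{
	double r = atan(fabs(y) / fabs(x));	/* first-quadrant angle */

	if (signbit(x))
		r = M_PI - r;		/* x < 0: reflect into quadrant II */
	return signbit(y) ? -r : r;	/* attach sign(y), as bisw2 -4(fp) does */
}
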
diff --git a/lib/libm/arch/vax/n_cbrt.S b/lib/libm/arch/vax/n_cbrt.S
index dd03a8fa229..d5669952618 100644
--- a/lib/libm/arch/vax/n_cbrt.S
+++ b/lib/libm/arch/vax/n_cbrt.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_cbrt.S,v 1.4 2008/09/16 22:13:12 martynas Exp $ */
+/* $OpenBSD: n_cbrt.S,v 1.5 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_cbrt.S,v 1.1 1995/10/10 23:40:26 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -43,37 +43,37 @@
.text
_ALIGN_TEXT
ENTRY(cbrt, R2|R3|R4|R5|R6|R7)
- movq 4(ap),r0 # r0 = argument x
- bicw3 $0x807f,r0,r2 # biased exponent of x
+ movq 4(%ap),%r0 # r0 = argument x
+ bicw3 $0x807f,%r0,%r2 # biased exponent of x
jeql return # cbrt(0)=0 cbrt(res)=res. operand
- bicw3 $0x7fff,r0,ap # ap has sign(x)
- xorw2 ap,r0 # r0 is abs(x)
- movl r0,r2 # r2 has abs(x)
- rotl $16,r2,r2 # r2 = |x| with bits unscrambled
- divl2 $3,r2 # rough cbrt with bias/3
- addl2 B,r2 # restore bias, diminish fraction
- rotl $16,r2,r2 # r2=|q|=|cbrt| to 5 bits
- mulf3 r2,r2,r3 # r3 =qq
- divf2 r0,r3 # r3 = qq/x
- mulf2 r2,r3
- addf2 C,r3 # r3 = s = C + qqq/x
- divf3 r3,D,r4 # r4 = D/s
- addf2 E,r4
- addf2 r4,r3 # r3 = s + E + D/s
- divf3 r3,F,r3 # r3 = F / (s + E + D/s)
- addf2 G,r3 # r3 = G + F / (s + E + D/s)
- mulf2 r3,r2 # r2 = qr3 = new q to 23 bits
- clrl r3 # r2:r3 = q as double float
- muld3 r2,r2,r4 # r4:r5 = qq exactly
- divd2 r4,r0 # r0:r1 = x/(q*q) rounded
- subd3 r2,r0,r6 # r6:r7 = x/(q*q) - q exactly
- movq r2,r4 # r4:r5 = q
- addw2 $0x80,r4 # r4:r5 = 2 * q
- addd2 r0,r4 # r4:r5 = 2*q + x/(q*q)
- divd2 r4,r6 # r6:r7 = (x/(q*q)-q)/(2*q+x/(q*q))
- muld2 r2,r6 # r6:r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
- addd3 r6,r2,r0 # r0:r1 = q + r6:r7
- bisw2 ap,r0 # restore the sign bit
+ bicw3 $0x7fff,%r0,%ap # ap has sign(x)
+ xorw2 %ap,%r0 # r0 is abs(x)
+ movl %r0,%r2 # r2 has abs(x)
+ rotl $16,%r2,%r2 # r2 = |x| with bits unscrambled
+ divl2 $3,%r2 # rough cbrt with bias/3
+ addl2 B,%r2 # restore bias, diminish fraction
+ rotl $16,%r2,%r2 # r2=|q|=|cbrt| to 5 bits
+ mulf3 %r2,%r2,%r3 # r3 =qq
+ divf2 %r0,%r3 # r3 = qq/x
+ mulf2 %r2,%r3
+ addf2 C,%r3 # r3 = s = C + qqq/x
+ divf3 %r3,D,%r4 # r4 = D/s
+ addf2 E,%r4
+ addf2 %r4,%r3 # r3 = s + E + D/s
+ divf3 %r3,F,%r3 # r3 = F / (s + E + D/s)
+ addf2 G,%r3 # r3 = G + F / (s + E + D/s)
+ mulf2 %r3,%r2 # r2 = qr3 = new q to 23 bits
+ clrl %r3 # r2:r3 = q as double float
+ muld3 %r2,%r2,%r4 # r4:r5 = qq exactly
+ divd2 %r4,%r0 # r0:r1 = x/(q*q) rounded
+ subd3 %r2,%r0,%r6 # r6:r7 = x/(q*q) - q exactly
+ movq %r2,%r4 # r4:r5 = q
+ addw2 $0x80,%r4 # r4:r5 = 2 * q
+ addd2 %r0,%r4 # r4:r5 = 2*q + x/(q*q)
+ divd2 %r4,%r6 # r6:r7 = (x/(q*q)-q)/(2*q+x/(q*q))
+ muld2 %r2,%r6 # r6:r7 = q*(x/(q*q)-q)/(2*q+x/(q*q))
+ addd3 %r6,%r2,%r0 # r0:r1 = q + r6:r7
+ bisw2 %ap,%r0 # restore the sign bit
return:
ret # error less than 0.667 ulps
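
The divl2 $3 / addl2 B trick above has a well-known IEEE-754 analogue (fdlibm's B1 constant): dividing the raw exponent bits by 3 and re-biasing yields a cube root good to a few bits, after which Newton steps converge quickly. A sketch under that assumption, not the D-float arithmetic used here:

#include <math.h>
#include <stdint.h>
#include <string.h>

static double cbrt_sketch(double x)
{
	if (x == 0.0 || !isfinite(x))
		return x;

	double ax = fabs(x);
	uint64_t u;
	memcpy(&u, &ax, sizeof u);

	/* Rough cbrt via integer arithmetic on the exponent field;
	 * 715094163 (0x2a9f7893) is the fdlibm bias correction. */
	uint32_t hi = (uint32_t)(u >> 32) / 3 + 715094163u;
	u = (uint64_t)hi << 32;

	double t;
	memcpy(&t, &u, sizeof t);	/* good to about 5 bits */

	for (int i = 0; i < 4; i++)	/* Newton: t -= (t^3 - ax)/(3t^2) */
		t -= (t * t * t - ax) / (3.0 * t * t);

	return copysign(t, x);
}
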
diff --git a/lib/libm/arch/vax/n_hypot.S b/lib/libm/arch/vax/n_hypot.S
index bd5cbf49df1..72f5d773de5 100644
--- a/lib/libm/arch/vax/n_hypot.S
+++ b/lib/libm/arch/vax/n_hypot.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_hypot.S,v 1.1 2008/10/07 22:25:53 martynas Exp $ */
+/* $OpenBSD: n_hypot.S,v 1.2 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_cabs.S,v 1.1 1995/10/10 23:40:26 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -46,23 +46,23 @@
.text
_ALIGN_TEXT
ENTRY(hypot, 0x8000|R2|R3|R4|R5|R6) # enable floating overflow
- movq 4(ap),r0 # r0:1 = x
- movq 12(ap),r2 # r2:3 = y
- bicw3 $0x7f,r0,r4 # r4 has signed biased exp of x
- cmpw $0x8000,r4
+ movq 4(%ap),%r0 # r0:1 = x
+ movq 12(%ap),%r2 # r2:3 = y
+ bicw3 $0x7f,%r0,%r4 # r4 has signed biased exp of x
+ cmpw $0x8000,%r4
jeql return # x is a reserved operand, so return it
- bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r2,%r5 # r5 has signed biased exp of y
+ cmpw $0x8000,%r5
jneq cont /* y isn't a reserved operand */
- movq r2,r0 /* return y if it's reserved */
+ movq %r2,%r0 /* return y if it's reserved */
ret
cont:
bsbb regs_set # r0:1 = dsqrt(x^2+y^2)/2^r6
- addw2 r6,r0 # unscaled cdabs in r0:1
+ addw2 %r6,%r0 # unscaled cdabs in r0:1
jvc return # unless it overflows
- subw2 $0x80,r0 # halve r0 to get meaningful overflow
- addd2 r0,r0 # overflow; r0 is half of true abs value
+ subw2 $0x80,%r0 # halve r0 to get meaningful overflow
+ addd2 %r0,%r0 # overflow; r0 is half of true abs value
return:
ret
@@ -71,47 +71,47 @@ _ALTENTRY(__libm_cdabs_r6) # ENTRY POINT for cdsqrt
# calculates a scaled (factor in r6)
# complex absolute value
- movq (r4)+,r0 # r0:r1 = x via indirect addressing
- movq (r4),r2 # r2:r3 = y via indirect addressing
+ movq (%r4)+,%r0 # r0:r1 = x via indirect addressing
+ movq (%r4),%r2 # r2:r3 = y via indirect addressing
- bicw3 $0x7f,r0,r5 # r5 has signed biased exp of x
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r0,%r5 # r5 has signed biased exp of x
+ cmpw $0x8000,%r5
jeql cdreserved # x is a reserved operand
- bicw3 $0x7f,r2,r5 # r5 has signed biased exp of y
- cmpw $0x8000,r5
+ bicw3 $0x7f,%r2,%r5 # r5 has signed biased exp of y
+ cmpw $0x8000,%r5
jneq regs_set /* y isn't a reserved operand either? */
cdreserved:
- movl *4(ap),r4 # r4 -> (u,v), if x or y is reserved
- movq r0,(r4)+ # copy u and v as is and return
- movq r2,(r4) # (again addressing is indirect)
+ movl *4(%ap),%r4 # r4 -> (u,v), if x or y is reserved
+ movq %r0,(%r4)+ # copy u and v as is and return
+ movq %r2,(%r4) # (again addressing is indirect)
ret
#endif
regs_set:
- bicw2 $0x8000,r0 # r0:r1 = dabs(x)
- bicw2 $0x8000,r2 # r2:r3 = dabs(y)
- cmpw r0,r2
+ bicw2 $0x8000,%r0 # r0:r1 = dabs(x)
+ bicw2 $0x8000,%r2 # r2:r3 = dabs(y)
+ cmpw %r0,%r2
jgeq ordered
- movq r0,r4
- movq r2,r0
- movq r4,r2 # force y's exp <= x's exp
+ movq %r0,%r4
+ movq %r2,%r0
+ movq %r4,%r2 # force y's exp <= x's exp
ordered:
- bicw3 $0x7f,r0,r6 # r6 = exponent(x) + bias(129)
+ bicw3 $0x7f,%r0,%r6 # r6 = exponent(x) + bias(129)
jeql retsb # if x = y = 0 then cdabs(x,y) = 0
- subw2 $0x4780,r6 # r6 = exponent(x) - 14
- subw2 r6,r0 # 2^14 <= scaled x < 2^15
- bitw $0xff80,r2
+ subw2 $0x4780,%r6 # r6 = exponent(x) - 14
+ subw2 %r6,%r0 # 2^14 <= scaled x < 2^15
+ bitw $0xff80,%r2
jeql retsb # if y = 0 return dabs(x)
- subw2 r6,r2
- cmpw $0x3780,r2 # if scaled y < 2^-18
+ subw2 %r6,%r2
+ cmpw $0x3780,%r2 # if scaled y < 2^-18
jgtr retsb # return dabs(x)
- emodd r0,$0,r0,r4,r0 # r4 + r0:1 = scaled x^2
- emodd r2,$0,r2,r5,r2 # r5 + r2:3 = scaled y^2
- addd2 r2,r0
- addl2 r5,r4
- cvtld r4,r2
- addd2 r2,r0 # r0:1 = scaled x^2 + y^2
+ emodd %r0,$0,%r0,%r4,%r0 # r4 + r0:1 = scaled x^2
+ emodd %r2,$0,%r2,%r5,%r2 # r5 + r2:3 = scaled y^2
+ addd2 %r2,%r0
+ addl2 %r5,%r4
+ cvtld %r4,%r2
+ addd2 %r2,%r0 # r0:1 = scaled x^2 + y^2
jmp __libm_dsqrt_r5 # r0:1 = dsqrt(x^2+y^2)/2^r6
retsb:
rsb # error < 0.86 ulp
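
The r6 business above is overflow protection: the larger operand's exponent is factored out before squaring, and reapplied after the square root. In portable C the same idea looks like the sketch below, with frexp/ldexp standing in for the VAX exponent-field arithmetic and reserved-operand handling omitted:

#include <math.h>

static double hypot_sketch(double x, double y)
{
	x = fabs(x);
	y = fabs(y);
	if (x < y) { double t = x; x = y; y = t; }	/* force y <= x */
	if (x == 0.0)
		return 0.0;

	int e;
	(void)frexp(x, &e);	/* exponent of the larger operand */
	x = ldexp(x, -e);	/* now in [0.5, 1) */
	y = ldexp(y, -e);	/* scaled by the same 2^-e */

	return ldexp(sqrt(x * x + y * y), e);	/* scale the root back up */
}
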
diff --git a/lib/libm/arch/vax/n_infnan.S b/lib/libm/arch/vax/n_infnan.S
index 6edf8d7c9c9..aff89c9fd46 100644
--- a/lib/libm/arch/vax/n_infnan.S
+++ b/lib/libm/arch/vax/n_infnan.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_infnan.S,v 1.3 2008/05/21 20:37:10 miod Exp $ */
+/* $OpenBSD: n_infnan.S,v 1.4 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_infnan.S,v 1.1 1995/10/10 23:40:27 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -43,10 +43,10 @@
* The Reserved Operand Fault is generated inside of this routine.
*/
ENTRY(infnan,0)
- cmpl 4(ap),$ERANGE
+ cmpl 4(%ap),$ERANGE
bneq 1f
movl $ERANGE,_C_LABEL(errno)
brb 2f
1: movl $EDOM,_C_LABEL(errno)
-2: emodd $0,$0,$0x8000,r0,r0 # generates the reserved operand fault
+2: emodd $0,$0,$0x8000,%r0,%r0 # generates the reserved operand fault
ret
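
infnan's contract, restated in C: record the error class in errno, then produce the VAX reserved operand (sign bit set, exponent zero), which faults when consumed; the emodd with $0x8000 exists only to synthesize that fault. A sketch with an IEEE NaN standing in for the reserved operand, since portable C has no faulting value:

#include <errno.h>
#include <math.h>

static double infnan_sketch(int error)
{
	errno = (error == ERANGE) ? ERANGE : EDOM;
	return nan("");		/* stand-in; VAX D-float faults instead */
}
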
diff --git a/lib/libm/arch/vax/n_sincos.S b/lib/libm/arch/vax/n_sincos.S
index ececb38d0d2..00e2831e86d 100644
--- a/lib/libm/arch/vax/n_sincos.S
+++ b/lib/libm/arch/vax/n_sincos.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_sincos.S,v 1.8 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_sincos.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_sincos.S,v 1.1 1995/10/10 23:40:28 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -50,14 +50,14 @@
STRONG_ALIAS(sinl,sin)
ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x807f,r0,r2
+ movq 4(%ap),%r0
+ bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -66,9 +66,9 @@ ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* Entered by sine ; save 0 in r4 .
*/
jsb __libm_argred
- movl $0,r4
+ movl $0,%r4
jsb __libm_sincos
- bispsw (sp)+
+ bispsw (%sp)+
1: ret
/*
@@ -80,15 +80,15 @@ ENTRY(sin, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
STRONG_ALIAS(cosl,cos)
ENTRY(cos, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x7f,r0,r2
- cmpw $0x8000,r2
+ movq 4(%ap),%r0
+ bicw3 $0x7f,%r0,%r2
+ cmpw $0x8000,%r2
beql 1f # if x is reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -97,7 +97,7 @@ ENTRY(cos, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
* Entered by cosine ; save 1 in r4 .
*/
jsb __libm_argred
- movl $1,r4
+ movl $1,%r4
jsb __libm_sincos
- bispsw (sp)+
+ bispsw (%sp)+
1: ret
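
sin and cos share the whole pipeline; the only difference is the flag in r4, which __libm_sincos adds to the quadrant number (the addl2 r4,r0 in n_argred.S) so cosine is computed as a phase-shifted sine. A C sketch of that dispatch, with remquo as a crude stand-in for __libm_argred; kernel_sincos is a hypothetical helper, not a libm interface:

#include <math.h>

static double kernel_sincos(double x, int cosine_flag)
{
	int q;
	double r = remquo(x, M_PI_2, &q);	/* x = q*(pi/2) + r, |r| <= pi/4 */

	switch ((unsigned)(q + cosine_flag) & 3) {	/* cosine shifts one quadrant */
	case 0:  return sin(r);
	case 1:  return cos(r);
	case 2:  return -sin(r);
	default: return -cos(r);
	}
}

static double sin_sketch(double x) { return kernel_sincos(x, 0); }	/* r4 = 0 */
static double cos_sketch(double x) { return kernel_sincos(x, 1); }	/* r4 = 1 */
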
diff --git a/lib/libm/arch/vax/n_sqrt.S b/lib/libm/arch/vax/n_sqrt.S
index 163ec9245b9..60f666da190 100644
--- a/lib/libm/arch/vax/n_sqrt.S
+++ b/lib/libm/arch/vax/n_sqrt.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_sqrt.S,v 1.9 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_sqrt.S,v 1.10 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_sqrt.S,v 1.1 1995/10/10 23:40:29 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -47,8 +47,8 @@
STRONG_ALIAS(sqrtl,sqrt)
ENTRY(sqrt, R2|R3|R4|R5)
- movq 4(ap),r0
-dsqrt2: bicw3 $0x807f,r0,r2 # check exponent of input
+ movq 4(%ap),%r0
+dsqrt2: bicw3 $0x807f,%r0,%r2 # check exponent of input
jeql noexp # biased exponent is zero -> 0.0 or reserved
bsbb __libm_dsqrt_r5
noexp: ret
@@ -59,30 +59,30 @@ _ALTENTRY(__libm_dsqrt_r5) /* ENTRY POINT FOR cdabs and cdsqrt */
/* returns double square root scaled by */
/* 2^r6 */
- movd r0,r4
+ movd %r0,%r4
jleq nonpos # argument is not positive
- movzwl r4,r2
- ashl $-1,r2,r0
- addw2 $0x203c,r0 # r0 has magic initial approximation
+ movzwl %r4,%r2
+ ashl $-1,%r2,%r0
+ addw2 $0x203c,%r0 # r0 has magic initial approximation
/*
* Do two steps of Heron's rule
* ((arg/guess) + guess) / 2 = better guess
*/
- divf3 r0,r4,r2
- addf2 r2,r0
- subw2 $0x80,r0 # divide by two
+ divf3 %r0,%r4,%r2
+ addf2 %r2,%r0
+ subw2 $0x80,%r0 # divide by two
- divf3 r0,r4,r2
- addf2 r2,r0
- subw2 $0x80,r0 # divide by two
+ divf3 %r0,%r4,%r2
+ addf2 %r2,%r0
+ subw2 $0x80,%r0 # divide by two
/* Scale argument and approximation to prevent over/underflow */
- bicw3 $0x807f,r4,r1
- subw2 $0x4080,r1 # r1 contains scaling factor
- subw2 r1,r4
- movl r0,r2
- subw2 r1,r2
+ bicw3 $0x807f,%r4,%r1
+ subw2 $0x4080,%r1 # r1 contains scaling factor
+ subw2 %r1,%r4
+ movl %r0,%r2
+ subw2 %r1,%r2
/* Cubic step
*
@@ -90,16 +90,16 @@ _ALTENTRY(__libm_dsqrt_r5) /* ENTRY POINT FOR cdabs and cdsqrt */
* a is approximation, and n is the original argument.
* (let s be scale factor in the following comments)
*/
- clrl r1
- clrl r3
- muld2 r0,r2 # r2:r3 = a*a/s
- subd2 r2,r4 # r4:r5 = n/s - a*a/s
- addw2 $0x100,r2 # r2:r3 = 4*a*a/s
- addd2 r4,r2 # r2:r3 = n/s + 3*a*a/s
- muld2 r0,r4 # r4:r5 = a*n/s - a*a*a/s
- divd2 r2,r4 # r4:r5 = a*(n-a*a)/(n+3*a*a)
- addw2 $0x80,r4 # r4:r5 = 2*a*(n-a*a)/(n+3*a*a)
- addd2 r4,r0 # r0:r1 = a + 2*a*(n-a*a)/(n+3*a*a)
+ clrl %r1
+ clrl %r3
+ muld2 %r0,%r2 # r2:r3 = a*a/s
+ subd2 %r2,%r4 # r4:r5 = n/s - a*a/s
+ addw2 $0x100,%r2 # r2:r3 = 4*a*a/s
+ addd2 %r4,%r2 # r2:r3 = n/s + 3*a*a/s
+ muld2 %r0,%r4 # r4:r5 = a*n/s - a*a*a/s
+ divd2 %r2,%r4 # r4:r5 = a*(n-a*a)/(n+3*a*a)
+ addw2 $0x80,%r4 # r4:r5 = 2*a*(n-a*a)/(n+3*a*a)
+ addd2 %r4,%r0 # r0:r1 = a + 2*a*(n-a*a)/(n+3*a*a)
rsb # DONE!
nonpos:
jneq negarg
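
Spelled out in C, the schedule above is: a linear-in-the-exponent initial guess, two Heron ("divide and average") steps done in single precision, then the one cubic step a + 2a(n-a^2)/(n+3a^2) documented in the comment block. frexp/ldexp replace the magic-constant bit tricks, and the scaled __libm_dsqrt_r5 entry is not modeled; a sketch only:

#include <math.h>

static double sqrt_sketch(double n)
{
	if (n == 0.0)
		return 0.0;
	if (n < 0.0)
		return nan("");			/* the asm branches to negarg */

	int e;
	double m = frexp(n, &e);		/* n = m * 2^e, m in [0.5, 1) */
	if (e & 1) { m *= 2.0; e--; }		/* make the exponent even */
	double a = ldexp(0.5 + 0.5 * m, e / 2);	/* crude initial guess */

	a = 0.5 * (a + n / a);			/* Heron's rule, once */
	a = 0.5 * (a + n / a);			/* Heron's rule, twice */

	double aa = a * a;			/* cubic step from the comments: */
	return a + 2.0 * a * (n - aa) / (n + 3.0 * aa);
}
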
diff --git a/lib/libm/arch/vax/n_support.S b/lib/libm/arch/vax/n_support.S
index c8fdad7e203..3216dc1ab06 100644
--- a/lib/libm/arch/vax/n_support.S
+++ b/lib/libm/arch/vax/n_support.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_support.S,v 1.17 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_support.S,v 1.18 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_support.S,v 1.1 1995/10/10 23:40:30 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -53,12 +53,12 @@
STRONG_ALIAS(copysignl,copysign)
ENTRY(copysign, R2)
- movq 4(ap),r0 # load x into r0
- bicw3 $0x807f,r0,r2 # mask off the exponent of x
+ movq 4(%ap),%r0 # load x into r0
+ bicw3 $0x807f,%r0,%r2 # mask off the exponent of x
beql Lz # if zero or reserved op then return x
- bicw3 $0x7fff,12(ap),r2 # copy the sign bit of y into r2
- bicw2 $0x8000,r0 # replace x by |x|
- bisw2 r2,r0 # copy the sign bit of y to x
+ bicw3 $0x7fff,12(%ap),%r2 # copy the sign bit of y into r2
+ bicw2 $0x8000,%r0 # replace x by |x|
+ bisw2 %r2,%r0 # copy the sign bit of y to x
Lz: ret
/*
@@ -67,12 +67,12 @@ Lz: ret
*/
ENTRY(copysignf, R2)
- movl 4(ap),r0 # load x into r0
- bicw3 $0x807f,r0,r2 # mask off the exponent of x
+ movl 4(%ap),%r0 # load x into r0
+ bicw3 $0x807f,%r0,%r2 # mask off the exponent of x
beql Fz # if zero or reserved op then return x
- bicw3 $0x7fff,8(ap),r2 # copy the sign bit of y into r2
- bicw2 $0x8000,r0 # replace x by |x|
- bisw2 r2,r0 # copy the sign bit of y to x
+ bicw3 $0x7fff,8(%ap),%r2 # copy the sign bit of y into r2
+ bicw2 $0x8000,%r0 # replace x by |x|
+ bisw2 %r2,%r0 # copy the sign bit of y to x
Fz: ret
/*
@@ -82,15 +82,15 @@ Fz: ret
STRONG_ALIAS(logbl,logb)
ENTRY(logb, 0)
- bicl3 $0xffff807f,4(ap),r0 # mask off the exponent of x
+ bicl3 $0xffff807f,4(%ap),%r0 # mask off the exponent of x
beql Ln
- ashl $-7,r0,r0 # get the bias exponent
- subl2 $129,r0 # get the unbias exponent
- cvtld r0,r0 # return the answer in double
+ ashl $-7,%r0,%r0 # get the bias exponent
+ subl2 $129,%r0 # get the unbias exponent
+ cvtld %r0,%r0 # return the answer in double
ret
-Ln: movq 4(ap),r0 # r0:1 = x (zero or reserved op)
+Ln: movq 4(%ap),%r0 # r0:1 = x (zero or reserved op)
bneq 1f # simply return if reserved op
- movq $0x0000fe00ffffcfff,r0 # -2147483647.0
+ movq $0x0000fe00ffffcfff,%r0 # -2147483647.0
1: ret
/*
@@ -99,15 +99,15 @@ Ln: movq 4(ap),r0 # r0:1 = x (zero or reserved op)
*/
ENTRY(logbf, 0)
- bicl3 $0xffff807f,4(ap),r0 # mask off the exponent of x
+ bicl3 $0xffff807f,4(%ap),%r0 # mask off the exponent of x
beql Fn
- ashl $-7,r0,r0 # get the bias exponent
- subl2 $129,r0 # get the unbias exponent
- cvtlf r0,r0 # return the answer in float
+ ashl $-7,%r0,%r0 # get the bias exponent
+ subl2 $129,%r0 # get the unbias exponent
+ cvtlf %r0,%r0 # return the answer in float
ret
-Fn: movl 4(ap),r0 # r0:1 = x (zero or reserved op)
+Fn: movl 4(%ap),%r0 # r0:1 = x (zero or reserved op)
bneq 1f # simply return if reserved op
- movl $0x0000d000,r0 # -2147483647.0
+ movl $0x0000d000,%r0 # -2147483647.0
1: ret
/*
@@ -117,27 +117,27 @@ Fn: movl 4(ap),r0 # r0:1 = x (zero or reserved op)
STRONG_ALIAS(scalbnl,scalbn)
ENTRY(scalbn, R2|R3)
- movq 4(ap),r0
- bicl3 $0xffff807f,r0,r3
+ movq 4(%ap),%r0
+ bicl3 $0xffff807f,%r0,%r3
beql ret1 # 0 or reserved operand
- movl 12(ap),r2
- cmpl r2,$0x12c
+ movl 12(%ap),%r2
+ cmpl %r2,$0x12c
bgeq ovfl
- cmpl r2,$-0x12c
+ cmpl %r2,$-0x12c
bleq unfl
- ashl $7,r2,r2
- addl2 r2,r3
+ ashl $7,%r2,%r2
+ addl2 %r2,%r3
bleq unfl
- cmpl r3,$0x8000
+ cmpl %r3,$0x8000
bgeq ovfl
- addl2 r2,r0
+ addl2 %r2,%r0
ret
ovfl: pushl $ERANGE
calls $1,_C_LABEL(infnan) # if it returns
- bicw3 $0x7fff,4(ap),r2 # get the sign of input arg
- bisw2 r2,r0 # re-attach the sign to r0/1
+ bicw3 $0x7fff,4(%ap),%r2 # get the sign of input arg
+ bisw2 %r2,%r0 # re-attach the sign to r0/1
ret
-unfl: movq $0,r0
+unfl: movq $0,%r0
ret1: ret
/*
@@ -149,83 +149,83 @@ ret1: ret
ALTENTRY(drem)
ENTRY(remainder, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- subl2 $12,sp
- movq 4(ap),r0 #r0=x
- movq 12(ap),r2 #r2=y
+ subl2 $12,%sp
+ movq 4(%ap),%r0 #r0=x
+ movq 12(%ap),%r2 #r2=y
jeql Rop #if y=0 then generate reserved op fault
- bicw3 $0x007f,r0,r4 #check if x is Rop
- cmpw r4,$0x8000
+ bicw3 $0x007f,%r0,%r4 #check if x is Rop
+ cmpw %r4,$0x8000
jeql Ret #if x is Rop then return Rop
- bicl3 $0x007f,r2,r4 #check if y is Rop
- cmpw r4,$0x8000
+ bicl3 $0x007f,%r2,%r4 #check if y is Rop
+ cmpw %r4,$0x8000
jeql Ret #if y is Rop then return Rop
- bicw2 $0x8000,r2 #y := |y|
- movw $0,-4(fp) #-4(fp) = nx := 0
- cmpw r2,$0x1c80 #yexp ? 57
+ bicw2 $0x8000,%r2 #y := |y|
+ movw $0,-4(%fp) #-4(fp) = nx := 0
+ cmpw %r2,$0x1c80 #yexp ? 57
bgtr C1 #if yexp > 57 goto C1
- addw2 $0x1c80,r2 #scale up y by 2**57
- movw $0x1c80,-4(fp) #nx := 57 (exponent field)
+ addw2 $0x1c80,%r2 #scale up y by 2**57
+ movw $0x1c80,-4(%fp) #nx := 57 (exponent field)
C1:
- movw -4(fp),-8(fp) #-8(fp) = nf := nx
- bicw3 $0x7fff,r0,-12(fp) #-12(fp) = sign of x
- bicw2 $0x8000,r0 #x := |x|
- movq r2,r10 #y1 := y
- bicl2 $0xffff07ff,r11 #clear the last 27 bits of y1
+ movw -4(%fp),-8(%fp) #-8(fp) = nf := nx
+ bicw3 $0x7fff,%r0,-12(%fp) #-12(fp) = sign of x
+ bicw2 $0x8000,%r0 #x := |x|
+ movq %r2,%r10 #y1 := y
+ bicl2 $0xffff07ff,%r11 #clear the last 27 bits of y1
loop:
- cmpd r0,r2 #x ? y
+ cmpd %r0,%r2 #x ? y
bleq E1 #if x <= y goto E1
/* begin argument reduction */
- movq r2,r4 #t =y
- movq r10,r6 #t1=y1
- bicw3 $0x807f,r0,r8 #xexp= exponent of x
-	bicw3	$0x807f,r2,r9	#yexp= exponent of y
- subw2 r9,r8 #xexp-yexp
- subw2 $0x0c80,r8 #k=xexp-yexp-25(exponent bit field)
+ movq %r2,%r4 #t =y
+ movq %r10,%r6 #t1=y1
+ bicw3 $0x807f,%r0,%r8 #xexp= exponent of x
+	bicw3	$0x807f,%r2,%r9	#yexp= exponent of y
+ subw2 %r9,%r8 #xexp-yexp
+ subw2 $0x0c80,%r8 #k=xexp-yexp-25(exponent bit field)
blss C2 #if k<0 goto C2
- addw2 r8,r4 #t +=k
- addw2 r8,r6 #t1+=k, scale up t and t1
+ addw2 %r8,%r4 #t +=k
+ addw2 %r8,%r6 #t1+=k, scale up t and t1
C2:
- divd3 r4,r0,r8 #x/t
- cvtdl r8,r8 #n=[x/t] truncated
- cvtld r8,r8 #float(n)
- subd2 r6,r4 #t:=t-t1
- muld2 r8,r4 #n*(t-t1)
- muld2 r8,r6 #n*t1
- subd2 r6,r0 #x-n*t1
- subd2 r4,r0 #(x-n*t1)-n*(t-t1)
+ divd3 %r4,%r0,%r8 #x/t
+ cvtdl %r8,%r8 #n=[x/t] truncated
+ cvtld %r8,%r8 #float(n)
+ subd2 %r6,%r4 #t:=t-t1
+ muld2 %r8,%r4 #n*(t-t1)
+ muld2 %r8,%r6 #n*t1
+ subd2 %r6,%r0 #x-n*t1
+ subd2 %r4,%r0 #(x-n*t1)-n*(t-t1)
brb loop
E1:
- movw -4(fp),r6 #r6=nx
+ movw -4(%fp),%r6 #r6=nx
beql C3 #if nx=0 goto C3
- addw2 r6,r0 #x:=x*2**57 scale up x by nx
- movw $0,-4(fp) #clear nx
+ addw2 %r6,%r0 #x:=x*2**57 scale up x by nx
+ movw $0,-4(%fp) #clear nx
brb loop
C3:
- movq r2,r4 #r4 = y
- subw2 $0x80,r4 #r4 = y/2
- cmpd r0,r4 #x:y/2
+ movq %r2,%r4 #r4 = y
+ subw2 $0x80,%r4 #r4 = y/2
+ cmpd %r0,%r4 #x:y/2
blss E2 #if x < y/2 goto E2
bgtr C4 #if x > y/2 goto C4
- cvtdl r8,r8 #ifix(float(n))
- blbc r8,E2 #if the last bit is zero, goto E2
+ cvtdl %r8,%r8 #ifix(float(n))
+ blbc %r8,E2 #if the last bit is zero, goto E2
C4:
- subd2 r2,r0 #x-y
+ subd2 %r2,%r0 #x-y
E2:
- xorw2 -12(fp),r0 #x^sign (exclusive or)
- movw -8(fp),r6 #r6=nf
- bicw3 $0x807f,r0,r8 #r8=exponent of x
- bicw2 $0x7f80,r0 #clear the exponent of x
- subw2 r6,r8 #r8=xexp-nf
+ xorw2 -12(%fp),%r0 #x^sign (exclusive or)
+ movw -8(%fp),%r6 #r6=nf
+ bicw3 $0x807f,%r0,%r8 #r8=exponent of x
+ bicw2 $0x7f80,%r0 #clear the exponent of x
+ subw2 %r6,%r8 #r8=xexp-nf
bgtr C5 #if xexp-nf is positive goto C5
- movw $0,r8 #clear r8
- movq $0,r0 #x underflow to zero
+ movw $0,%r8 #clear r8
+ movq $0,%r0 #x underflow to zero
C5:
- bisw2 r8,r0 #put r8 into exponent field of x
+ bisw2 %r8,%r0 #put r8 into exponent field of x
ret
Rop: #Reserved operand
pushl $EDOM
calls $1,_C_LABEL(infnan) #generate reserved op fault
ret
Ret:
- movq $0x8000,r0 #propagate reserved op
+ movq $0x8000,%r0 #propagate reserved op
ret
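
copysign above is three mask instructions; on IEEE-754 bits the same dance moves the top bit. A sketch (the same bicw3/bisw2 pattern underlies copysignf, logb and scalbn in this file); the function name is illustrative:

#include <stdint.h>
#include <string.h>

static double copysign_sketch(double x, double y)
{
	uint64_t ux, uy;
	memcpy(&ux, &x, sizeof ux);
	memcpy(&uy, &y, sizeof uy);

	ux = (ux & ~0x8000000000000000ULL)	/* bicw2: clear sign of x */
	   | (uy &  0x8000000000000000ULL);	/* bicw3+bisw2: sign of y */

	memcpy(&x, &ux, sizeof x);
	return x;
}
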
diff --git a/lib/libm/arch/vax/n_tan.S b/lib/libm/arch/vax/n_tan.S
index 0077694d64d..aedb143b0b6 100644
--- a/lib/libm/arch/vax/n_tan.S
+++ b/lib/libm/arch/vax/n_tan.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: n_tan.S,v 1.8 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: n_tan.S,v 1.9 2013/07/05 21:10:50 miod Exp $ */
/* $NetBSD: n_tan.S,v 1.1 1995/10/10 23:40:31 ragge Exp $ */
/*
* Copyright (c) 1985, 1993
@@ -48,14 +48,14 @@
STRONG_ALIAS(tanl,tan)
ENTRY(tan, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
- movq 4(ap),r0
- bicw3 $0x807f,r0,r2
+ movq 4(%ap),%r0
+ bicw3 $0x807f,%r0,%r2
beql 1f # if x is zero or reserved operand then return x
/*
* Save the PSL's IV & FU bits on the stack.
*/
- movpsl r2
- bicw3 $0xff9f,r2,-(sp)
+ movpsl %r2
+ bicw3 $0xff9f,%r2,-(%sp)
/*
* Clear the IV & FU bits.
*/
@@ -69,24 +69,24 @@ ENTRY(tan, R2|R3|R4|R5|R6|R7|R8|R9|R10|R11)
*
* Save r3/r0 so that we can call cosine after calling sine.
*/
- movq r2,-(sp)
- movq r0,-(sp)
+ movq %r2,-(%sp)
+ movq %r0,-(%sp)
/*
* Call sine. r4 = 0 implies sine.
*/
- movl $0,r4
+ movl $0,%r4
jsb __libm_sincos
/*
* Save sin(x) in r11/r10 .
*/
- movd r0,r10
+ movd %r0,%r10
/*
* Call cosine. r4 = 1 implies cosine.
*/
- movq (sp)+,r0
- movq (sp)+,r2
- movl $1,r4
+ movq (%sp)+,%r0
+ movq (%sp)+,%r2
+ movl $1,%r4
jsb __libm_sincos
- divd3 r0,r10,r0
- bispsw (sp)+
+ divd3 %r0,%r10,%r0
+ bispsw (%sp)+
1: ret
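
tan is literally sine over cosine here: the reduced argument is saved on the stack, the sincos kernel runs twice with r4 = 0 then r4 = 1, and divd3 forms the quotient. Using the hypothetical kernel_sincos sketched after the n_sincos.S diff above:

static double tan_sketch(double x)
{
	double s = kernel_sincos(x, 0);	/* sine pass, r4 = 0 */
	double c = kernel_sincos(x, 1);	/* cosine pass, r4 = 1 */
	return s / c;			/* divd3 r0,r10,r0 */
}
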