path: root/sys/arch/m68k/fpsp/l_support.sa
author     Theo de Raadt <deraadt@cvs.openbsd.org>  1995-10-18 08:53:40 +0000
committer  Theo de Raadt <deraadt@cvs.openbsd.org>  1995-10-18 08:53:40 +0000
commit     d6583bb2a13f329cf0332ef2570eb8bb8fc0e39c (patch)
tree       ece253b876159b39c620e62b6c9b1174642e070e /sys/arch/m68k/fpsp/l_support.sa
initial import of NetBSD tree
Diffstat (limited to 'sys/arch/m68k/fpsp/l_support.sa')
-rw-r--r--  sys/arch/m68k/fpsp/l_support.sa  388
1 file changed, 388 insertions, 0 deletions
diff --git a/sys/arch/m68k/fpsp/l_support.sa b/sys/arch/m68k/fpsp/l_support.sa
new file mode 100644
index 00000000000..e704484b5a5
--- /dev/null
+++ b/sys/arch/m68k/fpsp/l_support.sa
@@ -0,0 +1,388 @@
+* $NetBSD: l_support.sa,v 1.3 1994/10/26 07:49:16 cgd Exp $
+
+* MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+* M68000 Hi-Performance Microprocessor Division
+* M68040 Software Package
+*
+* M68040 Software Package Copyright (c) 1993, 1994 Motorola Inc.
+* All rights reserved.
+*
+* THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+* To the maximum extent permitted by applicable law,
+* MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+* INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A
+* PARTICULAR PURPOSE and any warranty against infringement with
+* regard to the SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF)
+* and any accompanying written materials.
+*
+* To the maximum extent permitted by applicable law,
+* IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+* (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS
+* PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR
+* OTHER PECUNIARY LOSS) ARISING OF THE USE OR INABILITY TO USE THE
+* SOFTWARE. Motorola assumes no responsibility for the maintenance
+* and support of the SOFTWARE.
+*
+* You are hereby granted a copyright license to use, modify, and
+* distribute the SOFTWARE so long as this entire notice is retained
+* without alteration in any modified and/or redistributed versions,
+* and that such modified versions are clearly identified as such.
+* No licenses are granted by implication, estoppel or otherwise
+* under any patents or trademarks of Motorola, Inc.
+
+*
+* l_support.sa 1.2 5/1/91
+*
+
+L_SUPPORT IDNT 2,1 Motorola 040 Floating Point Software Package
+
+ section 8
+
+mns_one dc.l $bfff0000,$80000000,$00000000
+pls_one dc.l $3fff0000,$80000000,$00000000
+pls_inf dc.l $7fff0000,$00000000,$00000000
+pls_huge dc.l $7ffe0000,$ffffffff,$ffffffff
+mns_huge dc.l $fffe0000,$ffffffff,$ffffffff
+pls_tiny dc.l $00000000,$80000000,$00000000
+mns_tiny dc.l $80000000,$80000000,$00000000
+small dc.l $20000000,$80000000,$00000000
+pls_zero dc.l $00000000,$00000000,$00000000
+
+ include l_fpsp.h
+
+*
+* tag --- determine the type of an extended precision operand
+*
+* The tag values returned match the way the 68040 would have
+* tagged them.
+*
+* Input: a0 points to operand
+*
+* Output d0.b = $00 norm
+* $20 zero
+* $40 inf
+* $60 nan
+* $80 denorm
+* All other registers are unchanged
+*
+ xdef tag
+tag:
+ move.w LOCAL_EX(a0),d0
+ andi.w #$7fff,d0
+ beq.b chk_zro
+ cmpi.w #$7fff,d0
+ beq.b chk_inf
+tag_nrm:
+ clr.b d0
+ rts
+tag_nan:
+ move.b #$60,d0
+ rts
+tag_dnrm:
+ move.b #$80,d0
+ rts
+chk_zro:
+ btst.b #7,LOCAL_HI(a0) # check if J-bit is set
+ bne.b tag_nrm
+ tst.l LOCAL_HI(a0)
+ bne.b tag_dnrm
+ tst.l LOCAL_LO(a0)
+ bne.b tag_dnrm
+tag_zero:
+ move.b #$20,d0
+ rts
+chk_inf:
+ tst.l LOCAL_HI(a0)
+ bne.b tag_nan
+ tst.l LOCAL_LO(a0)
+ bne.b tag_nan
+tag_inf:
+ move.b #$40,d0
+ rts
+
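For readers less fluent in 040 assembly, the classification that tag performs can be sketched in C. This is an illustration only: the struct layout assumes the 96-bit in-memory extended format addressed by LOCAL_EX/LOCAL_HI/LOCAL_LO (a 16-bit sign/exponent word, 16 pad bits, then the 64-bit mantissa with the explicit J bit in its most significant bit), and all names are invented for the sketch.

    #include <stdint.h>

    #define TAG_NORM   0x00
    #define TAG_ZERO   0x20
    #define TAG_INF    0x40
    #define TAG_NAN    0x60
    #define TAG_DENORM 0x80

    /* Assumed image of one extended-precision operand in the FPSP frame. */
    struct x96 {
        uint16_t sign_exp;   /* LOCAL_EX: sign bit + 15-bit biased exponent */
        uint16_t pad;
        uint32_t man_hi;     /* LOCAL_HI: bit 31 is the explicit J bit */
        uint32_t man_lo;     /* LOCAL_LO */
    };

    static uint8_t tag(const struct x96 *p)
    {
        uint16_t exp = p->sign_exp & 0x7fff;

        if (exp == 0x0000) {                     /* chk_zro */
            if (p->man_hi & 0x80000000u)         /* J bit set: tagged norm */
                return TAG_NORM;
            return (p->man_hi | p->man_lo) ? TAG_DENORM : TAG_ZERO;
        }
        if (exp == 0x7fff)                       /* chk_inf */
            return (p->man_hi | p->man_lo) ? TAG_NAN : TAG_INF;
        return TAG_NORM;
    }

The returned values match the table in the header comment above, and nothing else about the operand is modified, just as the assembly leaves all other registers unchanged.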
+*
+* t_dz, t_dz2 --- divide by zero exception
+*
+* t_dz2 is used by monadic functions such as flogn (from do_func).
+* t_dz is used by monadic functions such as satanh (from the
+* transcendental function).
+*
+ xdef t_dz2
+t_dz2:
+ fmovem.x mns_one,fp0
+ fmove.l d1,fpcr
+ fdiv.x pls_zero,fp0
+ rts
+
+ xdef t_dz
+t_dz:
+ btst.b #sign_bit,ETEMP_EX(a6) ;check sign for neg or pos
+ beq.b p_inf ;branch if pos sign
+m_inf:
+ fmovem.x mns_one,fp0
+ fmove.l d1,fpcr
+ fdiv.x pls_zero,fp0
+ rts
+p_inf:
+ fmovem.x pls_one,fp0
+ fmove.l d1,fpcr
+ fdiv.x pls_zero,fp0
+ rts
+*
+* t_operr --- Operand Error exception
+*
+ xdef t_operr
+t_operr:
+ fmovem.x pls_inf,fp0
+ fmove.l d1,fpcr
+ fmul.x pls_zero,fp0
+ rts
+
+*
+* t_unfl --- UNFL exception
+*
+ xdef t_unfl
+t_unfl:
+ btst.b #sign_bit,ETEMP(a6)
+ beq.b unf_pos
+unf_neg:
+ fmovem.x mns_tiny,fp0
+ fmove.l d1,fpcr
+ fmul.x pls_tiny,fp0
+ rts
+
+unf_pos:
+ fmovem.x pls_tiny,fp0
+ fmove.l d1,fpcr
+ fmul.x fp0,fp0
+ rts
+*
+* t_ovfl --- OVFL exception
+*
+* t_ovfl is called as an exit for monadic functions. t_ovfl2
+* is for dyadic exits.
+*
+ xdef t_ovfl
+t_ovfl:
+ xdef t_ovfl2
+ move.l d1,USER_FPCR(a6) user's control register
+ move.l #ovfinx_mask,d0
+ bra.b t_work
+t_ovfl2:
+ move.l #ovfl_inx_mask,d0
+t_work:
+ btst.b #sign_bit,ETEMP(a6)
+ beq.b ovf_pos
+ovf_neg:
+ fmovem.x mns_huge,fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fmul.x pls_huge,fp0
+ fmove.l fpsr,d1
+ or.l d1,d0
+ fmove.l d0,fpsr
+ rts
+ovf_pos:
+ fmovem.x pls_huge,fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fmul.x pls_huge,fp0
+ fmove.l fpsr,d1
+ or.l d1,d0
+ fmove.l d0,fpsr
+ rts
+*
+* t_inx2 --- INEX2 exception (correct fpcr is in USER_FPCR(a6))
+*
+ xdef t_inx2
+t_inx2:
+ fmove.l fpsr,USER_FPSR(a6) capture incoming fpsr
+ fmove.l USER_FPCR(a6),fpcr
+*
+* create an inex2 exception by adding two numbers with very different exponents
+* do the add in fp1 so as to not disturb the result sitting in fp0
+*
+ fmove.x pls_one,fp1
+ fadd.x small,fp1
+*
+ or.l #inx2a_mask,USER_FPSR(a6) ;set INEX2, AINEX
+ fmove.l USER_FPSR(a6),fpsr
+ rts
+*
+* t_frcinx --- Force Inex2 (for monadic functions)
+*
+ xdef t_frcinx
+t_frcinx:
+ fmove.l fpsr,USER_FPSR(a6) capture incoming fpsr
+ fmove.l d1,fpcr
+*
+* create an inex2 exception by adding two numbers with very different exponents
+* do the add in fp1 so as to not disturb the result sitting in fp0
+*
+ fmove.x pls_one,fp1
+ fadd.x small,fp1
+*
+ or.l #inx2a_mask,USER_FPSR(a6) ;set INEX2, AINEX
+ btst.b #unfl_bit,FPSR_EXCEPT(a6) ;test for unfl bit set
+ beq.b no_uacc1 ;if clear, do not set aunfl
+ bset.b #aunfl_bit,FPSR_AEXCEPT(a6)
+no_uacc1:
+ fmove.l USER_FPSR(a6),fpsr
+ rts
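Rather than setting status bits by hand, t_inx2 and t_frcinx let the FPU raise INEX2 itself by adding pls_one to small, whose exponents are so far apart (small is roughly 2^-8191) that the sum cannot be represented exactly; t_dz, t_operr, t_ovfl and t_unfl use the same trick with a divide or multiply, so the exception is taken under the user's own fpcr settings. A minimal C analogue using the standard fenv.h interface, assuming a host that defines FE_INEXACT:

    #include <fenv.h>
    #include <stdio.h>

    #pragma STDC FENV_ACCESS ON     /* we inspect floating-point status below */

    int main(void)
    {
        /* Exponents 200 binades apart: the sum is inexact in any real format. */
        volatile long double one = 1.0L, small = 0x1p-200L;

        feclearexcept(FE_ALL_EXCEPT);
        one = one + small;                       /* the add itself sets inexact */
        if (fetestexcept(FE_INEXACT))
            puts("INEX raised by the hardware, as in t_inx2/t_frcinx");
        return 0;
    }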
+*
+* dst_nan --- force result when destination is a NaN
+*
+ xdef dst_nan
+dst_nan:
+ fmove.l USER_FPCR(a6),fpcr
+ fmove.x FPTEMP(a6),fp0
+ rts
+
+*
+* src_nan --- force result when source is a NaN
+*
+ xdef src_nan
+src_nan:
+ fmove.l USER_FPCR(a6),fpcr
+ fmove.x ETEMP(a6),fp0
+ rts
+*
+* mon_nan --- force result when source is a NaN (monadic version)
+*
+* This is the same as src_nan except that the user's fpcr comes
+* in via d1, not USER_FPCR(a6).
+*
+ xdef mon_nan
+mon_nan:
+ fmove.l d1,fpcr
+ fmove.x ETEMP(a6),fp0
+ rts
+*
+* t_extdnrm, t_resdnrm --- generate results for denorm inputs
+*
+* For all functions that have a denormalized input and that f(x)=x,
+* this is the entry point.
+*
+ xdef t_extdnrm
+t_extdnrm:
+ fmove.l d1,fpcr
+ fmove.x LOCAL_EX(a0),fp0
+ fmove.l fpsr,d0
+ or.l #unfinx_mask,d0
+ fmove.l d0,fpsr
+ rts
+
+ xdef t_resdnrm
+t_resdnrm:
+ fmove.l USER_FPCR(a6),fpcr
+ fmove.x LOCAL_EX(a0),fp0
+ fmove.l fpsr,d0
+ or.l #unfl_mask,d0
+ fmove.l d0,fpsr
+ rts
+*
+*
+*
+ xdef t_avoid_unsupp
+t_avoid_unsupp:
+ fmove.x fp0,fp0
+ rts
+
+ xdef sto_cos
+sto_cos:
+ fmovem.x LOCAL_EX(a0),fp1
+ rts
+*
+* Native instruction support
+*
+* Some systems may need entry points even for 68040 native
+* instructions. These routines are provided for
+* convenience.
+*
+ xdef sadd
+sadd:
+ fmovem.x FPTEMP(a6),fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fadd.x ETEMP(a6),fp0
+ rts
+
+ xdef ssub
+ssub:
+ fmovem.x FPTEMP(a6),fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fsub.x ETEMP(a6),fp0
+ rts
+
+ xdef smul
+smul:
+ fmovem.x FPTEMP(a6),fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fmul.x ETEMP(a6),fp0
+ rts
+
+ xdef sdiv
+sdiv:
+ fmovem.x FPTEMP(a6),fp0
+ fmove.l USER_FPCR(a6),fpcr
+ fdiv.x ETEMP(a6),fp0
+ rts
+
+ xdef sabs
+sabs:
+ fmovem.x ETEMP(a6),fp0
+ fmove.l d1,fpcr
+ fabs.x fp0
+ rts
+
+ xdef sneg
+sneg:
+ fmovem.x ETEMP(a6),fp0
+ fmove.l d1,fpcr
+ fneg.x fp0
+ rts
+
+ xdef ssqrt
+ssqrt:
+ fmovem.x ETEMP(a6),fp0
+ fmove.l d1,fpcr
+ fsqrt.x fp0
+ rts
+
+*
+* l_sint,l_sintrz,l_sintd --- special wrapper for fint and fintrz
+*
+* On entry, move the user's FPCR to USER_FPCR.
+*
+* On return, we need to pick up the INEX2/AINEX bits
+* that are in USER_FPSR.
+*
+ xref sint
+ xref sintrz
+ xref sintd
+
+ xdef l_sint
+l_sint:
+ move.l d1,USER_FPCR(a6)
+ jsr sint
+ fmove.l fpsr,d0
+ or.l USER_FPSR(a6),d0
+ fmove.l d0,fpsr
+ rts
+
+ xdef l_sintrz
+l_sintrz:
+ move.l d1,USER_FPCR(a6)
+ jsr sintrz
+ fmove.l fpsr,d0
+ or.l USER_FPSR(a6),d0
+ fmove.l d0,fpsr
+ rts
+
+ xdef l_sintd
+l_sintd:
+ move.l d1,USER_FPCR(a6)
+ jsr sintd
+ fmove.l fpsr,d0
+ or.l USER_FPSR(a6),d0
+ fmove.l d0,fpsr
+ rts
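Each of the three wrappers above saves the caller's fpcr, calls the emulation routine, and then ORs the INEX2/AINEX bits that accumulated in USER_FPSR back into the live fpsr so they are not dropped. The same merge expressed in C, with read_fpsr/write_fpsr as hypothetical stand-ins for the fmove.l fpsr transfers and user_fpsr standing in for USER_FPSR(a6):

    #include <stdint.h>

    extern uint32_t read_fpsr(void);   /* assumed accessor, not a real API */
    extern void     write_fpsr(uint32_t v);
    extern uint32_t user_fpsr;         /* status bits accrued during emulation */

    static void merge_status(void)
    {
        uint32_t live = read_fpsr();   /* fmove.l fpsr,d0 */
        live |= user_fpsr;             /* or.l   USER_FPSR(a6),d0 */
        write_fpsr(live);              /* fmove.l d0,fpsr */
    }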
+
+ end