author     Mark Kettenis <kettenis@cvs.openbsd.org>    2011-11-27 12:32:33 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>    2011-11-27 12:32:33 +0000
commit     37554207f12bf7314b90eaef75bbb7a6a23664e9 (patch)
tree       157324ada0d0dca1c8e12a8fd11f766627d28c39 /sys
parent     5727e544266b813a23ab65926c3991a7385f0872 (diff)
Regen.
Diffstat (limited to 'sys')
-rw-r--r--  sys/lib/libkern/arch/hppa64/bcopy.S   | 45
-rw-r--r--  sys/lib/libkern/arch/hppa64/spcopy.S  | 36
2 files changed, 40 insertions(+), 41 deletions(-)
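
Most of the changes in both files replace 32-bit PA-RISC compare-and-branch, add-and-branch, and bit-field instructions (comb, comib, addib, sub, zdep, extru) with their 64-bit PA-RISC 2.0 forms (cmpb,*, cmpib,*, addib,*, sub,*, depd,z, extrd,u), so the conditions and deposits/extracts operate on full doublewords. The first bcopy.S hunk additionally reworks the memcpy/memmove prologue to keep the destination in %ret0 while swapping the operands into bcopy's (src, dst) order. A minimal C sketch of that calling-convention detail follows; bcopy_core and memcpy_sketch are hypothetical names used only for illustration, not symbols in these files:

#include <stddef.h>

/*
 * Hypothetical stand-in for the shared copy loop that the bcopy/ovbcopy
 * entry points fall through to; it takes its operands in bcopy order
 * (src, dst, len).
 */
static void
bcopy_core(const void *src, void *dst, size_t len)
{
	const unsigned char *s = src;
	unsigned char *d = dst;

	while (len-- > 0)
		*d++ = *s++;
}

/*
 * What the reworked memcpy/memmove prologue arranges: keep dst in the
 * return register (%ret0 in the assembly), swap the operands into
 * bcopy order, and fall through to the common code.
 */
void *
memcpy_sketch(void *dst, const void *src, size_t len)
{
	bcopy_core(src, dst, len);
	return dst;		/* %ret0 still holds dst on return */
}
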
diff --git a/sys/lib/libkern/arch/hppa64/bcopy.S b/sys/lib/libkern/arch/hppa64/bcopy.S
index a75c35e9316..78e548192b6 100644
--- a/sys/lib/libkern/arch/hppa64/bcopy.S
+++ b/sys/lib/libkern/arch/hppa64/bcopy.S
@@ -30,20 +30,19 @@
LEAF_ENTRY(memcpy)
ALTENTRY(memmove)
- copy %arg0, %r22
- copy %arg1, %arg0
- copy %r22, %arg1
copy %arg0, %ret0
+ copy %arg1, %arg0
+ copy %ret0, %arg1
ALTENTRY(ovbcopy)
ALTENTRY(bcopy)
- comb,>,n %arg1, %arg0, $bcopy.reverse
+ cmpb,*>,n %arg1, %arg0, $bcopy.reverse
- comib,>=,n 15, %arg2, $bcopy_f.byte
+ cmpib,*>=,n 15, %arg2, $bcopy_f.byte
extrd,u %arg0, 63, 2, %r20
extrd,u %arg1, 63, 2, %r19
add %arg2, %r19, %arg2
- comb,<> %r20, %r19, $bcopy_f.unaligned
+ cmpb,*<> %r20, %r19, $bcopy_f.unaligned
depd %r0, 63, 2, %arg0
addi -16, %arg2, %arg2
@@ -56,26 +55,26 @@ $bcopy_f.loop16a
stbys,b,m %r22, 4(%sr0, %arg1)
stws,ma %r21, 4(%sr0, %arg1)
stws,ma %r20, 4(%sr0, %arg1)
- addib,>= -16, %arg2, $bcopy_f.loop16a
+ addib,*>= -16, %arg2, $bcopy_f.loop16a
stws,ma %r19, 4(%sr0, %arg1)
- addib,<,n 12, %arg2, $bcopy_f.cleanup
+ addib,*<,n 12, %arg2, $bcopy_f.cleanup
$bcopy_f.word
ldws,ma 4(%sr0, %arg0), %r22
- addib,>= -4, %arg2, $bcopy_f.word
+ addib,*>= -4, %arg2, $bcopy_f.word
stws,ma %r22, 4(%sr0, %arg1)
$bcopy_f.cleanup
- addib,=,n 4, %arg2, $bcopy_f.done
+ addib,*=,n 4, %arg2, $bcopy_f.done
ldws 0(%sr0, %arg0), %r22
add %arg1, %arg2, %arg1
b $bcopy_f.done
stbys,e %r22, 0(%sr0, %arg1)
$bcopy_f.unaligned
- sub,>= %r19, %r20, %r21
+ sub,*>= %r19, %r20, %r21
ldwm 4(%sr0, %arg0), %ret1
- zdep %r21, 28, 29, %r22
+ depd,z %r21, 60, 61, %r22
mtsar %r22
addi -16, %arg2, %arg2
@@ -93,26 +92,26 @@ $bcopy_f.loop16u
stws,ma %r31, 4(%sr0, %arg1)
vshd %r20, %r19, %r31
stws,ma %r31, 4(%sr0, %arg1)
- addib,>= -16, %arg2, $bcopy_f.loop16u
+ addib,*>= -16, %arg2, $bcopy_f.loop16u
copy %r19, %ret1
- addib,<,n 12, %arg2, $bcopy_f.cleanup_un
+ addib,*<,n 12, %arg2, $bcopy_f.cleanup_un
$bcopy_f.word_un
ldws,ma 4(%sr0, %arg0), %r22
vshd %ret1, %r22, %r21
- addib,< -4, %arg2, $bcopy_f.cleanup1_un
+ addib,*< -4, %arg2, $bcopy_f.cleanup1_un
stws,ma %r21, 4(%sr0, %arg1)
ldws,ma 4(%sr0, %arg0), %ret1
vshd %r22, %ret1, %r21
- addib,>= -4, %arg2, $bcopy_f.word_un
+ addib,*>= -4, %arg2, $bcopy_f.word_un
stws,ma %r21, 4(%sr0, %arg1)
$bcopy_f.cleanup_un
- addib,<=,n 4, %arg2, $bcopy_f.done
+ addib,*<=,n 4, %arg2, $bcopy_f.done
mfctl %sar, %r19
add %arg1, %arg2, %arg1
- extru %r19, 28, 2, %r19
- sub,<= %arg2, %r19, %r0
+ extrd,u %r19, 60, 2, %r19
+ sub,*<= %arg2, %r19, %r0
ldws,ma 4(%sr0, %arg0), %r22
vshd %ret1, %r22, %r21
b $bcopy_f.done
@@ -123,10 +122,10 @@ $bcopy_f.cleanup1_un
copy %r22, %ret1
$bcopy_f.byte
- comb,>=,n %r0, %arg2, $bcopy_f.done
+ cmpb,*>=,n %r0, %arg2, $bcopy_f.done
$bcopy_f.byte_loop
ldbs,ma 1(%sr0, %arg0), %r22
- addib,<> -1, %arg2, $bcopy_f.byte_loop
+ addib,*<> -1, %arg2, $bcopy_f.byte_loop
stbs,ma %r22, 1(%sr0, %arg1)
$bcopy_f.done
@@ -138,10 +137,10 @@ $bcopy.reverse
$bcopy_r.byte
- comb,>=,n %r0, %arg2, $bcopy_r.done
+ cmpb,*>=,n %r0, %arg2, $bcopy_r.done
$bcopy_r.byte_loop
ldbs,mb -1(%sr0, %arg0), %r22
- addib,<> -1, %arg2, $bcopy_r.byte_loop
+ addib,*<> -1, %arg2, $bcopy_r.byte_loop
stbs,mb %r22, -1(%sr0, %arg1)
$bcopy_r.done
diff --git a/sys/lib/libkern/arch/hppa64/spcopy.S b/sys/lib/libkern/arch/hppa64/spcopy.S
index c97957c1576..73e7bc37a24 100644
--- a/sys/lib/libkern/arch/hppa64/spcopy.S
+++ b/sys/lib/libkern/arch/hppa64/spcopy.S
@@ -40,7 +40,7 @@
*/
.import copy_on_fault, code
LEAF_ENTRY(spcopy)
- sub,<> %r0, arg4, %r0
+ sub,*<> %r0, arg4, %r0
bv %r0(%rp)
nop
@@ -60,12 +60,12 @@ LEAF_ENTRY(spcopy)
copy arg4, %ret0
- comib,>=,n 15, %ret0, $spcopy.byte
+ cmpib,*>=,n 15, %ret0, $spcopy.byte
extrd,u %arg1, 63, 2, %r20
extrd,u %arg3, 63, 2, %r19
add %ret0, %r19, %ret0
- comb,<> %r20, %r19, $spcopy.unaligned
+ cmpb,*<> %r20, %r19, $spcopy.unaligned
depd %r0, 63, 2, %arg1
addi -16, %ret0, %ret0
@@ -78,26 +78,26 @@ $spcopy.loop16a
stbys,b,m %r22, 4(%sr2, %arg3)
stws,ma %r21, 4(%sr2, %arg3)
stws,ma %r20, 4(%sr2, %arg3)
- addib,>= -16, %ret0, $spcopy.loop16a
+ addib,*>= -16, %ret0, $spcopy.loop16a
stws,ma %r19, 4(%sr2, %arg3)
- addib,<,n 12, %ret0, $spcopy.cleanup
+ addib,*<,n 12, %ret0, $spcopy.cleanup
$spcopy.word
ldws,ma 4(%sr1, %arg1), %r22
- addib,>= -4, %ret0, $spcopy.word
+ addib,*>= -4, %ret0, $spcopy.word
stws,ma %r22, 4(%sr2, %arg3)
$spcopy.cleanup
- addib,=,n 4, %ret0, $spcopy.done
+ addib,*=,n 4, %ret0, $spcopy.done
ldws 0(%sr1, %arg1), %r22
add %arg3, %ret0, %arg3
b $spcopy.done
stbys,e %r22, 0(%sr2, %arg3)
$spcopy.unaligned
- sub,>= %r19, %r20, %r21
+ sub,*>= %r19, %r20, %r21
ldwm 4(%sr1, %arg1), %ret1
- zdep %r21, 28, 29, %r22
+ depd,z %r21, 60, 61, %r22
mtsar %r22
addi -16, %ret0, %ret0
@@ -115,26 +115,26 @@ $spcopy.loop16u
stws,ma %r31, 4(%sr2, %arg3)
vshd %r20, %r19, %r31
stws,ma %r31, 4(%sr2, %arg3)
- addib,>= -16, %ret0, $spcopy.loop16u
+ addib,*>= -16, %ret0, $spcopy.loop16u
copy %r19, %ret1
- addib,<,n 12, %ret0, $spcopy.cleanup_un
+ addib,*<,n 12, %ret0, $spcopy.cleanup_un
$spcopy.word_un
ldws,ma 4(%sr1, %arg1), %r22
vshd %ret1, %r22, %r21
- addib,< -4, %ret0, $spcopy.cleanup1_un
+ addib,*< -4, %ret0, $spcopy.cleanup1_un
stws,ma %r21, 4(%sr2, %arg3)
ldws,ma 4(%sr1, %arg1), %ret1
vshd %r22, %ret1, %r21
- addib,>= -4, %ret0, $spcopy.word_un
+ addib,*>= -4, %ret0, $spcopy.word_un
stws,ma %r21, 4(%sr2, %arg3)
$spcopy.cleanup_un
- addib,<=,n 4, %ret0, $spcopy.done
+ addib,*<=,n 4, %ret0, $spcopy.done
mfctl %sar, %r19
add %arg3, %ret0, %arg3
- extru %r19, 28, 2, %r19
- sub,<= %ret0, %r19, %r0
+ extrd,u %r19, 60, 2, %r19
+ sub,*<= %ret0, %r19, %r0
ldws,ma 4(%sr1, %arg1), %r22
vshd %ret1, %r22, %r21
b $spcopy.done
@@ -145,10 +145,10 @@ $spcopy.cleanup1_un
copy %r22, %ret1
$spcopy.byte
- comb,>=,n %r0, %ret0, $spcopy.done
+ cmpb,*>=,n %r0, %ret0, $spcopy.done
$spcopy.byte_loop
ldbs,ma 1(%sr1, %arg1), %r22
- addib,<> -1, %ret0, $spcopy.byte_loop
+ addib,*<> -1, %ret0, $spcopy.byte_loop
stbs,ma %r22, 1(%sr2, %arg3)
$spcopy.done