author    Damien Miller <djm@cvs.openbsd.org>    2009-04-06 06:30:02 +0000
committer Damien Miller <djm@cvs.openbsd.org>    2009-04-06 06:30:02 +0000
commit    f522ab407012d6a0d19480c28c3111d8f5855035 (patch)
tree      9fd4d0474e298dead8a54e5d6dd4215d2dc4cd16 /lib
parent    c8e2874aabdb7380d9993523007f332a0cb7416b (diff)

import of OpenSSL 0.9.8k
Diffstat (limited to 'lib')

 -rw-r--r--  lib/libcrypto/aes/asm/aes-ppc.pl    585
 -rw-r--r--  lib/libcrypto/bn/asm/ppc64-mont.pl  338

2 files changed, 282 insertions(+), 641 deletions(-)
diff --git a/lib/libcrypto/aes/asm/aes-ppc.pl b/lib/libcrypto/aes/asm/aes-ppc.pl
index 7c52cbe5f9f..ce427655ef7 100644
--- a/lib/libcrypto/aes/asm/aes-ppc.pl
+++ b/lib/libcrypto/aes/asm/aes-ppc.pl
@@ -7,7 +7,7 @@
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
-# Needs more work: key setup, CBC routine...
+# Needs more work: key setup, page boundaries, CBC routine...
#
# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with
# 128-bit key, which is ~40% better than 64-bit code generated by gcc
@@ -16,30 +16,15 @@
# at 1/2 of ppc_AES_encrypt speed, while ppc_AES_decrypt_compact -
# at 1/3 of ppc_AES_decrypt.
-# February 2010
-#
-# Rescheduling instructions to favour Power6 pipeline gave 10%
-# performance improvement on the platform in question (and marginal
-# improvement even on others). It should be noted that Power6 fails
-# to process a byte in 18 cycles, only in 23, because it fails to issue
-# 4 load instructions in two cycles, only in 3. As a result, non-compact
-# block subroutines are 25% slower than one would expect. Compact
-# functions scale better, because they have pure computational part,
-# which scales perfectly with clock frequency. To be specific
-# ppc_AES_encrypt_compact operates at 42 cycles per byte, while
-# ppc_AES_decrypt_compact - at 55 (in 64-bit build).
-
$flavour = shift;
if ($flavour =~ /64/) {
$SIZE_T =8;
- $LRSAVE =2*$SIZE_T;
$STU ="stdu";
$POP ="ld";
$PUSH ="std";
} elsif ($flavour =~ /32/) {
$SIZE_T =4;
- $LRSAVE =$SIZE_T;
$STU ="stwu";
$POP ="lwz";
$PUSH ="stw";
@@ -118,19 +103,15 @@ LAES_Te:
addi $Tbl0,$Tbl0,`128-8`
mtlr r0
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
- .space `64-9*4`
+ .space `32-24`
LAES_Td:
mflr r0
bcl 20,31,\$+4
mflr $Tbl0 ; vvvvvvvv "distance" between . and 1st data entry
- addi $Tbl0,$Tbl0,`128-64-8+2048+256`
+ addi $Tbl0,$Tbl0,`128-8-32+2048+256`
mtlr r0
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
- .space `128-64-9*4`
+ .space `128-32-24`
___
&_data_word(
0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
@@ -334,9 +315,10 @@ $code.=<<___;
.globl .AES_encrypt
.align 7
.AES_encrypt:
- $STU $sp,-$FRAME($sp)
mflr r0
+ $STU $sp,-$FRAME($sp)
+ $PUSH r0,`$FRAME-$SIZE_T*21`($sp)
$PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
$PUSH r13,`$FRAME-$SIZE_T*19`($sp)
$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
@@ -357,14 +339,7 @@ $code.=<<___;
$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
- $PUSH r0,`$FRAME+$LRSAVE`($sp)
-
- andi. $t0,$inp,3
- andi. $t1,$out,3
- or. $t0,$t0,$t1
- bne Lenc_unaligned
-Lenc_unaligned_ok:
lwz $s0,0($inp)
lwz $s1,4($inp)
lwz $s2,8($inp)
@@ -375,80 +350,8 @@ Lenc_unaligned_ok:
stw $s1,4($out)
stw $s2,8($out)
stw $s3,12($out)
- b Lenc_done
-
-Lenc_unaligned:
- subfic $t0,$inp,4096
- subfic $t1,$out,4096
- andi. $t0,$t0,4096-16
- beq Lenc_xpage
- andi. $t1,$t1,4096-16
- bne Lenc_unaligned_ok
-Lenc_xpage:
- lbz $acc00,0($inp)
- lbz $acc01,1($inp)
- lbz $acc02,2($inp)
- lbz $s0,3($inp)
- lbz $acc04,4($inp)
- lbz $acc05,5($inp)
- lbz $acc06,6($inp)
- lbz $s1,7($inp)
- lbz $acc08,8($inp)
- lbz $acc09,9($inp)
- lbz $acc10,10($inp)
- insrwi $s0,$acc00,8,0
- lbz $s2,11($inp)
- insrwi $s1,$acc04,8,0
- lbz $acc12,12($inp)
- insrwi $s0,$acc01,8,8
- lbz $acc13,13($inp)
- insrwi $s1,$acc05,8,8
- lbz $acc14,14($inp)
- insrwi $s0,$acc02,8,16
- lbz $s3,15($inp)
- insrwi $s1,$acc06,8,16
- insrwi $s2,$acc08,8,0
- insrwi $s3,$acc12,8,0
- insrwi $s2,$acc09,8,8
- insrwi $s3,$acc13,8,8
- insrwi $s2,$acc10,8,16
- insrwi $s3,$acc14,8,16
-
- bl LAES_Te
- bl Lppc_AES_encrypt_compact
-
- extrwi $acc00,$s0,8,0
- extrwi $acc01,$s0,8,8
- stb $acc00,0($out)
- extrwi $acc02,$s0,8,16
- stb $acc01,1($out)
- stb $acc02,2($out)
- extrwi $acc04,$s1,8,0
- stb $s0,3($out)
- extrwi $acc05,$s1,8,8
- stb $acc04,4($out)
- extrwi $acc06,$s1,8,16
- stb $acc05,5($out)
- stb $acc06,6($out)
- extrwi $acc08,$s2,8,0
- stb $s1,7($out)
- extrwi $acc09,$s2,8,8
- stb $acc08,8($out)
- extrwi $acc10,$s2,8,16
- stb $acc09,9($out)
- stb $acc10,10($out)
- extrwi $acc12,$s3,8,0
- stb $s2,11($out)
- extrwi $acc13,$s3,8,8
- stb $acc12,12($out)
- extrwi $acc14,$s3,8,16
- stb $acc13,13($out)
- stb $acc14,14($out)
- stb $s3,15($out)
-
-Lenc_done:
- $POP r0,`$FRAME+$LRSAVE`($sp)
+ $POP r0,`$FRAME-$SIZE_T*21`($sp)
$POP $toc,`$FRAME-$SIZE_T*20`($sp)
$POP r13,`$FRAME-$SIZE_T*19`($sp)
$POP r14,`$FRAME-$SIZE_T*18`($sp)
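
The path removed above (Lenc_unaligned/Lenc_xpage) handled blocks whose input or
output crosses a page boundary by loading all 16 bytes individually and merging
them into big-endian words with insrwi. A minimal C sketch of that byte-gather,
with illustrative names only:

    #include <stdint.h>

    /* Assemble a big-endian 32-bit word from four individually loaded
     * bytes, as the lbz/insrwi sequence in Lenc_xpage did. */
    static uint32_t load_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
    }

    /* Gather one 16-byte AES block into the four state words s0..s3. */
    static void gather_block(const uint8_t in[16], uint32_t s[4])
    {
        for (int i = 0; i < 4; i++)
            s[i] = load_be32(in + 4 * i);
    }
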
@@ -472,21 +375,18 @@ Lenc_done:
mtlr r0
addi $sp,$sp,$FRAME
blr
- .long 0
- .byte 0,12,4,1,0x80,18,3,0
- .long 0
-.align 5
+.align 4
Lppc_AES_encrypt:
lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,3
lwz $t0,0($key)
- addi $Tbl2,$Tbl0,2
lwz $t1,4($key)
- addi $Tbl3,$Tbl0,1
lwz $t2,8($key)
- addi $acc00,$acc00,-1
lwz $t3,12($key)
+ addi $Tbl1,$Tbl0,3
+ addi $Tbl2,$Tbl0,2
+ addi $Tbl3,$Tbl0,1
+ addi $acc00,$acc00,-1
addi $key,$key,16
xor $s0,$s0,$t0
xor $s1,$s1,$t1
@@ -497,47 +397,47 @@ Lppc_AES_encrypt:
Lenc_loop:
rlwinm $acc00,$s0,`32-24+3`,21,28
rlwinm $acc01,$s1,`32-24+3`,21,28
+ lwz $t0,0($key)
+ lwz $t1,4($key)
rlwinm $acc02,$s2,`32-24+3`,21,28
rlwinm $acc03,$s3,`32-24+3`,21,28
- lwz $t0,0($key)
+ lwz $t2,8($key)
+ lwz $t3,12($key)
rlwinm $acc04,$s1,`32-16+3`,21,28
- lwz $t1,4($key)
rlwinm $acc05,$s2,`32-16+3`,21,28
- lwz $t2,8($key)
+ lwzx $acc00,$Tbl0,$acc00
+ lwzx $acc01,$Tbl0,$acc01
rlwinm $acc06,$s3,`32-16+3`,21,28
- lwz $t3,12($key)
rlwinm $acc07,$s0,`32-16+3`,21,28
- lwzx $acc00,$Tbl0,$acc00
+ lwzx $acc02,$Tbl0,$acc02
+ lwzx $acc03,$Tbl0,$acc03
rlwinm $acc08,$s2,`32-8+3`,21,28
- lwzx $acc01,$Tbl0,$acc01
rlwinm $acc09,$s3,`32-8+3`,21,28
- lwzx $acc02,$Tbl0,$acc02
+ lwzx $acc04,$Tbl1,$acc04
+ lwzx $acc05,$Tbl1,$acc05
rlwinm $acc10,$s0,`32-8+3`,21,28
- lwzx $acc03,$Tbl0,$acc03
rlwinm $acc11,$s1,`32-8+3`,21,28
- lwzx $acc04,$Tbl1,$acc04
+ lwzx $acc06,$Tbl1,$acc06
+ lwzx $acc07,$Tbl1,$acc07
rlwinm $acc12,$s3,`0+3`,21,28
- lwzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s0,`0+3`,21,28
- lwzx $acc06,$Tbl1,$acc06
+ lwzx $acc08,$Tbl2,$acc08
+ lwzx $acc09,$Tbl2,$acc09
rlwinm $acc14,$s1,`0+3`,21,28
- lwzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s2,`0+3`,21,28
- lwzx $acc08,$Tbl2,$acc08
+ lwzx $acc10,$Tbl2,$acc10
+ lwzx $acc11,$Tbl2,$acc11
xor $t0,$t0,$acc00
- lwzx $acc09,$Tbl2,$acc09
xor $t1,$t1,$acc01
- lwzx $acc10,$Tbl2,$acc10
+ lwzx $acc12,$Tbl3,$acc12
+ lwzx $acc13,$Tbl3,$acc13
xor $t2,$t2,$acc02
- lwzx $acc11,$Tbl2,$acc11
xor $t3,$t3,$acc03
- lwzx $acc12,$Tbl3,$acc12
+ lwzx $acc14,$Tbl3,$acc14
+ lwzx $acc15,$Tbl3,$acc15
xor $t0,$t0,$acc04
- lwzx $acc13,$Tbl3,$acc13
xor $t1,$t1,$acc05
- lwzx $acc14,$Tbl3,$acc14
xor $t2,$t2,$acc06
- lwzx $acc15,$Tbl3,$acc15
xor $t3,$t3,$acc07
xor $t0,$t0,$acc08
xor $t1,$t1,$acc09
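
Each pass through Lenc_loop is one AES round: rlwinm extracts a state byte
pre-scaled by 8, and Tbl1..Tbl3 (Tbl0 plus 3/2/1) index byte-rotated views of
doubled 8-byte table entries. Portable C usually writes the same round with one
table and explicit rotations; a hedged sketch, assuming the usual Te0 packing
rather than anything taken from this file:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* One AES round column with a single 256-entry table Te0 plus byte
     * rotations; Te0[x] is assumed to pack {2*S[x], S[x], S[x], 3*S[x]}
     * big-endian, and rk is the round-key word. */
    static uint32_t aes_round_column(const uint32_t Te0[256], uint32_t rk,
                                     uint32_t s0, uint32_t s1,
                                     uint32_t s2, uint32_t s3)
    {
        return Te0[(s0 >> 24) & 0xff]            ^
               ror32(Te0[(s1 >> 16) & 0xff],  8) ^
               ror32(Te0[(s2 >>  8) & 0xff], 16) ^
               ror32(Te0[(s3      ) & 0xff], 24) ^
               rk;
    }
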
@@ -552,61 +452,61 @@ Lenc_loop:
addi $Tbl2,$Tbl0,2048
nop
- lwz $t0,0($key)
+ lwz $acc08,`2048+0`($Tbl0) ! prefetch Te4
+ lwz $acc09,`2048+32`($Tbl0)
+ lwz $acc10,`2048+64`($Tbl0)
+ lwz $acc11,`2048+96`($Tbl0)
+ lwz $acc08,`2048+128`($Tbl0)
+ lwz $acc09,`2048+160`($Tbl0)
+ lwz $acc10,`2048+192`($Tbl0)
+ lwz $acc11,`2048+224`($Tbl0)
rlwinm $acc00,$s0,`32-24`,24,31
- lwz $t1,4($key)
rlwinm $acc01,$s1,`32-24`,24,31
- lwz $t2,8($key)
+ lwz $t0,0($key)
+ lwz $t1,4($key)
rlwinm $acc02,$s2,`32-24`,24,31
- lwz $t3,12($key)
rlwinm $acc03,$s3,`32-24`,24,31
- lwz $acc08,`2048+0`($Tbl0) ! prefetch Te4
+ lwz $t2,8($key)
+ lwz $t3,12($key)
rlwinm $acc04,$s1,`32-16`,24,31
- lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc05,$s2,`32-16`,24,31
- lwz $acc10,`2048+64`($Tbl0)
+ lbzx $acc00,$Tbl2,$acc00
+ lbzx $acc01,$Tbl2,$acc01
rlwinm $acc06,$s3,`32-16`,24,31
- lwz $acc11,`2048+96`($Tbl0)
rlwinm $acc07,$s0,`32-16`,24,31
- lwz $acc12,`2048+128`($Tbl0)
+ lbzx $acc02,$Tbl2,$acc02
+ lbzx $acc03,$Tbl2,$acc03
rlwinm $acc08,$s2,`32-8`,24,31
- lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc09,$s3,`32-8`,24,31
- lwz $acc14,`2048+192`($Tbl0)
+ lbzx $acc04,$Tbl2,$acc04
+ lbzx $acc05,$Tbl2,$acc05
rlwinm $acc10,$s0,`32-8`,24,31
- lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc00,$Tbl2,$acc00
+ lbzx $acc06,$Tbl2,$acc06
+ lbzx $acc07,$Tbl2,$acc07
rlwinm $acc12,$s3,`0`,24,31
- lbzx $acc01,$Tbl2,$acc01
rlwinm $acc13,$s0,`0`,24,31
- lbzx $acc02,$Tbl2,$acc02
+ lbzx $acc08,$Tbl2,$acc08
+ lbzx $acc09,$Tbl2,$acc09
rlwinm $acc14,$s1,`0`,24,31
- lbzx $acc03,$Tbl2,$acc03
rlwinm $acc15,$s2,`0`,24,31
- lbzx $acc04,$Tbl2,$acc04
+ lbzx $acc10,$Tbl2,$acc10
+ lbzx $acc11,$Tbl2,$acc11
rlwinm $s0,$acc00,24,0,7
- lbzx $acc05,$Tbl2,$acc05
rlwinm $s1,$acc01,24,0,7
- lbzx $acc06,$Tbl2,$acc06
+ lbzx $acc12,$Tbl2,$acc12
+ lbzx $acc13,$Tbl2,$acc13
rlwinm $s2,$acc02,24,0,7
- lbzx $acc07,$Tbl2,$acc07
rlwinm $s3,$acc03,24,0,7
- lbzx $acc08,$Tbl2,$acc08
+ lbzx $acc14,$Tbl2,$acc14
+ lbzx $acc15,$Tbl2,$acc15
rlwimi $s0,$acc04,16,8,15
- lbzx $acc09,$Tbl2,$acc09
rlwimi $s1,$acc05,16,8,15
- lbzx $acc10,$Tbl2,$acc10
rlwimi $s2,$acc06,16,8,15
- lbzx $acc11,$Tbl2,$acc11
rlwimi $s3,$acc07,16,8,15
- lbzx $acc12,$Tbl2,$acc12
rlwimi $s0,$acc08,8,16,23
- lbzx $acc13,$Tbl2,$acc13
rlwimi $s1,$acc09,8,16,23
- lbzx $acc14,$Tbl2,$acc14
rlwimi $s2,$acc10,8,16,23
- lbzx $acc15,$Tbl2,$acc15
rlwimi $s3,$acc11,8,16,23
or $s0,$s0,$acc12
or $s1,$s1,$acc13
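
The eight lwz loads tagged "prefetch Te4" touch the 256-byte final-round S-box
at a 32-byte stride, one load per cache line, so the data-dependent lbzx lookups
that follow hit the cache. Roughly, in C (the 32-byte line size is an assumption
carried over from the stride):

    #include <stdint.h>

    /* Touch every cache line of the 256-byte Te4 S-box once before the
     * final round; volatile keeps the compiler from eliding the reads. */
    static void warm_te4(const uint8_t Te4[256])
    {
        for (int i = 0; i < 256; i += 32)
            (void)*(volatile const uint8_t *)(Te4 + i);
    }
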
@@ -617,80 +517,78 @@ Lenc_loop:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
.align 4
Lppc_AES_encrypt_compact:
lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,2048
lwz $t0,0($key)
- lis $mask80,0x8080
lwz $t1,4($key)
- lis $mask1b,0x1b1b
lwz $t2,8($key)
- ori $mask80,$mask80,0x8080
lwz $t3,12($key)
- ori $mask1b,$mask1b,0x1b1b
+ addi $Tbl1,$Tbl0,2048
+ lis $mask80,0x8080
+ lis $mask1b,0x1b1b
addi $key,$key,16
+ ori $mask80,$mask80,0x8080
+ ori $mask1b,$mask1b,0x1b1b
mtctr $acc00
.align 4
Lenc_compact_loop:
xor $s0,$s0,$t0
xor $s1,$s1,$t1
- rlwinm $acc00,$s0,`32-24`,24,31
xor $s2,$s2,$t2
- rlwinm $acc01,$s1,`32-24`,24,31
xor $s3,$s3,$t3
+ rlwinm $acc00,$s0,`32-24`,24,31
+ rlwinm $acc01,$s1,`32-24`,24,31
rlwinm $acc02,$s2,`32-24`,24,31
rlwinm $acc03,$s3,`32-24`,24,31
+ lbzx $acc00,$Tbl1,$acc00
+ lbzx $acc01,$Tbl1,$acc01
rlwinm $acc04,$s1,`32-16`,24,31
rlwinm $acc05,$s2,`32-16`,24,31
+ lbzx $acc02,$Tbl1,$acc02
+ lbzx $acc03,$Tbl1,$acc03
rlwinm $acc06,$s3,`32-16`,24,31
rlwinm $acc07,$s0,`32-16`,24,31
- lbzx $acc00,$Tbl1,$acc00
+ lbzx $acc04,$Tbl1,$acc04
+ lbzx $acc05,$Tbl1,$acc05
rlwinm $acc08,$s2,`32-8`,24,31
- lbzx $acc01,$Tbl1,$acc01
rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl1,$acc02
+ lbzx $acc06,$Tbl1,$acc06
+ lbzx $acc07,$Tbl1,$acc07
rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl1,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl1,$acc04
+ lbzx $acc08,$Tbl1,$acc08
+ lbzx $acc09,$Tbl1,$acc09
rlwinm $acc12,$s3,`0`,24,31
- lbzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s0,`0`,24,31
- lbzx $acc06,$Tbl1,$acc06
+ lbzx $acc10,$Tbl1,$acc10
+ lbzx $acc11,$Tbl1,$acc11
rlwinm $acc14,$s1,`0`,24,31
- lbzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s2,`0`,24,31
- lbzx $acc08,$Tbl1,$acc08
+ lbzx $acc12,$Tbl1,$acc12
+ lbzx $acc13,$Tbl1,$acc13
rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl1,$acc09
rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl1,$acc10
+ lbzx $acc14,$Tbl1,$acc14
+ lbzx $acc15,$Tbl1,$acc15
rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl1,$acc11
rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl1,$acc12
rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl1,$acc13
rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl1,$acc14
rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl1,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
rlwimi $s2,$acc10,8,16,23
rlwimi $s3,$acc11,8,16,23
lwz $t0,0($key)
- or $s0,$s0,$acc12
lwz $t1,4($key)
+ or $s0,$s0,$acc12
or $s1,$s1,$acc13
lwz $t2,8($key)
- or $s2,$s2,$acc14
lwz $t3,12($key)
+ or $s2,$s2,$acc14
or $s3,$s3,$acc15
addi $key,$key,16
@@ -701,12 +599,12 @@ Lenc_compact_loop:
and $acc02,$s2,$mask80
and $acc03,$s3,$mask80
srwi $acc04,$acc00,7 # r1>>7
- andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
srwi $acc05,$acc01,7
- andc $acc09,$s1,$mask80
srwi $acc06,$acc02,7
- andc $acc10,$s2,$mask80
srwi $acc07,$acc03,7
+ andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
+ andc $acc09,$s1,$mask80
+ andc $acc10,$s2,$mask80
andc $acc11,$s3,$mask80
sub $acc00,$acc00,$acc04 # r1-(r1>>7)
sub $acc01,$acc01,$acc05
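
The mask80/mask1b arithmetic in this hunk is xtime, multiplication by 2 in
GF(2^8), applied to four state bytes packed in one register; the inline comments
(r1>>7, r0&0x7f7f7f7f, r1-(r1>>7)) give the recipe. The same identity in C, as a
sketch of the math rather than of this register schedule:

    #include <stdint.h>

    /* xtime on four packed bytes at once: r1 - (r1 >> 7) turns each
     * 0x80 high bit into 0x7f, and AND with 0x1b1b1b1b drops in the
     * reduction polynomial exactly where a byte overflowed. */
    static uint32_t xtime4(uint32_t r)
    {
        uint32_t r1 = r & 0x80808080u;           /* per-byte high bit   */
        uint32_t lo = (r & 0x7f7f7f7fu) << 1;    /* r0 & 0x7f7f7f7f, *2 */
        return lo ^ ((r1 - (r1 >> 7)) & 0x1b1b1b1bu);
    }
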
@@ -722,32 +620,32 @@ Lenc_compact_loop:
and $acc03,$acc03,$mask1b
xor $acc00,$acc00,$acc08 # r2
xor $acc01,$acc01,$acc09
- rotlwi $acc12,$s0,16 # ROTATE(r0,16)
xor $acc02,$acc02,$acc10
- rotlwi $acc13,$s1,16
xor $acc03,$acc03,$acc11
- rotlwi $acc14,$s2,16
- xor $s0,$s0,$acc00 # r0^r2
+ rotlwi $acc12,$s0,16 # ROTATE(r0,16)
+ rotlwi $acc13,$s1,16
+ rotlwi $acc14,$s2,16
rotlwi $acc15,$s3,16
+ xor $s0,$s0,$acc00 # r0^r2
xor $s1,$s1,$acc01
- rotrwi $s0,$s0,24 # ROTATE(r2^r0,24)
xor $s2,$s2,$acc02
- rotrwi $s1,$s1,24
xor $s3,$s3,$acc03
+ rotrwi $s0,$s0,24 # ROTATE(r2^r0,24)
+ rotrwi $s1,$s1,24
rotrwi $s2,$s2,24
- xor $s0,$s0,$acc00 # ROTATE(r2^r0,24)^r2
rotrwi $s3,$s3,24
+ xor $s0,$s0,$acc00 # ROTATE(r2^r0,24)^r2
xor $s1,$s1,$acc01
xor $s2,$s2,$acc02
xor $s3,$s3,$acc03
rotlwi $acc08,$acc12,8 # ROTATE(r0,24)
- xor $s0,$s0,$acc12 #
rotlwi $acc09,$acc13,8
- xor $s1,$s1,$acc13
rotlwi $acc10,$acc14,8
- xor $s2,$s2,$acc14
rotlwi $acc11,$acc15,8
+ xor $s0,$s0,$acc12 #
+ xor $s1,$s1,$acc13
+ xor $s2,$s2,$acc14
xor $s3,$s3,$acc15
xor $s0,$s0,$acc08 #
xor $s1,$s1,$acc09
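
With r2 = xtime(r0) in hand, the rotate/xor ladder assembles MixColumns for the
whole column; note that rotrwi by 24 equals a left-rotate by 8, which is how the
ROTATE(r2^r0,24) comments line up with the textbook formula. A C sketch of the
combination, restating xtime4 from the sketch above:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    static uint32_t xtime4(uint32_t r)           /* as sketched above */
    {
        uint32_t r1 = r & 0x80808080u;
        return ((r & 0x7f7f7f7fu) << 1) ^ ((r1 - (r1 >> 7)) & 0x1b1b1b1bu);
    }

    /* MixColumns on one big-endian packed column: r2 = 2*r0, the
     * rotation of r0^r2 places 3*r0 one byte over, and the two plain
     * copies land where the matrix wants them. */
    static uint32_t mix_column(uint32_t r0)
    {
        uint32_t r2 = xtime4(r0);
        return r2 ^ rotl32(r0 ^ r2, 8) ^ rotl32(r0, 16) ^ rotl32(r0, 24);
    }
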
@@ -762,15 +660,14 @@ Lenc_compact_done:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
.globl .AES_decrypt
.align 7
.AES_decrypt:
- $STU $sp,-$FRAME($sp)
mflr r0
+ $STU $sp,-$FRAME($sp)
+ $PUSH r0,`$FRAME-$SIZE_T*21`($sp)
$PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
$PUSH r13,`$FRAME-$SIZE_T*19`($sp)
$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
@@ -791,14 +688,7 @@ Lenc_compact_done:
$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
- $PUSH r0,`$FRAME+$LRSAVE`($sp)
-
- andi. $t0,$inp,3
- andi. $t1,$out,3
- or. $t0,$t0,$t1
- bne Ldec_unaligned
-Ldec_unaligned_ok:
lwz $s0,0($inp)
lwz $s1,4($inp)
lwz $s2,8($inp)
@@ -809,80 +699,8 @@ Ldec_unaligned_ok:
stw $s1,4($out)
stw $s2,8($out)
stw $s3,12($out)
- b Ldec_done
-
-Ldec_unaligned:
- subfic $t0,$inp,4096
- subfic $t1,$out,4096
- andi. $t0,$t0,4096-16
- beq Ldec_xpage
- andi. $t1,$t1,4096-16
- bne Ldec_unaligned_ok
-
-Ldec_xpage:
- lbz $acc00,0($inp)
- lbz $acc01,1($inp)
- lbz $acc02,2($inp)
- lbz $s0,3($inp)
- lbz $acc04,4($inp)
- lbz $acc05,5($inp)
- lbz $acc06,6($inp)
- lbz $s1,7($inp)
- lbz $acc08,8($inp)
- lbz $acc09,9($inp)
- lbz $acc10,10($inp)
- insrwi $s0,$acc00,8,0
- lbz $s2,11($inp)
- insrwi $s1,$acc04,8,0
- lbz $acc12,12($inp)
- insrwi $s0,$acc01,8,8
- lbz $acc13,13($inp)
- insrwi $s1,$acc05,8,8
- lbz $acc14,14($inp)
- insrwi $s0,$acc02,8,16
- lbz $s3,15($inp)
- insrwi $s1,$acc06,8,16
- insrwi $s2,$acc08,8,0
- insrwi $s3,$acc12,8,0
- insrwi $s2,$acc09,8,8
- insrwi $s3,$acc13,8,8
- insrwi $s2,$acc10,8,16
- insrwi $s3,$acc14,8,16
-
- bl LAES_Td
- bl Lppc_AES_decrypt_compact
- extrwi $acc00,$s0,8,0
- extrwi $acc01,$s0,8,8
- stb $acc00,0($out)
- extrwi $acc02,$s0,8,16
- stb $acc01,1($out)
- stb $acc02,2($out)
- extrwi $acc04,$s1,8,0
- stb $s0,3($out)
- extrwi $acc05,$s1,8,8
- stb $acc04,4($out)
- extrwi $acc06,$s1,8,16
- stb $acc05,5($out)
- stb $acc06,6($out)
- extrwi $acc08,$s2,8,0
- stb $s1,7($out)
- extrwi $acc09,$s2,8,8
- stb $acc08,8($out)
- extrwi $acc10,$s2,8,16
- stb $acc09,9($out)
- stb $acc10,10($out)
- extrwi $acc12,$s3,8,0
- stb $s2,11($out)
- extrwi $acc13,$s3,8,8
- stb $acc12,12($out)
- extrwi $acc14,$s3,8,16
- stb $acc13,13($out)
- stb $acc14,14($out)
- stb $s3,15($out)
-
-Ldec_done:
- $POP r0,`$FRAME+$LRSAVE`($sp)
+ $POP r0,`$FRAME-$SIZE_T*21`($sp)
$POP $toc,`$FRAME-$SIZE_T*20`($sp)
$POP r13,`$FRAME-$SIZE_T*19`($sp)
$POP r14,`$FRAME-$SIZE_T*18`($sp)
@@ -906,21 +724,18 @@ Ldec_done:
mtlr r0
addi $sp,$sp,$FRAME
blr
- .long 0
- .byte 0,12,4,1,0x80,18,3,0
- .long 0
-.align 5
+.align 4
Lppc_AES_decrypt:
lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,3
lwz $t0,0($key)
- addi $Tbl2,$Tbl0,2
lwz $t1,4($key)
- addi $Tbl3,$Tbl0,1
lwz $t2,8($key)
- addi $acc00,$acc00,-1
lwz $t3,12($key)
+ addi $Tbl1,$Tbl0,3
+ addi $Tbl2,$Tbl0,2
+ addi $Tbl3,$Tbl0,1
+ addi $acc00,$acc00,-1
addi $key,$key,16
xor $s0,$s0,$t0
xor $s1,$s1,$t1
@@ -931,47 +746,47 @@ Lppc_AES_decrypt:
Ldec_loop:
rlwinm $acc00,$s0,`32-24+3`,21,28
rlwinm $acc01,$s1,`32-24+3`,21,28
+ lwz $t0,0($key)
+ lwz $t1,4($key)
rlwinm $acc02,$s2,`32-24+3`,21,28
rlwinm $acc03,$s3,`32-24+3`,21,28
- lwz $t0,0($key)
+ lwz $t2,8($key)
+ lwz $t3,12($key)
rlwinm $acc04,$s3,`32-16+3`,21,28
- lwz $t1,4($key)
rlwinm $acc05,$s0,`32-16+3`,21,28
- lwz $t2,8($key)
+ lwzx $acc00,$Tbl0,$acc00
+ lwzx $acc01,$Tbl0,$acc01
rlwinm $acc06,$s1,`32-16+3`,21,28
- lwz $t3,12($key)
rlwinm $acc07,$s2,`32-16+3`,21,28
- lwzx $acc00,$Tbl0,$acc00
+ lwzx $acc02,$Tbl0,$acc02
+ lwzx $acc03,$Tbl0,$acc03
rlwinm $acc08,$s2,`32-8+3`,21,28
- lwzx $acc01,$Tbl0,$acc01
rlwinm $acc09,$s3,`32-8+3`,21,28
- lwzx $acc02,$Tbl0,$acc02
+ lwzx $acc04,$Tbl1,$acc04
+ lwzx $acc05,$Tbl1,$acc05
rlwinm $acc10,$s0,`32-8+3`,21,28
- lwzx $acc03,$Tbl0,$acc03
rlwinm $acc11,$s1,`32-8+3`,21,28
- lwzx $acc04,$Tbl1,$acc04
+ lwzx $acc06,$Tbl1,$acc06
+ lwzx $acc07,$Tbl1,$acc07
rlwinm $acc12,$s1,`0+3`,21,28
- lwzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s2,`0+3`,21,28
- lwzx $acc06,$Tbl1,$acc06
+ lwzx $acc08,$Tbl2,$acc08
+ lwzx $acc09,$Tbl2,$acc09
rlwinm $acc14,$s3,`0+3`,21,28
- lwzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s0,`0+3`,21,28
- lwzx $acc08,$Tbl2,$acc08
+ lwzx $acc10,$Tbl2,$acc10
+ lwzx $acc11,$Tbl2,$acc11
xor $t0,$t0,$acc00
- lwzx $acc09,$Tbl2,$acc09
xor $t1,$t1,$acc01
- lwzx $acc10,$Tbl2,$acc10
+ lwzx $acc12,$Tbl3,$acc12
+ lwzx $acc13,$Tbl3,$acc13
xor $t2,$t2,$acc02
- lwzx $acc11,$Tbl2,$acc11
xor $t3,$t3,$acc03
- lwzx $acc12,$Tbl3,$acc12
+ lwzx $acc14,$Tbl3,$acc14
+ lwzx $acc15,$Tbl3,$acc15
xor $t0,$t0,$acc04
- lwzx $acc13,$Tbl3,$acc13
xor $t1,$t1,$acc05
- lwzx $acc14,$Tbl3,$acc14
xor $t2,$t2,$acc06
- lwzx $acc15,$Tbl3,$acc15
xor $t3,$t3,$acc07
xor $t0,$t0,$acc08
xor $t1,$t1,$acc09
@@ -986,57 +801,57 @@ Ldec_loop:
addi $Tbl2,$Tbl0,2048
nop
- lwz $t0,0($key)
+ lwz $acc08,`2048+0`($Tbl0) ! prefetch Td4
+ lwz $acc09,`2048+32`($Tbl0)
+ lwz $acc10,`2048+64`($Tbl0)
+ lwz $acc11,`2048+96`($Tbl0)
+ lwz $acc08,`2048+128`($Tbl0)
+ lwz $acc09,`2048+160`($Tbl0)
+ lwz $acc10,`2048+192`($Tbl0)
+ lwz $acc11,`2048+224`($Tbl0)
rlwinm $acc00,$s0,`32-24`,24,31
- lwz $t1,4($key)
rlwinm $acc01,$s1,`32-24`,24,31
- lwz $t2,8($key)
+ lwz $t0,0($key)
+ lwz $t1,4($key)
rlwinm $acc02,$s2,`32-24`,24,31
- lwz $t3,12($key)
rlwinm $acc03,$s3,`32-24`,24,31
- lwz $acc08,`2048+0`($Tbl0) ! prefetch Td4
+ lwz $t2,8($key)
+ lwz $t3,12($key)
rlwinm $acc04,$s3,`32-16`,24,31
- lwz $acc09,`2048+32`($Tbl0)
rlwinm $acc05,$s0,`32-16`,24,31
- lwz $acc10,`2048+64`($Tbl0)
lbzx $acc00,$Tbl2,$acc00
- lwz $acc11,`2048+96`($Tbl0)
lbzx $acc01,$Tbl2,$acc01
- lwz $acc12,`2048+128`($Tbl0)
rlwinm $acc06,$s1,`32-16`,24,31
- lwz $acc13,`2048+160`($Tbl0)
rlwinm $acc07,$s2,`32-16`,24,31
- lwz $acc14,`2048+192`($Tbl0)
+ lbzx $acc02,$Tbl2,$acc02
+ lbzx $acc03,$Tbl2,$acc03
rlwinm $acc08,$s2,`32-8`,24,31
- lwz $acc15,`2048+224`($Tbl0)
rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl2,$acc02
+ lbzx $acc04,$Tbl2,$acc04
+ lbzx $acc05,$Tbl2,$acc05
rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl2,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl2,$acc04
+ lbzx $acc06,$Tbl2,$acc06
+ lbzx $acc07,$Tbl2,$acc07
rlwinm $acc12,$s1,`0`,24,31
- lbzx $acc05,$Tbl2,$acc05
rlwinm $acc13,$s2,`0`,24,31
- lbzx $acc06,$Tbl2,$acc06
+ lbzx $acc08,$Tbl2,$acc08
+ lbzx $acc09,$Tbl2,$acc09
rlwinm $acc14,$s3,`0`,24,31
- lbzx $acc07,$Tbl2,$acc07
rlwinm $acc15,$s0,`0`,24,31
- lbzx $acc08,$Tbl2,$acc08
+ lbzx $acc10,$Tbl2,$acc10
+ lbzx $acc11,$Tbl2,$acc11
rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl2,$acc09
rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl2,$acc10
+ lbzx $acc12,$Tbl2,$acc12
+ lbzx $acc13,$Tbl2,$acc13
rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl2,$acc11
rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl2,$acc12
+ lbzx $acc14,$Tbl2,$acc14
+ lbzx $acc15,$Tbl2,$acc15
rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl2,$acc13
rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl2,$acc14
rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl2,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
@@ -1051,22 +866,20 @@ Ldec_loop:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
.align 4
Lppc_AES_decrypt_compact:
lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,2048
lwz $t0,0($key)
- lis $mask80,0x8080
lwz $t1,4($key)
- lis $mask1b,0x1b1b
lwz $t2,8($key)
- ori $mask80,$mask80,0x8080
lwz $t3,12($key)
- ori $mask1b,$mask1b,0x1b1b
+ addi $Tbl1,$Tbl0,2048
+ lis $mask80,0x8080
+ lis $mask1b,0x1b1b
addi $key,$key,16
+ ori $mask80,$mask80,0x8080
+ ori $mask1b,$mask1b,0x1b1b
___
$code.=<<___ if ($SIZE_T==8);
insrdi $mask80,$mask80,32,0
@@ -1078,59 +891,59 @@ $code.=<<___;
Ldec_compact_loop:
xor $s0,$s0,$t0
xor $s1,$s1,$t1
- rlwinm $acc00,$s0,`32-24`,24,31
xor $s2,$s2,$t2
- rlwinm $acc01,$s1,`32-24`,24,31
xor $s3,$s3,$t3
+ rlwinm $acc00,$s0,`32-24`,24,31
+ rlwinm $acc01,$s1,`32-24`,24,31
rlwinm $acc02,$s2,`32-24`,24,31
rlwinm $acc03,$s3,`32-24`,24,31
+ lbzx $acc00,$Tbl1,$acc00
+ lbzx $acc01,$Tbl1,$acc01
rlwinm $acc04,$s3,`32-16`,24,31
rlwinm $acc05,$s0,`32-16`,24,31
+ lbzx $acc02,$Tbl1,$acc02
+ lbzx $acc03,$Tbl1,$acc03
rlwinm $acc06,$s1,`32-16`,24,31
rlwinm $acc07,$s2,`32-16`,24,31
- lbzx $acc00,$Tbl1,$acc00
+ lbzx $acc04,$Tbl1,$acc04
+ lbzx $acc05,$Tbl1,$acc05
rlwinm $acc08,$s2,`32-8`,24,31
- lbzx $acc01,$Tbl1,$acc01
rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl1,$acc02
+ lbzx $acc06,$Tbl1,$acc06
+ lbzx $acc07,$Tbl1,$acc07
rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl1,$acc03
rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl1,$acc04
+ lbzx $acc08,$Tbl1,$acc08
+ lbzx $acc09,$Tbl1,$acc09
rlwinm $acc12,$s1,`0`,24,31
- lbzx $acc05,$Tbl1,$acc05
rlwinm $acc13,$s2,`0`,24,31
- lbzx $acc06,$Tbl1,$acc06
+ lbzx $acc10,$Tbl1,$acc10
+ lbzx $acc11,$Tbl1,$acc11
rlwinm $acc14,$s3,`0`,24,31
- lbzx $acc07,$Tbl1,$acc07
rlwinm $acc15,$s0,`0`,24,31
- lbzx $acc08,$Tbl1,$acc08
+ lbzx $acc12,$Tbl1,$acc12
+ lbzx $acc13,$Tbl1,$acc13
rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl1,$acc09
rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl1,$acc10
+ lbzx $acc14,$Tbl1,$acc14
+ lbzx $acc15,$Tbl1,$acc15
rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl1,$acc11
rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl1,$acc12
rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl1,$acc13
rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl1,$acc14
rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl1,$acc15
rlwimi $s3,$acc07,16,8,15
rlwimi $s0,$acc08,8,16,23
rlwimi $s1,$acc09,8,16,23
rlwimi $s2,$acc10,8,16,23
rlwimi $s3,$acc11,8,16,23
lwz $t0,0($key)
- or $s0,$s0,$acc12
lwz $t1,4($key)
+ or $s0,$s0,$acc12
or $s1,$s1,$acc13
lwz $t2,8($key)
- or $s2,$s2,$acc14
lwz $t3,12($key)
+ or $s2,$s2,$acc14
or $s3,$s3,$acc15
addi $key,$key,16
@@ -1204,12 +1017,12 @@ $code.=<<___ if ($SIZE_T==4);
and $acc02,$s2,$mask80
and $acc03,$s3,$mask80
srwi $acc04,$acc00,7 # r1>>7
- andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
srwi $acc05,$acc01,7
- andc $acc09,$s1,$mask80
srwi $acc06,$acc02,7
- andc $acc10,$s2,$mask80
srwi $acc07,$acc03,7
+ andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
+ andc $acc09,$s1,$mask80
+ andc $acc10,$s2,$mask80
andc $acc11,$s3,$mask80
sub $acc00,$acc00,$acc04 # r1-(r1>>7)
sub $acc01,$acc01,$acc05
@@ -1233,12 +1046,12 @@ $code.=<<___ if ($SIZE_T==4);
and $acc06,$acc02,$mask80
and $acc07,$acc03,$mask80
srwi $acc08,$acc04,7 # r1>>7
- andc $acc12,$acc00,$mask80 # r2&0x7f7f7f7f
srwi $acc09,$acc05,7
- andc $acc13,$acc01,$mask80
srwi $acc10,$acc06,7
- andc $acc14,$acc02,$mask80
srwi $acc11,$acc07,7
+ andc $acc12,$acc00,$mask80 # r2&0x7f7f7f7f
+ andc $acc13,$acc01,$mask80
+ andc $acc14,$acc02,$mask80
andc $acc15,$acc03,$mask80
sub $acc04,$acc04,$acc08 # r1-(r1>>7)
sub $acc05,$acc05,$acc09
@@ -1259,13 +1072,13 @@ $code.=<<___ if ($SIZE_T==4);
and $acc08,$acc04,$mask80 # r1=r4&0x80808080
and $acc09,$acc05,$mask80
- srwi $acc12,$acc08,7 # r1>>7
and $acc10,$acc06,$mask80
- srwi $acc13,$acc09,7
and $acc11,$acc07,$mask80
+ srwi $acc12,$acc08,7 # r1>>7
+ srwi $acc13,$acc09,7
srwi $acc14,$acc10,7
- sub $acc08,$acc08,$acc12 # r1-(r1>>7)
srwi $acc15,$acc11,7
+ sub $acc08,$acc08,$acc12 # r1-(r1>>7)
sub $acc09,$acc09,$acc13
sub $acc10,$acc10,$acc14
sub $acc11,$acc11,$acc15
@@ -1298,10 +1111,10 @@ ___
$code.=<<___;
rotrwi $s0,$s0,8 # = ROTATE(r0,8)
rotrwi $s1,$s1,8
- xor $s0,$s0,$acc00 # ^= r2^r0
rotrwi $s2,$s2,8
- xor $s1,$s1,$acc01
rotrwi $s3,$s3,8
+ xor $s0,$s0,$acc00 # ^= r2^r0
+ xor $s1,$s1,$acc01
xor $s2,$s2,$acc02
xor $s3,$s3,$acc03
xor $acc00,$acc00,$acc08
@@ -1309,32 +1122,32 @@ $code.=<<___;
xor $acc02,$acc02,$acc10
xor $acc03,$acc03,$acc11
xor $s0,$s0,$acc04 # ^= r4^r0
- rotrwi $acc00,$acc00,24
xor $s1,$s1,$acc05
- rotrwi $acc01,$acc01,24
xor $s2,$s2,$acc06
- rotrwi $acc02,$acc02,24
xor $s3,$s3,$acc07
+ rotrwi $acc00,$acc00,24
+ rotrwi $acc01,$acc01,24
+ rotrwi $acc02,$acc02,24
rotrwi $acc03,$acc03,24
xor $acc04,$acc04,$acc08
xor $acc05,$acc05,$acc09
xor $acc06,$acc06,$acc10
xor $acc07,$acc07,$acc11
xor $s0,$s0,$acc08 # ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
- rotrwi $acc04,$acc04,16
xor $s1,$s1,$acc09
- rotrwi $acc05,$acc05,16
xor $s2,$s2,$acc10
- rotrwi $acc06,$acc06,16
xor $s3,$s3,$acc11
+ rotrwi $acc04,$acc04,16
+ rotrwi $acc05,$acc05,16
+ rotrwi $acc06,$acc06,16
rotrwi $acc07,$acc07,16
xor $s0,$s0,$acc00 # ^= ROTATE(r8^r2^r0,24)
- rotrwi $acc08,$acc08,8
xor $s1,$s1,$acc01
- rotrwi $acc09,$acc09,8
xor $s2,$s2,$acc02
- rotrwi $acc10,$acc10,8
xor $s3,$s3,$acc03
+ rotrwi $acc08,$acc08,8
+ rotrwi $acc09,$acc09,8
+ rotrwi $acc10,$acc10,8
rotrwi $acc11,$acc11,8
xor $s0,$s0,$acc04 # ^= ROTATE(r8^r4^r0,16)
xor $s1,$s1,$acc05
@@ -1353,9 +1166,7 @@ Ldec_compact_done:
xor $s2,$s2,$t2
xor $s3,$s3,$t3
blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
+.long 0
.asciz "AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
.align 7
___
diff --git a/lib/libcrypto/bn/asm/ppc64-mont.pl b/lib/libcrypto/bn/asm/ppc64-mont.pl
index a14e769ad05..3449b35855d 100644
--- a/lib/libcrypto/bn/asm/ppc64-mont.pl
+++ b/lib/libcrypto/bn/asm/ppc64-mont.pl
@@ -45,40 +45,23 @@
# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
# in absolute terms, but it's apparently the way Power 6 is...
-# December 2009
-
-# Adapted for 32-bit build this module delivers 25-120%, yes, more
-# than *twice* for longer keys, performance improvement over 32-bit
-# ppc-mont.pl on 1.8GHz PPC970. However! This implementation utilizes
-# even 64-bit integer operations and the trouble is that most PPC
-# operating systems don't preserve upper halves of general purpose
-# registers upon 32-bit signal delivery. They do preserve them upon
-# context switch, but not signalling:-( This means that asynchronous
-# signals have to be blocked upon entry to this subroutine. Signal
-# masking (and of course complementary unmasking) has quite an impact
-# on performance, naturally larger for shorter keys. It's so severe
-# that 512-bit key performance can be as low as 1/3 of expected one.
-# This is why this routine can be engaged for longer key operations
-# only on these OSes, see crypto/ppccap.c for further details. MacOS X
-# is an exception from this and doesn't require signal masking, and
-# that's where above improvement coefficients were collected. For
-# others alternative would be to break dependence on upper halves of
-# GPRs by sticking to 32-bit integer operations...
-
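
The comment block deleted above explains why the fpu64 32-bit path is being
dropped in this import. For orientation, the contract of bn_mul_mont is an
ordinary Montgomery product; below is a word-serial CIOS reference in C that
models only the result (the assembly evaluates the partial products through the
FPU instead, and every name in the sketch is illustrative):

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* Word-serial Montgomery multiplication (CIOS form):
     *   rp = ap * bp * R^-1 mod np,  R = 2^(64*num),
     * with n0 = -np[0]^-1 mod 2^64 and num <= 8 (even), matching the
     * entry checks below.  Reference semantics only. */
    static void mont_mul(uint64_t rp[], const uint64_t ap[],
                         const uint64_t bp[], const uint64_t np[],
                         uint64_t n0, int num)
    {
        uint64_t t[10] = {0};                 /* num + 2 accumulator words */

        for (int i = 0; i < num; i++) {
            u128 c = 0, s;
            for (int j = 0; j < num; j++) {   /* t += ap * bp[i]           */
                c = (u128)ap[j] * bp[i] + t[j] + (uint64_t)(c >> 64);
                t[j] = (uint64_t)c;
            }
            s = (u128)t[num] + (uint64_t)(c >> 64);
            t[num] = (uint64_t)s;
            t[num + 1] = (uint64_t)(s >> 64);

            uint64_t m = t[0] * n0;           /* zeroes the low word       */
            c = (u128)m * np[0] + t[0];
            for (int j = 1; j < num; j++) {   /* t = (t + m*np) / 2^64     */
                c = (u128)m * np[j] + t[j] + (uint64_t)(c >> 64);
                t[j - 1] = (uint64_t)c;
            }
            s = (u128)t[num] + (uint64_t)(c >> 64);
            t[num - 1] = (uint64_t)s;
            t[num] = t[num + 1] + (uint64_t)(s >> 64);
        }

        uint64_t borrow = 0, sub[8];          /* conditional final subtract */
        for (int j = 0; j < num; j++) {
            u128 d = (u128)t[j] - np[j] - borrow;
            sub[j] = (uint64_t)d;
            borrow = (uint64_t)(d >> 64) & 1;
        }
        for (int j = 0; j < num; j++)
            rp[j] = (t[num] || !borrow) ? sub[j] : t[j];
    }
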
$flavour = shift;
if ($flavour =~ /32/) {
$SIZE_T=4;
$RZONE= 224;
- $fname= "bn_mul_mont_fpu64";
+ $FRAME= $SIZE_T*12+8*12;
+ $fname= "bn_mul_mont_ppc64";
$STUX= "stwux"; # store indexed and update
$PUSH= "stw";
$POP= "lwz";
+ die "not implemented yet";
} elsif ($flavour =~ /64/) {
$SIZE_T=8;
$RZONE= 288;
- $fname= "bn_mul_mont_fpu64";
+ $FRAME= $SIZE_T*12+8*12;
+ $fname= "bn_mul_mont";
# same as above, but 64-bit mnemonics...
$STUX= "stdux"; # store indexed and update
@@ -93,7 +76,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-$FRAME=64; # padded frame header
+$FRAME=($FRAME+63)&~63;
$TRANSFER=16*8;
$carry="r0";
@@ -110,16 +93,16 @@ $tp="r10";
$j="r11";
$i="r12";
# non-volatile registers
-$nap_d="r22"; # interleaved ap and np in double format
-$a0="r23"; # ap[0]
-$t0="r24"; # temporary registers
-$t1="r25";
-$t2="r26";
-$t3="r27";
-$t4="r28";
-$t5="r29";
-$t6="r30";
-$t7="r31";
+$nap_d="r14"; # interleaved ap and np in double format
+$a0="r15"; # ap[0]
+$t0="r16"; # temporary registers
+$t1="r17";
+$t2="r18";
+$t3="r19";
+$t4="r20";
+$t5="r21";
+$t6="r22";
+$t7="r23";
# PPC offers enough register bank capacity to unroll inner loops twice
#
@@ -149,17 +132,28 @@ $ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
$dota="f8"; $dotb="f9";
$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
-$N0="f20"; $N1="f21"; $N2="f22"; $N3="f23";
-$T0a="f24"; $T0b="f25";
-$T1a="f26"; $T1b="f27";
-$T2a="f28"; $T2b="f29";
-$T3a="f30"; $T3b="f31";
+$N0="f14"; $N1="f15"; $N2="f16"; $N3="f17";
+$T0a="f18"; $T0b="f19";
+$T1a="f20"; $T1b="f21";
+$T2a="f22"; $T2b="f23";
+$T3a="f24"; $T3b="f25";
# sp----------->+-------------------------------+
# | saved sp |
# +-------------------------------+
+# | |
+# +-------------------------------+
+# | 10 saved gpr, r14-r23 |
+# . .
+# . .
+# +12*size_t +-------------------------------+
+# | 12 saved fpr, f14-f25 |
# . .
-# +64 +-------------------------------+
+# . .
+# +12*8 +-------------------------------+
+# | padding to 64 byte boundary |
+# . .
+# +X +-------------------------------+
# | 16 gpr<->fpr transfer zone |
# . .
# . .
@@ -179,16 +173,6 @@ $T3a="f30"; $T3b="f31";
# . .
# . .
# +-------------------------------+
-# . .
-# -12*size_t +-------------------------------+
-# | 10 saved gpr, r22-r31 |
-# . .
-# . .
-# -12*8 +-------------------------------+
-# | 12 saved fpr, f20-f31 |
-# . .
-# . .
-# +-------------------------------+
$code=<<___;
.machine "any"
@@ -197,14 +181,14 @@ $code=<<___;
.globl .$fname
.align 5
.$fname:
- cmpwi $num,`3*8/$SIZE_T`
+ cmpwi $num,4
mr $rp,r3 ; $rp is reassigned
li r3,0 ; possible "not handled" return code
bltlr-
- andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even"
+ andi. r0,$num,1 ; $num has to be even
bnelr-
- slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG)
+ slwi $num,$num,3 ; num*=8
li $i,-4096
slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
add $tp,$tp,$num ; place for tp[num+1]
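
The new prologue rejects num below 4 or odd, scales num to bytes, and sizes the
scratch region per the inline comments (4*num bytes for {an}p_{lh}[num] plus
room for tp[num+1]) before the page-masked alloca in the next hunk. A C model
of that sizing, names illustrative:

    #include <stdint.h>

    /* Fail for num < 4 or odd num, convert num to bytes, reserve the
     * scratch area, and mask the new stack pointer down to a 4096-byte
     * boundary so it spans as few pages as possible ("minimize TLB
     * usage"). */
    static intptr_t alloca_sp(intptr_t sp, long num)
    {
        if (num < 4 || (num & 1))
            return 0;                    /* li r3,0: "not handled"   */
        num <<= 3;                       /* slwi $num,$num,3: num*=8 */
        return (sp - (4 * num + num)) & ~(intptr_t)4095;
    }
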
@@ -212,50 +196,35 @@ $code=<<___;
subf $tp,$tp,$sp ; $sp-$tp
and $tp,$tp,$i ; minimize TLB usage
subf $tp,$sp,$tp ; $tp-$sp
- mr $i,$sp
$STUX $sp,$sp,$tp ; alloca
- $PUSH r22,`-12*8-10*$SIZE_T`($i)
- $PUSH r23,`-12*8-9*$SIZE_T`($i)
- $PUSH r24,`-12*8-8*$SIZE_T`($i)
- $PUSH r25,`-12*8-7*$SIZE_T`($i)
- $PUSH r26,`-12*8-6*$SIZE_T`($i)
- $PUSH r27,`-12*8-5*$SIZE_T`($i)
- $PUSH r28,`-12*8-4*$SIZE_T`($i)
- $PUSH r29,`-12*8-3*$SIZE_T`($i)
- $PUSH r30,`-12*8-2*$SIZE_T`($i)
- $PUSH r31,`-12*8-1*$SIZE_T`($i)
- stfd f20,`-12*8`($i)
- stfd f21,`-11*8`($i)
- stfd f22,`-10*8`($i)
- stfd f23,`-9*8`($i)
- stfd f24,`-8*8`($i)
- stfd f25,`-7*8`($i)
- stfd f26,`-6*8`($i)
- stfd f27,`-5*8`($i)
- stfd f28,`-4*8`($i)
- stfd f29,`-3*8`($i)
- stfd f30,`-2*8`($i)
- stfd f31,`-1*8`($i)
-___
-$code.=<<___ if ($SIZE_T==8);
+ $PUSH r14,`2*$SIZE_T`($sp)
+ $PUSH r15,`3*$SIZE_T`($sp)
+ $PUSH r16,`4*$SIZE_T`($sp)
+ $PUSH r17,`5*$SIZE_T`($sp)
+ $PUSH r18,`6*$SIZE_T`($sp)
+ $PUSH r19,`7*$SIZE_T`($sp)
+ $PUSH r20,`8*$SIZE_T`($sp)
+ $PUSH r21,`9*$SIZE_T`($sp)
+ $PUSH r22,`10*$SIZE_T`($sp)
+ $PUSH r23,`11*$SIZE_T`($sp)
+ stfd f14,`12*$SIZE_T+0`($sp)
+ stfd f15,`12*$SIZE_T+8`($sp)
+ stfd f16,`12*$SIZE_T+16`($sp)
+ stfd f17,`12*$SIZE_T+24`($sp)
+ stfd f18,`12*$SIZE_T+32`($sp)
+ stfd f19,`12*$SIZE_T+40`($sp)
+ stfd f20,`12*$SIZE_T+48`($sp)
+ stfd f21,`12*$SIZE_T+56`($sp)
+ stfd f22,`12*$SIZE_T+64`($sp)
+ stfd f23,`12*$SIZE_T+72`($sp)
+ stfd f24,`12*$SIZE_T+80`($sp)
+ stfd f25,`12*$SIZE_T+88`($sp)
+
ld $a0,0($ap) ; pull ap[0] value
ld $n0,0($n0) ; pull n0[0] value
ld $t3,0($bp) ; bp[0]
-___
-$code.=<<___ if ($SIZE_T==4);
- mr $t1,$n0
- lwz $a0,0($ap) ; pull ap[0,1] value
- lwz $t0,4($ap)
- lwz $n0,0($t1) ; pull n0[0,1] value
- lwz $t1,4($t1)
- lwz $t3,0($bp) ; bp[0,1]
- lwz $t2,4($bp)
- insrdi $a0,$t0,32,0
- insrdi $n0,$t1,32,0
- insrdi $t3,$t2,32,0
-___
-$code.=<<___;
+
addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
li $i,-64
add $nap_d,$tp,$num
@@ -289,8 +258,6 @@ $code.=<<___;
std $t5,`$FRAME+40`($sp)
std $t6,`$FRAME+48`($sp)
std $t7,`$FRAME+56`($sp)
-___
-$code.=<<___ if ($SIZE_T==8);
lwz $t0,4($ap) ; load a[j] as 32-bit word pair
lwz $t1,0($ap)
lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
@@ -299,18 +266,6 @@ $code.=<<___ if ($SIZE_T==8);
lwz $t5,0($np)
lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
lwz $t7,8($np)
-___
-$code.=<<___ if ($SIZE_T==4);
- lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
- lwz $t1,4($ap)
- lwz $t2,8($ap)
- lwz $t3,12($ap)
- lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
- lwz $t5,4($np)
- lwz $t6,8($np)
- lwz $t7,12($np)
-___
-$code.=<<___;
lfd $ba,`$FRAME+0`($sp)
lfd $bb,`$FRAME+8`($sp)
lfd $bc,`$FRAME+16`($sp)
@@ -419,8 +374,6 @@ $code.=<<___;
.align 5
L1st:
-___
-$code.=<<___ if ($SIZE_T==8);
lwz $t0,4($ap) ; load a[j] as 32-bit word pair
lwz $t1,0($ap)
lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
@@ -429,18 +382,6 @@ $code.=<<___ if ($SIZE_T==8);
lwz $t5,0($np)
lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
lwz $t7,8($np)
-___
-$code.=<<___ if ($SIZE_T==4);
- lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
- lwz $t1,4($ap)
- lwz $t2,8($ap)
- lwz $t3,12($ap)
- lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
- lwz $t5,4($np)
- lwz $t6,8($np)
- lwz $t7,12($np)
-___
-$code.=<<___;
std $t0,`$FRAME+64`($sp)
std $t1,`$FRAME+72`($sp)
std $t2,`$FRAME+80`($sp)
@@ -618,17 +559,7 @@ $code.=<<___;
li $i,8 ; i=1
.align 5
Louter:
-___
-$code.=<<___ if ($SIZE_T==8);
ldx $t3,$bp,$i ; bp[i]
-___
-$code.=<<___ if ($SIZE_T==4);
- add $t0,$bp,$i
- lwz $t3,0($t0) ; bp[i,i+1]
- lwz $t0,4($t0)
- insrdi $t3,$t0,32,0
-___
-$code.=<<___;
ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
mulld $t7,$a0,$t3 ; ap[0]*bp[i]
@@ -830,13 +761,6 @@ Linner:
stfd $T0b,`$FRAME+8`($sp)
add $t7,$t7,$carry
addc $t3,$t0,$t1
-___
-$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
- extrdi $t0,$t0,32,0
- extrdi $t1,$t1,32,0
- adde $t0,$t0,$t1
-___
-$code.=<<___;
stfd $T1a,`$FRAME+16`($sp)
stfd $T1b,`$FRAME+24`($sp)
insrdi $t4,$t7,16,0 ; 64..127 bits
@@ -844,13 +768,6 @@ $code.=<<___;
stfd $T2a,`$FRAME+32`($sp)
stfd $T2b,`$FRAME+40`($sp)
adde $t5,$t4,$t2
-___
-$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
- extrdi $t4,$t4,32,0
- extrdi $t2,$t2,32,0
- adde $t4,$t4,$t2
-___
-$code.=<<___;
stfd $T3a,`$FRAME+48`($sp)
stfd $T3b,`$FRAME+56`($sp)
addze $carry,$carry
@@ -899,21 +816,7 @@ $code.=<<___;
ld $t7,`$FRAME+72`($sp)
addc $t3,$t0,$t1
-___
-$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
- extrdi $t0,$t0,32,0
- extrdi $t1,$t1,32,0
- adde $t0,$t0,$t1
-___
-$code.=<<___;
adde $t5,$t4,$t2
-___
-$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
- extrdi $t4,$t4,32,0
- extrdi $t2,$t2,32,0
- adde $t4,$t4,$t2
-___
-$code.=<<___;
addze $carry,$carry
std $t3,-16($tp) ; tp[j-1]
@@ -932,9 +835,7 @@ $code.=<<___;
subf $nap_d,$t7,$nap_d ; rewind pointer
cmpw $i,$num
blt- Louter
-___
-$code.=<<___ if ($SIZE_T==8);
subf $np,$num,$np ; rewind np
addi $j,$j,1 ; restore counter
subfc $i,$i,$i ; j=0 and "clear" XER[CA]
@@ -982,105 +883,34 @@ Lcopy: ; copy or in-place refresh
stdx $i,$t4,$i
addi $i,$i,16
bdnz- Lcopy
-___
-$code.=<<___ if ($SIZE_T==4);
- subf $np,$num,$np ; rewind np
- addi $j,$j,1 ; restore counter
- subfc $i,$i,$i ; j=0 and "clear" XER[CA]
- addi $tp,$sp,`$FRAME+$TRANSFER`
- addi $np,$np,-4
- addi $rp,$rp,-4
- addi $ap,$sp,`$FRAME+$TRANSFER+4`
- mtctr $j
-
-.align 4
-Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order
- ldu $t2,16($tp)
- lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order
- lwz $t5,8($np)
- lwz $t6,12($np)
- lwzu $t7,16($np)
- extrdi $t1,$t0,32,0
- extrdi $t3,$t2,32,0
- subfe $t4,$t4,$t0 ; tp[j]-np[j]
- stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order
- subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1]
- stw $t1,8($ap)
- subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2]
- stw $t2,12($ap)
- subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3]
- stwu $t3,16($ap)
- stw $t4,4($rp)
- stw $t5,8($rp)
- stw $t6,12($rp)
- stwu $t7,16($rp)
- bdnz- Lsub
-
- li $i,0
- subfe $ovf,$i,$ovf ; handle upmost overflow bit
- addi $tp,$sp,`$FRAME+$TRANSFER+4`
- subf $rp,$num,$rp ; rewind rp
- and $ap,$tp,$ovf
- andc $np,$rp,$ovf
- or $ap,$ap,$np ; ap=borrow?tp:rp
- addi $tp,$sp,`$FRAME+$TRANSFER`
- mtctr $j
-
-.align 4
-Lcopy: ; copy or in-place refresh
- lwz $t0,4($ap)
- lwz $t1,8($ap)
- lwz $t2,12($ap)
- lwzu $t3,16($ap)
- std $i,8($nap_d) ; zap nap_d
- std $i,16($nap_d)
- std $i,24($nap_d)
- std $i,32($nap_d)
- std $i,40($nap_d)
- std $i,48($nap_d)
- std $i,56($nap_d)
- stdu $i,64($nap_d)
- stw $t0,4($rp)
- stw $t1,8($rp)
- stw $t2,12($rp)
- stwu $t3,16($rp)
- std $i,8($tp) ; zap tp at once
- stdu $i,16($tp)
- bdnz- Lcopy
-___
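
The Lsub tail in the removed 32-bit path, like its 64-bit counterpart, ends with
a branch-free pointer select: and/andc/or against $ovf, which the preceding
subfe leaves as all-ones or zero. In C:

    #include <stdint.h>

    /* mask is 0 or ~0 (the sign-extended borrow in $ovf), so this is
     * the branch-free "ap = borrow ? tp : rp" from the Lsub tail. */
    static uintptr_t select_ptr(uintptr_t tp, uintptr_t rp, uintptr_t mask)
    {
        return (tp & mask) | (rp & ~mask);
    }
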
-$code.=<<___;
- $POP $i,0($sp)
+ $POP r14,`2*$SIZE_T`($sp)
+ $POP r15,`3*$SIZE_T`($sp)
+ $POP r16,`4*$SIZE_T`($sp)
+ $POP r17,`5*$SIZE_T`($sp)
+ $POP r18,`6*$SIZE_T`($sp)
+ $POP r19,`7*$SIZE_T`($sp)
+ $POP r20,`8*$SIZE_T`($sp)
+ $POP r21,`9*$SIZE_T`($sp)
+ $POP r22,`10*$SIZE_T`($sp)
+ $POP r23,`11*$SIZE_T`($sp)
+ lfd f14,`12*$SIZE_T+0`($sp)
+ lfd f15,`12*$SIZE_T+8`($sp)
+ lfd f16,`12*$SIZE_T+16`($sp)
+ lfd f17,`12*$SIZE_T+24`($sp)
+ lfd f18,`12*$SIZE_T+32`($sp)
+ lfd f19,`12*$SIZE_T+40`($sp)
+ lfd f20,`12*$SIZE_T+48`($sp)
+ lfd f21,`12*$SIZE_T+56`($sp)
+ lfd f22,`12*$SIZE_T+64`($sp)
+ lfd f23,`12*$SIZE_T+72`($sp)
+ lfd f24,`12*$SIZE_T+80`($sp)
+ lfd f25,`12*$SIZE_T+88`($sp)
+ $POP $sp,0($sp)
li r3,1 ; signal "handled"
- $POP r22,`-12*8-10*$SIZE_T`($i)
- $POP r23,`-12*8-9*$SIZE_T`($i)
- $POP r24,`-12*8-8*$SIZE_T`($i)
- $POP r25,`-12*8-7*$SIZE_T`($i)
- $POP r26,`-12*8-6*$SIZE_T`($i)
- $POP r27,`-12*8-5*$SIZE_T`($i)
- $POP r28,`-12*8-4*$SIZE_T`($i)
- $POP r29,`-12*8-3*$SIZE_T`($i)
- $POP r30,`-12*8-2*$SIZE_T`($i)
- $POP r31,`-12*8-1*$SIZE_T`($i)
- lfd f20,`-12*8`($i)
- lfd f21,`-11*8`($i)
- lfd f22,`-10*8`($i)
- lfd f23,`-9*8`($i)
- lfd f24,`-8*8`($i)
- lfd f25,`-7*8`($i)
- lfd f26,`-6*8`($i)
- lfd f27,`-5*8`($i)
- lfd f28,`-4*8`($i)
- lfd f29,`-3*8`($i)
- lfd f30,`-2*8`($i)
- lfd f31,`-1*8`($i)
- mr $sp,$i
blr
.long 0
- .byte 0,12,4,0,0x8c,10,6,0
- .long 0
-
-.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>"
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;