author    Damien Miller <djm@cvs.openbsd.org>    2009-04-06 06:30:02 +0000
committer Damien Miller <djm@cvs.openbsd.org>    2009-04-06 06:30:02 +0000
commit    67407fe64f0e8293726820a65dd7990e2ae09204 (patch)
tree      c90da488273e9fa6c838b42842d978b524d0ee11 /lib/libssl
parent    0ebb179f8108d6c0f5211846e5bb79b796978bb3 (diff)
import of OpenSSL 0.9.8k
Diffstat (limited to 'lib/libssl')
-rw-r--r--  lib/libssl/src/crypto/aes/asm/aes-armv4.pl    528
-rw-r--r--  lib/libssl/src/crypto/aes/asm/aes-s390x.pl   1058
2 files changed, 289 insertions, 1297 deletions
diff --git a/lib/libssl/src/crypto/aes/asm/aes-armv4.pl b/lib/libssl/src/crypto/aes/asm/aes-armv4.pl
index 86b86c4a0fb..15742c1ec54 100644
--- a/lib/libssl/src/crypto/aes/asm/aes-armv4.pl
+++ b/lib/libssl/src/crypto/aes/asm/aes-armv4.pl
@@ -16,25 +16,12 @@
# allows to merge logical or arithmetic operation with shift or rotate
# in one instruction and emit combined result every cycle. The module
# is endian-neutral. The performance is ~42 cycles/byte for 128-bit
-# key [on single-issue Xscale PXA250 core].
+# key.
# May 2007.
#
# AES_set_[en|de]crypt_key is added.
-# July 2010.
-#
-# Rescheduling for dual-issue pipeline resulted in 12% improvement on
-# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
-
-# February 2011.
-#
-# Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~21.5 cycles per byte.
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
$s0="r0";
$s1="r1";
$s2="r2";
@@ -51,7 +38,6 @@ $key="r11";
$rounds="r12";
$code=<<___;
-#include "arm_arch.h"
.text
.code 32
@@ -172,243 +158,219 @@ AES_encrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_encrypt-AES_Te @ Te
-#if __ARM_ARCH__<7
+
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
orr $s3,$s3,$t2,lsl#16
orr $s3,$s3,$t3,lsl#24
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
-#endif
+
bl _armv4_AES_encrypt
ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
- str $s0,[$rounds,#0]
- str $s1,[$rounds,#4]
- str $s2,[$rounds,#8]
- str $s3,[$rounds,#12]
-#else
mov $t1,$s0,lsr#24 @ write output in endian-neutral
mov $t2,$s0,lsr#16 @ manner...
mov $t3,$s0,lsr#8
strb $t1,[$rounds,#0]
strb $t2,[$rounds,#1]
- mov $t1,$s1,lsr#24
strb $t3,[$rounds,#2]
- mov $t2,$s1,lsr#16
strb $s0,[$rounds,#3]
+ mov $t1,$s1,lsr#24
+ mov $t2,$s1,lsr#16
mov $t3,$s1,lsr#8
strb $t1,[$rounds,#4]
strb $t2,[$rounds,#5]
- mov $t1,$s2,lsr#24
strb $t3,[$rounds,#6]
- mov $t2,$s2,lsr#16
strb $s1,[$rounds,#7]
+ mov $t1,$s2,lsr#24
+ mov $t2,$s2,lsr#16
mov $t3,$s2,lsr#8
strb $t1,[$rounds,#8]
strb $t2,[$rounds,#9]
- mov $t1,$s3,lsr#24
strb $t3,[$rounds,#10]
- mov $t2,$s3,lsr#16
strb $s2,[$rounds,#11]
+ mov $t1,$s3,lsr#24
+ mov $t2,$s3,lsr#16
mov $t3,$s3,lsr#8
strb $t1,[$rounds,#12]
strb $t2,[$rounds,#13]
strb $t3,[$rounds,#14]
strb $s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
+
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-#endif
.size AES_encrypt,.-AES_encrypt
.type _armv4_AES_encrypt,%function
.align 2
_armv4_AES_encrypt:
str lr,[sp,#-4]! @ push lr
- ldmia $key!,{$t1-$i1}
- eor $s0,$s0,$t1
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
ldr $rounds,[$key,#240-16]
+ eor $s0,$s0,$t1
eor $s1,$s1,$t2
eor $s2,$s2,$t3
eor $s3,$s3,$i1
sub $rounds,$rounds,#1
mov lr,#255
- and $i1,lr,$s0
+.Lenc_loop:
and $i2,lr,$s0,lsr#8
and $i3,lr,$s0,lsr#16
+ and $i1,lr,$s0
mov $s0,$s0,lsr#24
-.Lenc_loop:
ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0]
- and $i1,lr,$s1,lsr#16 @ i0
+ ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
- and $i2,lr,$s1
ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
+
+ and $i1,lr,$s1,lsr#16 @ i0
+ and $i2,lr,$s1
and $i3,lr,$s1,lsr#8
- ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
mov $s1,$s1,lsr#24
-
ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16]
+ ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0]
ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8]
eor $s0,$s0,$i1,ror#8
- ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
+ eor $s1,$s1,$t1,ror#24
eor $t2,$t2,$i2,ror#8
- and $i2,lr,$s2,lsr#16 @ i1
eor $t3,$t3,$i3,ror#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2,lsr#16 @ i1
and $i3,lr,$s2
+ mov $s2,$s2,lsr#24
ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
- eor $s1,$s1,$t1,ror#24
ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
- mov $s2,$s2,lsr#24
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
eor $s0,$s0,$i1,ror#16
- ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
- and $i1,lr,$s3 @ i0
eor $s1,$s1,$i2,ror#8
- and $i2,lr,$s3,lsr#8 @ i1
+ eor $s2,$s2,$t2,ror#16
eor $t3,$t3,$i3,ror#16
+
+ and $i1,lr,$s3 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
and $i3,lr,$s3,lsr#16 @ i2
+ mov $s3,$s3,lsr#24
ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
- eor $s2,$s2,$t2,ror#16
ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
- mov $s3,$s3,lsr#24
-
ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
eor $s0,$s0,$i1,ror#24
- ldr $i1,[$key],#16
eor $s1,$s1,$i2,ror#16
- ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
eor $s2,$s2,$i3,ror#8
- ldr $t1,[$key,#-12]
eor $s3,$s3,$t3,ror#8
- ldr $t2,[$key,#-8]
- eor $s0,$s0,$i1
- ldr $t3,[$key,#-4]
- and $i1,lr,$s0
- eor $s1,$s1,$t1
- and $i2,lr,$s0,lsr#8
- eor $s2,$s2,$t2
- and $i3,lr,$s0,lsr#16
- eor $s3,$s3,$t3
- mov $s0,$s0,lsr#24
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
subs $rounds,$rounds,#1
bne .Lenc_loop
add $tbl,$tbl,#2
+ and $i1,lr,$s0
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0,lsr#16
+ mov $s0,$s0,lsr#24
ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0]
- and $i1,lr,$s1,lsr#16 @ i0
+ ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
- and $i2,lr,$s1
ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
+
+ and $i1,lr,$s1,lsr#16 @ i0
+ and $i2,lr,$s1
and $i3,lr,$s1,lsr#8
- ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
mov $s1,$s1,lsr#24
-
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16]
+ ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8]
eor $s0,$i1,$s0,lsl#8
- ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
+ eor $s1,$t1,$s1,lsl#24
eor $t2,$i2,$t2,lsl#8
- and $i2,lr,$s2,lsr#16 @ i1
eor $t3,$i3,$t3,lsl#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2,lsr#16 @ i1
and $i3,lr,$s2
+ mov $s2,$s2,lsr#24
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
- eor $s1,$t1,$s1,lsl#24
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
- mov $s2,$s2,lsr#24
-
+ ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
eor $s0,$i1,$s0,lsl#8
- ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
- and $i1,lr,$s3 @ i0
eor $s1,$s1,$i2,lsl#16
- and $i2,lr,$s3,lsr#8 @ i1
+ eor $s2,$t2,$s2,lsl#24
eor $t3,$i3,$t3,lsl#8
+
+ and $i1,lr,$s3 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
and $i3,lr,$s3,lsr#16 @ i2
+ mov $s3,$s3,lsr#24
ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
- eor $s2,$t2,$s2,lsl#24
ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
- mov $s3,$s3,lsr#24
-
ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
- eor $s0,$i1,$s0,lsl#8
- ldr $i1,[$key,#0]
ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
+ eor $s0,$i1,$s0,lsl#8
eor $s1,$s1,$i2,lsl#8
- ldr $t1,[$key,#4]
eor $s2,$s2,$i3,lsl#16
- ldr $t2,[$key,#8]
eor $s3,$t3,$s3,lsl#24
- ldr $t3,[$key,#12]
- eor $s0,$s0,$i1
- eor $s1,$s1,$t1
- eor $s2,$s2,$t2
- eor $s3,$s3,$t3
+ ldr lr,[sp],#4 @ pop lr
+ ldr $t1,[$key,#0]
+ ldr $t2,[$key,#4]
+ ldr $t3,[$key,#8]
+ ldr $i1,[$key,#12]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
sub $tbl,$tbl,#2
- ldr pc,[sp],#4 @ pop and return
+ mov pc,lr @ return
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
-.global private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,%function
+.global AES_set_encrypt_key
+.type AES_set_encrypt_key,%function
.align 5
-private_AES_set_encrypt_key:
-_armv4_AES_set_encrypt_key:
+AES_set_encrypt_key:
sub r3,pc,#8 @ AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
@@ -426,61 +388,44 @@ _armv4_AES_set_encrypt_key:
bne .Labrt
.Lok: stmdb sp!,{r4-r12,lr}
- sub $tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
+ sub $tbl,r3,#AES_set_encrypt_key-AES_Te-1024 @ Te4
mov $rounds,r0 @ inp
mov lr,r1 @ bits
mov $key,r2 @ key
-#if __ARM_ARCH__<7
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
- str $s0,[$key],#16
orr $s3,$s3,$t2,lsl#16
- str $s1,[$key,#-12]
orr $s3,$s3,$t3,lsl#24
- str $s2,[$key,#-8]
- str $s3,[$key,#-4]
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
str $s0,[$key],#16
str $s1,[$key,#-12]
str $s2,[$key,#-8]
str $s3,[$key,#-4]
-#endif
teq lr,#128
bne .Lnot128
@@ -492,58 +437,48 @@ _armv4_AES_set_encrypt_key:
.L128_loop:
and $t2,lr,$s3,lsr#24
and $i1,lr,$s3,lsr#16
- ldrb $t2,[$tbl,$t2]
and $i2,lr,$s3,lsr#8
- ldrb $i1,[$tbl,$i1]
and $i3,lr,$s3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
orr $t2,$t2,$i3,lsl#8
eor $t2,$t2,$t1
eor $s0,$s0,$t2 @ rk[4]=rk[0]^...
eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4]
- str $s0,[$key],#16
eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
- str $s1,[$key,#-12]
eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
+ str $s0,[$key],#16
+ str $s1,[$key,#-12]
str $s2,[$key,#-8]
- subs $rounds,$rounds,#1
str $s3,[$key,#-4]
+
+ subs $rounds,$rounds,#1
bne .L128_loop
sub r2,$key,#176
b .Ldone
.Lnot128:
-#if __ARM_ARCH__<7
ldrb $i2,[$rounds,#19]
ldrb $t1,[$rounds,#18]
ldrb $t2,[$rounds,#17]
ldrb $t3,[$rounds,#16]
orr $i2,$i2,$t1,lsl#8
- ldrb $i3,[$rounds,#23]
orr $i2,$i2,$t2,lsl#16
- ldrb $t1,[$rounds,#22]
orr $i2,$i2,$t3,lsl#24
+ ldrb $i3,[$rounds,#23]
+ ldrb $t1,[$rounds,#22]
ldrb $t2,[$rounds,#21]
ldrb $t3,[$rounds,#20]
orr $i3,$i3,$t1,lsl#8
orr $i3,$i3,$t2,lsl#16
- str $i2,[$key],#8
orr $i3,$i3,$t3,lsl#24
- str $i3,[$key,#-4]
-#else
- ldr $i2,[$rounds,#16]
- ldr $i3,[$rounds,#20]
-#ifdef __ARMEL__
- rev $i2,$i2
- rev $i3,$i3
-#endif
str $i2,[$key],#8
str $i3,[$key,#-4]
-#endif
teq lr,#192
bne .Lnot192
@@ -556,26 +491,27 @@ _armv4_AES_set_encrypt_key:
.L192_loop:
and $t2,lr,$i3,lsr#24
and $i1,lr,$i3,lsr#16
- ldrb $t2,[$tbl,$t2]
and $i2,lr,$i3,lsr#8
- ldrb $i1,[$tbl,$i1]
and $i3,lr,$i3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
orr $t2,$t2,$i3,lsl#8
eor $i3,$t2,$t1
eor $s0,$s0,$i3 @ rk[6]=rk[0]^...
eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6]
- str $s0,[$key],#24
eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
- str $s1,[$key,#-20]
eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
+ str $s0,[$key],#24
+ str $s1,[$key,#-20]
str $s2,[$key,#-16]
- subs $rounds,$rounds,#1
str $s3,[$key,#-12]
+
+ subs $rounds,$rounds,#1
subeq r2,$key,#216
beq .Ldone
@@ -588,33 +524,22 @@ _armv4_AES_set_encrypt_key:
b .L192_loop
.Lnot192:
-#if __ARM_ARCH__<7
ldrb $i2,[$rounds,#27]
ldrb $t1,[$rounds,#26]
ldrb $t2,[$rounds,#25]
ldrb $t3,[$rounds,#24]
orr $i2,$i2,$t1,lsl#8
- ldrb $i3,[$rounds,#31]
orr $i2,$i2,$t2,lsl#16
- ldrb $t1,[$rounds,#30]
orr $i2,$i2,$t3,lsl#24
+ ldrb $i3,[$rounds,#31]
+ ldrb $t1,[$rounds,#30]
ldrb $t2,[$rounds,#29]
ldrb $t3,[$rounds,#28]
orr $i3,$i3,$t1,lsl#8
orr $i3,$i3,$t2,lsl#16
- str $i2,[$key],#8
orr $i3,$i3,$t3,lsl#24
- str $i3,[$key,#-4]
-#else
- ldr $i2,[$rounds,#24]
- ldr $i3,[$rounds,#28]
-#ifdef __ARMEL__
- rev $i2,$i2
- rev $i3,$i3
-#endif
str $i2,[$key],#8
str $i3,[$key,#-4]
-#endif
mov $rounds,#14
str $rounds,[$key,#240-32]
@@ -625,51 +550,52 @@ _armv4_AES_set_encrypt_key:
.L256_loop:
and $t2,lr,$i3,lsr#24
and $i1,lr,$i3,lsr#16
- ldrb $t2,[$tbl,$t2]
and $i2,lr,$i3,lsr#8
- ldrb $i1,[$tbl,$i1]
and $i3,lr,$i3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
orr $t2,$t2,$i3,lsl#8
eor $i3,$t2,$t1
eor $s0,$s0,$i3 @ rk[8]=rk[0]^...
eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8]
- str $s0,[$key],#32
eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
- str $s1,[$key,#-28]
eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
+ str $s0,[$key],#32
+ str $s1,[$key,#-28]
str $s2,[$key,#-24]
- subs $rounds,$rounds,#1
str $s3,[$key,#-20]
+
+ subs $rounds,$rounds,#1
subeq r2,$key,#256
beq .Ldone
and $t2,lr,$s3
and $i1,lr,$s3,lsr#8
- ldrb $t2,[$tbl,$t2]
and $i2,lr,$s3,lsr#16
- ldrb $i1,[$tbl,$i1]
and $i3,lr,$s3,lsr#24
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#8
ldrb $i3,[$tbl,$i3]
+ orr $t2,$t2,$i1,lsl#8
orr $t2,$t2,$i2,lsl#16
- ldr $t1,[$key,#-48]
orr $t2,$t2,$i3,lsl#24
+ ldr $t1,[$key,#-48]
ldr $i1,[$key,#-44]
ldr $i2,[$key,#-40]
- eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
ldr $i3,[$key,#-36]
+ eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12]
- str $t1,[$key,#-16]
eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
- str $i1,[$key,#-12]
eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
+ str $t1,[$key,#-16]
+ str $i1,[$key,#-12]
str $i2,[$key,#-8]
str $i3,[$key,#-4]
b .L256_loop
@@ -679,14 +605,14 @@ _armv4_AES_set_encrypt_key:
.Labrt: tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
-.global private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,%function
+.global AES_set_decrypt_key
+.type AES_set_decrypt_key,%function
.align 5
-private_AES_set_decrypt_key:
+AES_set_decrypt_key:
str lr,[sp,#-4]! @ push lr
- bl _armv4_AES_set_encrypt_key
+ bl AES_set_encrypt_key
teq r0,#0
ldrne lr,[sp],#4 @ pop lr
bne .Labrt
@@ -765,15 +691,11 @@ $code.=<<___;
bne .Lmix
mov r0,#0
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-#endif
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+.size AES_set_decrypt_key,.-AES_set_decrypt_key
.type AES_Td,%object
.align 5
@@ -888,247 +810,221 @@ AES_decrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_decrypt-AES_Td @ Td
-#if __ARM_ARCH__<7
+
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
ldrb $t3,[$rounds,#0]
orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
ldrb $t2,[$rounds,#5]
ldrb $t3,[$rounds,#4]
orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
ldrb $t2,[$rounds,#9]
ldrb $t3,[$rounds,#8]
orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
ldrb $t2,[$rounds,#13]
ldrb $t3,[$rounds,#12]
orr $s3,$s3,$t1,lsl#8
orr $s3,$s3,$t2,lsl#16
orr $s3,$s3,$t3,lsl#24
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
-#endif
+
bl _armv4_AES_decrypt
ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
- str $s0,[$rounds,#0]
- str $s1,[$rounds,#4]
- str $s2,[$rounds,#8]
- str $s3,[$rounds,#12]
-#else
mov $t1,$s0,lsr#24 @ write output in endian-neutral
mov $t2,$s0,lsr#16 @ manner...
mov $t3,$s0,lsr#8
strb $t1,[$rounds,#0]
strb $t2,[$rounds,#1]
- mov $t1,$s1,lsr#24
strb $t3,[$rounds,#2]
- mov $t2,$s1,lsr#16
strb $s0,[$rounds,#3]
+ mov $t1,$s1,lsr#24
+ mov $t2,$s1,lsr#16
mov $t3,$s1,lsr#8
strb $t1,[$rounds,#4]
strb $t2,[$rounds,#5]
- mov $t1,$s2,lsr#24
strb $t3,[$rounds,#6]
- mov $t2,$s2,lsr#16
strb $s1,[$rounds,#7]
+ mov $t1,$s2,lsr#24
+ mov $t2,$s2,lsr#16
mov $t3,$s2,lsr#8
strb $t1,[$rounds,#8]
strb $t2,[$rounds,#9]
- mov $t1,$s3,lsr#24
strb $t3,[$rounds,#10]
- mov $t2,$s3,lsr#16
strb $s2,[$rounds,#11]
+ mov $t1,$s3,lsr#24
+ mov $t2,$s3,lsr#16
mov $t3,$s3,lsr#8
strb $t1,[$rounds,#12]
strb $t2,[$rounds,#13]
strb $t3,[$rounds,#14]
strb $s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
+
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-#endif
.size AES_decrypt,.-AES_decrypt
.type _armv4_AES_decrypt,%function
.align 2
_armv4_AES_decrypt:
str lr,[sp,#-4]! @ push lr
- ldmia $key!,{$t1-$i1}
- eor $s0,$s0,$t1
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
ldr $rounds,[$key,#240-16]
+ eor $s0,$s0,$t1
eor $s1,$s1,$t2
eor $s2,$s2,$t3
eor $s3,$s3,$i1
sub $rounds,$rounds,#1
mov lr,#255
+.Ldec_loop:
and $i1,lr,$s0,lsr#16
and $i2,lr,$s0,lsr#8
and $i3,lr,$s0
mov $s0,$s0,lsr#24
-.Ldec_loop:
ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16]
- and $i1,lr,$s1 @ i0
+ ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
- and $i2,lr,$s1,lsr#16
ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
+
+ and $i1,lr,$s1 @ i0
+ and $i2,lr,$s1,lsr#16
and $i3,lr,$s1,lsr#8
- ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
mov $s1,$s1,lsr#24
-
ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0]
+ ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16]
ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8]
eor $s0,$s0,$i1,ror#24
- ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
+ eor $s1,$s1,$t1,ror#8
eor $t2,$i2,$t2,ror#8
- and $i2,lr,$s2 @ i1
eor $t3,$i3,$t3,ror#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2 @ i1
and $i3,lr,$s2,lsr#16
+ mov $s2,$s2,lsr#24
ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
- eor $s1,$s1,$t1,ror#8
ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
- mov $s2,$s2,lsr#24
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
eor $s0,$s0,$i1,ror#16
- ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
- and $i1,lr,$s3,lsr#16 @ i0
eor $s1,$s1,$i2,ror#24
- and $i2,lr,$s3,lsr#8 @ i1
+ eor $s2,$s2,$t2,ror#8
eor $t3,$i3,$t3,ror#8
+
+ and $i1,lr,$s3,lsr#16 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
and $i3,lr,$s3 @ i2
+ mov $s3,$s3,lsr#24
ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
- eor $s2,$s2,$t2,ror#8
ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
- mov $s3,$s3,lsr#24
-
ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
eor $s0,$s0,$i1,ror#8
- ldr $i1,[$key],#16
eor $s1,$s1,$i2,ror#16
- ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
eor $s2,$s2,$i3,ror#24
-
- ldr $t1,[$key,#-12]
- eor $s0,$s0,$i1
- ldr $t2,[$key,#-8]
eor $s3,$s3,$t3,ror#8
- ldr $t3,[$key,#-4]
- and $i1,lr,$s0,lsr#16
- eor $s1,$s1,$t1
- and $i2,lr,$s0,lsr#8
- eor $s2,$s2,$t2
- and $i3,lr,$s0
- eor $s3,$s3,$t3
- mov $s0,$s0,lsr#24
+
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
subs $rounds,$rounds,#1
bne .Ldec_loop
add $tbl,$tbl,#1024
- ldr $t2,[$tbl,#0] @ prefetch Td4
- ldr $t3,[$tbl,#32]
- ldr $t1,[$tbl,#64]
- ldr $t2,[$tbl,#96]
- ldr $t3,[$tbl,#128]
- ldr $t1,[$tbl,#160]
- ldr $t2,[$tbl,#192]
- ldr $t3,[$tbl,#224]
+ ldr $t1,[$tbl,#0] @ prefetch Td4
+ ldr $t2,[$tbl,#32]
+ ldr $t3,[$tbl,#64]
+ ldr $i1,[$tbl,#96]
+ ldr $i2,[$tbl,#128]
+ ldr $i3,[$tbl,#160]
+ ldr $t1,[$tbl,#192]
+ ldr $t2,[$tbl,#224]
- ldrb $s0,[$tbl,$s0] @ Td4[s0>>24]
+ and $i1,lr,$s0,lsr#16
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0
+ ldrb $s0,[$tbl,$s0,lsr#24] @ Td4[s0>>24]
ldrb $t1,[$tbl,$i1] @ Td4[s0>>16]
- and $i1,lr,$s1 @ i0
ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
- and $i2,lr,$s1,lsr#16
ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
- and $i3,lr,$s1,lsr#8
+ and $i1,lr,$s1 @ i0
+ and $i2,lr,$s1,lsr#16
+ and $i3,lr,$s1,lsr#8
ldrb $i1,[$tbl,$i1] @ Td4[s1>>0]
ldrb $s1,[$tbl,$s1,lsr#24] @ Td4[s1>>24]
ldrb $i2,[$tbl,$i2] @ Td4[s1>>16]
- eor $s0,$i1,$s0,lsl#24
ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
+ eor $s0,$i1,$s0,lsl#24
eor $s1,$t1,$s1,lsl#8
- and $i1,lr,$s2,lsr#8 @ i0
eor $t2,$t2,$i2,lsl#8
+ eor $t3,$t3,$i3,lsl#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
and $i2,lr,$s2 @ i1
+ and $i3,lr,$s2,lsr#16
ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
- eor $t3,$t3,$i3,lsl#8
ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
- and $i3,lr,$s2,lsr#16
-
ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
- eor $s0,$s0,$i1,lsl#8
ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
+ eor $s0,$s0,$i1,lsl#8
eor $s1,$i2,$s1,lsl#16
- and $i1,lr,$s3,lsr#16 @ i0
eor $s2,$t2,$s2,lsl#16
+ eor $t3,$t3,$i3,lsl#16
+
+ and $i1,lr,$s3,lsr#16 @ i0
and $i2,lr,$s3,lsr#8 @ i1
+ and $i3,lr,$s3 @ i2
ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
- eor $t3,$t3,$i3,lsl#16
ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
- and $i3,lr,$s3 @ i2
-
ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
eor $s0,$s0,$i1,lsl#16
- ldr $i1,[$key,#0]
eor $s1,$s1,$i2,lsl#8
- ldr $t1,[$key,#4]
eor $s2,$i3,$s2,lsl#8
- ldr $t2,[$key,#8]
eor $s3,$t3,$s3,lsl#24
- ldr $t3,[$key,#12]
- eor $s0,$s0,$i1
- eor $s1,$s1,$t1
- eor $s2,$s2,$t2
- eor $s3,$s3,$t3
+ ldr lr,[sp],#4 @ pop lr
+ ldr $t1,[$key,#0]
+ ldr $t2,[$key,#4]
+ ldr $t3,[$key,#8]
+ ldr $i1,[$key,#12]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
sub $tbl,$tbl,#1024
- ldr pc,[sp],#4 @ pop and return
+ mov pc,lr @ return
.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
___
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
print $code;
-close STDOUT; # enforce flush
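The armv4 module above is a CRYPTOGAMS perlasm generator: register names are held in Perl scalars ($s0="r0" and so on), the assembly is accumulated in the $code here-document, post-processed with regexes such as the bx lr rewrite above that keeps the output compatible with -march=armv4, and written to standard output (the open STDOUT redirection is removed by this import). A minimal Perl sketch of that pattern follows; the demo routine and register choices are made up for illustration and are not taken from the module.

#!/usr/bin/perl
# Minimal sketch (not the real OpenSSL driver) of the perlasm pattern used by
# aes-armv4.pl: register names live in Perl scalars, get interpolated into a
# here-document of assembly, are post-processed with a regex, and printed.
# The "demo" routine below is a made-up example, not code from the module.
use strict;
use warnings;

my $s0  = "r0";                 # first state word, as in the real script
my $tmp = "r12";

my $code = <<___;
.text
.code	32
.global	demo
demo:
	mov	$tmp,$s0,lsr#24	@ top byte of $s0
	bx	lr
___

# Same trick as the real script: encode "bx lr" as a raw word so the
# output still assembles with -march=armv4.
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;

print $code;                    # generated assembly goes to stdout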
diff --git a/lib/libssl/src/crypto/aes/asm/aes-s390x.pl b/lib/libssl/src/crypto/aes/asm/aes-s390x.pl
index e75dcd0315e..4b27afd92fc 100644
--- a/lib/libssl/src/crypto/aes/asm/aes-s390x.pl
+++ b/lib/libssl/src/crypto/aes/asm/aes-s390x.pl
@@ -44,57 +44,12 @@
# Unlike previous version hardware support detection takes place only
# at the moment of key schedule setup, which is denoted in key->rounds.
# This is done, because deferred key setup can't be made MT-safe, not
-# for keys longer than 128 bits.
+# for key lengthes longer than 128 bits.
#
# Add AES_cbc_encrypt, which gives incredible performance improvement,
# it was measured to be ~6.6x. It's less than previously mentioned 8x,
# because software implementation was optimized.
-# May 2010.
-#
-# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
-# performance improvement over "generic" counter mode routine relying
-# on single-block, also hardware-assisted, AES_encrypt. "Up to" refers
-# to the fact that exact throughput value depends on current stack
-# frame alignment within 4KB page. In worst case you get ~75% of the
-# maximum, but *on average* it would be as much as ~98%. Meaning that
-# worst case is unlike, it's like hitting ravine on plateau.
-
-# November 2010.
-#
-# Adapt for -m31 build. If kernel supports what's called "highgprs"
-# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
-# instructions and achieve "64-bit" performance even in 31-bit legacy
-# application context. The feature is not specific to any particular
-# processor, as long as it's "z-CPU". Latter implies that the code
-# remains z/Architecture specific. On z990 it was measured to perform
-# 2x better than code generated by gcc 4.3.
-
-# December 2010.
-#
-# Add support for z196 "cipher message with counter" instruction.
-# Note however that it's disengaged, because it was measured to
-# perform ~12% worse than vanilla km-based code...
-
-# February 2011.
-#
-# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
-# instructions, which deliver ~70% improvement at 8KB block size over
-# vanilla km-based code, 37% - at most like 512-bytes block size.
-
-$flavour = shift;
-
-if ($flavour =~ /3[12]/) {
- $SIZE_T=4;
- $g="";
-} else {
- $SIZE_T=8;
- $g="g";
-}
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
$softonly=0; # allow hardware support
$t0="%r0"; $mask="%r0";
@@ -114,8 +69,6 @@ $rounds="%r13";
$ra="%r14";
$sp="%r15";
-$stdframe=16*$SIZE_T+4*8;
-
sub _data_word()
{ my $i;
while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
@@ -257,7 +210,7 @@ $code.=<<___ if (!$softonly);
.Lesoft:
___
$code.=<<___;
- stm${g} %r3,$ra,3*$SIZE_T($sp)
+ stmg %r3,$ra,24($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
@@ -267,20 +220,20 @@ $code.=<<___;
larl $tbl,AES_Te
bras $ra,_s390x_AES_encrypt
- l${g} $out,3*$SIZE_T($sp)
+ lg $out,24($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
st $s3,12($out)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
+ lmg %r6,$ra,48($sp)
br $ra
.size AES_encrypt,.-AES_encrypt
.type _s390x_AES_encrypt,\@function
.align 16
_s390x_AES_encrypt:
- st${g} $ra,15*$SIZE_T($sp)
+ stg $ra,152($sp)
x $s0,0($key)
x $s1,4($key)
x $s2,8($key)
@@ -444,7 +397,7 @@ _s390x_AES_encrypt:
or $s2,$i3
or $s3,$t3
- l${g} $ra,15*$SIZE_T($sp)
+ lg $ra,152($sp)
xr $s0,$t0
xr $s1,$t2
x $s2,24($key)
@@ -583,7 +536,7 @@ $code.=<<___ if (!$softonly);
.Ldsoft:
___
$code.=<<___;
- stm${g} %r3,$ra,3*$SIZE_T($sp)
+ stmg %r3,$ra,24($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
@@ -593,20 +546,20 @@ $code.=<<___;
larl $tbl,AES_Td
bras $ra,_s390x_AES_decrypt
- l${g} $out,3*$SIZE_T($sp)
+ lg $out,24($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
st $s3,12($out)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
+ lmg %r6,$ra,48($sp)
br $ra
.size AES_decrypt,.-AES_decrypt
.type _s390x_AES_decrypt,\@function
.align 16
_s390x_AES_decrypt:
- st${g} $ra,15*$SIZE_T($sp)
+ stg $ra,152($sp)
x $s0,0($key)
x $s1,4($key)
x $s2,8($key)
@@ -750,7 +703,7 @@ _s390x_AES_decrypt:
nr $i1,$mask
nr $i2,$mask
- l${g} $ra,15*$SIZE_T($sp)
+ lg $ra,152($sp)
or $s1,$t1
l $t0,16($key)
l $t1,20($key)
@@ -779,15 +732,14 @@ ___
$code.=<<___;
# void AES_set_encrypt_key(const unsigned char *in, int bits,
# AES_KEY *key) {
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,\@function
+.globl AES_set_encrypt_key
+.type AES_set_encrypt_key,\@function
.align 16
-private_AES_set_encrypt_key:
-_s390x_AES_set_encrypt_key:
+AES_set_encrypt_key:
lghi $t0,0
- cl${g}r $inp,$t0
+ clgr $inp,$t0
je .Lminus1
- cl${g}r $key,$t0
+ clgr $key,$t0
je .Lminus1
lghi $t0,128
@@ -813,11 +765,6 @@ $code.=<<___ if (!$softonly);
srl %r5,6
ar %r5,%r0
- larl %r1,OPENSSL_s390xcap_P
- lg %r0,0(%r1)
- tmhl %r0,0x4000 # check for message-security assist
- jz .Lekey_internal
-
lghi %r0,0 # query capability vector
la %r1,16($sp)
.long 0xb92f0042 # kmc %r4,%r2
@@ -837,8 +784,7 @@ $code.=<<___ if (!$softonly);
je 1f
lg %r1,24($inp)
stg %r1,24($key)
-1: st $bits,236($key) # save bits [for debugging purposes]
- lgr $t0,%r5
+1: st $bits,236($key) # save bits
st %r5,240($key) # save km code
lghi %r2,0
br %r14
@@ -846,7 +792,7 @@ ___
$code.=<<___;
.align 16
.Lekey_internal:
- stm${g} %r4,%r13,4*$SIZE_T($sp) # all non-volatile regs and $key
+ stmg %r6,%r13,48($sp) # all non-volatile regs
larl $tbl,AES_Te+2048
@@ -906,9 +852,8 @@ $code.=<<___;
la $key,16($key) # key+=4
la $t3,4($t3) # i++
brct $rounds,.L128_loop
- lghi $t0,10
lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
+ lmg %r6,%r13,48($sp)
br $ra
.align 16
@@ -955,9 +900,8 @@ $code.=<<___;
st $s2,32($key)
st $s3,36($key)
brct $rounds,.L192_continue
- lghi $t0,12
lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
+ lmg %r6,%r13,48($sp)
br $ra
.align 16
@@ -1018,9 +962,8 @@ $code.=<<___;
st $s2,40($key)
st $s3,44($key)
brct $rounds,.L256_continue
- lghi $t0,14
lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
+ lmg %r6,%r13,48($sp)
br $ra
.align 16
@@ -1063,34 +1006,42 @@ $code.=<<___;
.Lminus1:
lghi %r2,-1
br $ra
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
# void AES_set_decrypt_key(const unsigned char *in, int bits,
# AES_KEY *key) {
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,\@function
+.globl AES_set_decrypt_key
+.type AES_set_decrypt_key,\@function
.align 16
-private_AES_set_decrypt_key:
- #st${g} $key,4*$SIZE_T($sp) # I rely on AES_set_encrypt_key to
- st${g} $ra,14*$SIZE_T($sp) # save non-volatile registers and $key!
- bras $ra,_s390x_AES_set_encrypt_key
- #l${g} $key,4*$SIZE_T($sp)
- l${g} $ra,14*$SIZE_T($sp)
+AES_set_decrypt_key:
+ stg $key,32($sp) # I rely on AES_set_encrypt_key to
+ stg $ra,112($sp) # save non-volatile registers!
+ bras $ra,AES_set_encrypt_key
+ lg $key,32($sp)
+ lg $ra,112($sp)
ltgr %r2,%r2
bnzr $ra
___
$code.=<<___ if (!$softonly);
- #l $t0,240($key)
+ l $t0,240($key)
lhi $t1,16
cr $t0,$t1
jl .Lgo
oill $t0,0x80 # set "decrypt" bit
st $t0,240($key)
br $ra
+
+.align 16
+.Ldkey_internal:
+ stg $key,32($sp)
+ stg $ra,40($sp)
+ bras $ra,.Lekey_internal
+ lg $key,32($sp)
+ lg $ra,40($sp)
___
$code.=<<___;
-.align 16
-.Lgo: lgr $rounds,$t0 #llgf $rounds,240($key)
+
+.Lgo: llgf $rounds,240($key)
la $i1,0($key)
sllg $i2,$rounds,4
la $i2,0($i2,$key)
@@ -1167,14 +1118,13 @@ $code.=<<___;
la $key,4($key)
brct $rounds,.Lmix
- lm${g} %r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
+ lmg %r6,%r13,48($sp)# as was saved by AES_set_encrypt_key!
lghi %r2,0
br $ra
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+.size AES_set_decrypt_key,.-AES_set_decrypt_key
___
-########################################################################
-# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+#void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# size_t length, const AES_KEY *key,
# unsigned char *ivec, const int enc)
{
@@ -1208,7 +1158,7 @@ $code.=<<___ if (!$softonly);
l %r0,240($key) # load kmc code
lghi $key,15 # res=len%16, len-=res;
ngr $key,$len
- sl${g}r $len,$key
+ slgr $len,$key
la %r1,16($sp) # parameter block - ivec || key
jz .Lkmc_truncated
.long 0xb92f0042 # kmc %r4,%r2
@@ -1226,34 +1176,34 @@ $code.=<<___ if (!$softonly);
tmll %r0,0x80
jnz .Lkmc_truncated_dec
lghi %r1,0
- stg %r1,16*$SIZE_T($sp)
- stg %r1,16*$SIZE_T+8($sp)
+ stg %r1,128($sp)
+ stg %r1,136($sp)
bras %r1,1f
- mvc 16*$SIZE_T(1,$sp),0($inp)
+ mvc 128(1,$sp),0($inp)
1: ex $key,0(%r1)
la %r1,16($sp) # restore parameter block
- la $inp,16*$SIZE_T($sp)
+ la $inp,128($sp)
lghi $len,16
.long 0xb92f0042 # kmc %r4,%r2
j .Lkmc_done
.align 16
.Lkmc_truncated_dec:
- st${g} $out,4*$SIZE_T($sp)
- la $out,16*$SIZE_T($sp)
+ stg $out,64($sp)
+ la $out,128($sp)
lghi $len,16
.long 0xb92f0042 # kmc %r4,%r2
- l${g} $out,4*$SIZE_T($sp)
+ lg $out,64($sp)
bras %r1,2f
- mvc 0(1,$out),16*$SIZE_T($sp)
+ mvc 0(1,$out),128($sp)
2: ex $key,0(%r1)
j .Lkmc_done
.align 16
.Lcbc_software:
___
$code.=<<___;
- stm${g} $key,$ra,5*$SIZE_T($sp)
+ stmg $key,$ra,40($sp)
lhi %r0,0
- cl %r0,`$stdframe+$SIZE_T-4`($sp)
+ cl %r0,164($sp)
je .Lcbc_decrypt
larl $tbl,AES_Te
@@ -1264,10 +1214,10 @@ $code.=<<___;
llgf $s3,12($ivp)
lghi $t0,16
- sl${g}r $len,$t0
+ slgr $len,$t0
brc 4,.Lcbc_enc_tail # if borrow
.Lcbc_enc_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
+ stmg $inp,$out,16($sp)
x $s0,0($inp)
x $s1,4($inp)
x $s2,8($inp)
@@ -1276,7 +1226,7 @@ $code.=<<___;
bras $ra,_s390x_AES_encrypt
- lm${g} $inp,$key,2*$SIZE_T($sp)
+ lmg $inp,$key,16($sp)
st $s0,0($out)
st $s1,4($out)
st $s2,8($out)
@@ -1285,33 +1235,33 @@ $code.=<<___;
la $inp,16($inp)
la $out,16($out)
lghi $t0,16
- lt${g}r $len,$len
+ ltgr $len,$len
jz .Lcbc_enc_done
- sl${g}r $len,$t0
+ slgr $len,$t0
brc 4,.Lcbc_enc_tail # if borrow
j .Lcbc_enc_loop
.align 16
.Lcbc_enc_done:
- l${g} $ivp,6*$SIZE_T($sp)
+ lg $ivp,48($sp)
st $s0,0($ivp)
st $s1,4($ivp)
st $s2,8($ivp)
st $s3,12($ivp)
- lm${g} %r7,$ra,7*$SIZE_T($sp)
+ lmg %r7,$ra,56($sp)
br $ra
.align 16
.Lcbc_enc_tail:
aghi $len,15
lghi $t0,0
- stg $t0,16*$SIZE_T($sp)
- stg $t0,16*$SIZE_T+8($sp)
+ stg $t0,128($sp)
+ stg $t0,136($sp)
bras $t1,3f
- mvc 16*$SIZE_T(1,$sp),0($inp)
+ mvc 128(1,$sp),0($inp)
3: ex $len,0($t1)
lghi $len,0
- la $inp,16*$SIZE_T($sp)
+ la $inp,128($sp)
j .Lcbc_enc_loop
.align 16
@@ -1320,10 +1270,10 @@ $code.=<<___;
lg $t0,0($ivp)
lg $t1,8($ivp)
- stmg $t0,$t1,16*$SIZE_T($sp)
+ stmg $t0,$t1,128($sp)
.Lcbc_dec_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
+ stmg $inp,$out,16($sp)
llgf $s0,0($inp)
llgf $s1,4($inp)
llgf $s2,8($inp)
@@ -1332,7 +1282,7 @@ $code.=<<___;
bras $ra,_s390x_AES_decrypt
- lm${g} $inp,$key,2*$SIZE_T($sp)
+ lmg $inp,$key,16($sp)
sllg $s0,$s0,32
sllg $s2,$s2,32
lr $s0,$s1
@@ -1340,15 +1290,15 @@ $code.=<<___;
lg $t0,0($inp)
lg $t1,8($inp)
- xg $s0,16*$SIZE_T($sp)
- xg $s2,16*$SIZE_T+8($sp)
+ xg $s0,128($sp)
+ xg $s2,136($sp)
lghi $s1,16
- sl${g}r $len,$s1
+ slgr $len,$s1
brc 4,.Lcbc_dec_tail # if borrow
brc 2,.Lcbc_dec_done # if zero
stg $s0,0($out)
stg $s2,8($out)
- stmg $t0,$t1,16*$SIZE_T($sp)
+ stmg $t0,$t1,128($sp)
la $inp,16($inp)
la $out,16($out)
@@ -1358,7 +1308,7 @@ $code.=<<___;
stg $s0,0($out)
stg $s2,8($out)
.Lcbc_dec_exit:
- lm${g} %r6,$ra,6*$SIZE_T($sp)
+ lmg $ivp,$ra,48($sp)
stmg $t0,$t1,0($ivp)
br $ra
@@ -1366,872 +1316,18 @@ $code.=<<___;
.align 16
.Lcbc_dec_tail:
aghi $len,15
- stg $s0,16*$SIZE_T($sp)
- stg $s2,16*$SIZE_T+8($sp)
+ stg $s0,128($sp)
+ stg $s2,136($sp)
bras $s1,4f
- mvc 0(1,$out),16*$SIZE_T($sp)
+ mvc 0(1,$out),128($sp)
4: ex $len,0($s1)
j .Lcbc_dec_exit
.size AES_cbc_encrypt,.-AES_cbc_encrypt
___
}
-########################################################################
-# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
-# size_t blocks, const AES_KEY *key,
-# const unsigned char *ivec)
-{
-my $inp="%r2";
-my $out="%r4"; # blocks and out are swapped
-my $len="%r3";
-my $key="%r5"; my $iv0="%r5";
-my $ivp="%r6";
-my $fp ="%r7";
-
-$code.=<<___;
-.globl AES_ctr32_encrypt
-.type AES_ctr32_encrypt,\@function
-.align 16
-AES_ctr32_encrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
- llgfr $len,$len # safe in ctr32 subroutine even in 64-bit case
-___
-$code.=<<___ if (!$softonly);
- l %r0,240($key)
- lhi %r1,16
- clr %r0,%r1
- jl .Lctr32_software
-
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- slgr $out,$inp
- la %r1,0($key) # %r1 is permanent copy of $key
- lg $iv0,0($ivp) # load ivec
- lg $ivp,8($ivp)
-
- # prepare and allocate stack frame at the top of 4K page
- # with 1K reserved for eventual signal handling
- lghi $s0,-1024-256-16# guarantee at least 256-bytes buffer
- lghi $s1,-4096
- algr $s0,$sp
- lgr $fp,$sp
- ngr $s0,$s1 # align at page boundary
- slgr $fp,$s0 # total buffer size
- lgr $s2,$sp
- lghi $s1,1024+16 # sl[g]fi is extended-immediate facility
- slgr $fp,$s1 # deduct reservation to get usable buffer size
- # buffer size is at lest 256 and at most 3072+256-16
-
- la $sp,1024($s0) # alloca
- srlg $fp,$fp,4 # convert bytes to blocks, minimum 16
- st${g} $s2,0($sp) # back-chain
- st${g} $fp,$SIZE_T($sp)
-
- slgr $len,$fp
- brc 1,.Lctr32_hw_switch # not zero, no borrow
- algr $fp,$len # input is shorter than allocated buffer
- lghi $len,0
- st${g} $fp,$SIZE_T($sp)
-
-.Lctr32_hw_switch:
-___
-$code.=<<___ if (0); ######### kmctr code was measured to be ~12% slower
- larl $s0,OPENSSL_s390xcap_P
- lg $s0,8($s0)
- tmhh $s0,0x0004 # check for message_security-assist-4
- jz .Lctr32_km_loop
-
- llgfr $s0,%r0
- lgr $s1,%r1
- lghi %r0,0
- la %r1,16($sp)
- .long 0xb92d2042 # kmctr %r4,%r2,%r2
-
- llihh %r0,0x8000 # check if kmctr supports the function code
- srlg %r0,%r0,0($s0)
- ng %r0,16($sp)
- lgr %r0,$s0
- lgr %r1,$s1
- jz .Lctr32_km_loop
-
-####### kmctr code
- algr $out,$inp # restore $out
- lgr $s1,$len # $s1 undertakes $len
- j .Lctr32_kmctr_loop
-.align 16
-.Lctr32_kmctr_loop:
- la $s2,16($sp)
- lgr $s3,$fp
-.Lctr32_kmctr_prepare:
- stg $iv0,0($s2)
- stg $ivp,8($s2)
- la $s2,16($s2)
- ahi $ivp,1 # 32-bit increment, preserves upper half
- brct $s3,.Lctr32_kmctr_prepare
-
- #la $inp,0($inp) # inp
- sllg $len,$fp,4 # len
- #la $out,0($out) # out
- la $s2,16($sp) # iv
- .long 0xb92da042 # kmctr $out,$s2,$inp
- brc 1,.-4 # pay attention to "partial completion"
-
- slgr $s1,$fp
- brc 1,.Lctr32_kmctr_loop # not zero, no borrow
- algr $fp,$s1
- lghi $s1,0
- brc 4+1,.Lctr32_kmctr_loop # not zero
-
- l${g} $sp,0($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-___
-$code.=<<___;
-.Lctr32_km_loop:
- la $s2,16($sp)
- lgr $s3,$fp
-.Lctr32_km_prepare:
- stg $iv0,0($s2)
- stg $ivp,8($s2)
- la $s2,16($s2)
- ahi $ivp,1 # 32-bit increment, preserves upper half
- brct $s3,.Lctr32_km_prepare
-
- la $s0,16($sp) # inp
- sllg $s1,$fp,4 # len
- la $s2,16($sp) # out
- .long 0xb92e00a8 # km %r10,%r8
- brc 1,.-4 # pay attention to "partial completion"
-
- la $s2,16($sp)
- lgr $s3,$fp
- slgr $s2,$inp
-.Lctr32_km_xor:
- lg $s0,0($inp)
- lg $s1,8($inp)
- xg $s0,0($s2,$inp)
- xg $s1,8($s2,$inp)
- stg $s0,0($out,$inp)
- stg $s1,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lctr32_km_xor
-
- slgr $len,$fp
- brc 1,.Lctr32_km_loop # not zero, no borrow
- algr $fp,$len
- lghi $len,0
- brc 4+1,.Lctr32_km_loop # not zero
-
- l${g} $s0,0($sp)
- l${g} $s1,$SIZE_T($sp)
- la $s2,16($sp)
-.Lctr32_km_zap:
- stg $s0,0($s2)
- stg $s0,8($s2)
- la $s2,16($s2)
- brct $s1,.Lctr32_km_zap
-
- la $sp,0($s0)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lctr32_software:
-___
-$code.=<<___;
- stm${g} $key,$ra,5*$SIZE_T($sp)
- sl${g}r $inp,$out
- larl $tbl,AES_Te
- llgf $t1,12($ivp)
-
-.Lctr32_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
- llgf $s0,0($ivp)
- llgf $s1,4($ivp)
- llgf $s2,8($ivp)
- lgr $s3,$t1
- st $t1,16*$SIZE_T($sp)
- lgr %r4,$key
-
- bras $ra,_s390x_AES_encrypt
-
- lm${g} $inp,$ivp,2*$SIZE_T($sp)
- llgf $t1,16*$SIZE_T($sp)
- x $s0,0($inp,$out)
- x $s1,4($inp,$out)
- x $s2,8($inp,$out)
- x $s3,12($inp,$out)
- stm $s0,$s3,0($out)
-
- la $out,16($out)
- ahi $t1,1 # 32-bit increment
- brct $len,.Lctr32_loop
-
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_ctr32_encrypt,.-AES_ctr32_encrypt
-___
-}
-
-########################################################################
-# void AES_xts_encrypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
-{
-my $inp="%r2";
-my $out="%r4"; # len and out are swapped
-my $len="%r3";
-my $key1="%r5"; # $i1
-my $key2="%r6"; # $i2
-my $fp="%r7"; # $i3
-my $tweak=16*$SIZE_T+16; # or $stdframe-16, bottom of the frame...
-
-$code.=<<___;
-.type _s390x_xts_km,\@function
-.align 16
-_s390x_xts_km:
-___
-$code.=<<___ if(1);
- llgfr $s0,%r0 # put aside the function code
- lghi $s1,0x7f
- nr $s1,%r0
- lghi %r0,0 # query capability vector
- la %r1,$tweak-16($sp)
- .long 0xb92e0042 # km %r4,%r2
- llihh %r1,0x8000
- srlg %r1,%r1,32($s1) # check for 32+function code
- ng %r1,$tweak-16($sp)
- lgr %r0,$s0 # restore the function code
- la %r1,0($key1) # restore $key1
- jz .Lxts_km_vanilla
-
- lmg $i2,$i3,$tweak($sp) # put aside the tweak value
- algr $out,$inp
-
- oill %r0,32 # switch to xts function code
- aghi $s1,-18 #
- sllg $s1,$s1,3 # (function code - 18)*8, 0 or 16
- la %r1,$tweak-16($sp)
- slgr %r1,$s1 # parameter block position
- lmg $s0,$s3,0($key1) # load 256 bits of key material,
- stmg $s0,$s3,0(%r1) # and copy it to parameter block.
- # yes, it contains junk and overlaps
- # with the tweak in 128-bit case.
- # it's done to avoid conditional
- # branch.
- stmg $i2,$i3,$tweak($sp) # "re-seat" the tweak value
-
- .long 0xb92e0042 # km %r4,%r2
- brc 1,.-4 # pay attention to "partial completion"
-
- lrvg $s0,$tweak+0($sp) # load the last tweak
- lrvg $s1,$tweak+8($sp)
- stmg %r0,%r3,$tweak-32($sp) # wipe copy of the key
-
- nill %r0,0xffdf # switch back to original function code
- la %r1,0($key1) # restore pointer to $key1
- slgr $out,$inp
-
- llgc $len,2*$SIZE_T-1($sp)
- nill $len,0x0f # $len%=16
- br $ra
-
-.align 16
-.Lxts_km_vanilla:
-___
-$code.=<<___;
- # prepare and allocate stack frame at the top of 4K page
- # with 1K reserved for eventual signal handling
- lghi $s0,-1024-256-16# guarantee at least 256-bytes buffer
- lghi $s1,-4096
- algr $s0,$sp
- lgr $fp,$sp
- ngr $s0,$s1 # align at page boundary
- slgr $fp,$s0 # total buffer size
- lgr $s2,$sp
- lghi $s1,1024+16 # sl[g]fi is extended-immediate facility
- slgr $fp,$s1 # deduct reservation to get usable buffer size
- # buffer size is at lest 256 and at most 3072+256-16
-
- la $sp,1024($s0) # alloca
- nill $fp,0xfff0 # round to 16*n
- st${g} $s2,0($sp) # back-chain
- nill $len,0xfff0 # redundant
- st${g} $fp,$SIZE_T($sp)
-
- slgr $len,$fp
- brc 1,.Lxts_km_go # not zero, no borrow
- algr $fp,$len # input is shorter than allocated buffer
- lghi $len,0
- st${g} $fp,$SIZE_T($sp)
-
-.Lxts_km_go:
- lrvg $s0,$tweak+0($s2) # load the tweak value in little-endian
- lrvg $s1,$tweak+8($s2)
-
- la $s2,16($sp) # vector of ascending tweak values
- slgr $s2,$inp
- srlg $s3,$fp,4
- j .Lxts_km_start
-
-.Lxts_km_loop:
- la $s2,16($sp)
- slgr $s2,$inp
- srlg $s3,$fp,4
-.Lxts_km_prepare:
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
-.Lxts_km_start:
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- stg $i1,0($s2,$inp)
- stg $i2,8($s2,$inp)
- xg $i1,0($inp)
- xg $i2,8($inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lxts_km_prepare
-
- slgr $inp,$fp # rewind $inp
- la $s2,0($out,$inp)
- lgr $s3,$fp
- .long 0xb92e00aa # km $s2,$s2
- brc 1,.-4 # pay attention to "partial completion"
-
- la $s2,16($sp)
- slgr $s2,$inp
- srlg $s3,$fp,4
-.Lxts_km_xor:
- lg $i1,0($out,$inp)
- lg $i2,8($out,$inp)
- xg $i1,0($s2,$inp)
- xg $i2,8($s2,$inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lxts_km_xor
-
- slgr $len,$fp
- brc 1,.Lxts_km_loop # not zero, no borrow
- algr $fp,$len
- lghi $len,0
- brc 4+1,.Lxts_km_loop # not zero
-
- l${g} $i1,0($sp) # back-chain
- llgf $fp,`2*$SIZE_T-4`($sp) # bytes used
- la $i2,16($sp)
- srlg $fp,$fp,4
-.Lxts_km_zap:
- stg $i1,0($i2)
- stg $i1,8($i2)
- la $i2,16($i2)
- brct $fp,.Lxts_km_zap
-
- la $sp,0($i1)
- llgc $len,2*$SIZE_T-1($i1)
- nill $len,0x0f # $len%=16
- bzr $ra
-
- # generate one more tweak...
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
-
- ltr $len,$len # clear zero flag
- br $ra
-.size _s390x_xts_km,.-_s390x_xts_km
-
-.globl AES_xts_encrypt
-.type AES_xts_encrypt,\@function
-.align 16
-AES_xts_encrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
- llgfr $len,$len
-___
-$code.=<<___;
- st${g} $len,1*$SIZE_T($sp) # save copy of $len
- srag $len,$len,4 # formally wrong, because it expands
- # sign byte, but who can afford asking
- # to process more than 2^63-1 bytes?
- # I use it, because it sets condition
- # code...
- bcr 8,$ra # abort if zero (i.e. less than 16)
-___
-$code.=<<___ if (!$softonly);
- llgf %r0,240($key2)
- lhi %r1,16
- clr %r0,%r1
- jl .Lxts_enc_software
-
- st${g} $ra,5*$SIZE_T($sp)
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- sllg $len,$len,4 # $len&=~15
- slgr $out,$inp
-
- # generate the tweak value
- l${g} $s3,$stdframe($sp) # pointer to iv
- la $s2,$tweak($sp)
- lmg $s0,$s1,0($s3)
- lghi $s3,16
- stmg $s0,$s1,0($s2)
- la %r1,0($key2) # $key2 is not needed anymore
- .long 0xb92e00aa # km $s2,$s2, generate the tweak
- brc 1,.-4 # can this happen?
-
- l %r0,240($key1)
- la %r1,0($key1) # $key1 is not needed anymore
- bras $ra,_s390x_xts_km
- jz .Lxts_enc_km_done
-
- aghi $inp,-16 # take one step back
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_enc_km_steal:
- llgc $i1,16($inp)
- llgc $i2,0($out,$inp)
- stc $i1,0($out,$inp)
- stc $i2,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_enc_km_steal
-
- la $s2,0($i3)
- lghi $s3,16
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- xg $i1,0($s2)
- xg $i2,8($s2)
- stg $i1,0($s2)
- stg $i2,8($s2)
- .long 0xb92e00aa # km $s2,$s2
- brc 1,.-4 # can this happen?
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- xg $i1,0($i3)
- xg $i2,8($i3)
- stg $i1,0($i3)
- stg $i2,8($i3)
-
-.Lxts_enc_km_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$tweak+8($sp)
- l${g} $ra,5*$SIZE_T($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lxts_enc_software:
-___
-$code.=<<___;
- stm${g} %r6,$ra,6*$SIZE_T($sp)
-
- slgr $out,$inp
-
- l${g} $s3,$stdframe($sp) # ivp
- llgf $s0,0($s3) # load iv
- llgf $s1,4($s3)
- llgf $s2,8($s3)
- llgf $s3,12($s3)
- stm${g} %r2,%r5,2*$SIZE_T($sp)
- la $key,0($key2)
- larl $tbl,AES_Te
- bras $ra,_s390x_AES_encrypt # generate the tweak
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- stm $s0,$s3,$tweak($sp) # save the tweak
- j .Lxts_enc_enter
-
-.align 16
-.Lxts_enc_loop:
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
- la $inp,16($inp) # $inp+=16
-.Lxts_enc_enter:
- x $s0,0($inp) # ^=*($inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
- la $key,0($key1)
- bras $ra,_s390x_AES_encrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
- brct${g} $len,.Lxts_enc_loop
-
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- jz .Lxts_enc_done
-
- la $i3,0($inp,$out) # put aside real $out
-.Lxts_enc_steal:
- llgc %r0,16($inp)
- llgc %r1,0($out,$inp)
- stc %r0,0($out,$inp)
- stc %r1,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_enc_steal
- la $out,0($i3) # restore real $out
-
- # generate last tweak...
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
-
- x $s0,0($out) # ^=*(inp)|stolen cipther-text
- x $s1,4($out)
- x $s2,8($out)
- x $s3,12($out)
- st${g} $out,4*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_encrypt
- l${g} $out,4*$SIZE_T($sp)
- x $s0,`$tweak+0`($sp) # ^=tweak
- x $s1,`$tweak+4`($sp)
- x $s2,`$tweak+8`($sp)
- x $s3,`$tweak+12`($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
-
-.Lxts_enc_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$twesk+8($sp)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_xts_encrypt,.-AES_xts_encrypt
-___
-# void AES_xts_decrypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
-$code.=<<___;
-.globl AES_xts_decrypt
-.type AES_xts_decrypt,\@function
-.align 16
-AES_xts_decrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
- llgfr $len,$len
-___
-$code.=<<___;
- st${g} $len,1*$SIZE_T($sp) # save copy of $len
- aghi $len,-16
- bcr 4,$ra # abort if less than zero. formally
- # wrong, because $len is unsigned,
- # but who can afford asking to
- # process more than 2^63-1 bytes?
- tmll $len,0x0f
- jnz .Lxts_dec_proceed
- aghi $len,16
-.Lxts_dec_proceed:
-___
-$code.=<<___ if (!$softonly);
- llgf %r0,240($key2)
- lhi %r1,16
- clr %r0,%r1
- jl .Lxts_dec_software
-
- st${g} $ra,5*$SIZE_T($sp)
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- nill $len,0xfff0 # $len&=~15
- slgr $out,$inp
-
- # generate the tweak value
- l${g} $s3,$stdframe($sp) # pointer to iv
- la $s2,$tweak($sp)
- lmg $s0,$s1,0($s3)
- lghi $s3,16
- stmg $s0,$s1,0($s2)
- la %r1,0($key2) # $key2 is not needed past this point
- .long 0xb92e00aa # km $s2,$s2, generate the tweak
- brc 1,.-4 # can this happen?
-
- l %r0,240($key1)
- la %r1,0($key1) # $key1 is not needed anymore
-
- ltgr $len,$len
- jz .Lxts_dec_km_short
- bras $ra,_s390x_xts_km
- jz .Lxts_dec_km_done
-
- lrvgr $s2,$s0 # make copy in reverse byte order
- lrvgr $s3,$s1
- j .Lxts_dec_km_2ndtweak
-
-.Lxts_dec_km_short:
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%=16
- lrvg $s0,$tweak+0($sp) # load the tweak
- lrvg $s1,$tweak+8($sp)
- lrvgr $s2,$s0 # make copy in reverse byte order
- lrvgr $s3,$s1
-
-.Lxts_dec_km_2ndtweak:
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
-
- xg $i1,0($inp)
- xg $i2,8($inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $i2,0($out,$inp)
- lghi $i3,16
- .long 0xb92e0066 # km $i2,$i2
- brc 1,.-4 # can this happen?
- lrvgr $i1,$s0
- lrvgr $i2,$s1
- xg $i1,0($out,$inp)
- xg $i2,8($out,$inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
-
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_dec_km_steal:
- llgc $i1,16($inp)
- llgc $i2,0($out,$inp)
- stc $i1,0($out,$inp)
- stc $i2,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_dec_km_steal
-
- lgr $s0,$s2
- lgr $s1,$s3
- xg $s0,0($i3)
- xg $s1,8($i3)
- stg $s0,0($i3)
- stg $s1,8($i3)
- la $s0,0($i3)
- lghi $s1,16
- .long 0xb92e0088 # km $s0,$s0
- brc 1,.-4 # can this happen?
- xg $s2,0($i3)
- xg $s3,8($i3)
- stg $s2,0($i3)
- stg $s3,8($i3)
-.Lxts_dec_km_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$tweak+8($sp)
- l${g} $ra,5*$SIZE_T($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lxts_dec_software:
-___
-$code.=<<___;
- stm${g} %r6,$ra,6*$SIZE_T($sp)
-
- srlg $len,$len,4
- slgr $out,$inp
-
- l${g} $s3,$stdframe($sp) # ivp
- llgf $s0,0($s3) # load iv
- llgf $s1,4($s3)
- llgf $s2,8($s3)
- llgf $s3,12($s3)
- stm${g} %r2,%r5,2*$SIZE_T($sp)
- la $key,0($key2)
- larl $tbl,AES_Te
- bras $ra,_s390x_AES_encrypt # generate the tweak
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- larl $tbl,AES_Td
- lt${g}r $len,$len
- stm $s0,$s3,$tweak($sp) # save the tweak
- jz .Lxts_dec_short
- j .Lxts_dec_enter
-
-.align 16
-.Lxts_dec_loop:
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
-.Lxts_dec_enter:
- x $s0,0($inp) # tweak^=*(inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
- la $inp,16($inp)
- brct${g} $len,.Lxts_dec_loop
-
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- jz .Lxts_dec_done
-
- # generate pair of tweaks...
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $i2,$s1 # flip byte order
- lrvgr $i3,$s3
- stmg $i2,$i3,$tweak($sp) # save the 1st tweak
- j .Lxts_dec_2ndtweak
-
-.align 16
-.Lxts_dec_short:
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
-.Lxts_dec_2ndtweak:
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak-16+0($sp) # save the 2nd tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak-16+8($sp)
- llgfr $s3,$s3
-
- x $s0,0($inp) # tweak_the_2nd^=*(inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak-16+0($sp) # ^=tweak_the_2nd
- x $s1,$tweak-16+4($sp)
- x $s2,$tweak-16+8($sp)
- x $s3,$tweak-16+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
-
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_dec_steal:
- llgc %r0,16($inp)
- llgc %r1,0($out,$inp)
- stc %r0,0($out,$inp)
- stc %r1,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_dec_steal
- la $out,0($i3) # restore real $out
-
- lm $s0,$s3,$tweak($sp) # load the 1st tweak
- x $s0,0($out) # tweak^=*(inp)|stolen cipher-text
- x $s1,4($out)
- x $s2,8($out)
- x $s3,12($out)
- st${g} $out,4*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- l${g} $out,4*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
- stg $sp,$tweak-16+0($sp) # wipe 2nd tweak
- stg $sp,$tweak-16+8($sp)
-.Lxts_dec_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$twesk+8($sp)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_xts_decrypt,.-AES_xts_decrypt
-___
-}
$code.=<<___;
.string "AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm OPENSSL_s390xcap_P,16,8
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
-close STDOUT; # force flush
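The s390x module ends with a different post-processing pass, kept by this import: $code =~ s/\`([^\`]*)\`/eval $1/gem evaluates any backtick-quoted expression left in the generated text, which is how the removed 31/64-bit dual-mode code turned offsets written as arithmetic on $SIZE_T (for example `2*$SIZE_T-4`) into plain numbers. A minimal sketch of that pass follows; $SIZE_T and the two instructions are assumptions for illustration only.

#!/usr/bin/perl
# Minimal sketch of the backtick-eval pass at the end of aes-s390x.pl:
# any `...` expression remaining in the generated text is evaluated as Perl,
# so arithmetic written in the template becomes a plain number in the output.
# $SIZE_T and the instructions below are illustrative assumptions only.
use strict;
use warnings;

my $SIZE_T = 8;                       # pretend 64-bit build (4 for -m31)
my $code   = <<___;
	lg	%r14,`15*$SIZE_T`(%r15)	# restore return address
	cl	%r0,`16*$SIZE_T+4`(%r15)
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;  # same substitution as in the script
print $code;                          # offsets come out as 120 and 132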