author     Mark Kettenis <kettenis@cvs.openbsd.org>   2018-01-07 12:35:53 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>   2018-01-07 12:35:53 +0000
commit     65501df21360d72c71e42bc36904d6cba6689dbe (patch)
tree       f3c71ece353c9d8d9d8514cf870e17335ba1537d /lib
parent     81e611098adaef5c41becf31123421f4f8d24f3b (diff)
On OpenBSD/armv7 we deliberately trap unaligned access. Unfortunately
the assembly code in libcrypto assumes unaligned access is allowed for
ARMv7. Make these paths conditional on __STRICT_ALIGNMENT not being
defined and define __STRICT_ALIGNMENT in arm_arch.h for OpenBSD.

ok tom@
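A minimal C sketch of the same idea, for illustration only. The commit itself
only touches the Perl assembly generators below; load_be32() and the
memcpy-based fast path are hypothetical and not part of the tree:

#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: the real change guards hand-written ARM assembly.
 * load_be32() shows the two strategies the guarded paths use for reading
 * a big-endian 32-bit word from a possibly unaligned pointer.
 */
static inline uint32_t
load_be32(const unsigned char *p)
{
#if defined(__STRICT_ALIGNMENT)
	/* byte-wise load, like the ldrb/orr sequences: never an unaligned access */
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | (uint32_t)p[3];
#else
	/* word load plus byte swap, like the ldr/rev fast path; memcpy lets the
	 * compiler emit a single load on targets that permit unaligned access */
	uint32_t w;
	memcpy(&w, p, sizeof(w));
#if defined(__ARMEL__) || \
    (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
	w = __builtin_bswap32(w);	/* the job done by the "rev" instructions */
#endif
	return w;
#endif
}

The byte-wise branch mirrors the ldrb/orr sequences that the diff keeps for
__STRICT_ALIGNMENT, while the word-load branch mirrors the ldr (plus rev on
little-endian) fast path that is now skipped on OpenBSD/armv7.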
Diffstat (limited to 'lib')
-rw-r--r--   lib/libcrypto/aes/asm/aes-armv4.pl           14
-rw-r--r--   lib/libcrypto/arm_arch.h                      6
-rw-r--r--   lib/libcrypto/sha/asm/sha1-armv4-large.pl   100
-rw-r--r--   lib/libcrypto/sha/asm/sha256-armv4.pl        78
-rw-r--r--   lib/libcrypto/sha/asm/sha512-armv4.pl       387
5 files changed, 408 insertions, 177 deletions
diff --git a/lib/libcrypto/aes/asm/aes-armv4.pl b/lib/libcrypto/aes/asm/aes-armv4.pl
index 717cc1ed7f0..1cb9586d4b9 100644
--- a/lib/libcrypto/aes/asm/aes-armv4.pl
+++ b/lib/libcrypto/aes/asm/aes-armv4.pl
@@ -172,7 +172,7 @@ AES_encrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_encrypt-AES_Te @ Te
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -216,7 +216,7 @@ AES_encrypt:
bl _armv4_AES_encrypt
ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
#ifdef __ARMEL__
rev $s0,$s0
rev $s1,$s1
@@ -432,7 +432,7 @@ _armv4_AES_set_encrypt_key:
mov lr,r1 @ bits
mov $key,r2 @ key
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -517,7 +517,7 @@ _armv4_AES_set_encrypt_key:
b .Ldone
.Lnot128:
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $i2,[$rounds,#19]
ldrb $t1,[$rounds,#18]
ldrb $t2,[$rounds,#17]
@@ -588,7 +588,7 @@ _armv4_AES_set_encrypt_key:
b .L192_loop
.Lnot192:
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $i2,[$rounds,#27]
ldrb $t1,[$rounds,#26]
ldrb $t2,[$rounds,#25]
@@ -888,7 +888,7 @@ AES_decrypt:
mov $rounds,r0 @ inp
mov $key,r2
sub $tbl,r3,#AES_decrypt-AES_Td @ Td
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
ldrb $t1,[$rounds,#2] @ manner...
ldrb $t2,[$rounds,#1]
@@ -932,7 +932,7 @@ AES_decrypt:
bl _armv4_AES_decrypt
ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
#ifdef __ARMEL__
rev $s0,$s0
rev $s1,$s1
diff --git a/lib/libcrypto/arm_arch.h b/lib/libcrypto/arm_arch.h
index 3304be81ab1..8b8a05b5f71 100644
--- a/lib/libcrypto/arm_arch.h
+++ b/lib/libcrypto/arm_arch.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: arm_arch.h,v 1.7 2015/06/29 06:40:06 jsg Exp $ */
+/* $OpenBSD: arm_arch.h,v 1.8 2018/01/07 12:35:52 kettenis Exp $ */
#ifndef __ARM_ARCH_H__
#define __ARM_ARCH_H__
@@ -44,4 +44,8 @@ extern unsigned int OPENSSL_armcap_P;
#define ARMV7_NEON (1<<0)
#endif
+#if defined(__OpenBSD__)
+#define __STRICT_ALIGNMENT
+#endif
+
#endif
diff --git a/lib/libcrypto/sha/asm/sha1-armv4-large.pl b/lib/libcrypto/sha/asm/sha1-armv4-large.pl
index 88861af6411..8f0cdaf83c8 100644
--- a/lib/libcrypto/sha/asm/sha1-armv4-large.pl
+++ b/lib/libcrypto/sha/asm/sha1-armv4-large.pl
@@ -37,9 +37,22 @@
# modes are limited. As result it takes more instructions to do
# the same job in Thumb, therefore the code is never twice as
# small and always slower.
-# [***] which is also ~35% better than compiler generated code.
+# [***] which is also ~35% better than compiler generated code. Dual-
+# issue Cortex A8 core was measured to process input block in
+# ~990 cycles.
-$output=shift;
+# August 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 13% improvement on
+# Cortex A8 core and in absolute terms ~870 cycles per input block
+# [or 13.6 cycles per byte].
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 10%
+# improvement on Cortex A8 core and 12.2 cycles per byte.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$ctx="r0";
@@ -58,63 +71,62 @@ $t3="r12";
$Xi="r14";
@V=($a,$b,$c,$d,$e);
-# One can optimize this for aligned access on big-endian architecture,
-# but code's endian neutrality makes it too pretty:-)
-sub Xload {
-my ($a,$b,$c,$d,$e)=@_;
-$code.=<<___;
- ldrb $t0,[$inp],#4
- ldrb $t1,[$inp,#-3]
- ldrb $t2,[$inp,#-2]
- ldrb $t3,[$inp,#-1]
- add $e,$K,$e,ror#2 @ E+=K_00_19
- orr $t0,$t1,$t0,lsl#8
- add $e,$e,$a,ror#27 @ E+=ROR(A,27)
- orr $t0,$t2,$t0,lsl#8
- eor $t1,$c,$d @ F_xx_xx
- orr $t0,$t3,$t0,lsl#8
- add $e,$e,$t0 @ E+=X[i]
- str $t0,[$Xi,#-4]!
-___
-}
sub Xupdate {
-my ($a,$b,$c,$d,$e,$flag)=@_;
+my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
$code.=<<___;
ldr $t0,[$Xi,#15*4]
ldr $t1,[$Xi,#13*4]
ldr $t2,[$Xi,#7*4]
- ldr $t3,[$Xi,#2*4]
add $e,$K,$e,ror#2 @ E+=K_xx_xx
+ ldr $t3,[$Xi,#2*4]
eor $t0,$t0,$t1
- eor $t0,$t0,$t2
- eor $t0,$t0,$t3
- add $e,$e,$a,ror#27 @ E+=ROR(A,27)
-___
-$code.=<<___ if (!defined($flag));
- eor $t1,$c,$d @ F_xx_xx, but not in 40_59
-___
-$code.=<<___;
+ eor $t2,$t2,$t3 @ 1 cycle stall
+ eor $t1,$c,$d @ F_xx_xx
mov $t0,$t0,ror#31
- add $e,$e,$t0 @ E+=X[i]
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+ eor $t0,$t0,$t2,ror#31
str $t0,[$Xi,#-4]!
+ $opt1 @ F_xx_xx
+ $opt2 @ F_xx_xx
+ add $e,$e,$t0 @ E+=X[i]
___
}
sub BODY_00_15 {
my ($a,$b,$c,$d,$e)=@_;
- &Xload(@_);
$code.=<<___;
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb $t1,[$inp,#2]
+ ldrb $t0,[$inp,#3]
+ ldrb $t2,[$inp,#1]
+ add $e,$K,$e,ror#2 @ E+=K_00_19
+ ldrb $t3,[$inp],#4
+ orr $t0,$t0,$t1,lsl#8
+ eor $t1,$c,$d @ F_xx_xx
+ orr $t0,$t0,$t2,lsl#16
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+ orr $t0,$t0,$t3,lsl#24
+#else
+ ldr $t0,[$inp],#4 @ handles unaligned
+ add $e,$K,$e,ror#2 @ E+=K_00_19
+ eor $t1,$c,$d @ F_xx_xx
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev $t0,$t0 @ byte swap
+#endif
+#endif
and $t1,$b,$t1,ror#2
+ add $e,$e,$t0 @ E+=X[i]
eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
+ str $t0,[$Xi,#-4]!
add $e,$e,$t1 @ E+=F_00_19(B,C,D)
___
}
sub BODY_16_19 {
my ($a,$b,$c,$d,$e)=@_;
- &Xupdate(@_);
+ &Xupdate(@_,"and $t1,$b,$t1,ror#2");
$code.=<<___;
- and $t1,$b,$t1,ror#2
eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
add $e,$e,$t1 @ E+=F_00_19(B,C,D)
___
@@ -122,26 +134,24 @@ ___
sub BODY_20_39 {
my ($a,$b,$c,$d,$e)=@_;
- &Xupdate(@_);
+ &Xupdate(@_,"eor $t1,$b,$t1,ror#2");
$code.=<<___;
- eor $t1,$b,$t1,ror#2 @ F_20_39(B,C,D)
add $e,$e,$t1 @ E+=F_20_39(B,C,D)
___
}
sub BODY_40_59 {
my ($a,$b,$c,$d,$e)=@_;
- &Xupdate(@_,1);
+ &Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
$code.=<<___;
- and $t1,$b,$c,ror#2
- orr $t2,$b,$c,ror#2
- and $t2,$t2,$d,ror#2
- orr $t1,$t1,$t2 @ F_40_59(B,C,D)
add $e,$e,$t1 @ E+=F_40_59(B,C,D)
+ add $e,$e,$t2,ror#2
___
}
$code=<<___;
+#include "arm_arch.h"
+
.text
.global sha1_block_data_order
@@ -167,6 +177,7 @@ for($i=0;$i<5;$i++) {
$code.=<<___;
teq $Xi,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
+ sub sp,sp,#25*4
___
&BODY_00_15(@V); unshift(@V,pop(@V));
&BODY_16_19(@V); unshift(@V,pop(@V));
@@ -176,7 +187,6 @@ ___
$code.=<<___;
ldr $K,.LK_20_39 @ [+15+16*4]
- sub sp,sp,#25*4
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
___
@@ -215,10 +225,14 @@ $code.=<<___;
teq $inp,$len
bne .Lloop @ [+18], total 1307
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
+#endif
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
diff --git a/lib/libcrypto/sha/asm/sha256-armv4.pl b/lib/libcrypto/sha/asm/sha256-armv4.pl
index 48d846deec3..292520731cd 100644
--- a/lib/libcrypto/sha/asm/sha256-armv4.pl
+++ b/lib/libcrypto/sha/asm/sha256-armv4.pl
@@ -11,13 +11,23 @@
# Performance is ~2x better than gcc 3.4 generated code and in "abso-
# lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
-# byte.
+# byte [on single-issue Xscale PXA250 core].
-$output=shift;
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~17 cycles per processed byte.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$ctx="r0"; $t0="r0";
-$inp="r1";
+$inp="r1"; $t3="r1";
$len="r2"; $t1="r2";
$T1="r3";
$A="r4";
@@ -41,6 +51,9 @@ sub BODY_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___ if ($i<16);
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr $T1,[$inp],#4
+#else
ldrb $T1,[$inp,#3] @ $i
ldrb $t2,[$inp,#2]
ldrb $t1,[$inp,#1]
@@ -48,31 +61,42 @@ $code.=<<___ if ($i<16);
orr $T1,$T1,$t2,lsl#8
orr $T1,$T1,$t1,lsl#16
orr $T1,$T1,$t0,lsl#24
- `"str $inp,[sp,#17*4]" if ($i==15)`
+#endif
___
$code.=<<___;
- ldr $t2,[$Ktbl],#4 @ *K256++
- str $T1,[sp,#`$i%16`*4]
mov $t0,$e,ror#$Sigma1[0]
+ ldr $t2,[$Ktbl],#4 @ *K256++
eor $t0,$t0,$e,ror#$Sigma1[1]
- eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
- add $T1,$T1,$t0
eor $t1,$f,$g
+#if $i>=16
+ add $T1,$T1,$t3 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev $T1,$T1
+#endif
+#if $i==15
+ str $inp,[sp,#17*4] @ leave room for $t3
+#endif
+ eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
and $t1,$t1,$e
+ str $T1,[sp,#`$i%16`*4]
+ add $T1,$T1,$t0
eor $t1,$t1,$g @ Ch(e,f,g)
- add $T1,$T1,$t1
add $T1,$T1,$h
- add $T1,$T1,$t2
mov $h,$a,ror#$Sigma0[0]
+ add $T1,$T1,$t1
eor $h,$h,$a,ror#$Sigma0[1]
+ add $T1,$T1,$t2
eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a)
+#if $i>=15
+ ldr $t3,[sp,#`($i+2)%16`*4] @ from BODY_16_xx
+#endif
orr $t0,$a,$b
- and $t0,$t0,$c
and $t1,$a,$b
+ and $t0,$t0,$c
+ add $h,$h,$T1
orr $t0,$t0,$t1 @ Maj(a,b,c)
- add $h,$h,$t0
add $d,$d,$T1
- add $h,$h,$T1
+ add $h,$h,$t0
___
}
@@ -80,24 +104,26 @@ sub BODY_16_XX {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___;
- ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ @ ldr $t3,[sp,#`($i+1)%16`*4] @ $i
ldr $t2,[sp,#`($i+14)%16`*4]
+ mov $t0,$t3,ror#$sigma0[0]
ldr $T1,[sp,#`($i+0)%16`*4]
- ldr $inp,[sp,#`($i+9)%16`*4]
- mov $t0,$t1,ror#$sigma0[0]
- eor $t0,$t0,$t1,ror#$sigma0[1]
- eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
- mov $t1,$t2,ror#$sigma1[0]
- eor $t1,$t1,$t2,ror#$sigma1[1]
- eor $t1,$t1,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
+ eor $t0,$t0,$t3,ror#$sigma0[1]
+ ldr $t1,[sp,#`($i+9)%16`*4]
+ eor $t0,$t0,$t3,lsr#$sigma0[2] @ sigma0(X[i+1])
+ mov $t3,$t2,ror#$sigma1[0]
add $T1,$T1,$t0
+ eor $t3,$t3,$t2,ror#$sigma1[1]
add $T1,$T1,$t1
- add $T1,$T1,$inp
+ eor $t3,$t3,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
+ @ add $T1,$T1,$t3
___
&BODY_00_15(@_);
}
$code=<<___;
+#include "arm_arch.h"
+
.text
.code 32
@@ -127,7 +153,7 @@ K256:
sha256_block_data_order:
sub r3,pc,#8 @ sha256_block_data_order
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
- stmdb sp!,{$ctx,$inp,$len,r4-r12,lr}
+ stmdb sp!,{$ctx,$inp,$len,r4-r11,lr}
ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
sub $Ktbl,r3,#256 @ K256
sub sp,sp,#16*4 @ alloca(X[16])
@@ -166,10 +192,14 @@ $code.=<<___;
bne .Loop
add sp,sp,#`16+3`*4 @ destroy frame
- ldmia sp!,{r4-r12,lr}
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r11,pc}
+#else
+ ldmia sp!,{r4-r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
+#endif
.size sha256_block_data_order,.-sha256_block_data_order
.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
diff --git a/lib/libcrypto/sha/asm/sha512-armv4.pl b/lib/libcrypto/sha/asm/sha512-armv4.pl
index 4fbb94a914f..28ae155f4b4 100644
--- a/lib/libcrypto/sha/asm/sha512-armv4.pl
+++ b/lib/libcrypto/sha/asm/sha512-armv4.pl
@@ -10,24 +10,41 @@
# SHA512 block procedure for ARMv4. September 2007.
# This code is ~4.5 (four and a half) times faster than code generated
-# by gcc 3.4 and it spends ~72 clock cycles per byte.
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 7%
+# improvement on Coxtex A8 core and ~38 cycles per byte.
+
+# March 2011.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process
+# one byte in 25.5 cycles or 47% faster than integer-only code.
# Byte order [in]dependence. =========================================
#
-# Caller is expected to maintain specific *dword* order in h[0-7],
-# namely with most significant dword at *lower* address, which is
-# reflected in below two parameters. *Byte* order within these dwords
-# in turn is whatever *native* byte order on current platform.
-$hi=0;
-$lo=4;
+# Originally caller was expected to maintain specific *dword* order in
+# h[0-7], namely with most significant dword at *lower* address, which
+# was reflected in below two parameters as 0 and 4. Now caller is
+# expected to maintain native byte order for whole 64-bit values.
+$hi="HI";
+$lo="LO";
# ====================================================================
-$output=shift;
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
-$ctx="r0";
+$ctx="r0"; # parameter block
$inp="r1";
$len="r2";
+
$Tlo="r3";
$Thi="r4";
$Alo="r5";
@@ -55,15 +72,17 @@ $Xoff=8*8;
sub BODY_00_15() {
my $magic = shift;
$code.=<<___;
- ldr $t2,[sp,#$Hoff+0] @ h.lo
- ldr $t3,[sp,#$Hoff+4] @ h.hi
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov $t0,$Elo,lsr#14
+ str $Tlo,[sp,#$Xoff+0]
mov $t1,$Ehi,lsr#14
+ str $Thi,[sp,#$Xoff+4]
eor $t0,$t0,$Ehi,lsl#18
+ ldr $t2,[sp,#$Hoff+0] @ h.lo
eor $t1,$t1,$Elo,lsl#18
+ ldr $t3,[sp,#$Hoff+4] @ h.hi
eor $t0,$t0,$Elo,lsr#18
eor $t1,$t1,$Ehi,lsr#18
eor $t0,$t0,$Ehi,lsl#14
@@ -73,44 +92,41 @@ $code.=<<___;
eor $t0,$t0,$Elo,lsl#23
eor $t1,$t1,$Ehi,lsl#23 @ Sigma1(e)
adds $Tlo,$Tlo,$t0
- adc $Thi,$Thi,$t1 @ T += Sigma1(e)
- adds $Tlo,$Tlo,$t2
- adc $Thi,$Thi,$t3 @ T += h
-
ldr $t0,[sp,#$Foff+0] @ f.lo
+ adc $Thi,$Thi,$t1 @ T += Sigma1(e)
ldr $t1,[sp,#$Foff+4] @ f.hi
+ adds $Tlo,$Tlo,$t2
ldr $t2,[sp,#$Goff+0] @ g.lo
+ adc $Thi,$Thi,$t3 @ T += h
ldr $t3,[sp,#$Goff+4] @ g.hi
- str $Elo,[sp,#$Eoff+0]
- str $Ehi,[sp,#$Eoff+4]
- str $Alo,[sp,#$Aoff+0]
- str $Ahi,[sp,#$Aoff+4]
eor $t0,$t0,$t2
+ str $Elo,[sp,#$Eoff+0]
eor $t1,$t1,$t3
+ str $Ehi,[sp,#$Eoff+4]
and $t0,$t0,$Elo
+ str $Alo,[sp,#$Aoff+0]
and $t1,$t1,$Ehi
+ str $Ahi,[sp,#$Aoff+4]
eor $t0,$t0,$t2
+ ldr $t2,[$Ktbl,#$lo] @ K[i].lo
eor $t1,$t1,$t3 @ Ch(e,f,g)
-
- ldr $t2,[$Ktbl,#4] @ K[i].lo
- ldr $t3,[$Ktbl,#0] @ K[i].hi
- ldr $Elo,[sp,#$Doff+0] @ d.lo
- ldr $Ehi,[sp,#$Doff+4] @ d.hi
+ ldr $t3,[$Ktbl,#$hi] @ K[i].hi
adds $Tlo,$Tlo,$t0
+ ldr $Elo,[sp,#$Doff+0] @ d.lo
adc $Thi,$Thi,$t1 @ T += Ch(e,f,g)
+ ldr $Ehi,[sp,#$Doff+4] @ d.hi
adds $Tlo,$Tlo,$t2
+ and $t0,$t2,#0xff
adc $Thi,$Thi,$t3 @ T += K[i]
adds $Elo,$Elo,$Tlo
+ ldr $t2,[sp,#$Boff+0] @ b.lo
adc $Ehi,$Ehi,$Thi @ d += T
-
- and $t0,$t2,#0xff
teq $t0,#$magic
- orreq $Ktbl,$Ktbl,#1
- ldr $t2,[sp,#$Boff+0] @ b.lo
ldr $t3,[sp,#$Coff+0] @ c.lo
+ orreq $Ktbl,$Ktbl,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
@@ -127,80 +143,100 @@ $code.=<<___;
eor $t0,$t0,$Alo,lsl#25
eor $t1,$t1,$Ahi,lsl#25 @ Sigma0(a)
adds $Tlo,$Tlo,$t0
+ and $t0,$Alo,$t2
adc $Thi,$Thi,$t1 @ T += Sigma0(a)
- and $t0,$Alo,$t2
- orr $Alo,$Alo,$t2
ldr $t1,[sp,#$Boff+4] @ b.hi
+ orr $Alo,$Alo,$t2
ldr $t2,[sp,#$Coff+4] @ c.hi
and $Alo,$Alo,$t3
- orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo
and $t3,$Ahi,$t1
orr $Ahi,$Ahi,$t1
+ orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo
and $Ahi,$Ahi,$t2
- orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi
adds $Alo,$Alo,$Tlo
- adc $Ahi,$Ahi,$Thi @ h += T
-
+ orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi
sub sp,sp,#8
+ adc $Ahi,$Ahi,$Thi @ h += T
+ tst $Ktbl,#1
add $Ktbl,$Ktbl,#8
___
}
$code=<<___;
+#include "arm_arch.h"
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
+#endif
+
.text
.code 32
.type K512,%object
.align 5
K512:
-.word 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
-.word 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
-.word 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
-.word 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
-.word 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
-.word 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
-.word 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
-.word 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
-.word 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
-.word 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
-.word 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
-.word 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
-.word 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
-.word 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
-.word 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
-.word 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
-.word 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
-.word 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
-.word 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
-.word 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
-.word 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
-.word 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
-.word 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
-.word 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
-.word 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
-.word 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
-.word 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
-.word 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
-.word 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
-.word 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
-.word 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
-.word 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
-.word 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
-.word 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
-.word 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
-.word 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
-.word 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
-.word 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
-.word 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
-.word 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha512_block_data_order
+.skip 32-4
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
sub r3,pc,#8 @ sha512_block_data_order
add $len,$inp,$len,lsl#7 @ len to point at the end of inp
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#1
+ bne .LNEON
+#endif
stmdb sp!,{r4-r12,lr}
- sub $Ktbl,r3,#640 @ K512
+ sub $Ktbl,r3,#672 @ K512
sub sp,sp,#9*8
ldr $Elo,[$ctx,#$Eoff+$lo]
@@ -234,6 +270,7 @@ sha512_block_data_order:
str $Thi,[sp,#$Foff+4]
.L00_15:
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
ldrb $Tlo,[$inp,#7]
ldrb $t0, [$inp,#6]
ldrb $t1, [$inp,#5]
@@ -248,26 +285,30 @@ sha512_block_data_order:
orr $Thi,$Thi,$t3,lsl#8
orr $Thi,$Thi,$t0,lsl#16
orr $Thi,$Thi,$t1,lsl#24
- str $Tlo,[sp,#$Xoff+0]
- str $Thi,[sp,#$Xoff+4]
+#else
+ ldr $Tlo,[$inp,#4]
+ ldr $Thi,[$inp],#8
+#ifdef __ARMEL__
+ rev $Tlo,$Tlo
+ rev $Thi,$Thi
+#endif
+#endif
___
&BODY_00_15(0x94);
$code.=<<___;
tst $Ktbl,#1
beq .L00_15
- bic $Ktbl,$Ktbl,#1
-
-.L16_79:
ldr $t0,[sp,#`$Xoff+8*(16-1)`+0]
ldr $t1,[sp,#`$Xoff+8*(16-1)`+4]
- ldr $t2,[sp,#`$Xoff+8*(16-14)`+0]
- ldr $t3,[sp,#`$Xoff+8*(16-14)`+4]
-
+ bic $Ktbl,$Ktbl,#1
+.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
mov $Tlo,$t0,lsr#1
+ ldr $t2,[sp,#`$Xoff+8*(16-14)`+0]
mov $Thi,$t1,lsr#1
+ ldr $t3,[sp,#`$Xoff+8*(16-14)`+4]
eor $Tlo,$Tlo,$t1,lsl#31
eor $Thi,$Thi,$t0,lsl#31
eor $Tlo,$Tlo,$t0,lsr#8
@@ -291,25 +332,24 @@ $code.=<<___;
eor $t1,$t1,$t3,lsl#3
eor $t0,$t0,$t2,lsr#6
eor $t1,$t1,$t3,lsr#6
+ ldr $t2,[sp,#`$Xoff+8*(16-9)`+0]
eor $t0,$t0,$t3,lsl#26
- ldr $t2,[sp,#`$Xoff+8*(16-9)`+0]
ldr $t3,[sp,#`$Xoff+8*(16-9)`+4]
adds $Tlo,$Tlo,$t0
+ ldr $t0,[sp,#`$Xoff+8*16`+0]
adc $Thi,$Thi,$t1
- ldr $t0,[sp,#`$Xoff+8*16`+0]
ldr $t1,[sp,#`$Xoff+8*16`+4]
adds $Tlo,$Tlo,$t2
adc $Thi,$Thi,$t3
adds $Tlo,$Tlo,$t0
adc $Thi,$Thi,$t1
- str $Tlo,[sp,#$Xoff+0]
- str $Thi,[sp,#$Xoff+4]
___
&BODY_00_15(0x17);
$code.=<<___;
- tst $Ktbl,#1
+ ldreq $t0,[sp,#`$Xoff+8*(16-1)`+0]
+ ldreq $t1,[sp,#`$Xoff+8*(16-1)`+4]
beq .L16_79
bic $Ktbl,$Ktbl,#1
@@ -320,12 +360,12 @@ $code.=<<___;
ldr $t2, [$ctx,#$Boff+$lo]
ldr $t3, [$ctx,#$Boff+$hi]
adds $t0,$Alo,$t0
- adc $t1,$Ahi,$t1
- adds $t2,$Tlo,$t2
- adc $t3,$Thi,$t3
str $t0, [$ctx,#$Aoff+$lo]
+ adc $t1,$Ahi,$t1
str $t1, [$ctx,#$Aoff+$hi]
+ adds $t2,$Tlo,$t2
str $t2, [$ctx,#$Boff+$lo]
+ adc $t3,$Thi,$t3
str $t3, [$ctx,#$Boff+$hi]
ldr $Alo,[sp,#$Coff+0]
@@ -337,12 +377,12 @@ $code.=<<___;
ldr $t2, [$ctx,#$Doff+$lo]
ldr $t3, [$ctx,#$Doff+$hi]
adds $t0,$Alo,$t0
- adc $t1,$Ahi,$t1
- adds $t2,$Tlo,$t2
- adc $t3,$Thi,$t3
str $t0, [$ctx,#$Coff+$lo]
+ adc $t1,$Ahi,$t1
str $t1, [$ctx,#$Coff+$hi]
+ adds $t2,$Tlo,$t2
str $t2, [$ctx,#$Doff+$lo]
+ adc $t3,$Thi,$t3
str $t3, [$ctx,#$Doff+$hi]
ldr $Tlo,[sp,#$Foff+0]
@@ -352,12 +392,12 @@ $code.=<<___;
ldr $t2, [$ctx,#$Foff+$lo]
ldr $t3, [$ctx,#$Foff+$hi]
adds $Elo,$Elo,$t0
- adc $Ehi,$Ehi,$t1
- adds $t2,$Tlo,$t2
- adc $t3,$Thi,$t3
str $Elo,[$ctx,#$Eoff+$lo]
+ adc $Ehi,$Ehi,$t1
str $Ehi,[$ctx,#$Eoff+$hi]
+ adds $t2,$Tlo,$t2
str $t2, [$ctx,#$Foff+$lo]
+ adc $t3,$Thi,$t3
str $t3, [$ctx,#$Foff+$hi]
ldr $Alo,[sp,#$Goff+0]
@@ -369,12 +409,12 @@ $code.=<<___;
ldr $t2, [$ctx,#$Hoff+$lo]
ldr $t3, [$ctx,#$Hoff+$hi]
adds $t0,$Alo,$t0
- adc $t1,$Ahi,$t1
- adds $t2,$Tlo,$t2
- adc $t3,$Thi,$t3
str $t0, [$ctx,#$Goff+$lo]
+ adc $t1,$Ahi,$t1
str $t1, [$ctx,#$Goff+$hi]
+ adds $t2,$Tlo,$t2
str $t2, [$ctx,#$Hoff+$lo]
+ adc $t3,$Thi,$t3
str $t3, [$ctx,#$Hoff+$hi]
add sp,sp,#640
@@ -384,13 +424,156 @@ $code.=<<___;
bne .Loop
add sp,sp,#8*9 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
-.size sha512_block_data_order,.-sha512_block_data_order
-.asciz "SHA512 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+#endif
+___
+
+{
+my @Sigma0=(28,34,39);
+my @Sigma1=(14,18,41);
+my @sigma0=(1, 8, 7);
+my @sigma1=(19,61,6);
+
+my $Ktbl="r3";
+my $cnt="r12"; # volatile register known as ip, intra-procedure-call scratch
+
+my @X=map("d$_",(0..15));
+my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
+
+sub NEON_00_15() {
+my $i=shift;
+my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps
+
+$code.=<<___ if ($i<16 || $i&1);
+ vshr.u64 $t0,$e,#@Sigma1[0] @ $i
+#if $i<16
+ vld1.64 {@X[$i%16]},[$inp]! @ handles unaligned
+#endif
+ vshr.u64 $t1,$e,#@Sigma1[1]
+ vshr.u64 $t2,$e,#@Sigma1[2]
+___
+$code.=<<___;
+ vld1.64 {$K},[$Ktbl,:64]! @ K[i++]
+ vsli.64 $t0,$e,#`64-@Sigma1[0]`
+ vsli.64 $t1,$e,#`64-@Sigma1[1]`
+ vsli.64 $t2,$e,#`64-@Sigma1[2]`
+#if $i<16 && defined(__ARMEL__)
+ vrev64.8 @X[$i],@X[$i]
+#endif
+ vadd.i64 $T1,$K,$h
+ veor $Ch,$f,$g
+ veor $t0,$t1
+ vand $Ch,$e
+ veor $t0,$t2 @ Sigma1(e)
+ veor $Ch,$g @ Ch(e,f,g)
+ vadd.i64 $T1,$t0
+ vshr.u64 $t0,$a,#@Sigma0[0]
+ vadd.i64 $T1,$Ch
+ vshr.u64 $t1,$a,#@Sigma0[1]
+ vshr.u64 $t2,$a,#@Sigma0[2]
+ vsli.64 $t0,$a,#`64-@Sigma0[0]`
+ vsli.64 $t1,$a,#`64-@Sigma0[1]`
+ vsli.64 $t2,$a,#`64-@Sigma0[2]`
+ vadd.i64 $T1,@X[$i%16]
+ vorr $Maj,$a,$c
+ vand $Ch,$a,$c
+ veor $h,$t0,$t1
+ vand $Maj,$b
+ veor $h,$t2 @ Sigma0(a)
+ vorr $Maj,$Ch @ Maj(a,b,c)
+ vadd.i64 $h,$T1
+ vadd.i64 $d,$T1
+ vadd.i64 $h,$Maj
+___
+}
+
+sub NEON_16_79() {
+my $i=shift;
+
+if ($i&1) { &NEON_00_15($i,@_); return; }
+
+# 2x-vectorized, therefore runs every 2nd round
+my @X=map("q$_",(0..7)); # view @X as 128-bit vector
+my ($t0,$t1,$s0,$s1) = map("q$_",(12..15)); # temps
+my ($d0,$d1,$d2) = map("d$_",(24..26)); # temps from NEON_00_15
+my $e=@_[4]; # $e from NEON_00_15
+$i /= 2;
+$code.=<<___;
+ vshr.u64 $t0,@X[($i+7)%8],#@sigma1[0]
+ vshr.u64 $t1,@X[($i+7)%8],#@sigma1[1]
+ vshr.u64 $s1,@X[($i+7)%8],#@sigma1[2]
+ vsli.64 $t0,@X[($i+7)%8],#`64-@sigma1[0]`
+ vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1]
+ vsli.64 $t1,@X[($i+7)%8],#`64-@sigma1[1]`
+ veor $s1,$t0
+ vshr.u64 $t0,$s0,#@sigma0[0]
+ veor $s1,$t1 @ sigma1(X[i+14])
+ vshr.u64 $t1,$s0,#@sigma0[1]
+ vadd.i64 @X[$i%8],$s1
+ vshr.u64 $s1,$s0,#@sigma0[2]
+ vsli.64 $t0,$s0,#`64-@sigma0[0]`
+ vsli.64 $t1,$s0,#`64-@sigma0[1]`
+ vext.8 $s0,@X[($i+4)%8],@X[($i+5)%8],#8 @ X[i+9]
+ veor $s1,$t0
+ vshr.u64 $d0,$e,#@Sigma1[0] @ from NEON_00_15
+ vadd.i64 @X[$i%8],$s0
+ vshr.u64 $d1,$e,#@Sigma1[1] @ from NEON_00_15
+ veor $s1,$t1 @ sigma0(X[i+1])
+ vshr.u64 $d2,$e,#@Sigma1[2] @ from NEON_00_15
+ vadd.i64 @X[$i%8],$s1
+___
+ &NEON_00_15(2*$i,@_);
+}
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.align 4
+.LNEON:
+ dmb @ errata #451034 on early Cortex A8
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ sub $Ktbl,r3,#672 @ K512
+ vldmia $ctx,{$A-$H} @ load context
+.Loop_neon:
+___
+for($i=0;$i<16;$i++) { &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ mov $cnt,#4
+.L16_79_neon:
+ subs $cnt,#1
+___
+for(;$i<32;$i++) { &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ bne .L16_79_neon
+
+ vldmia $ctx,{d24-d31} @ load context to temp
+ vadd.i64 q8,q12 @ vectorized accumulate
+ vadd.i64 q9,q13
+ vadd.i64 q10,q14
+ vadd.i64 q11,q15
+ vstmia $ctx,{$A-$H} @ save context
+ teq $inp,$len
+ sub $Ktbl,#640 @ rewind K512
+ bne .Loop_neon
+
+ vldmia sp!,{d8-d15} @ epilogue
+ bx lr
+#endif
+___
+}
+$code.=<<___;
+.size sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
+.comm OPENSSL_armcap_P,4,4
___
$code =~ s/\`([^\`]*)\`/eval $1/gem;