author     Joel Sing <jsing@cvs.openbsd.org>    2023-04-12 04:54:17 +0000
committer  Joel Sing <jsing@cvs.openbsd.org>    2023-04-12 04:54:17 +0000
commit     19b19965bdd40c85aacd06622bee5576ed2dc57f (patch)
tree       ec0a3f8411047a696d71e343c93350ded947a368
parent     0e62b81c95b61ac66c2d4df0ba3575c9550c1b90 (diff)
Provide and use crypto_ro{l,r}_u{32,64}().
Various code in libcrypto needs bitwise rotation. Rather than defining
different versions across the code base, provide a common set that can
be reused. Any sensible compiler optimises these to a single rotate
instruction where the architecture supports it, which means we can
ditch the inline assembly.
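
[Editor's note: for context, the portable idiom behind the new helpers is
sketched below. This is an illustration, not part of the diff; the comment
being removed from md32_common.h notes that gcc turns this pattern into a
single roll instruction on amd64 even at -O0, which can be confirmed by
inspecting the output of cc -S.]

#include <stddef.h>
#include <stdint.h>

/*
 * Portable rotate-left idiom, as used by the new crypto_rol_u32().
 * Compilers recognize this pattern and emit a single rotate
 * instruction where the architecture has one (e.g. rol on amd64).
 * As written, the shift must stay in the range 1..31: shifting a
 * uint32_t by the full 32 bits is undefined behaviour in C.
 */
static inline uint32_t
rol32(uint32_t v, size_t shift)
{
	return (v << shift) | (v >> (32 - shift));
}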
On the chance that we need to provide platform-specific versions, this
follows the approach used in BN, where an MD crypto_arch.h header could
be added in the future to provide more specific versions of these
functions.
ok tb@
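
[Editor's note: to illustrate the override mechanism described above, a
hypothetical machine-dependent crypto_arch.h could define the matching
HAVE_* guard and supply its own implementation, which the #ifndef guards
in crypto_internal.h would then skip. The file name comes from the commit
message; the amd64 inline assembly is purely illustrative and not part of
this commit.]

#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical MD crypto_arch.h (illustrative only): defining the
 * HAVE_* guard makes crypto_internal.h skip its portable version,
 * so this machine-specific one is used instead.
 */
#define HAVE_CRYPTO_ROR_U32

static inline uint32_t
crypto_ror_u32(uint32_t v, size_t shift)
{
	uint32_t ret;

	/* amd64 rotate-right; the shift count must be in %cl. */
	__asm__ ("rorl %%cl, %0"
	    : "=r"(ret)
	    : "0"(v), "c"((uint32_t)shift)
	    : "cc");

	return ret;
}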
-rw-r--r--   lib/libcrypto/crypto_internal.h   34
-rw-r--r--   lib/libcrypto/md32_common.h       14
-rw-r--r--   lib/libcrypto/sha/sha512.c        16
3 files changed, 39 insertions, 25 deletions
diff --git a/lib/libcrypto/crypto_internal.h b/lib/libcrypto/crypto_internal.h
index af2a87216e2..fa1dc504f74 100644
--- a/lib/libcrypto/crypto_internal.h
+++ b/lib/libcrypto/crypto_internal.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: crypto_internal.h,v 1.1 2023/04/12 04:40:39 jsing Exp $ */
+/* $OpenBSD: crypto_internal.h,v 1.2 2023/04/12 04:54:15 jsing Exp $ */
 /*
  * Copyright (c) 2023 Joel Sing <jsing@openbsd.org>
  *
@@ -31,4 +31,36 @@ crypto_store_htobe64(uint8_t *dst, uint64_t v)
 }
 #endif
 
+#ifndef HAVE_CRYPTO_ROL_U32
+static inline uint32_t
+crypto_rol_u32(uint32_t v, size_t shift)
+{
+	return (v << shift) | (v >> (32 - shift));
+}
+#endif
+
+#ifndef HAVE_CRYPTO_ROR_U32
+static inline uint32_t
+crypto_ror_u32(uint32_t v, size_t shift)
+{
+	return (v << (32 - shift)) | (v >> shift);
+}
+#endif
+
+#ifndef HAVE_CRYPTO_ROL_U64
+static inline uint64_t
+crypto_rol_u64(uint64_t v, size_t shift)
+{
+	return (v << shift) | (v >> (64 - shift));
+}
+#endif
+
+#ifndef HAVE_CRYPTO_ROR_U64
+static inline uint64_t
+crypto_ror_u64(uint64_t v, size_t shift)
+{
+	return (v << (64 - shift)) | (v >> shift);
+}
+#endif
+
 #endif
diff --git a/lib/libcrypto/md32_common.h b/lib/libcrypto/md32_common.h
index a8b0d9ab740..cce4cfb0f7e 100644
--- a/lib/libcrypto/md32_common.h
+++ b/lib/libcrypto/md32_common.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: md32_common.h,v 1.23 2022/12/26 07:18:50 jmc Exp $ */
+/* $OpenBSD: md32_common.h,v 1.24 2023/04/12 04:54:15 jsing Exp $ */
 /* ====================================================================
  * Copyright (c) 1999-2007 The OpenSSL Project.  All rights reserved.
  *
@@ -111,6 +111,8 @@
 
 #include <openssl/opensslconf.h>
 
+#include "crypto_internal.h"
+
 #if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 #error "DATA_ORDER must be defined!"
 #endif
@@ -139,15 +141,7 @@
 #error "HASH_BLOCK_DATA_ORDER must be defined!"
 #endif
 
-/*
- * This common idiom is recognized by the compiler and turned into a
- * CPU-specific intrinsic as appropriate.
- * e.g. GCC optimizes to roll on amd64 at -O0
- */
-static inline uint32_t ROTATE(uint32_t a, uint32_t n)
-{
-	return (a<<n)|(a>>(32-n));
-}
+#define ROTATE(a, n) crypto_rol_u32(a, n)
 
 #if defined(DATA_ORDER_IS_BIG_ENDIAN)
 
diff --git a/lib/libcrypto/sha/sha512.c b/lib/libcrypto/sha/sha512.c
index 14c4cbd4f3a..ff9ca889e0b 100644
--- a/lib/libcrypto/sha/sha512.c
+++ b/lib/libcrypto/sha/sha512.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sha512.c,v 1.31 2023/04/12 04:40:39 jsing Exp $ */
+/* $OpenBSD: sha512.c,v 1.32 2023/04/12 04:54:16 jsing Exp $ */
 /* ====================================================================
  * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
  *
@@ -119,11 +119,6 @@ static const SHA_LONG64 K512[80] = {
 
 #if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 # if defined(__x86_64) || defined(__x86_64__)
-# define ROTR(a, n)	({ SHA_LONG64 ret;		\
-				asm ("rorq %1,%0"	\
-				: "=r"(ret)		\
-				: "J"(n),"0"(a)		\
-				: "cc"); ret;		})
 # define PULL64(x)	({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x)));	\
 				asm ("bswapq %0"	\
 				: "=r"(ret)		\
@@ -135,11 +130,6 @@ static const SHA_LONG64 K512[80] = {
 				: "=r"(lo),"=r"(hi)	\
 				: "0"(lo),"1"(hi));	\
 				((SHA_LONG64)hi)<<32|lo; })
-# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-# define ROTR(a, n)	({ SHA_LONG64 ret;		\
-				asm ("rotrdi %0,%1,%2"	\
-				: "=r"(ret)		\
-				: "r"(a),"K"(n)); ret;	})
 # endif
 #endif
 
@@ -152,9 +142,7 @@ static const SHA_LONG64 K512[80] = {
 #endif
 #endif
 
-#ifndef ROTR
-#define ROTR(x, s)	(((x)>>s) | (x)<<(64-s))
-#endif
+#define ROTR(x, s) crypto_ror_u64(x, s)
 
 #define Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
 #define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
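
[Editor's note: as a quick sanity check of the rotation identities, the
portable helpers can be exercised standalone. A minimal test sketch; the
test values are illustrative and the definitions are copied from the
crypto_internal.h hunk above.]

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Portable definitions, copied from crypto_internal.h above. */
static inline uint32_t
crypto_rol_u32(uint32_t v, size_t shift)
{
	return (v << shift) | (v >> (32 - shift));
}

static inline uint32_t
crypto_ror_u32(uint32_t v, size_t shift)
{
	return (v << (32 - shift)) | (v >> shift);
}

int
main(void)
{
	/* Rotating left then right by the same count is the identity. */
	assert(crypto_ror_u32(crypto_rol_u32(0xdeadbeef, 7), 7) == 0xdeadbeef);

	/* The top bit wraps around to bit zero on a left rotation. */
	assert(crypto_rol_u32(0x80000001, 1) == 0x00000003);

	return 0;
}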