From 84ba1e83d5755dd6ba8a75641caf8d31c42c122d Mon Sep 17 00:00:00 2001
From: miod
Date: Thu, 17 Apr 2014 21:07:04 +0000
Subject: [PATCH] Remove support for big-endian i386 and amd64.

Before someone suggests the OpenSSL people are junkies, here is what
they mention about this:

/* Most will argue that x86_64 is always little-endian. Well,
 * yes, but then we have stratus.com who has modified gcc to
 * "emulate" big-endian on x86. Is there evidence that they
 * [or somebody else] won't do same for x86_64? Naturally no.
 * And this line is waiting ready for that brave soul:-) */

So, yes, they are on drugs. But they are not alone; the stratus.com
people are, too.
---
 lib/libcrypto/md32_common.h               |  5 -----
 lib/libcrypto/sha/sha512.c                |  8 +-------
 lib/libcrypto/whrlpool/wp_block.c         | 10 ----------
 lib/libssl/src/crypto/md32_common.h       |  5 -----
 lib/libssl/src/crypto/sha/sha512.c        |  8 +-------
 lib/libssl/src/crypto/whrlpool/wp_block.c | 10 ----------
 6 files changed, 2 insertions(+), 44 deletions(-)

diff --git a/lib/libcrypto/md32_common.h b/lib/libcrypto/md32_common.h
index bfb610e8022..5276abfadca 100644
--- a/lib/libcrypto/md32_common.h
+++ b/lib/libcrypto/md32_common.h
@@ -199,7 +199,6 @@
 # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 # if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
	(defined(__x86_64) || defined(__x86_64__))
-# if !defined(B_ENDIAN)
 /*
  * This gives ~30-40% performance improvement in SHA-256 compiled
  * with gcc [on P4]. Well, first macro to be frank. We can pull
@@ -212,7 +211,6 @@
 # define HOST_l2c(l,c) ({ unsigned int r=(l); \
	asm ("bswapl %0":"=r"(r):"0"(r)); \
	*((unsigned int *)(c))=r; (c)+=4; r; })
-# endif
 # endif
 # endif
 #endif
@@ -251,11 +249,8 @@
 # endif
 #endif
 #if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-# ifndef B_ENDIAN
-/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
 # define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
 # define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
-# endif
 #endif

 #ifndef HOST_c2l
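The two hunks above leave only the little-endian code paths: fetching four
big-endian message bytes on x86/x86_64 becomes a plain 32-bit load followed
by a byte swap. The equivalence those macros rely on can be checked in a
standalone C sketch; the names here are hypothetical and not part of the
patch, and the sketch assumes GCC/clang builtins on a little-endian host.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Portable big-endian load, as the generic HOST_c2l path does it. */
static uint32_t
be32_load_portable(const unsigned char *c)
{
	return (uint32_t)c[0] << 24 | (uint32_t)c[1] << 16 |
	    (uint32_t)c[2] << 8 | (uint32_t)c[3];
}

/* x86 fast path: unaligned 32-bit load plus byte swap (the "bswapl" trick). */
static uint32_t
be32_load_bswap(const unsigned char *c)
{
	uint32_t r;

	memcpy(&r, c, sizeof(r));	/* like *(const unsigned int *)(c) */
	return __builtin_bswap32(r);
}

int
main(void)
{
	const unsigned char buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* Identical results, but only on a little-endian host. */
	assert(be32_load_portable(buf) == 0xdeadbeef);
	assert(be32_load_bswap(buf) == be32_load_portable(buf));
	return 0;
}

On a genuinely big-endian x86 the bswap variant would return the wrong word,
which is exactly what the deleted B_ENDIAN guards protected against; the
diffs below drop the same guards from SHA-512 and Whirlpool.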
diff --git a/lib/libcrypto/sha/sha512.c b/lib/libcrypto/sha/sha512.c
index 50c229ddebf..32bfecbf9bf 100644
--- a/lib/libcrypto/sha/sha512.c
+++ b/lib/libcrypto/sha/sha512.c
@@ -318,13 +318,11 @@ static const SHA_LONG64 K512[80] = {
	: "=r"(ret) \
	: "J"(n),"0"(a) \
	: "cc"); ret; })
-# if !defined(B_ENDIAN)
 # define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
	asm ("bswapq %0" \
	: "=r"(ret) \
	: "0"(ret)); ret; })
-# endif
-# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
+# elif (defined(__i386) || defined(__i386__))
 # if defined(I386_ONLY)
 # define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
	unsigned int hi=p[0],lo=p[1]; \
@@ -421,11 +419,7 @@ static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num

 	for (i=0;i<16;i++,F--)
 		{
-#ifdef B_ENDIAN
-		T = W[i];
-#else
 		T = PULL64(W[i]);
-#endif
 		F[0] = A;
 		F[4] = E;
 		F[8] = T;
diff --git a/lib/libcrypto/whrlpool/wp_block.c b/lib/libcrypto/whrlpool/wp_block.c
index 824ed1827c4..ce977083add 100644
--- a/lib/libcrypto/whrlpool/wp_block.c
+++ b/lib/libcrypto/whrlpool/wp_block.c
@@ -84,18 +84,8 @@ typedef unsigned long long u64;
 # endif
 #elif defined(__GNUC__) && __GNUC__>=2
 # if defined(__x86_64) || defined(__x86_64__)
-# if defined(L_ENDIAN)
 # define ROTATE(a,n) ({ u64 ret; asm ("rolq %1,%0" \
	: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
-# elif defined(B_ENDIAN)
-	/* Most will argue that x86_64 is always little-endian. Well,
-	 * yes, but then we have stratus.com who has modified gcc to
-	 * "emulate" big-endian on x86. Is there evidence that they
-	 * [or somebody else] won't do same for x86_64? Naturally no.
-	 * And this line is waiting ready for that brave soul:-) */
-# define ROTATE(a,n) ({ u64 ret; asm ("rorq %1,%0" \
-	: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
-# endif
 # elif defined(__ia64) || defined(__ia64__)
 # if defined(L_ENDIAN)
 # define ROTATE(a,n) ({ u64 ret; asm ("shrp %0=%1,%1,%2" \
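In the sha512.c hunks above the same assumption now holds unconditionally:
PULL64() fetches a big-endian 64-bit message word with one load and one
bswapq, and the `T = W[i]` fallback for big-endian hosts is gone. A sketch
of the semantics, with hypothetical names, not part of the patch, assuming
GCC/clang builtins on a little-endian x86_64:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* What the surviving PULL64() computes: a big-endian 64-bit load. */
static uint64_t
pull64_sketch(const unsigned char *p)
{
	uint64_t w;

	memcpy(&w, p, sizeof(w));	/* one 64-bit load ... */
	return __builtin_bswap64(w);	/* ... plus one "bswapq" */
}

int
main(void)
{
	const unsigned char msg[8] =
	    { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };

	assert(pull64_sketch(msg) == 0x0123456789abcdefULL);
	return 0;
}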
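The wp_block.c hunk just shown keeps only the rolq form of ROTATE(), an
ordinary 64-bit rotate left; its rorq twin is deleted along with the
hypothetical big-endian x86_64 it served. A portable sketch (not part of
the patch; GCC and clang recognize this masked-shift pattern and typically
emit a single rolq on x86_64):

#include <assert.h>
#include <stdint.h>

/* 64-bit rotate left; the mask and the n == 0 check keep both
 * shift amounts within the range C defines. */
static uint64_t
rotl64(uint64_t a, unsigned int n)
{
	n &= 63;
	if (n == 0)
		return a;
	return a << n | a >> (64 - n);
}

int
main(void)
{
	/* The top bit wraps around to bit 0. */
	assert(rotl64(0x8000000000000001ULL, 1) == 0x0000000000000003ULL);
	/* Rotating by 8 moves the leading byte to the end. */
	assert(rotl64(0x0123456789abcdefULL, 8) == 0x23456789abcdef01ULL);
	return 0;
}

The remaining diffs apply the same three changes to the duplicated tree
under lib/libssl/src/crypto.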
diff --git a/lib/libssl/src/crypto/md32_common.h b/lib/libssl/src/crypto/md32_common.h
index bfb610e8022..5276abfadca 100644
--- a/lib/libssl/src/crypto/md32_common.h
+++ b/lib/libssl/src/crypto/md32_common.h
@@ -199,7 +199,6 @@
 # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 # if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
	(defined(__x86_64) || defined(__x86_64__))
-# if !defined(B_ENDIAN)
 /*
  * This gives ~30-40% performance improvement in SHA-256 compiled
  * with gcc [on P4]. Well, first macro to be frank. We can pull
@@ -212,7 +211,6 @@
 # define HOST_l2c(l,c) ({ unsigned int r=(l); \
	asm ("bswapl %0":"=r"(r):"0"(r)); \
	*((unsigned int *)(c))=r; (c)+=4; r; })
-# endif
 # endif
 # endif
 #endif
@@ -251,11 +249,8 @@
 # endif
 #endif
 #if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-# ifndef B_ENDIAN
-/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
 # define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
 # define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
-# endif
 #endif

 #ifndef HOST_c2l
diff --git a/lib/libssl/src/crypto/sha/sha512.c b/lib/libssl/src/crypto/sha/sha512.c
index 50c229ddebf..32bfecbf9bf 100644
--- a/lib/libssl/src/crypto/sha/sha512.c
+++ b/lib/libssl/src/crypto/sha/sha512.c
@@ -318,13 +318,11 @@ static const SHA_LONG64 K512[80] = {
	: "=r"(ret) \
	: "J"(n),"0"(a) \
	: "cc"); ret; })
-# if !defined(B_ENDIAN)
 # define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
	asm ("bswapq %0" \
	: "=r"(ret) \
	: "0"(ret)); ret; })
-# endif
-# elif (defined(__i386) || defined(__i386__)) && !defined(B_ENDIAN)
+# elif (defined(__i386) || defined(__i386__))
 # if defined(I386_ONLY)
 # define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
	unsigned int hi=p[0],lo=p[1]; \
@@ -421,11 +419,7 @@ static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num

 	for (i=0;i<16;i++,F--)
 		{
-#ifdef B_ENDIAN
-		T = W[i];
-#else
 		T = PULL64(W[i]);
-#endif
 		F[0] = A;
 		F[4] = E;
 		F[8] = T;
diff --git a/lib/libssl/src/crypto/whrlpool/wp_block.c b/lib/libssl/src/crypto/whrlpool/wp_block.c
index 824ed1827c4..ce977083add 100644
--- a/lib/libssl/src/crypto/whrlpool/wp_block.c
+++ b/lib/libssl/src/crypto/whrlpool/wp_block.c
@@ -84,18 +84,8 @@ typedef unsigned long long u64;
 # endif
 #elif defined(__GNUC__) && __GNUC__>=2
 # if defined(__x86_64) || defined(__x86_64__)
-# if defined(L_ENDIAN)
 # define ROTATE(a,n) ({ u64 ret; asm ("rolq %1,%0" \
	: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
-# elif defined(B_ENDIAN)
-	/* Most will argue that x86_64 is always little-endian. Well,
-	 * yes, but then we have stratus.com who has modified gcc to
-	 * "emulate" big-endian on x86. Is there evidence that they
-	 * [or somebody else] won't do same for x86_64? Naturally no.
-	 * And this line is waiting ready for that brave soul:-) */
-# define ROTATE(a,n) ({ u64 ret; asm ("rorq %1,%0" \
-	: "=r"(ret) : "J"(n),"0"(a) : "cc"); ret; })
-# endif
 # elif defined(__ia64) || defined(__ia64__)
 # if defined(L_ENDIAN)
 # define ROTATE(a,n) ({ u64 ret; asm ("shrp %0=%1,%1,%2" \
-- 
2.20.1