#endif /* OPENSSL_NO_RC4 */
-#else /* !OPENSSL_NO_RSA */
-
-# if PEDANTIC
-static void *dummy = &dummy;
-# endif
-
#endif
#else
#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) \
- && !defined(PEDANTIC) && !defined(BN_DIV3W)
+ && !defined(BN_DIV3W)
# if defined(__GNUC__) && __GNUC__>=2
# if defined(__i386) || defined (__i386__)
/*
#define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32) /* 32 */
#define BN_MONT_CTX_SET_SIZE_WORD (64) /* 32 */
-#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
* BN_UMULT_HIGH section.
*
#include <stdlib.h>
/* 32-bit rotations */
-#if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if defined(__GNUC__) && __GNUC__>=2
# if defined(__i386) || defined(__x86_64)
# define RightRotate(x,s) ({u32 ret; asm ("rorl %1,%0":"=r"(ret):"I"(s),"0"(x):"cc"); ret; })
} \
}
-#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ("rorl %1,%0" \
defined(_M_AMD64) || defined(_M_X64) || \
defined(__INTEL__) )
-#if defined(__GNUC__) && __GNUC__>=2 && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2
# define BSWAP(x) ({ unsigned int r=(x); asm ("bswapl %0":"=r"(r):"0"(r)); r; })
#endif
return 1;
}
-#else
-
-# ifdef PEDANTIC
-static void *dummy=&dummy;
-# endif
-
#endif
i = EVP_DecryptInit_ex(ctx,NULL,NULL,NULL,NULL);
return(i);
}
-#else /* !OPENSSL_NO_RSA */
-
-# ifdef PEDANTIC
-static void *dummy=&dummy;
-# endif
-
#endif
* Engage compiler specific rotate intrinsic function if available.
*/
#undef ROTATE
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
* Some GNU C inline assembler templates. Note that these are
* rotates by *constant* number of bits! But that's exactly
* what we need here...
* <appro@fy.chalmers.se>
*/
-# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-# define ROTATE(a,n) ({ register unsigned int ret; \
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+# define ROTATE(a,n) ({ register unsigned int ret; \
asm ( \
"roll %1,%0" \
: "=r"(ret) \
: "cc"); \
ret; \
})
-# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
+# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
-# define ROTATE(a,n) ({ register unsigned int ret; \
+# define ROTATE(a,n) ({ register unsigned int ret; \
asm ( \
"rlwinm %0,%1,%2,0,31" \
: "=r"(ret) \
: "r"(a), "I"(n)); \
ret; \
})
-# elif defined(__s390x__)
-# define ROTATE(a,n) ({ register unsigned int ret; \
+# elif defined(__s390x__)
+# define ROTATE(a,n) ({ register unsigned int ret; \
asm ("rll %0,%1,%2" \
: "=r"(ret) \
: "r"(a), "I"(n)); \
ret; \
})
-# endif
# endif
-#endif /* PEDANTIC */
+#endif
#ifndef ROTATE
#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
(defined(__x86_64) || defined(__x86_64__))
/*
* This gives ~30-40% performance improvement in SHA-256 compiled
* this trick on x86* platforms only, because these CPUs can fetch
* unaligned data without raising an exception.
*/
-# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
+# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
asm ("bswapl %0":"=r"(r):"0"(r)); \
(c)+=4; (l)=r; })
-# define HOST_l2c(l,c) ({ unsigned int r=(l); \
+# define HOST_l2c(l,c) ({ unsigned int r=(l); \
asm ("bswapl %0":"=r"(r):"0"(r)); \
*((unsigned int *)(c))=r; (c)+=4; r; })
-# endif
# endif
#endif
#if defined(__s390__) || defined(__s390x__)
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(__s390x__)
-# define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__s390x__)
+# define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \
:"=d"(l) :"m"(*(const unsigned int *)(c)));\
(c)+=4; (l); })
-# define HOST_l2c(l,c) ({ asm ("strv %1,%0" \
+# define HOST_l2c(l,c) ({ asm ("strv %1,%0" \
:"=m"(*(unsigned int *)(c)) :"d"(l));\
(c)+=4; (l); })
-# endif
# endif
#endif
#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# undef STRICT_ALIGNMENT
#endif
-#if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#if defined(__GNUC__) && __GNUC__>=2
# if defined(__x86_64) || defined(__x86_64__)
# define BSWAP8(x) ({ u64 ret=(x); \
free(s);
return (ret);
}
-#else /* !OPENSSL_NO_RSA */
-
-# if PEDANTIC
-static void *dummy = &dummy;
-# endif
-
#endif
*((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
*((c)++)=(unsigned char)(((l) )&0xff))
-#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(__STRICT_ANSI__) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE_l32(a,n) ({ register unsigned int ret; \
asm ("roll %%cl,%0" \
U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a),
U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) };
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-# if defined(__x86_64) || defined(__x86_64__)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__x86_64) || defined(__x86_64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
asm ("rorq %1,%0" \
: "=r"(ret) \
: "J"(n),"0"(a) \
: "cc"); ret; })
-# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
+# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
asm ("bswapq %0" \
: "=r"(ret) \
: "0"(ret)); ret; })
-# elif (defined(__i386) || defined(__i386__))
-# if defined(I386_ONLY)
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+# elif (defined(__i386) || defined(__i386__))
+# if defined(I386_ONLY)
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
unsigned int hi=p[0],lo=p[1]; \
asm("xchgb %%ah,%%al;xchgb %%dh,%%dl;"\
"roll $16,%%eax; roll $16,%%edx; "\
: "=a"(lo),"=d"(hi) \
: "0"(lo),"1"(hi) : "cc"); \
((SHA_LONG64)hi)<<32|lo; })
-# else
-# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+# else
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
unsigned int hi=p[0],lo=p[1]; \
asm ("bswapl %0; bswapl %1;" \
: "=r"(lo),"=r"(hi) \
: "0"(lo),"1"(hi)); \
((SHA_LONG64)hi)<<32|lo; })
-# endif
-# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
-# define ROTR(a,n) ({ SHA_LONG64 ret; \
+# endif
+# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
asm ("rotrdi %0,%1,%2" \
: "=r"(ret) \
: "r"(a),"K"(n)); ret; })
-# endif
# endif
#endif
#endif /* SHA512_ASM */
-#else /* !OPENSSL_NO_SHA512 */
-
-#if defined(PEDANTIC) || defined(__DECC) || defined(OPENSSL_SYS_MACOSX)
-static void *dummy=&dummy;
-#endif
-
#endif /* !OPENSSL_NO_SHA512 */
apps_shutdown();
return (ret);
}
-#else /* !OPENSSL_NO_DH */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
return 1;
}
-#else /* !OPENSSL_NO_DH */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
apps_shutdown();
return (ret);
}
-#else /* !OPENSSL_NO_DSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
#endif
return 1;
}
-#else /* !OPENSSL_NO_DSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
apps_shutdown();
return (ret);
}
-#else /* !OPENSSL_NO_EC */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
BIO_printf(out, "\n\t};\n\n");
return 1;
}
-#else /* !OPENSSL_NO_EC */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
apps_shutdown();
return (ret);
}
-#else
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
#endif
return 1;
}
-#else /* !OPENSSL_NO_DH */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
apps_shutdown();
return (ret);
}
-#else /* !OPENSSL_NO_DSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
#endif
return 1;
}
-#else /* !OPENSSL_NO_RSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
buf_perm[dest] = buf[source];
buf_perm[14] = buf[5];
buf_perm[15] = buf[11];
-#ifndef PEDANTIC /* Unfortunately, this generates a "no
- * effect" warning */
assert(16 == sizeof buf_perm);
-#endif
output = salt_out + salt_len;
assert(output == out_buf + strlen(out_buf));
apps_shutdown();
return (ret);
}
-#else /* !OPENSSL_NO_RSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
}
-#else /* !OPENSSL_NO_RSA */
-
-#if PEDANTIC
-static void *dummy = &dummy;
-#endif
-
#endif
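For reference, below is a minimal standalone sketch, not part of the patch, showing the GCC statement-expression rotate that the hunks above now enable unconditionally (the inline-asm variant the PEDANTIC guard used to suppress), together with the portable shift/or fallback the headers otherwise define. The name rotl32 and the test values are illustrative only.

/*
 * Hypothetical sketch (not from the patch): the same statement-expression
 * plus inline-asm pattern used by the ROTATE macros above, with the
 * portable shift-and-or fallback.
 */
#include <assert.h>
#include <stdio.h>

#if defined(__GNUC__) && __GNUC__>=2 && \
    (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__))
/* GCC statement expression wrapping the x86 "roll" instruction */
# define rotl32(a,n) ({ unsigned int ret;                       \
                        asm ("roll %1,%0"                       \
                             : "=r"(ret)                        \
                             : "I"(n), "0"((unsigned int)(a))   \
                             : "cc");                           \
                        ret; })
#else
/* Portable fallback: plain shift-and-or rotation */
# define rotl32(a,n) (((a)<<(n))|(((a)&0xffffffffU)>>(32-(n))))
#endif

int main(void)
{
    unsigned int x = 0x12345678U;

    /* Rotating left by 8 moves the top byte to the bottom. */
    assert(rotl32(x, 8) == 0x34567812U);
    printf("rotl32(0x%08x, 8) = 0x%08x\n", x, rotl32(x, 8));
    return 0;
}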