From: jsing <jsing@openbsd.org>
Date: Sun, 29 Jan 2023 14:00:41 +0000 (+0000)
Subject: Use s2n-bignum assembly implementations for libcrypto bignum on amd64.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=588b959bae4a5b78fd706db29b9c009cf25b538e;p=openbsd

Use s2n-bignum assembly implementations for libcrypto bignum on amd64.

This switches the core bignum assembly implementations from x86_64-gcc.c
to s2n-bignum for amd64.

ok miod@ tb@
---

diff --git a/lib/libcrypto/arch/amd64/Makefile.inc b/lib/libcrypto/arch/amd64/Makefile.inc
index 1fd9f689197..5e433b572d4 100644
--- a/lib/libcrypto/arch/amd64/Makefile.inc
+++ b/lib/libcrypto/arch/amd64/Makefile.inc
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile.inc,v 1.10 2023/01/21 17:29:56 jsing Exp $
+# $OpenBSD: Makefile.inc,v 1.11 2023/01/29 14:00:41 jsing Exp $
 
 # amd64-specific libcrypto build rules
 
@@ -29,7 +29,16 @@ SSLASM+= bn x86_64-gf2m
 
 # bn s2n-bignum
 SRCS += bn_arch.c
+SRCS += bignum_add.S
+SRCS += bignum_cmadd.S
+SRCS += bignum_cmul.S
+SRCS += bignum_mul.S
+SRCS += bignum_mul_4_8_alt.S
+SRCS += bignum_mul_8_16_alt.S
 SRCS += bignum_sqr.S
+SRCS += bignum_sqr_4_8_alt.S
+SRCS += bignum_sqr_8_16_alt.S
+SRCS += bignum_sub.S
 
 # camellia
 SRCS+= cmll_misc.c
@@ -75,7 +84,7 @@ ${f}.S: ${LCRYPTO_SRC}/${dir}/asm/${f}.pl ${EXTRA_PL}
 .endfor
 
 CFLAGS+= -DOPENSSL_CPUID_OBJ
-SRCS+= x86_64cpuid.S x86_64-gcc.c
+SRCS+= x86_64cpuid.S
 GENERATED+=x86_64cpuid.S
 
 x86_64cpuid.S: ${LCRYPTO_SRC}/x86_64cpuid.pl ${EXTRA_PL}
diff --git a/lib/libcrypto/bn/arch/amd64/bn_arch.c b/lib/libcrypto/bn/arch/amd64/bn_arch.c
index 240575955ce..aedefc76e22 100644
--- a/lib/libcrypto/bn/arch/amd64/bn_arch.c
+++ b/lib/libcrypto/bn/arch/amd64/bn_arch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bn_arch.c,v 1.1 2023/01/21 17:29:56 jsing Exp $ */
+/* $OpenBSD: bn_arch.c,v 1.2 2023/01/29 14:00:41 jsing Exp $ */
 /*
  * Copyright (c) 2023 Joel Sing <jsing@openbsd.org>
  *
@@ -21,6 +21,58 @@
 #include "bn_local.h"
 #include "s2n_bignum.h"
 
+#ifdef HAVE_BN_ADD_WORDS
+BN_ULONG
+bn_add_words(BN_ULONG *rd, const BN_ULONG *ad, const BN_ULONG *bd, int n)
+{
+	return bignum_add(n, (uint64_t *)rd, n, (uint64_t *)ad, n,
+	    (uint64_t *)bd);
+}
+#endif
+
+#ifdef HAVE_BN_SUB_WORDS
+BN_ULONG
+bn_sub_words(BN_ULONG *rd, const BN_ULONG *ad, const BN_ULONG *bd, int n)
+{
+	return bignum_sub(n, (uint64_t *)rd, n, (uint64_t *)ad, n,
+	    (uint64_t *)bd);
+}
+#endif
+
+#ifdef HAVE_BN_MUL_ADD_WORDS
+BN_ULONG
+bn_mul_add_words(BN_ULONG *rd, const BN_ULONG *ad, int num, BN_ULONG w)
+{
+	return bignum_cmadd(num, (uint64_t *)rd, w, num, (uint64_t *)ad);
+}
+#endif
+
+#ifdef HAVE_BN_MUL_WORDS
+BN_ULONG
+bn_mul_words(BN_ULONG *rd, const BN_ULONG *ad, int num, BN_ULONG w)
+{
+	return bignum_cmul(num, (uint64_t *)rd, w, num, (uint64_t *)ad);
+}
+#endif
+
+#ifdef HAVE_BN_MUL_COMBA4
+void
+bn_mul_comba4(BN_ULONG *rd, BN_ULONG *ad, BN_ULONG *bd)
+{
+	/* XXX - consider using non-alt on CPUs that have the ADX extension. */
+	bignum_mul_4_8_alt((uint64_t *)rd, (uint64_t *)ad, (uint64_t *)bd);
+}
+#endif
+
+#ifdef HAVE_BN_MUL_COMBA8
+void
+bn_mul_comba8(BN_ULONG *rd, BN_ULONG *ad, BN_ULONG *bd)
+{
+	/* XXX - consider using non-alt on CPUs that have the ADX extension. */
+	bignum_mul_8_16_alt((uint64_t *)rd, (uint64_t *)ad, (uint64_t *)bd);
+}
+#endif
+
 #ifdef HAVE_BN_SQR
 int
 bn_sqr(BIGNUM *r, const BIGNUM *a, int rn, BN_CTX *ctx)
@@ -30,3 +82,29 @@ bn_sqr(BIGNUM *r, const BIGNUM *a, int rn, BN_CTX *ctx)
 	return 1;
 }
 #endif
+
+#ifdef HAVE_BN_SQR_COMBA4
+void
+bn_sqr_comba4(BN_ULONG *rd, const BN_ULONG *ad)
+{
+	/* XXX - consider using non-alt on CPUs that have the ADX extension. */
+	bignum_sqr_4_8_alt((uint64_t *)rd, (uint64_t *)ad);
+}
+#endif
+
+#ifdef HAVE_BN_SQR_COMBA8
+void
+bn_sqr_comba8(BN_ULONG *rd, const BN_ULONG *ad)
+{
+	/* XXX - consider using non-alt on CPUs that have the ADX extension. */
+	bignum_sqr_8_16_alt((uint64_t *)rd, (uint64_t *)ad);
+}
+#endif
+
+#ifdef HAVE_BN_SQR_WORDS
+void
+bn_sqr_words(BN_ULONG *rd, const BN_ULONG *ad, int num)
+{
+	bignum_sqr(num, (uint64_t *)rd, num, (uint64_t *)ad);
+}
+#endif
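For reference, a minimal sketch (not part of the commit) of the s2n-bignum
calling convention the new wrappers rely on: each routine takes an explicit
word count for the result and for each operand and returns the carry (or
borrow) word, which is what lets bn_add_words() and friends map onto it
directly. The bignum_add() declaration below matches the calls in bn_arch.c
above; the test values are illustrative only, and the program assumes it is
linked against the bignum_add.S added by this commit.

#include <stdint.h>
#include <stdio.h>

/* z[0..p-1] = x + y; returns the carry-out word (s2n-bignum). */
uint64_t bignum_add(uint64_t p, uint64_t *z, uint64_t m, uint64_t *x,
    uint64_t n, uint64_t *y);

int
main(void)
{
	uint64_t a[2] = { ~0ULL, 0 };	/* 2^64 - 1 */
	uint64_t b[2] = { 1, 0 };	/* 1 */
	uint64_t r[2];
	uint64_t carry;

	/* (2^64 - 1) + 1 = 2^64: expect r = { 0, 1 } and carry-out 0. */
	carry = bignum_add(2, r, 2, a, 2, b);

	printf("r0=%llu r1=%llu carry=%llu\n",
	    (unsigned long long)r[0], (unsigned long long)r[1],
	    (unsigned long long)carry);
	return 0;
}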