-/* $OpenBSD: cbc128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: cbc128.c,v 1.7 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
#define STRICT_ALIGNMENT 0
#endif
-void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], block128_f block)
+void
+CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], block128_f block)
{
size_t n;
const unsigned char *iv = ivec;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (STRICT_ALIGNMENT &&
- ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
- while (len>=16) {
- for(n=0; n<16; ++n)
+ ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) != 0) {
+ while (len >= 16) {
+ for (n = 0; n < 16; ++n)
out[n] = in[n] ^ iv[n];
(*block)(out, out, key);
iv = out;
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
} else {
- while (len>=16) {
- for(n=0; n<16; n+=sizeof(size_t))
- *(size_t*)(out+n) =
- *(size_t*)(in+n) ^ *(size_t*)(iv+n);
+ while (len >= 16) {
+ for (n = 0; n < 16; n += sizeof(size_t))
+ *(size_t *)(out + n) =
+ *(size_t *)(in + n) ^ *(size_t *)(iv + n);
(*block)(out, out, key);
iv = out;
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
}
#endif
while (len) {
- for(n=0; n<16 && n<len; ++n)
+ for (n = 0; n < 16 && n < len; ++n)
out[n] = in[n] ^ iv[n];
- for(; n<16; ++n)
+ for (; n < 16; ++n)
out[n] = iv[n];
(*block)(out, out, key);
iv = out;
- if (len<=16) break;
+ if (len <= 16)
+ break;
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
- memmove(ivec,iv,16);
+ memmove(ivec, iv, 16);
}
-void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], block128_f block)
+void
+CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], block128_f block)
{
size_t n;
- union { size_t t[16/sizeof(size_t)]; unsigned char c[16]; } tmp;
+ union {
+ size_t t[16/sizeof(size_t)];
+ unsigned char c[16];
+ } tmp;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (in != out) {
const unsigned char *iv = ivec;
if (STRICT_ALIGNMENT &&
- ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
- while (len>=16) {
+ ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) !=
+ 0) {
+ while (len >= 16) {
(*block)(in, out, key);
- for(n=0; n<16; ++n)
+ for (n = 0; n < 16; ++n)
out[n] ^= iv[n];
iv = in;
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
- } else if (16%sizeof(size_t) == 0) { /* always true */
- while (len>=16) {
- size_t *out_t=(size_t *)out, *iv_t=(size_t *)iv;
+ } else if (16 % sizeof(size_t) == 0) { /* always true */
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out,
+ *iv_t = (size_t *)iv;
(*block)(in, out, key);
- for(n=0; n<16/sizeof(size_t); n++)
+ for (n = 0; n < 16/sizeof(size_t); n++)
out_t[n] ^= iv_t[n];
iv = in;
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
}
- memmove(ivec,iv,16);
+ memmove(ivec, iv, 16);
} else {
if (STRICT_ALIGNMENT &&
- ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
+ ((size_t)in|(size_t)out|(size_t)ivec) % sizeof(size_t) !=
+ 0) {
unsigned char c;
- while (len>=16) {
+ while (len >= 16) {
(*block)(in, tmp.c, key);
- for(n=0; n<16; ++n) {
+ for (n = 0; n < 16; ++n) {
c = in[n];
out[n] = tmp.c[n] ^ ivec[n];
ivec[n] = c;
}
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
- } else if (16%sizeof(size_t) == 0) { /* always true */
- while (len>=16) {
- size_t c, *out_t=(size_t *)out, *ivec_t=(size_t *)ivec;
- const size_t *in_t=(const size_t *)in;
+ } else if (16 % sizeof(size_t) == 0) { /* always true */
+ while (len >= 16) {
+ size_t c, *out_t = (size_t *)out,
+ *ivec_t = (size_t *)ivec;
+ const size_t *in_t = (const size_t *)in;
(*block)(in, tmp.c, key);
- for(n=0; n<16/sizeof(size_t); n++) {
+ for (n = 0; n < 16/sizeof(size_t); n++) {
c = in_t[n];
out_t[n] = tmp.t[n] ^ ivec_t[n];
ivec_t[n] = c;
}
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
}
while (len) {
unsigned char c;
(*block)(in, tmp.c, key);
- for(n=0; n<16 && n<len; ++n) {
+ for (n = 0; n < 16 && n < len; ++n) {
c = in[n];
out[n] = tmp.c[n] ^ ivec[n];
ivec[n] = c;
}
- if (len<=16) {
- for (; n<16; ++n)
+ if (len <= 16) {
+ for (; n < 16; ++n)
ivec[n] = in[n];
break;
}
len -= 16;
- in += 16;
+ in += 16;
out += 16;
}
}
-/* $OpenBSD: ccm128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: ccm128.c,v 1.7 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2011 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
/* First you setup M and L parameters and pass the key schedule.
* This is called once per session setup... */
-void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
- unsigned int M,unsigned int L,void *key,block128_f block)
+void
+CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
+ unsigned int M, unsigned int L, void *key, block128_f block)
{
- memset(ctx->nonce.c,0,sizeof(ctx->nonce.c));
- ctx->nonce.c[0] = ((u8)(L-1)&7) | (u8)(((M-2)/2)&7)<<3;
+ memset(ctx->nonce.c, 0, sizeof(ctx->nonce.c));
+ ctx->nonce.c[0] = ((u8)(L - 1) & 7) | (u8)(((M - 2)/2) & 7) << 3;
ctx->blocks = 0;
ctx->block = block;
ctx->key = key;
/* !!! Following interfaces are to be called *once* per packet !!! */
/* Then you setup per-message nonce and pass the length of the message */
-int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
- const unsigned char *nonce,size_t nlen,size_t mlen)
+int
+CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
+ const unsigned char *nonce, size_t nlen, size_t mlen)
{
- unsigned int L = ctx->nonce.c[0]&7; /* the L parameter */
+ unsigned int L = ctx->nonce.c[0] & 7; /* the L parameter */
- if (nlen<(14-L)) return -1; /* nonce is too short */
+ if (nlen < (14 - L))
+ return -1; /* nonce is too short */
- if (sizeof(mlen)==8 && L>=3) {
- ctx->nonce.c[8] = (u8)(mlen>>(56%(sizeof(mlen)*8)));
- ctx->nonce.c[9] = (u8)(mlen>>(48%(sizeof(mlen)*8)));
- ctx->nonce.c[10] = (u8)(mlen>>(40%(sizeof(mlen)*8)));
- ctx->nonce.c[11] = (u8)(mlen>>(32%(sizeof(mlen)*8)));
- }
- else
+ if (sizeof(mlen) == 8 && L >= 3) {
+ ctx->nonce.c[8] = (u8)(mlen >> (56 % (sizeof(mlen)*8)));
+ ctx->nonce.c[9] = (u8)(mlen >> (48 % (sizeof(mlen)*8)));
+ ctx->nonce.c[10] = (u8)(mlen >> (40 % (sizeof(mlen)*8)));
+ ctx->nonce.c[11] = (u8)(mlen >> (32 % (sizeof(mlen)*8)));
+ } else
ctx->nonce.u[1] = 0;
- ctx->nonce.c[12] = (u8)(mlen>>24);
- ctx->nonce.c[13] = (u8)(mlen>>16);
- ctx->nonce.c[14] = (u8)(mlen>>8);
+ ctx->nonce.c[12] = (u8)(mlen >> 24);
+ ctx->nonce.c[13] = (u8)(mlen >> 16);
+ ctx->nonce.c[14] = (u8)(mlen >> 8);
ctx->nonce.c[15] = (u8)mlen;
ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */
- memcpy(&ctx->nonce.c[1],nonce,14-L);
+ memcpy(&ctx->nonce.c[1], nonce, 14 - L);
return 0;
}
/* Then you pass additional authentication data, this is optional */
-void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
- const unsigned char *aad,size_t alen)
-{ unsigned int i;
+void
+CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
+ const unsigned char *aad, size_t alen)
+{
+ unsigned int i;
block128_f block = ctx->block;
- if (alen==0) return;
+ if (alen == 0)
+ return;
ctx->nonce.c[0] |= 0x40; /* set Adata flag */
- (*block)(ctx->nonce.c,ctx->cmac.c,ctx->key),
- ctx->blocks++;
+ (*block)(ctx->nonce.c, ctx->cmac.c, ctx->key),
+ ctx->blocks++;
- if (alen<(0x10000-0x100)) {
- ctx->cmac.c[0] ^= (u8)(alen>>8);
+ if (alen < (0x10000 - 0x100)) {
+ ctx->cmac.c[0] ^= (u8)(alen >> 8);
ctx->cmac.c[1] ^= (u8)alen;
- i=2;
- }
- else if (sizeof(alen)==8 && alen>=(size_t)1<<(32%(sizeof(alen)*8))) {
+ i = 2;
+ } else if (sizeof(alen) == 8 &&
+ alen >= (size_t)1 << (32 % (sizeof(alen)*8))) {
ctx->cmac.c[0] ^= 0xFF;
ctx->cmac.c[1] ^= 0xFF;
- ctx->cmac.c[2] ^= (u8)(alen>>(56%(sizeof(alen)*8)));
- ctx->cmac.c[3] ^= (u8)(alen>>(48%(sizeof(alen)*8)));
- ctx->cmac.c[4] ^= (u8)(alen>>(40%(sizeof(alen)*8)));
- ctx->cmac.c[5] ^= (u8)(alen>>(32%(sizeof(alen)*8)));
- ctx->cmac.c[6] ^= (u8)(alen>>24);
- ctx->cmac.c[7] ^= (u8)(alen>>16);
- ctx->cmac.c[8] ^= (u8)(alen>>8);
+ ctx->cmac.c[2] ^= (u8)(alen >> (56 % (sizeof(alen)*8)));
+ ctx->cmac.c[3] ^= (u8)(alen >> (48 % (sizeof(alen)*8)));
+ ctx->cmac.c[4] ^= (u8)(alen >> (40 % (sizeof(alen)*8)));
+ ctx->cmac.c[5] ^= (u8)(alen >> (32 % (sizeof(alen)*8)));
+ ctx->cmac.c[6] ^= (u8)(alen >> 24);
+ ctx->cmac.c[7] ^= (u8)(alen >> 16);
+ ctx->cmac.c[8] ^= (u8)(alen >> 8);
ctx->cmac.c[9] ^= (u8)alen;
- i=10;
- }
- else {
+ i = 10;
+ } else {
ctx->cmac.c[0] ^= 0xFF;
ctx->cmac.c[1] ^= 0xFE;
- ctx->cmac.c[2] ^= (u8)(alen>>24);
- ctx->cmac.c[3] ^= (u8)(alen>>16);
- ctx->cmac.c[4] ^= (u8)(alen>>8);
+ ctx->cmac.c[2] ^= (u8)(alen >> 24);
+ ctx->cmac.c[3] ^= (u8)(alen >> 16);
+ ctx->cmac.c[4] ^= (u8)(alen >> 8);
ctx->cmac.c[5] ^= (u8)alen;
- i=6;
+ i = 6;
}
do {
- for(;i<16 && alen;++i,++aad,--alen)
+ for (; i < 16 && alen; ++i, ++aad, --alen)
ctx->cmac.c[i] ^= *aad;
- (*block)(ctx->cmac.c,ctx->cmac.c,ctx->key),
- ctx->blocks++;
- i=0;
+ (*block)(ctx->cmac.c, ctx->cmac.c, ctx->key),
+ ctx->blocks++;
+ i = 0;
} while (alen);
}
/* counter part of nonce may not be larger than L*8 bits,
* L is not larger than 8, therefore 64-bit counter... */
-static void ctr64_inc(unsigned char *counter) {
- unsigned int n=8;
- u8 c;
+static void
+ctr64_inc(unsigned char *counter)
+{
+ unsigned int n = 8;
+ u8 c;
counter += 8;
	do {
		--n;
c = counter[n];
++c;
counter[n] = c;
- if (c) return;
+ if (c)
+ return;
} while (n);
}
-int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out,
- size_t len)
+int
+CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
+ const unsigned char *inp, unsigned char *out,
+ size_t len)
{
- size_t n;
- unsigned int i,L;
- unsigned char flags0 = ctx->nonce.c[0];
- block128_f block = ctx->block;
- void * key = ctx->key;
- union { u64 u[2]; u8 c[16]; } scratch;
-
- if (!(flags0&0x40))
- (*block)(ctx->nonce.c,ctx->cmac.c,key),
- ctx->blocks++;
-
- ctx->nonce.c[0] = L = flags0&7;
- for (n=0,i=15-L;i<15;++i) {
+ size_t n;
+ unsigned int i, L;
+ unsigned char flags0 = ctx->nonce.c[0];
+ block128_f block = ctx->block;
+ void *key = ctx->key;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } scratch;
+
+ if (!(flags0 & 0x40))
+ (*block)(ctx->nonce.c, ctx->cmac.c, key),
+ ctx->blocks++;
+
+ ctx->nonce.c[0] = L = flags0 & 7;
+ for (n = 0, i = 15 - L; i < 15; ++i) {
n |= ctx->nonce.c[i];
- ctx->nonce.c[i]=0;
+ ctx->nonce.c[i] = 0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
- ctx->nonce.c[15]=1;
+ ctx->nonce.c[15] = 1;
- if (n!=len) return -1; /* length mismatch */
+ if (n != len)
+ return -1; /* length mismatch */
- ctx->blocks += ((len+15)>>3)|1;
- if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */
+ ctx->blocks += ((len + 15) >> 3)|1;
+ if (ctx->blocks > (U64(1) << 61))
+ return -2; /* too much data */
- while (len>=16) {
+ while (len >= 16) {
#ifdef __STRICT_ALIGNMENT
- union { u64 u[2]; u8 c[16]; } temp;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } temp;
- memcpy (temp.c,inp,16);
+ memcpy(temp.c, inp, 16);
ctx->cmac.u[0] ^= temp.u[0];
ctx->cmac.u[1] ^= temp.u[1];
#else
- ctx->cmac.u[0] ^= ((u64*)inp)[0];
- ctx->cmac.u[1] ^= ((u64*)inp)[1];
+ ctx->cmac.u[0] ^= ((u64 *)inp)[0];
+ ctx->cmac.u[1] ^= ((u64 *)inp)[1];
#endif
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctr64_inc(ctx->nonce.c);
#ifdef __STRICT_ALIGNMENT
temp.u[0] ^= scratch.u[0];
temp.u[1] ^= scratch.u[1];
- memcpy(out,temp.c,16);
+ memcpy(out, temp.c, 16);
#else
- ((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0];
- ((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1];
+ ((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0];
+ ((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1];
#endif
inp += 16;
out += 16;
}
if (len) {
- for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i];
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
- (*block)(ctx->nonce.c,scratch.c,key);
- for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i];
+ for (i = 0; i < len; ++i)
+ ctx->cmac.c[i] ^= inp[i];
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
+ (*block)(ctx->nonce.c, scratch.c, key);
+ for (i = 0; i < len; ++i)
+ out[i] = scratch.c[i] ^ inp[i];
}
- for (i=15-L;i<16;++i)
- ctx->nonce.c[i]=0;
+ for (i = 15 - L; i < 16; ++i)
+ ctx->nonce.c[i] = 0;
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
return 0;
}
-int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out,
- size_t len)
+int
+CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
+ const unsigned char *inp, unsigned char *out,
+ size_t len)
{
- size_t n;
- unsigned int i,L;
- unsigned char flags0 = ctx->nonce.c[0];
- block128_f block = ctx->block;
- void * key = ctx->key;
- union { u64 u[2]; u8 c[16]; } scratch;
-
- if (!(flags0&0x40))
- (*block)(ctx->nonce.c,ctx->cmac.c,key);
-
- ctx->nonce.c[0] = L = flags0&7;
- for (n=0,i=15-L;i<15;++i) {
+ size_t n;
+ unsigned int i, L;
+ unsigned char flags0 = ctx->nonce.c[0];
+ block128_f block = ctx->block;
+ void *key = ctx->key;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } scratch;
+
+ if (!(flags0 & 0x40))
+ (*block)(ctx->nonce.c, ctx->cmac.c, key);
+
+ ctx->nonce.c[0] = L = flags0 & 7;
+ for (n = 0, i = 15 - L; i < 15; ++i) {
n |= ctx->nonce.c[i];
- ctx->nonce.c[i]=0;
+ ctx->nonce.c[i] = 0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
- ctx->nonce.c[15]=1;
+ ctx->nonce.c[15] = 1;
- if (n!=len) return -1;
+ if (n != len)
+ return -1;
- while (len>=16) {
+ while (len >= 16) {
#ifdef __STRICT_ALIGNMENT
- union { u64 u[2]; u8 c[16]; } temp;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } temp;
#endif
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctr64_inc(ctx->nonce.c);
#ifdef __STRICT_ALIGNMENT
- memcpy (temp.c,inp,16);
+ memcpy(temp.c, inp, 16);
ctx->cmac.u[0] ^= (scratch.u[0] ^= temp.u[0]);
ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]);
- memcpy (out,scratch.c,16);
+ memcpy(out, scratch.c, 16);
#else
- ctx->cmac.u[0] ^= (((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0]);
- ctx->cmac.u[1] ^= (((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1]);
+ ctx->cmac.u[0] ^= (((u64 *)out)[0] = scratch.u[0] ^
+ ((u64 *)inp)[0]);
+ ctx->cmac.u[1] ^= (((u64 *)out)[1] = scratch.u[1] ^
+ ((u64 *)inp)[1]);
#endif
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
inp += 16;
out += 16;
}
if (len) {
- (*block)(ctx->nonce.c,scratch.c,key);
- for (i=0; i<len; ++i)
- ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]);
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
+ for (i = 0; i < len; ++i)
+ ctx->cmac.c[i] ^= (out[i] = scratch.c[i] ^ inp[i]);
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
}
- for (i=15-L;i<16;++i)
- ctx->nonce.c[i]=0;
+ for (i = 15 - L; i < 16; ++i)
+ ctx->nonce.c[i] = 0;
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
return 0;
}
-static void ctr64_add (unsigned char *counter,size_t inc)
-{ size_t n=8, val=0;
+static void
+ctr64_add(unsigned char *counter, size_t inc)
+{
+ size_t n = 8, val = 0;
counter += 8;
do {
--n;
- val += counter[n] + (inc&0xff);
+ val += counter[n] + (inc & 0xff);
counter[n] = (unsigned char)val;
val >>= 8; /* carry bit */
inc >>= 8;
- } while(n && (inc || val));
+ } while (n && (inc || val));
}
-int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out,
- size_t len,ccm128_f stream)
+int
+CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
+ const unsigned char *inp, unsigned char *out,
+ size_t len, ccm128_f stream)
{
- size_t n;
- unsigned int i,L;
- unsigned char flags0 = ctx->nonce.c[0];
- block128_f block = ctx->block;
- void * key = ctx->key;
- union { u64 u[2]; u8 c[16]; } scratch;
-
- if (!(flags0&0x40))
- (*block)(ctx->nonce.c,ctx->cmac.c,key),
- ctx->blocks++;
-
- ctx->nonce.c[0] = L = flags0&7;
- for (n=0,i=15-L;i<15;++i) {
+ size_t n;
+ unsigned int i, L;
+ unsigned char flags0 = ctx->nonce.c[0];
+ block128_f block = ctx->block;
+ void *key = ctx->key;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } scratch;
+
+ if (!(flags0 & 0x40))
+ (*block)(ctx->nonce.c, ctx->cmac.c, key),
+ ctx->blocks++;
+
+ ctx->nonce.c[0] = L = flags0 & 7;
+ for (n = 0, i = 15 - L; i < 15; ++i) {
n |= ctx->nonce.c[i];
- ctx->nonce.c[i]=0;
+ ctx->nonce.c[i] = 0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
- ctx->nonce.c[15]=1;
+ ctx->nonce.c[15] = 1;
- if (n!=len) return -1; /* length mismatch */
+ if (n != len)
+ return -1; /* length mismatch */
- ctx->blocks += ((len+15)>>3)|1;
- if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */
+ ctx->blocks += ((len + 15) >> 3)|1;
+ if (ctx->blocks > (U64(1) << 61))
+ return -2; /* too much data */
- if ((n=len/16)) {
- (*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c);
- n *= 16;
+ if ((n = len/16)) {
+ (*stream)(inp, out, n, key, ctx->nonce.c, ctx->cmac.c);
+ n *= 16;
inp += n;
out += n;
len -= n;
- if (len) ctr64_add(ctx->nonce.c,n/16);
+ if (len)
+ ctr64_add(ctx->nonce.c, n/16);
}
if (len) {
- for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i];
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
- (*block)(ctx->nonce.c,scratch.c,key);
- for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i];
+ for (i = 0; i < len; ++i)
+ ctx->cmac.c[i] ^= inp[i];
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
+ (*block)(ctx->nonce.c, scratch.c, key);
+ for (i = 0; i < len; ++i)
+ out[i] = scratch.c[i] ^ inp[i];
}
- for (i=15-L;i<16;++i)
- ctx->nonce.c[i]=0;
+ for (i = 15 - L; i < 16; ++i)
+ ctx->nonce.c[i] = 0;
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
return 0;
}
-int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out,
- size_t len,ccm128_f stream)
+int
+CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
+ const unsigned char *inp, unsigned char *out,
+ size_t len, ccm128_f stream)
{
- size_t n;
- unsigned int i,L;
- unsigned char flags0 = ctx->nonce.c[0];
- block128_f block = ctx->block;
- void * key = ctx->key;
- union { u64 u[2]; u8 c[16]; } scratch;
-
- if (!(flags0&0x40))
- (*block)(ctx->nonce.c,ctx->cmac.c,key);
-
- ctx->nonce.c[0] = L = flags0&7;
- for (n=0,i=15-L;i<15;++i) {
+ size_t n;
+ unsigned int i, L;
+ unsigned char flags0 = ctx->nonce.c[0];
+ block128_f block = ctx->block;
+ void *key = ctx->key;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } scratch;
+
+ if (!(flags0 & 0x40))
+ (*block)(ctx->nonce.c, ctx->cmac.c, key);
+
+ ctx->nonce.c[0] = L = flags0 & 7;
+ for (n = 0, i = 15 - L; i < 15; ++i) {
n |= ctx->nonce.c[i];
- ctx->nonce.c[i]=0;
+ ctx->nonce.c[i] = 0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
- ctx->nonce.c[15]=1;
+ ctx->nonce.c[15] = 1;
- if (n!=len) return -1;
+ if (n != len)
+ return -1;
- if ((n=len/16)) {
- (*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c);
- n *= 16;
+ if ((n = len/16)) {
+ (*stream)(inp, out, n, key, ctx->nonce.c, ctx->cmac.c);
+ n *= 16;
inp += n;
out += n;
len -= n;
- if (len) ctr64_add(ctx->nonce.c,n/16);
+ if (len)
+ ctr64_add(ctx->nonce.c, n/16);
}
if (len) {
- (*block)(ctx->nonce.c,scratch.c,key);
- for (i=0; i<len; ++i)
- ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]);
- (*block)(ctx->cmac.c,ctx->cmac.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
+ for (i = 0; i < len; ++i)
+ ctx->cmac.c[i] ^= (out[i] = scratch.c[i] ^ inp[i]);
+ (*block)(ctx->cmac.c, ctx->cmac.c, key);
}
- for (i=15-L;i<16;++i)
- ctx->nonce.c[i]=0;
+ for (i = 15 - L; i < 16; ++i)
+ ctx->nonce.c[i] = 0;
- (*block)(ctx->nonce.c,scratch.c,key);
+ (*block)(ctx->nonce.c, scratch.c, key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
return 0;
}
-size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx,unsigned char *tag,size_t len)
-{ unsigned int M = (ctx->nonce.c[0]>>3)&7; /* the M parameter */
+size_t
+CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
+{
+ unsigned int M = (ctx->nonce.c[0] >> 3) & 7; /* the M parameter */
- M *= 2; M += 2;
- if (len != M) return 0;
- memcpy(tag,ctx->cmac.c,M);
+ M *= 2;
+ M += 2;
+ if (len != M)
+ return 0;
+ memcpy(tag, ctx->cmac.c, M);
return M;
}
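
/*
 * A minimal usage sketch (illustrative only, not part of this patch) of
 * the "once per session" / "once per packet" call sequence described in
 * the comments above.  AES from <openssl/aes.h> is assumed as the block
 * cipher; the (block128_f)AES_encrypt cast is the usual way to adapt it.
 */
#include <openssl/aes.h>
#include <openssl/modes.h>

static int
ccm_seal_example(const unsigned char key[16],
    const unsigned char nonce[13],	/* 15 - L bytes for L = 2 */
    const unsigned char *aad, size_t alen,
    const unsigned char *in, unsigned char *out, size_t mlen,
    unsigned char tag[16])
{
	CCM128_CONTEXT ccm;
	AES_KEY ks;

	if (AES_set_encrypt_key(key, 128, &ks) != 0)
		return -1;
	/* once per session: M = 16 byte tag, L = 2 byte length field */
	CRYPTO_ccm128_init(&ccm, 16, 2, &ks, (block128_f)AES_encrypt);
	/* once per packet: nonce and total message length come first */
	if (CRYPTO_ccm128_setiv(&ccm, nonce, 13, mlen) != 0)
		return -1;
	if (alen > 0)
		CRYPTO_ccm128_aad(&ccm, aad, alen);	/* optional AAD */
	if (CRYPTO_ccm128_encrypt(&ccm, in, out, mlen) != 0)
		return -1;
	/* tag length must equal M; returns M on success, 0 on mismatch */
	if (CRYPTO_ccm128_tag(&ccm, tag, 16) != 16)
		return -1;
	return 0;
}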
-/* $OpenBSD: cfb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: cfb128.c,v 1.6 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* used. The extra state information to record how much of the
* 128bit block we have used is contained in *num;
*/
-void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block)
+void
+CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block)
{
- unsigned int n;
- size_t l = 0;
+ unsigned int n;
+ size_t l = 0;
- n = *num;
+ n = *num;
- if (enc) {
+ if (enc) {
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- while (n && len) {
- *(out++) = ivec[n] ^= *(in++);
- --len;
- n = (n+1) % 16;
- }
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ while (n && len) {
+ *(out++) = ivec[n] ^= *(in++);
+ --len;
+ n = (n + 1) % 16;
+ }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out|(size_t)ivec) %
+ sizeof(size_t) != 0)
+ break;
#endif
- while (len>=16) {
- (*block)(ivec, ivec, key);
- for (; n<16; n+=sizeof(size_t)) {
- *(size_t*)(out+n) =
- *(size_t*)(ivec+n) ^= *(size_t*)(in+n);
- }
- len -= 16;
- out += 16;
- in += 16;
- n = 0;
- }
- if (len) {
- (*block)(ivec, ivec, key);
- while (len--) {
- out[n] = ivec[n] ^= in[n];
- ++n;
- }
- }
- *num = n;
- return;
- } while (0);
+ while (len >= 16) {
+ (*block)(ivec, ivec, key);
+ for (; n < 16; n += sizeof(size_t)) {
+ *(size_t *)(out + n) =
+ *(size_t *)(ivec + n) ^= *(size_t *)(in +
+ n);
+ }
+ len -= 16;
+ out += 16;
+ in += 16;
+ n = 0;
+ }
+ if (len) {
+ (*block)(ivec, ivec, key);
+ while (len--) {
+ out[n] = ivec[n] ^= in[n];
+ ++n;
+ }
+ }
+ *num = n;
+ return;
+ } while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
- while (l<len) {
- if (n == 0) {
- (*block)(ivec, ivec, key);
+ while (l < len) {
+ if (n == 0) {
+ (*block)(ivec, ivec, key);
+ }
+ out[l] = ivec[n] ^= in[l];
+ ++l;
+ n = (n + 1) % 16;
}
- out[l] = ivec[n] ^= in[l];
- ++l;
- n = (n+1) % 16;
- }
- *num = n;
- } else {
+ *num = n;
+ } else {
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- while (n && len) {
- unsigned char c;
- *(out++) = ivec[n] ^ (c = *(in++)); ivec[n] = c;
- --len;
- n = (n+1) % 16;
- }
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ while (n && len) {
+ unsigned char c;
+ *(out++) = ivec[n] ^ (c = *(in++));
+ ivec[n] = c;
+ --len;
+ n = (n + 1) % 16;
+ }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out|(size_t)ivec) %
+ sizeof(size_t) != 0)
+ break;
#endif
- while (len>=16) {
- (*block)(ivec, ivec, key);
- for (; n<16; n+=sizeof(size_t)) {
- size_t t = *(size_t*)(in+n);
- *(size_t*)(out+n) = *(size_t*)(ivec+n) ^ t;
- *(size_t*)(ivec+n) = t;
- }
- len -= 16;
- out += 16;
- in += 16;
- n = 0;
- }
- if (len) {
- (*block)(ivec, ivec, key);
- while (len--) {
- unsigned char c;
- out[n] = ivec[n] ^ (c = in[n]); ivec[n] = c;
- ++n;
- }
- }
- *num = n;
- return;
- } while (0);
+ while (len >= 16) {
+ (*block)(ivec, ivec, key);
+ for (; n < 16; n += sizeof(size_t)) {
+ size_t t = *(size_t *)(in + n);
+ *(size_t *)(out + n) = *(size_t *)(ivec +
+ n) ^ t;
+ *(size_t *)(ivec + n) = t;
+ }
+ len -= 16;
+ out += 16;
+ in += 16;
+ n = 0;
+ }
+ if (len) {
+ (*block)(ivec, ivec, key);
+ while (len--) {
+ unsigned char c;
+ out[n] = ivec[n] ^ (c = in[n]);
+ ivec[n] = c;
+ ++n;
+ }
+ }
+ *num = n;
+ return;
+ } while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
- while (l<len) {
- unsigned char c;
- if (n == 0) {
- (*block)(ivec, ivec, key);
+ while (l < len) {
+ unsigned char c;
+ if (n == 0) {
+ (*block)(ivec, ivec, key);
+ }
+ out[l] = ivec[n] ^ (c = in[l]);
+ ivec[n] = c;
+ ++l;
+ n = (n + 1) % 16;
}
- out[l] = ivec[n] ^ (c = in[l]); ivec[n] = c;
- ++l;
- n = (n+1) % 16;
+ *num = n;
}
- *num=n;
- }
}
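
/*
 * A minimal sketch of the *num contract described above, under the same
 * header assumptions as the CCM sketch earlier (<openssl/aes.h>,
 * <openssl/modes.h>): start with num = 0 and a fresh IV; consecutive
 * calls may split the data at arbitrary byte boundaries because the
 * partial-block offset is carried in *num.  Note that CFB uses the
 * encrypt direction of the cipher for both enc = 1 and enc = 0.
 */
static void
cfb_encrypt_in_two_calls(const AES_KEY *ks, unsigned char iv[16],
    const unsigned char *in, unsigned char *out, size_t len)
{
	int num = 0;		/* 0 = no keystream bytes consumed yet */

	CRYPTO_cfb128_encrypt(in, out, len / 2, ks, iv, &num, 1,
	    (block128_f)AES_encrypt);
	CRYPTO_cfb128_encrypt(in + len / 2, out + len / 2, len - len / 2,
	    ks, iv, &num, 1, (block128_f)AES_encrypt);
}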
/* This expects a single block of size nbits for both in and out. Note that
it corrupts any extra bits in the last byte of out */
-static void cfbr_encrypt_block(const unsigned char *in,unsigned char *out,
- int nbits,const void *key,
- unsigned char ivec[16],int enc,
- block128_f block)
+static void
+cfbr_encrypt_block(const unsigned char *in, unsigned char *out,
+ int nbits, const void *key,
+ unsigned char ivec[16], int enc,
+ block128_f block)
{
- int n,rem,num;
- unsigned char ovec[16*2 + 1]; /* +1 because we dererefence (but don't use) one byte off the end */
+ int n, rem, num;
+	unsigned char ovec[16*2 + 1]; /* +1 because we dereference (but don't use) one byte off the end */
- if (nbits<=0 || nbits>128) return;
+ if (nbits <= 0 || nbits > 128)
+ return;
/* fill in the first half of the new IV with the current IV */
- memcpy(ovec,ivec,16);
+ memcpy(ovec, ivec, 16);
/* construct the new IV */
- (*block)(ivec,ivec,key);
- num = (nbits+7)/8;
+ (*block)(ivec, ivec, key);
+ num = (nbits + 7)/8;
if (enc) /* encrypt the input */
- for(n=0 ; n < num ; ++n)
- out[n] = (ovec[16+n] = in[n] ^ ivec[n]);
+ for (n = 0; n < num; ++n)
+ out[n] = (ovec[16 + n] = in[n] ^ ivec[n]);
else /* decrypt the input */
- for(n=0 ; n < num ; ++n)
- out[n] = (ovec[16+n] = in[n]) ^ ivec[n];
+ for (n = 0; n < num; ++n)
+ out[n] = (ovec[16 + n] = in[n]) ^ ivec[n];
/* shift ovec left... */
- rem = nbits%8;
+ rem = nbits % 8;
num = nbits/8;
- if(rem==0)
- memcpy(ivec,ovec+num,16);
+ if (rem == 0)
+ memcpy(ivec, ovec + num, 16);
else
- for(n=0 ; n < 16 ; ++n)
- ivec[n] = ovec[n+num]<<rem | ovec[n+num+1]>>(8-rem);
+ for (n = 0; n < 16; ++n)
+ ivec[n] = ovec[n + num] << rem |
+ ovec[n + num + 1] >> (8 - rem);
/* it is not necessary to cleanse ovec, since the IV is not secret */
}
/* N.B. This expects the input to be packed, MS bit first */
-void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
- size_t bits, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block)
+void
+CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
+ size_t bits, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block)
{
- size_t n;
- unsigned char c[1],d[1];
+ size_t n;
+ unsigned char c[1], d[1];
- for(n=0 ; n<bits ; ++n)
+ for (n = 0; n < bits; ++n)
{
- c[0]=(in[n/8]&(1 << (7-n%8))) ? 0x80 : 0;
- cfbr_encrypt_block(c,d,1,key,ivec,enc,block);
- out[n/8]=(out[n/8]&~(1 << (unsigned int)(7-n%8))) |
- ((d[0]&0x80) >> (unsigned int)(n%8));
+ c[0] = (in[n/8] & (1 << (7 - n % 8))) ? 0x80 : 0;
+ cfbr_encrypt_block(c, d, 1, key, ivec, enc, block);
+ out[n/8] = (out[n/8] & ~(1 << (unsigned int)(7 - n % 8))) |
+ ((d[0] & 0x80) >> (unsigned int)(n % 8));
}
}
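
/*
 * A small sketch of the packed-bit contract noted above, with the same
 * AES assumption as the earlier sketches: `bits` counts bits consumed
 * MSB-first from in[]; since out[] is updated bit by bit with
 * read-modify-write stores, clear it first.
 */
#include <string.h>

static void
cfb1_encrypt_bits(const AES_KEY *ks, unsigned char iv[16],
    const unsigned char *in, unsigned char *out, size_t bits)
{
	int num = 0;

	memset(out, 0, (bits + 7) / 8);
	CRYPTO_cfb128_1_encrypt(in, out, bits, ks, iv, &num, 1,
	    (block128_f)AES_encrypt);
}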
-void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
- size_t length, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block)
+void
+CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
+ size_t length, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block)
{
- size_t n;
+ size_t n;
- for(n=0 ; n<length ; ++n)
- cfbr_encrypt_block(&in[n],&out[n],8,key,ivec,enc,block);
+ for (n = 0; n < length; ++n)
+ cfbr_encrypt_block(&in[n], &out[n], 8, key, ivec, enc, block);
}
-
-/* $OpenBSD: ctr128.c,v 1.9 2022/12/26 07:18:52 jmc Exp $ */
+/* $OpenBSD: ctr128.c,v 1.10 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* is endian-neutral. */
/* increment counter (128-bit int) by 1 */
-static void ctr128_inc(unsigned char *counter) {
- u32 n=16;
+static void
+ctr128_inc(unsigned char *counter)
+{
+ u32 n = 16;
u8 c;
	do {
		--n;
c = counter[n];
++c;
counter[n] = c;
- if (c) return;
+ if (c)
+ return;
} while (n);
}
* responsibility for checking that the counter doesn't overflow
* into the rest of the IV when incremented.
*/
-void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], unsigned char ecount_buf[16],
- unsigned int *num, block128_f block)
+void
+CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], unsigned char ecount_buf[16],
+ unsigned int *num, block128_f block)
{
unsigned int n;
- size_t l=0;
+ size_t l = 0;
assert(*num < 16);
n = *num;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- while (n && len) {
- *(out++) = *(in++) ^ ecount_buf[n];
- --len;
- n = (n+1) % 16;
- }
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ while (n && len) {
+ *(out++) = *(in++) ^ ecount_buf[n];
+ --len;
+ n = (n + 1) % 16;
+ }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out|(size_t)ivec) %
+ sizeof(size_t) != 0)
+ break;
#endif
- while (len>=16) {
- (*block)(ivec, ecount_buf, key);
- ctr128_inc_aligned(ivec);
- for (; n<16; n+=sizeof(size_t))
- *(size_t *)(out+n) =
- *(size_t *)(in+n) ^ *(size_t *)(ecount_buf+n);
- len -= 16;
- out += 16;
- in += 16;
- n = 0;
- }
- if (len) {
- (*block)(ivec, ecount_buf, key);
- ctr128_inc_aligned(ivec);
- while (len--) {
- out[n] = in[n] ^ ecount_buf[n];
- ++n;
+ while (len >= 16) {
+ (*block)(ivec, ecount_buf, key);
+ ctr128_inc_aligned(ivec);
+ for (; n < 16; n += sizeof(size_t))
+ *(size_t *)(out + n) =
+ *(size_t *)(in + n) ^ *(size_t *)(ecount_buf +
+ n);
+ len -= 16;
+ out += 16;
+ in += 16;
+ n = 0;
}
- }
- *num = n;
- return;
- } while(0);
+ if (len) {
+ (*block)(ivec, ecount_buf, key);
+ ctr128_inc_aligned(ivec);
+ while (len--) {
+ out[n] = in[n] ^ ecount_buf[n];
+ ++n;
+ }
+ }
+ *num = n;
+ return;
+ } while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
- while (l<len) {
- if (n==0) {
+ while (l < len) {
+ if (n == 0) {
(*block)(ivec, ecount_buf, key);
- ctr128_inc(ivec);
+ ctr128_inc(ivec);
}
out[l] = in[l] ^ ecount_buf[n];
++l;
- n = (n+1) % 16;
+ n = (n + 1) % 16;
}
- *num=n;
+ *num = n;
}
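
/*
 * A minimal sketch of the caller contract noted above, with the same
 * AES assumption as the earlier sketches: ecount_buf must be zeroed and
 * *num set to 0 before the first call, and the caller owns the counter
 * layout in ivec (including making sure the count does not overflow
 * into the nonce portion).
 */
static void
ctr_encrypt_once(const AES_KEY *ks, unsigned char ivec[16],
    const unsigned char *in, unsigned char *out, size_t len)
{
	unsigned char ecount_buf[16] = { 0 };
	unsigned int num = 0;

	CRYPTO_ctr128_encrypt(in, out, len, ks, ivec, ecount_buf, &num,
	    (block128_f)AES_encrypt);
}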
/* increment upper 96 bits of 128-bit counter by 1 */
-static void ctr96_inc(unsigned char *counter) {
- u32 n=12;
+static void
+ctr96_inc(unsigned char *counter)
+{
+ u32 n = 12;
u8 c;
	do {
		--n;
c = counter[n];
++c;
counter[n] = c;
- if (c) return;
+ if (c)
+ return;
} while (n);
}
-void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], unsigned char ecount_buf[16],
- unsigned int *num, ctr128_f func)
+void
+CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], unsigned char ecount_buf[16],
+ unsigned int *num, ctr128_f func)
{
- unsigned int n,ctr32;
+ unsigned int n, ctr32;
	assert(*num < 16);

	n = *num;
while (n && len) {
*(out++) = *(in++) ^ ecount_buf[n];
--len;
- n = (n+1) % 16;
+ n = (n + 1) % 16;
}
- ctr32 = GETU32(ivec+12);
- while (len>=16) {
+ ctr32 = GETU32(ivec + 12);
+ while (len >= 16) {
size_t blocks = len/16;
/*
* 1<<28 is just a not-so-small yet not-so-large number...
* Below condition is practically never met, but it has to
* be checked for code correctness.
*/
- if (sizeof(size_t)>sizeof(unsigned int) && blocks>(1U<<28))
- blocks = (1U<<28);
+ if (sizeof(size_t) > sizeof(unsigned int) &&
+ blocks > (1U << 28))
+ blocks = (1U << 28);
/*
* As (*func) operates on 32-bit counter, caller
* has to handle overflow. 'if' below detects the
ctr32 += (u32)blocks;
if (ctr32 < blocks) {
blocks -= ctr32;
- ctr32 = 0;
+ ctr32 = 0;
}
- (*func)(in,out,blocks,key,ivec);
+ (*func)(in, out, blocks, key, ivec);
/* (*ctr) does not update ivec, caller does: */
- PUTU32(ivec+12,ctr32);
+ PUTU32(ivec + 12, ctr32);
/* ... overflow was detected, propagate carry. */
- if (ctr32 == 0) ctr96_inc(ivec);
+ if (ctr32 == 0)
+ ctr96_inc(ivec);
blocks *= 16;
len -= blocks;
out += blocks;
- in += blocks;
+ in += blocks;
}
if (len) {
- memset(ecount_buf,0,16);
- (*func)(ecount_buf,ecount_buf,1,key,ivec);
+ memset(ecount_buf, 0, 16);
+ (*func)(ecount_buf, ecount_buf, 1, key, ivec);
++ctr32;
- PUTU32(ivec+12,ctr32);
- if (ctr32 == 0) ctr96_inc(ivec);
+ PUTU32(ivec + 12, ctr32);
+ if (ctr32 == 0)
+ ctr96_inc(ivec);
while (len--) {
out[n] = in[n] ^ ecount_buf[n];
++n;
}
}
- *num=n;
+ *num = n;
}
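
/*
 * A sketch of the ctr128_f contract relied on above (a hypothetical
 * helper, for illustration only; AES assumed as before): the stream
 * routine encrypts `blocks` full 16-byte blocks, treating ivec[12..15]
 * as a 32-bit big-endian counter, and must not write ivec itself --
 * the caller stores the updated counter back with PUTU32 and handles
 * the carry into the upper 96 bits.
 */
#include <string.h>

static void
example_ctr32_stream(const unsigned char *in, unsigned char *out,
    size_t blocks, const void *key, const unsigned char ivec[16])
{
	unsigned char ctr[16], ks[16];
	int i;

	memcpy(ctr, ivec, 16);
	while (blocks--) {
		AES_encrypt(ctr, ks, key);
		for (i = 0; i < 16; i++)
			*out++ = *in++ ^ ks[i];
		/* bump only the low 32 bits, wrapping mod 2^32 */
		for (i = 15; i >= 12; i--)
			if (++ctr[i] != 0)
				break;
	}
}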
-/* $OpenBSD: gcm128.c,v 1.23 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: gcm128.c,v 1.24 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2010 The OpenSSL Project. All rights reserved.
*
#endif
#define PACK(s) ((size_t)(s)<<(sizeof(size_t)*8-16))
-#define REDUCE1BIT(V) \
- do { \
- if (sizeof(size_t)==8) { \
- u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
- V.lo = (V.hi<<63)|(V.lo>>1); \
- V.hi = (V.hi>>1 )^T; \
- } else { \
- u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
- V.lo = (V.hi<<63)|(V.lo>>1); \
- V.hi = (V.hi>>1 )^((u64)T<<32); \
- } \
+#define REDUCE1BIT(V) \
+ do { \
+ if (sizeof(size_t)==8) { \
+ u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
+ V.lo = (V.hi<<63)|(V.lo>>1); \
+ V.hi = (V.hi>>1 )^T; \
+ } else { \
+ u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
+ V.lo = (V.hi<<63)|(V.lo>>1); \
+ V.hi = (V.hi>>1 )^((u64)T<<32); \
+ } \
} while(0)
/*
*/
#if TABLE_BITS==8
-static void gcm_init_8bit(u128 Htable[256], u64 H[2])
+static void
+gcm_init_8bit(u128 Htable[256], u64 H[2])
{
int i, j;
u128 V;
V.hi = H[0];
V.lo = H[1];
- for (Htable[128]=V, i=64; i>0; i>>=1) {
+ for (Htable[128] = V, i = 64; i > 0; i >>= 1) {
REDUCE1BIT(V);
Htable[i] = V;
}
- for (i=2; i<256; i<<=1) {
- u128 *Hi = Htable+i, H0 = *Hi;
- for (j=1; j<i; ++j) {
- Hi[j].hi = H0.hi^Htable[j].hi;
- Hi[j].lo = H0.lo^Htable[j].lo;
+ for (i = 2; i < 256; i <<= 1) {
+ u128 *Hi = Htable + i, H0 = *Hi;
+ for (j = 1; j < i; ++j) {
+ Hi[j].hi = H0.hi ^ Htable[j].hi;
+ Hi[j].lo = H0.lo ^ Htable[j].lo;
}
}
}
-static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
+static void
+gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
{
u128 Z = { 0, 0};
- const u8 *xi = (const u8 *)Xi+15;
+ const u8 *xi = (const u8 *)Xi + 15;
size_t rem, n = *xi;
static const size_t rem_8bit[256] = {
PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246),
Z.hi ^= Htable[n].hi;
Z.lo ^= Htable[n].lo;
- if ((u8 *)Xi==xi) break;
+ if ((u8 *)Xi == xi)
+ break;
n = *(--xi);
- rem = (size_t)Z.lo&0xff;
- Z.lo = (Z.hi<<56)|(Z.lo>>8);
- Z.hi = (Z.hi>>8);
+ rem = (size_t)Z.lo & 0xff;
+ Z.lo = (Z.hi << 56)|(Z.lo >> 8);
+ Z.hi = (Z.hi >> 8);
#if SIZE_MAX == 0xffffffffffffffff
Z.hi ^= rem_8bit[rem];
#else
- Z.hi ^= (u64)rem_8bit[rem]<<32;
+ Z.hi ^= (u64)rem_8bit[rem] << 32;
#endif
}
#else
u8 *p = (u8 *)Xi;
u32 v;
- v = (u32)(Z.hi>>32); PUTU32(p,v);
- v = (u32)(Z.hi); PUTU32(p+4,v);
- v = (u32)(Z.lo>>32); PUTU32(p+8,v);
- v = (u32)(Z.lo); PUTU32(p+12,v);
+ v = (u32)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (u32)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (u32)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (u32)(Z.lo);
+ PUTU32(p + 12, v);
#endif
#else /* BIG_ENDIAN */
Xi[0] = Z.hi;
#elif TABLE_BITS==4
-static void gcm_init_4bit(u128 Htable[16], u64 H[2])
+static void
+gcm_init_4bit(u128 Htable[16], u64 H[2])
{
u128 V;
#if defined(OPENSSL_SMALL_FOOTPRINT)
V.lo = H[1];
#if defined(OPENSSL_SMALL_FOOTPRINT)
- for (Htable[8]=V, i=4; i>0; i>>=1) {
+ for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
REDUCE1BIT(V);
Htable[i] = V;
}
- for (i=2; i<16; i<<=1) {
- u128 *Hi = Htable+i;
+ for (i = 2; i < 16; i <<= 1) {
+ u128 *Hi = Htable + i;
int j;
- for (V=*Hi, j=1; j<i; ++j) {
- Hi[j].hi = V.hi^Htable[j].hi;
- Hi[j].lo = V.lo^Htable[j].lo;
+ for (V = *Hi, j = 1; j < i; ++j) {
+ Hi[j].hi = V.hi ^ Htable[j].hi;
+ Hi[j].lo = V.lo ^ Htable[j].lo;
}
}
#else
Htable[2] = V;
REDUCE1BIT(V);
Htable[1] = V;
- Htable[3].hi = V.hi^Htable[2].hi, Htable[3].lo = V.lo^Htable[2].lo;
- V=Htable[4];
- Htable[5].hi = V.hi^Htable[1].hi, Htable[5].lo = V.lo^Htable[1].lo;
- Htable[6].hi = V.hi^Htable[2].hi, Htable[6].lo = V.lo^Htable[2].lo;
- Htable[7].hi = V.hi^Htable[3].hi, Htable[7].lo = V.lo^Htable[3].lo;
- V=Htable[8];
- Htable[9].hi = V.hi^Htable[1].hi, Htable[9].lo = V.lo^Htable[1].lo;
- Htable[10].hi = V.hi^Htable[2].hi, Htable[10].lo = V.lo^Htable[2].lo;
- Htable[11].hi = V.hi^Htable[3].hi, Htable[11].lo = V.lo^Htable[3].lo;
- Htable[12].hi = V.hi^Htable[4].hi, Htable[12].lo = V.lo^Htable[4].lo;
- Htable[13].hi = V.hi^Htable[5].hi, Htable[13].lo = V.lo^Htable[5].lo;
- Htable[14].hi = V.hi^Htable[6].hi, Htable[14].lo = V.lo^Htable[6].lo;
- Htable[15].hi = V.hi^Htable[7].hi, Htable[15].lo = V.lo^Htable[7].lo;
+ Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
+ V = Htable[4];
+ Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
+ Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
+ Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
+ V = Htable[8];
+ Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
+ Htable[10].hi = V.hi ^ Htable[2].hi,
+ Htable[10].lo = V.lo ^ Htable[2].lo;
+ Htable[11].hi = V.hi ^ Htable[3].hi,
+ Htable[11].lo = V.lo ^ Htable[3].lo;
+ Htable[12].hi = V.hi ^ Htable[4].hi,
+ Htable[12].lo = V.lo ^ Htable[4].lo;
+ Htable[13].hi = V.hi ^ Htable[5].hi,
+ Htable[13].lo = V.lo ^ Htable[5].lo;
+ Htable[14].hi = V.hi ^ Htable[6].hi,
+ Htable[14].lo = V.lo ^ Htable[6].lo;
+ Htable[15].hi = V.hi ^ Htable[7].hi,
+ Htable[15].lo = V.lo ^ Htable[7].lo;
#endif
#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
/*
{
int j;
#if BYTE_ORDER == LITTLE_ENDIAN
- for (j=0;j<16;++j) {
+ for (j = 0; j < 16; ++j) {
V = Htable[j];
Htable[j].hi = V.lo;
Htable[j].lo = V.hi;
}
#else /* BIG_ENDIAN */
- for (j=0;j<16;++j) {
+ for (j = 0; j < 16; ++j) {
V = Htable[j];
- Htable[j].hi = V.lo<<32|V.lo>>32;
- Htable[j].lo = V.hi<<32|V.hi>>32;
+ Htable[j].hi = V.lo << 32|V.lo >> 32;
+ Htable[j].lo = V.hi << 32|V.hi >> 32;
}
#endif
}
PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0) };
-static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
+static void
+gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
{
u128 Z;
int cnt = 15;
size_t rem, nlo, nhi;
- nlo = ((const u8 *)Xi)[15];
- nhi = nlo>>4;
+ nlo = ((const u8 *)Xi)[15];
+ nhi = nlo >> 4;
nlo &= 0xf;
Z.hi = Htable[nlo].hi;
Z.lo = Htable[nlo].lo;
while (1) {
- rem = (size_t)Z.lo&0xf;
- Z.lo = (Z.hi<<60)|(Z.lo>>4);
- Z.hi = (Z.hi>>4);
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60)|(Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
#if SIZE_MAX == 0xffffffffffffffff
Z.hi ^= rem_4bit[rem];
#else
- Z.hi ^= (u64)rem_4bit[rem]<<32;
+ Z.hi ^= (u64)rem_4bit[rem] << 32;
#endif
Z.hi ^= Htable[nhi].hi;
Z.lo ^= Htable[nhi].lo;
- if (--cnt<0) break;
+ if (--cnt < 0)
+ break;
- nlo = ((const u8 *)Xi)[cnt];
- nhi = nlo>>4;
+ nlo = ((const u8 *)Xi)[cnt];
+ nhi = nlo >> 4;
nlo &= 0xf;
- rem = (size_t)Z.lo&0xf;
- Z.lo = (Z.hi<<60)|(Z.lo>>4);
- Z.hi = (Z.hi>>4);
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60)|(Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
#if SIZE_MAX == 0xffffffffffffffff
Z.hi ^= rem_4bit[rem];
#else
- Z.hi ^= (u64)rem_4bit[rem]<<32;
+ Z.hi ^= (u64)rem_4bit[rem] << 32;
#endif
Z.hi ^= Htable[nlo].hi;
Z.lo ^= Htable[nlo].lo;
#else
u8 *p = (u8 *)Xi;
u32 v;
- v = (u32)(Z.hi>>32); PUTU32(p,v);
- v = (u32)(Z.hi); PUTU32(p+4,v);
- v = (u32)(Z.lo>>32); PUTU32(p+8,v);
- v = (u32)(Z.lo); PUTU32(p+12,v);
+ v = (u32)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (u32)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (u32)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (u32)(Z.lo);
+ PUTU32(p + 12, v);
#endif
#else /* BIG_ENDIAN */
Xi[0] = Z.hi;
* mostly as reference and a placeholder for possible future
* non-trivial optimization[s]...
*/
-static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len)
+static void
+gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len)
{
- u128 Z;
- int cnt;
- size_t rem, nlo, nhi;
+ u128 Z;
+ int cnt;
+ size_t rem, nlo, nhi;
#if 1
- do {
- cnt = 15;
- nlo = ((const u8 *)Xi)[15];
- nlo ^= inp[15];
- nhi = nlo>>4;
- nlo &= 0xf;
+ do {
+ cnt = 15;
+ nlo = ((const u8 *)Xi)[15];
+ nlo ^= inp[15];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
- Z.hi = Htable[nlo].hi;
- Z.lo = Htable[nlo].lo;
+ Z.hi = Htable[nlo].hi;
+ Z.lo = Htable[nlo].lo;
- while (1) {
- rem = (size_t)Z.lo&0xf;
- Z.lo = (Z.hi<<60)|(Z.lo>>4);
- Z.hi = (Z.hi>>4);
+ while (1) {
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60)|(Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
#if SIZE_MAX == 0xffffffffffffffff
- Z.hi ^= rem_4bit[rem];
+ Z.hi ^= rem_4bit[rem];
#else
- Z.hi ^= (u64)rem_4bit[rem]<<32;
+ Z.hi ^= (u64)rem_4bit[rem] << 32;
#endif
- Z.hi ^= Htable[nhi].hi;
- Z.lo ^= Htable[nhi].lo;
+ Z.hi ^= Htable[nhi].hi;
+ Z.lo ^= Htable[nhi].lo;
- if (--cnt<0) break;
+ if (--cnt < 0)
+ break;
- nlo = ((const u8 *)Xi)[cnt];
- nlo ^= inp[cnt];
- nhi = nlo>>4;
- nlo &= 0xf;
+ nlo = ((const u8 *)Xi)[cnt];
+ nlo ^= inp[cnt];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
- rem = (size_t)Z.lo&0xf;
- Z.lo = (Z.hi<<60)|(Z.lo>>4);
- Z.hi = (Z.hi>>4);
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60)|(Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
#if SIZE_MAX == 0xffffffffffffffff
- Z.hi ^= rem_4bit[rem];
+ Z.hi ^= rem_4bit[rem];
#else
- Z.hi ^= (u64)rem_4bit[rem]<<32;
+ Z.hi ^= (u64)rem_4bit[rem] << 32;
#endif
- Z.hi ^= Htable[nlo].hi;
- Z.lo ^= Htable[nlo].lo;
- }
+ Z.hi ^= Htable[nlo].hi;
+ Z.lo ^= Htable[nlo].lo;
+ }
#else
/*
* Extra 256+16 bytes per-key plus 512 bytes shared tables
* the rem_8bit even here, but the priority is to minimize
* cache footprint...
*/
- u128 Hshr4[16]; /* Htable shifted right by 4 bits */
- u8 Hshl4[16]; /* Htable shifted left by 4 bits */
- static const unsigned short rem_8bit[256] = {
- 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E,
- 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E,
- 0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E,
- 0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E,
- 0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E,
- 0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E,
- 0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E,
- 0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E,
- 0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE,
- 0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE,
- 0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE,
- 0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE,
- 0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E,
- 0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E,
- 0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE,
- 0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE,
- 0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E,
- 0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E,
- 0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E,
- 0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E,
- 0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E,
- 0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E,
- 0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E,
- 0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E,
- 0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE,
- 0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE,
- 0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE,
- 0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE,
- 0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E,
- 0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E,
- 0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE,
- 0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE };
+ u128 Hshr4[16]; /* Htable shifted right by 4 bits */
+ u8 Hshl4[16]; /* Htable shifted left by 4 bits */
+ static const unsigned short rem_8bit[256] = {
+ 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E,
+ 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E,
+ 0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E,
+ 0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E,
+ 0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E,
+ 0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E,
+ 0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E,
+ 0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E,
+ 0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE,
+ 0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE,
+ 0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE,
+ 0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE,
+ 0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E,
+ 0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E,
+ 0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE,
+ 0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE,
+ 0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E,
+ 0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E,
+ 0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E,
+ 0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E,
+ 0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E,
+ 0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E,
+ 0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E,
+ 0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E,
+ 0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE,
+ 0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE,
+ 0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE,
+ 0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE,
+ 0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E,
+ 0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E,
+ 0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE,
+ 0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE };
/*
* This pre-processing phase slows down procedure by approximately
* same time as it makes each loop spin faster. In other words
* single block performance is approximately same as straightforward
* "4-bit" implementation, and then it goes only faster...
*/
- for (cnt=0; cnt<16; ++cnt) {
- Z.hi = Htable[cnt].hi;
- Z.lo = Htable[cnt].lo;
- Hshr4[cnt].lo = (Z.hi<<60)|(Z.lo>>4);
- Hshr4[cnt].hi = (Z.hi>>4);
- Hshl4[cnt] = (u8)(Z.lo<<4);
- }
-
- do {
- for (Z.lo=0, Z.hi=0, cnt=15; cnt; --cnt) {
- nlo = ((const u8 *)Xi)[cnt];
- nlo ^= inp[cnt];
- nhi = nlo>>4;
- nlo &= 0xf;
+ for (cnt = 0; cnt < 16; ++cnt) {
+ Z.hi = Htable[cnt].hi;
+ Z.lo = Htable[cnt].lo;
+ Hshr4[cnt].lo = (Z.hi << 60)|(Z.lo >> 4);
+ Hshr4[cnt].hi = (Z.hi >> 4);
+ Hshl4[cnt] = (u8)(Z.lo << 4);
+ }
- Z.hi ^= Htable[nlo].hi;
- Z.lo ^= Htable[nlo].lo;
+ do {
+ for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) {
+ nlo = ((const u8 *)Xi)[cnt];
+ nlo ^= inp[cnt];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
- rem = (size_t)Z.lo&0xff;
+ Z.hi ^= Htable[nlo].hi;
+ Z.lo ^= Htable[nlo].lo;
- Z.lo = (Z.hi<<56)|(Z.lo>>8);
- Z.hi = (Z.hi>>8);
+ rem = (size_t)Z.lo & 0xff;
- Z.hi ^= Hshr4[nhi].hi;
- Z.lo ^= Hshr4[nhi].lo;
- Z.hi ^= (u64)rem_8bit[rem^Hshl4[nhi]]<<48;
- }
+ Z.lo = (Z.hi << 56)|(Z.lo >> 8);
+ Z.hi = (Z.hi >> 8);
- nlo = ((const u8 *)Xi)[0];
- nlo ^= inp[0];
- nhi = nlo>>4;
- nlo &= 0xf;
+ Z.hi ^= Hshr4[nhi].hi;
+ Z.lo ^= Hshr4[nhi].lo;
+ Z.hi ^= (u64)rem_8bit[rem ^ Hshl4[nhi]] << 48;
+ }
- Z.hi ^= Htable[nlo].hi;
- Z.lo ^= Htable[nlo].lo;
+ nlo = ((const u8 *)Xi)[0];
+ nlo ^= inp[0];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
+
+ Z.hi ^= Htable[nlo].hi;
+ Z.lo ^= Htable[nlo].lo;
- rem = (size_t)Z.lo&0xf;
+ rem = (size_t)Z.lo & 0xf;
- Z.lo = (Z.hi<<60)|(Z.lo>>4);
- Z.hi = (Z.hi>>4);
+ Z.lo = (Z.hi << 60)|(Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
- Z.hi ^= Htable[nhi].hi;
- Z.lo ^= Htable[nhi].lo;
- Z.hi ^= ((u64)rem_8bit[rem<<4])<<48;
+ Z.hi ^= Htable[nhi].hi;
+ Z.lo ^= Htable[nhi].lo;
+ Z.hi ^= ((u64)rem_8bit[rem << 4]) << 48;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP8
- Xi[0] = BSWAP8(Z.hi);
- Xi[1] = BSWAP8(Z.lo);
+ Xi[0] = BSWAP8(Z.hi);
+ Xi[1] = BSWAP8(Z.lo);
#else
- u8 *p = (u8 *)Xi;
- u32 v;
- v = (u32)(Z.hi>>32); PUTU32(p,v);
- v = (u32)(Z.hi); PUTU32(p+4,v);
- v = (u32)(Z.lo>>32); PUTU32(p+8,v);
- v = (u32)(Z.lo); PUTU32(p+12,v);
+ u8 *p = (u8 *)Xi;
+ u32 v;
+ v = (u32)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (u32)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (u32)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (u32)(Z.lo);
+ PUTU32(p + 12, v);
#endif
#else /* BIG_ENDIAN */
- Xi[0] = Z.hi;
- Xi[1] = Z.lo;
+ Xi[0] = Z.hi;
+ Xi[1] = Z.lo;
#endif
- } while (inp+=16, len-=16);
+ } while (inp += 16, len -= 16);
}
#endif
#else
-void gcm_gmult_4bit(u64 Xi[2],const u128 Htable[16]);
-void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
#endif
#define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
#else /* TABLE_BITS */
-static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2])
+static void
+gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
{
- u128 V,Z = { 0,0 };
+ u128 V, Z = { 0,0 };
long X;
- int i,j;
+ int i, j;
const long *xi = (const long *)Xi;
V.hi = H[0]; /* H is in host byte order, no byte swapping */
V.lo = H[1];
- for (j=0; j<16/sizeof(long); ++j) {
+ for (j = 0; j < 16/sizeof(long); ++j) {
#if BYTE_ORDER == LITTLE_ENDIAN
#if SIZE_MAX == 0xffffffffffffffff
#ifdef BSWAP8
- X = (long)(BSWAP8(xi[j]));
+ X = (long)(BSWAP8(xi[j]));
#else
- const u8 *p = (const u8 *)(xi+j);
- X = (long)((u64)GETU32(p)<<32|GETU32(p+4));
+ const u8 *p = (const u8 *)(xi + j);
+ X = (long)((u64)GETU32(p) << 32|GETU32(p + 4));
#endif
#else
- const u8 *p = (const u8 *)(xi+j);
- X = (long)GETU32(p);
+ const u8 *p = (const u8 *)(xi + j);
+ X = (long)GETU32(p);
#endif
#else /* BIG_ENDIAN */
X = xi[j];
#endif
- for (i=0; i<8*sizeof(long); ++i, X<<=1) {
- u64 M = (u64)(X>>(8*sizeof(long)-1));
- Z.hi ^= V.hi&M;
- Z.lo ^= V.lo&M;
+ for (i = 0; i < 8*sizeof(long); ++i, X <<= 1) {
+ u64 M = (u64)(X >> (8*sizeof(long) - 1));
+ Z.hi ^= V.hi & M;
+ Z.lo ^= V.lo & M;
REDUCE1BIT(V);
}
#else
u8 *p = (u8 *)Xi;
u32 v;
- v = (u32)(Z.hi>>32); PUTU32(p,v);
- v = (u32)(Z.hi); PUTU32(p+4,v);
- v = (u32)(Z.lo>>32); PUTU32(p+8,v);
- v = (u32)(Z.lo); PUTU32(p+12,v);
+ v = (u32)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (u32)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (u32)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (u32)(Z.lo);
+ PUTU32(p + 12, v);
#endif
#else /* BIG_ENDIAN */
Xi[0] = Z.hi;
#endif
-#if defined(GHASH_ASM) && \
- (defined(__i386) || defined(__i386__) || \
- defined(__x86_64) || defined(__x86_64__) || \
+#if defined(GHASH_ASM) && \
+ (defined(__i386) || defined(__i386__) || \
+ defined(__x86_64) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
#include "x86_arch.h"
#endif
#if TABLE_BITS==4 && defined(GHASH_ASM)
-# if (defined(__i386) || defined(__i386__) || \
- defined(__x86_64) || defined(__x86_64__) || \
+# if (defined(__i386) || defined(__i386__) || \
+ defined(__x86_64) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
# define GHASH_ASM_X86_OR_64
# define GCM_FUNCREF_4BIT
-void gcm_init_clmul(u128 Htable[16],const u64 Xi[2]);
-void gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]);
-void gcm_ghash_clmul(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
+void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
# define GHASH_ASM_X86
-void gcm_gmult_4bit_mmx(u64 Xi[2],const u128 Htable[16]);
-void gcm_ghash_4bit_mmx(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
-void gcm_gmult_4bit_x86(u64 Xi[2],const u128 Htable[16]);
-void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
# endif
# elif defined(__arm__) || defined(__arm)
# include "arm_arch.h"
# if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
# define GHASH_ASM_ARM
# define GCM_FUNCREF_4BIT
-void gcm_gmult_neon(u64 Xi[2],const u128 Htable[16]);
-void gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
# endif
# endif
#endif
# endif
#endif
-void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
+void
+CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
{
- memset(ctx,0,sizeof(*ctx));
+ memset(ctx, 0, sizeof(*ctx));
ctx->block = block;
- ctx->key = key;
+ ctx->key = key;
- (*block)(ctx->H.c,ctx->H.c,key);
+ (*block)(ctx->H.c, ctx->H.c, key);
#if BYTE_ORDER == LITTLE_ENDIAN
/* H is stored in host byte order */
ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
u8 *p = ctx->H.c;
- u64 hi,lo;
- hi = (u64)GETU32(p) <<32|GETU32(p+4);
- lo = (u64)GETU32(p+8)<<32|GETU32(p+12);
+ u64 hi, lo;
+ hi = (u64)GETU32(p) << 32|GETU32(p + 4);
+ lo = (u64)GETU32(p + 8) << 32|GETU32(p + 12);
ctx->H.u[0] = hi;
ctx->H.u[1] = lo;
#endif
#endif
#if TABLE_BITS==8
- gcm_init_8bit(ctx->Htable,ctx->H.u);
+ gcm_init_8bit(ctx->Htable, ctx->H.u);
#elif TABLE_BITS==4
# if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
/* check FXSR and PCLMULQDQ bits */
if ((OPENSSL_cpu_caps() & (CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) ==
(CPUCAP_MASK_FXSR | CPUCAP_MASK_PCLMUL)) {
- gcm_init_clmul(ctx->Htable,ctx->H.u);
+ gcm_init_clmul(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_clmul;
ctx->ghash = gcm_ghash_clmul;
return;
}
# endif
- gcm_init_4bit(ctx->Htable,ctx->H.u);
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
# if defined(GHASH_ASM_X86) /* x86 only */
# if defined(OPENSSL_IA32_SSE2)
if (OPENSSL_cpu_caps() & CPUCAP_MASK_SSE) { /* check SSE bit */
ctx->gmult = gcm_gmult_neon;
ctx->ghash = gcm_ghash_neon;
} else {
- gcm_init_4bit(ctx->Htable,ctx->H.u);
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
ctx->ghash = gcm_ghash_4bit;
}
# else
- gcm_init_4bit(ctx->Htable,ctx->H.u);
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
# endif
#endif
}
-void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx,const unsigned char *iv,size_t len)
+void
+CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, size_t len)
{
unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif
- ctx->Yi.u[0] = 0;
- ctx->Yi.u[1] = 0;
- ctx->Xi.u[0] = 0;
- ctx->Xi.u[1] = 0;
+ ctx->Yi.u[0] = 0;
+ ctx->Yi.u[1] = 0;
+ ctx->Xi.u[0] = 0;
+ ctx->Xi.u[1] = 0;
ctx->len.u[0] = 0; /* AAD length */
ctx->len.u[1] = 0; /* message length */
ctx->ares = 0;
ctx->mres = 0;
- if (len==12) {
- memcpy(ctx->Yi.c,iv,12);
- ctx->Yi.c[15]=1;
- ctr=1;
- }
- else {
+ if (len == 12) {
+ memcpy(ctx->Yi.c, iv, 12);
+ ctx->Yi.c[15] = 1;
+ ctr = 1;
+ } else {
size_t i;
u64 len0 = len;
- while (len>=16) {
- for (i=0; i<16; ++i) ctx->Yi.c[i] ^= iv[i];
- GCM_MUL(ctx,Yi);
+ while (len >= 16) {
+ for (i = 0; i < 16; ++i)
+ ctx->Yi.c[i] ^= iv[i];
+ GCM_MUL(ctx, Yi);
iv += 16;
len -= 16;
}
if (len) {
- for (i=0; i<len; ++i) ctx->Yi.c[i] ^= iv[i];
- GCM_MUL(ctx,Yi);
+ for (i = 0; i < len; ++i)
+ ctx->Yi.c[i] ^= iv[i];
+ GCM_MUL(ctx, Yi);
}
len0 <<= 3;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP8
- ctx->Yi.u[1] ^= BSWAP8(len0);
+ ctx->Yi.u[1] ^= BSWAP8(len0);
#else
- ctx->Yi.c[8] ^= (u8)(len0>>56);
- ctx->Yi.c[9] ^= (u8)(len0>>48);
- ctx->Yi.c[10] ^= (u8)(len0>>40);
- ctx->Yi.c[11] ^= (u8)(len0>>32);
- ctx->Yi.c[12] ^= (u8)(len0>>24);
- ctx->Yi.c[13] ^= (u8)(len0>>16);
- ctx->Yi.c[14] ^= (u8)(len0>>8);
+ ctx->Yi.c[8] ^= (u8)(len0 >> 56);
+ ctx->Yi.c[9] ^= (u8)(len0 >> 48);
+ ctx->Yi.c[10] ^= (u8)(len0 >> 40);
+ ctx->Yi.c[11] ^= (u8)(len0 >> 32);
+ ctx->Yi.c[12] ^= (u8)(len0 >> 24);
+ ctx->Yi.c[13] ^= (u8)(len0 >> 16);
+ ctx->Yi.c[14] ^= (u8)(len0 >> 8);
ctx->Yi.c[15] ^= (u8)(len0);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.u[1] ^= len0;
+ ctx->Yi.u[1] ^= len0;
#endif
- GCM_MUL(ctx,Yi);
+ GCM_MUL(ctx, Yi);
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
#else
- ctr = GETU32(ctx->Yi.c+12);
+ ctr = GETU32(ctx->Yi.c + 12);
#endif
#else /* BIG_ENDIAN */
ctr = ctx->Yi.d[3];
#endif
}
- (*ctx->block)(ctx->Yi.c,ctx->EK0.c,ctx->key);
+ (*ctx->block)(ctx->Yi.c, ctx->EK0.c, ctx->key);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
}
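/*
 * Illustrative sketch only, not part of the diff: the len == 12 fast
 * path above forms the initial counter block J0 = IV || 0^31 || 1
 * directly, while longer IVs are absorbed through GHASH. The helper
 * below (gcm_j0_96 is a hypothetical name) restates the 96-bit case.
 */
#include <string.h>

static void
gcm_j0_96(unsigned char j0[16], const unsigned char iv[12])
{
	memcpy(j0, iv, 12);		/* J0[0..11] = IV */
	j0[12] = j0[13] = j0[14] = 0;	/* 31 zero bits... */
	j0[15] = 1;			/* ...then a final 1 bit */
}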
-int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len)
+int
+CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, size_t len)
{
size_t i;
unsigned int n;
u64 alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
- void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len) = ctx->ghash;
+ void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif
- if (ctx->len.u[1]) return -2;
+ if (ctx->len.u[1])
+ return -2;
alen += len;
- if (alen>(U64(1)<<61) || (sizeof(len)==8 && alen<len))
+ if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
return -1;
ctx->len.u[0] = alen;
while (n && len) {
ctx->Xi.c[n] ^= *(aad++);
--len;
- n = (n+1)%16;
+ n = (n + 1) % 16;
}
- if (n==0) GCM_MUL(ctx,Xi);
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
else {
ctx->ares = n;
return 0;
}
#ifdef GHASH
- if ((i = (len&(size_t)-16))) {
- GHASH(ctx,aad,i);
+ if ((i = (len & (size_t)-16))) {
+ GHASH(ctx, aad, i);
aad += i;
len -= i;
}
#else
- while (len>=16) {
- for (i=0; i<16; ++i) ctx->Xi.c[i] ^= aad[i];
- GCM_MUL(ctx,Xi);
+ while (len >= 16) {
+ for (i = 0; i < 16; ++i)
+ ctx->Xi.c[i] ^= aad[i];
+ GCM_MUL(ctx, Xi);
aad += 16;
len -= 16;
}
#endif
if (len) {
n = (unsigned int)len;
- for (i=0; i<len; ++i) ctx->Xi.c[i] ^= aad[i];
+ for (i = 0; i < len; ++i)
+ ctx->Xi.c[i] ^= aad[i];
}
ctx->ares = n;
return 0;
}
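/*
 * Illustrative note: here and in the bulk encrypt/decrypt paths,
 * len & (size_t)-16 rounds a byte count down to a whole number of
 * 16-byte blocks, since (size_t)-16 has every bit set except the low
 * four. A minimal restatement (whole_blocks is a hypothetical name):
 */
#include <stddef.h>

static size_t
whole_blocks(size_t len)
{
	return len & (size_t)-16;	/* e.g. 37 -> 32, 16 -> 16, 5 -> 0 */
}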
-int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len)
+int
+CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len)
{
unsigned int n, ctr;
size_t i;
- u64 mlen = ctx->len.u[1];
+ u64 mlen = ctx->len.u[1];
block128_f block = ctx->block;
- void *key = ctx->key;
+ void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
- void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len) = ctx->ghash;
+ void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif
mlen += len;
- if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
return -1;
ctx->len.u[1] = mlen;
if (ctx->ares) {
/* First call to encrypt finalizes GHASH(AAD) */
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
#else
- ctr = GETU32(ctx->Yi.c+12);
+ ctr = GETU32(ctx->Yi.c + 12);
#endif
#else /* BIG_ENDIAN */
ctr = ctx->Yi.d[3];
#endif
n = ctx->mres;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- if (n) {
- while (n && len) {
- ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n];
- --len;
- n = (n+1)%16;
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ if (n) {
+ while (n && len) {
+ ctx->Xi.c[n] ^= *(out++) = *(in++) ^
+ ctx->EKi.c[n];
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
+ else {
+ ctx->mres = n;
+ return 0;
+ }
}
- if (n==0) GCM_MUL(ctx,Xi);
- else {
- ctx->mres = n;
- return 0;
- }
- }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
+ break;
#endif
#if defined(GHASH) && defined(GHASH_CHUNK)
- while (len>=GHASH_CHUNK) {
- size_t j=GHASH_CHUNK;
+ while (len >= GHASH_CHUNK) {
+ size_t j = GHASH_CHUNK;
- while (j) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
+ while (j) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
-#endif
- for (i=0; i<16/sizeof(size_t); ++i)
- out_t[i] = in_t[i] ^ ctx->EKi.t[i];
- out += 16;
- in += 16;
- j -= 16;
- }
- GHASH(ctx,out-GHASH_CHUNK,GHASH_CHUNK);
- len -= GHASH_CHUNK;
- }
- if ((i = (len&(size_t)-16))) {
- size_t j=i;
+ ctx->Yi.d[3] = ctr;
+#endif
+ for (i = 0; i < 16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^
+ ctx->EKi.t[i];
+ out += 16;
+ in += 16;
+ j -= 16;
+ }
+ GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
+ len -= GHASH_CHUNK;
+ }
+ if ((i = (len & (size_t)-16))) {
+ size_t j = i;
- while (len>=16) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
-#endif
- for (i=0; i<16/sizeof(size_t); ++i)
- out_t[i] = in_t[i] ^ ctx->EKi.t[i];
- out += 16;
- in += 16;
- len -= 16;
- }
- GHASH(ctx,out-j,j);
- }
+ ctx->Yi.d[3] = ctr;
+#endif
+ for (i = 0; i < 16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^
+ ctx->EKi.t[i];
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ GHASH(ctx, out - j, j);
+ }
#else
- while (len>=16) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
+ ctx->Yi.d[3] = ctr;
#endif
- for (i=0; i<16/sizeof(size_t); ++i)
- ctx->Xi.t[i] ^=
- out_t[i] = in_t[i]^ctx->EKi.t[i];
- GCM_MUL(ctx,Xi);
- out += 16;
- in += 16;
- len -= 16;
- }
+ for (i = 0; i < 16/sizeof(size_t); ++i)
+ ctx->Xi.t[i] ^=
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ GCM_MUL(ctx, Xi);
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
#endif
- if (len) {
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ if (len) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
+ ctx->Yi.d[3] = ctr;
#endif
- while (len--) {
- ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n];
- ++n;
+ while (len--) {
+ ctx->Xi.c[n] ^= out[n] = in[n] ^
+ ctx->EKi.c[n];
+ ++n;
+ }
}
- }
- ctx->mres = n;
- return 0;
- } while(0);
+ ctx->mres = n;
+ return 0;
+ } while (0);
#endif
- for (i=0;i<len;++i) {
- if (n==0) {
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
+ for (i = 0; i < len; ++i) {
+ if (n == 0) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
}
- ctx->Xi.c[n] ^= out[i] = in[i]^ctx->EKi.c[n];
- n = (n+1)%16;
- if (n==0)
- GCM_MUL(ctx,Xi);
+ ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
+ n = (n + 1) % 16;
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
}
ctx->mres = n;
return 0;
}
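/*
 * Illustrative sketch: the code above keeps the low 32 bits of the
 * counter block Yi in the host-order variable ctr and stores it back
 * big-endian with BSWAP4 or PUTU32. A byte-wise equivalent of that
 * wrap-around increment of Yi[12..15] (ctr32_inc is a hypothetical
 * name, not a library function):
 */
static void
ctr32_inc(unsigned char yi[16])
{
	unsigned int n = 16;

	while (n-- > 12)
		if (++yi[n] != 0)
			break;		/* no carry into the next byte */
}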
-int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len)
+int
+CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len)
{
unsigned int n, ctr;
size_t i;
- u64 mlen = ctx->len.u[1];
+ u64 mlen = ctx->len.u[1];
block128_f block = ctx->block;
- void *key = ctx->key;
+ void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
- void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len) = ctx->ghash;
+ void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif
mlen += len;
- if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
return -1;
ctx->len.u[1] = mlen;
if (ctx->ares) {
/* First call to decrypt finalizes GHASH(AAD) */
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
#else
- ctr = GETU32(ctx->Yi.c+12);
+ ctr = GETU32(ctx->Yi.c + 12);
#endif
#else /* BIG_ENDIAN */
ctr = ctx->Yi.d[3];
#endif
n = ctx->mres;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- if (n) {
- while (n && len) {
- u8 c = *(in++);
- *(out++) = c^ctx->EKi.c[n];
- ctx->Xi.c[n] ^= c;
- --len;
- n = (n+1)%16;
- }
- if (n==0) GCM_MUL (ctx,Xi);
- else {
- ctx->mres = n;
- return 0;
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ if (n) {
+ while (n && len) {
+ u8 c = *(in++);
+ *(out++) = c ^ ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= c;
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
+ else {
+ ctx->mres = n;
+ return 0;
+ }
}
- }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out) % sizeof(size_t) != 0)
+ break;
#endif
#if defined(GHASH) && defined(GHASH_CHUNK)
- while (len>=GHASH_CHUNK) {
- size_t j=GHASH_CHUNK;
+ while (len >= GHASH_CHUNK) {
+ size_t j = GHASH_CHUNK;
- GHASH(ctx,in,GHASH_CHUNK);
- while (j) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
+ GHASH(ctx, in, GHASH_CHUNK);
+ while (j) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
-#endif
- for (i=0; i<16/sizeof(size_t); ++i)
- out_t[i] = in_t[i]^ctx->EKi.t[i];
- out += 16;
- in += 16;
- j -= 16;
- }
- len -= GHASH_CHUNK;
- }
- if ((i = (len&(size_t)-16))) {
- GHASH(ctx,in,i);
- while (len>=16) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
-
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ ctx->Yi.d[3] = ctr;
+#endif
+ for (i = 0; i < 16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^
+ ctx->EKi.t[i];
+ out += 16;
+ in += 16;
+ j -= 16;
+ }
+ len -= GHASH_CHUNK;
+ }
+ if ((i = (len & (size_t)-16))) {
+ GHASH(ctx, in, i);
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
-#endif
- for (i=0; i<16/sizeof(size_t); ++i)
- out_t[i] = in_t[i]^ctx->EKi.t[i];
- out += 16;
- in += 16;
- len -= 16;
- }
- }
+ ctx->Yi.d[3] = ctr;
+#endif
+ for (i = 0; i < 16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^
+ ctx->EKi.t[i];
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ }
#else
- while (len>=16) {
- size_t *out_t=(size_t *)out;
- const size_t *in_t=(const size_t *)in;
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
+ ctx->Yi.d[3] = ctr;
#endif
- for (i=0; i<16/sizeof(size_t); ++i) {
- size_t c = in[i];
- out[i] = c^ctx->EKi.t[i];
- ctx->Xi.t[i] ^= c;
+ for (i = 0; i < 16/sizeof(size_t); ++i) {
+ size_t c = in[i];
+ out[i] = c ^ ctx->EKi.t[i];
+ ctx->Xi.t[i] ^= c;
+ }
+ GCM_MUL(ctx, Xi);
+ out += 16;
+ in += 16;
+ len -= 16;
}
- GCM_MUL(ctx,Xi);
- out += 16;
- in += 16;
- len -= 16;
- }
#endif
- if (len) {
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
- ++ctr;
+ if (len) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
- ctx->Yi.d[3] = BSWAP4(ctr);
+ ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
- ctx->Yi.d[3] = ctr;
+ ctx->Yi.d[3] = ctr;
#endif
- while (len--) {
- u8 c = in[n];
- ctx->Xi.c[n] ^= c;
- out[n] = c^ctx->EKi.c[n];
- ++n;
+ while (len--) {
+ u8 c = in[n];
+ ctx->Xi.c[n] ^= c;
+ out[n] = c ^ ctx->EKi.c[n];
+ ++n;
+ }
}
- }
- ctx->mres = n;
- return 0;
- } while(0);
+ ctx->mres = n;
+ return 0;
+ } while (0);
#endif
- for (i=0;i<len;++i) {
+ for (i = 0; i < len; ++i) {
u8 c;
- if (n==0) {
- (*block)(ctx->Yi.c,ctx->EKi.c,key);
+ if (n == 0) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
}
c = in[i];
- out[i] = c^ctx->EKi.c[n];
+ out[i] = c ^ ctx->EKi.c[n];
ctx->Xi.c[n] ^= c;
- n = (n+1)%16;
- if (n==0)
- GCM_MUL(ctx,Xi);
+ n = (n + 1) % 16;
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
}
ctx->mres = n;
return 0;
}
-int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len, ctr128_f stream)
+int
+CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len, ctr128_f stream)
{
unsigned int n, ctr;
size_t i;
- u64 mlen = ctx->len.u[1];
- void *key = ctx->key;
+ u64 mlen = ctx->len.u[1];
+ void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
- void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len) = ctx->ghash;
+ void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif
mlen += len;
- if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
return -1;
ctx->len.u[1] = mlen;
if (ctx->ares) {
/* First call to encrypt finalizes GHASH(AAD) */
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
#else
- ctr = GETU32(ctx->Yi.c+12);
+ ctr = GETU32(ctx->Yi.c + 12);
#endif
#else /* BIG_ENDIAN */
ctr = ctx->Yi.d[3];
#endif
n = ctx->mres;
if (n) {
while (n && len) {
- ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
--len;
- n = (n+1)%16;
+ n = (n + 1) % 16;
}
- if (n==0) GCM_MUL(ctx,Xi);
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
else {
ctx->mres = n;
return 0;
}
}
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
- while (len>=GHASH_CHUNK) {
- (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c);
+ while (len >= GHASH_CHUNK) {
+ (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
ctr += GHASH_CHUNK/16;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
- GHASH(ctx,out,GHASH_CHUNK);
+ GHASH(ctx, out, GHASH_CHUNK);
out += GHASH_CHUNK;
- in += GHASH_CHUNK;
+ in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
#endif
- if ((i = (len&(size_t)-16))) {
- size_t j=i/16;
+ if ((i = (len & (size_t)-16))) {
+ size_t j = i/16;
- (*stream)(in,out,j,key,ctx->Yi.c);
+ (*stream)(in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
- in += i;
+ in += i;
len -= i;
#if defined(GHASH)
- GHASH(ctx,out,i);
+ GHASH(ctx, out, i);
out += i;
#else
while (j--) {
- for (i=0;i<16;++i) ctx->Xi.c[i] ^= out[i];
- GCM_MUL(ctx,Xi);
+ for (i = 0; i < 16; ++i)
+ ctx->Xi.c[i] ^= out[i];
+ GCM_MUL(ctx, Xi);
out += 16;
}
#endif
}
if (len) {
- (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key);
+ (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
while (len--) {
- ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
++n;
}
}
	ctx->mres = n;
return 0;
}
-int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len,ctr128_f stream)
+int
+CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len, ctr128_f stream)
{
unsigned int n, ctr;
size_t i;
- u64 mlen = ctx->len.u[1];
- void *key = ctx->key;
+ u64 mlen = ctx->len.u[1];
+ void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
- void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
- const u8 *inp,size_t len) = ctx->ghash;
+ void (*gcm_ghash_p)(u64 Xi[2], const u128 Htable[16],
+ const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif
mlen += len;
- if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
return -1;
ctx->len.u[1] = mlen;
if (ctx->ares) {
/* First call to decrypt finalizes GHASH(AAD) */
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
ctx->ares = 0;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
#else
- ctr = GETU32(ctx->Yi.c+12);
+ ctr = GETU32(ctx->Yi.c + 12);
#endif
#else /* BIG_ENDIAN */
ctr = ctx->Yi.d[3];
#endif
	n = ctx->mres;
if (n) {
while (n && len) {
u8 c = *(in++);
- *(out++) = c^ctx->EKi.c[n];
+ *(out++) = c ^ ctx->EKi.c[n];
ctx->Xi.c[n] ^= c;
--len;
- n = (n+1)%16;
+ n = (n + 1) % 16;
}
- if (n==0) GCM_MUL (ctx,Xi);
+ if (n == 0)
+ GCM_MUL(ctx, Xi);
else {
ctx->mres = n;
return 0;
}
}
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
- while (len>=GHASH_CHUNK) {
- GHASH(ctx,in,GHASH_CHUNK);
- (*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c);
+ while (len >= GHASH_CHUNK) {
+ GHASH(ctx, in, GHASH_CHUNK);
+ (*stream)(in, out, GHASH_CHUNK/16, key, ctx->Yi.c);
ctr += GHASH_CHUNK/16;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
out += GHASH_CHUNK;
- in += GHASH_CHUNK;
+ in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
#endif
- if ((i = (len&(size_t)-16))) {
- size_t j=i/16;
+ if ((i = (len & (size_t)-16))) {
+ size_t j = i/16;
#if defined(GHASH)
- GHASH(ctx,in,i);
+ GHASH(ctx, in, i);
#else
while (j--) {
size_t k;
- for (k=0;k<16;++k) ctx->Xi.c[k] ^= in[k];
- GCM_MUL(ctx,Xi);
+ for (k = 0; k < 16; ++k)
+ ctx->Xi.c[k] ^= in[k];
+ GCM_MUL(ctx, Xi);
in += 16;
}
- j = i/16;
+ j = i/16;
in -= i;
#endif
- (*stream)(in,out,j,key,ctx->Yi.c);
+ (*stream)(in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
out += i;
- in += i;
+ in += i;
len -= i;
}
if (len) {
- (*ctx->block)(ctx->Yi.c,ctx->EKi.c,key);
+ (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
#else
- PUTU32(ctx->Yi.c+12,ctr);
+ PUTU32(ctx->Yi.c + 12, ctr);
#endif
#else /* BIG_ENDIAN */
ctx->Yi.d[3] = ctr;
#endif
while (len--) {
u8 c = in[n];
ctx->Xi.c[n] ^= c;
- out[n] = c^ctx->EKi.c[n];
+ out[n] = c ^ ctx->EKi.c[n];
++n;
}
}
	ctx->mres = n;
return 0;
}
-int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag,
- size_t len)
+int
+CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
+ size_t len)
{
- u64 alen = ctx->len.u[0]<<3;
- u64 clen = ctx->len.u[1]<<3;
+ u64 alen = ctx->len.u[0] << 3;
+ u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif
if (ctx->mres || ctx->ares)
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
#if BYTE_ORDER == LITTLE_ENDIAN
#ifdef BSWAP8
ctx->len.u[0] = alen;
ctx->len.u[1] = clen;
- alen = (u64)GETU32(p) <<32|GETU32(p+4);
- clen = (u64)GETU32(p+8)<<32|GETU32(p+12);
+ alen = (u64)GETU32(p) << 32|GETU32(p + 4);
+ clen = (u64)GETU32(p + 8) << 32|GETU32(p + 12);
}
#endif
#endif
ctx->Xi.u[0] ^= alen;
ctx->Xi.u[1] ^= clen;
- GCM_MUL(ctx,Xi);
+ GCM_MUL(ctx, Xi);
ctx->Xi.u[0] ^= ctx->EK0.u[0];
ctx->Xi.u[1] ^= ctx->EK0.u[1];
- if (tag && len<=sizeof(ctx->Xi))
- return memcmp(ctx->Xi.c,tag,len);
+ if (tag && len <= sizeof(ctx->Xi))
+ return memcmp(ctx->Xi.c, tag, len);
else
return -1;
}
-void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
+void
+CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
CRYPTO_gcm128_finish(ctx, NULL, 0);
- memcpy(tag, ctx->Xi.c, len<=sizeof(ctx->Xi.c)?len:sizeof(ctx->Xi.c));
+ memcpy(tag, ctx->Xi.c,
+ len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}
-GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block)
+GCM128_CONTEXT *
+CRYPTO_gcm128_new(void *key, block128_f block)
{
GCM128_CONTEXT *ret;
if ((ret = malloc(sizeof(GCM128_CONTEXT))))
- CRYPTO_gcm128_init(ret,key,block);
+ CRYPTO_gcm128_init(ret, key, block);
return ret;
}
-void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
+void
+CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
{
freezero(ctx, sizeof(*ctx));
}
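/*
 * Illustrative sketch of one complete sealing sequence through the
 * routines above: init derives H, setiv forms J0, aad absorbs the
 * additional data, encrypt does CTR plus GHASH, and tag closes the
 * hash. gcm_seal is a hypothetical caller-side helper; the key
 * schedule and the block128_f wrapper are assumed to be supplied by
 * the caller, and the declarations come from the modes.h shown below.
 */
#include <stddef.h>
#include "modes.h"

static int
gcm_seal(void *key_schedule, block128_f block, const unsigned char iv[12],
    const unsigned char *aad, size_t aad_len, const unsigned char *pt,
    unsigned char *ct, size_t pt_len, unsigned char tag[16])
{
	GCM128_CONTEXT *ctx;
	int ret = -1;

	if ((ctx = CRYPTO_gcm128_new(key_schedule, block)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(ctx, iv, 12);
	if (CRYPTO_gcm128_aad(ctx, aad, aad_len) != 0)
		goto done;	/* AAD too long or after message data */
	if (CRYPTO_gcm128_encrypt(ctx, pt, ct, pt_len) != 0)
		goto done;	/* plaintext exceeds the GCM length bound */
	CRYPTO_gcm128_tag(ctx, tag, 16);
	ret = 0;
 done:
	CRYPTO_gcm128_release(ctx);
	return ret;
}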
-/* $OpenBSD: modes.h,v 1.5 2023/04/25 17:54:10 tb Exp $ */
+/* $OpenBSD: modes.h,v 1.6 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
#endif
typedef void (*block128_f)(const unsigned char in[16],
- unsigned char out[16],
- const void *key);
+ unsigned char out[16],
+ const void *key);
typedef void (*cbc128_f)(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], int enc);
+ size_t len, const void *key,
+ unsigned char ivec[16], int enc);
typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out,
- size_t blocks, const void *key,
- const unsigned char ivec[16]);
+ size_t blocks, const void *key,
+ const unsigned char ivec[16]);
typedef void (*ccm128_f)(const unsigned char *in, unsigned char *out,
- size_t blocks, const void *key,
- const unsigned char ivec[16],unsigned char cmac[16]);
+ size_t blocks, const void *key,
+ const unsigned char ivec[16], unsigned char cmac[16]);
void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], block128_f block);
+ size_t len, const void *key,
+ unsigned char ivec[16], block128_f block);
void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], block128_f block);
+ size_t len, const void *key,
+ unsigned char ivec[16], block128_f block);
void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], unsigned char ecount_buf[16],
- unsigned int *num, block128_f block);
+ size_t len, const void *key,
+ unsigned char ivec[16], unsigned char ecount_buf[16],
+ unsigned int *num, block128_f block);
void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], unsigned char ecount_buf[16],
- unsigned int *num, ctr128_f ctr);
+ size_t len, const void *key,
+ unsigned char ivec[16], unsigned char ecount_buf[16],
+ unsigned int *num, ctr128_f ctr);
void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], int *num,
- block128_f block);
+ size_t len, const void *key,
+ unsigned char ivec[16], int *num,
+ block128_f block);
void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block);
+ size_t len, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block);
void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
- size_t length, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block);
+ size_t length, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block);
void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
- size_t bits, const void *key,
- unsigned char ivec[16], int *num,
- int enc, block128_f block);
+ size_t bits, const void *key,
+ unsigned char ivec[16], int *num,
+ int enc, block128_f block);
typedef struct gcm128_context GCM128_CONTEXT;
GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block);
-void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block);
+void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block);
void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
- size_t len);
+ size_t len);
int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
- size_t len);
+ size_t len);
int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len);
+ const unsigned char *in, unsigned char *out,
+ size_t len);
int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len);
+ const unsigned char *in, unsigned char *out,
+ size_t len);
int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len, ctr128_f stream);
+ const unsigned char *in, unsigned char *out,
+ size_t len, ctr128_f stream);
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len, ctr128_f stream);
-int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag,
- size_t len);
+ const unsigned char *in, unsigned char *out,
+ size_t len, ctr128_f stream);
+int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
+ size_t len);
void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx);
typedef struct ccm128_context CCM128_CONTEXT;
void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
- unsigned int M, unsigned int L, void *key,block128_f block);
+ unsigned int M, unsigned int L, void *key, block128_f block);
int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
- const unsigned char *nonce, size_t nlen, size_t mlen);
+ const unsigned char *nonce, size_t nlen, size_t mlen);
void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
- const unsigned char *aad, size_t alen);
+ const unsigned char *aad, size_t alen);
int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out, size_t len);
+ const unsigned char *inp, unsigned char *out, size_t len);
int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out, size_t len);
+ const unsigned char *inp, unsigned char *out, size_t len);
int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out, size_t len,
- ccm128_f stream);
+ const unsigned char *inp, unsigned char *out, size_t len,
+ ccm128_f stream);
int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
- const unsigned char *inp, unsigned char *out, size_t len,
- ccm128_f stream);
+ const unsigned char *inp, unsigned char *out, size_t len,
+ ccm128_f stream);
size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len);
typedef struct xts128_context XTS128_CONTEXT;
int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
- const unsigned char *inp, unsigned char *out, size_t len, int enc);
+ const unsigned char *inp, unsigned char *out, size_t len, int enc);
#ifdef __cplusplus
}
-/* $OpenBSD: modes_local.h,v 1.1 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: modes_local.h,v 1.2 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2010 The OpenSSL Project. All rights reserved.
*
#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#if defined(__GNUC__) && __GNUC__>=2
# if defined(__x86_64) || defined(__x86_64__)
-# define BSWAP8(x) ({ u64 ret=(x); \
- asm ("bswapq %0" \
+# define BSWAP8(x) ({ u64 ret=(x); \
+ asm ("bswapq %0" \
: "+r"(ret)); ret; })
-# define BSWAP4(x) ({ u32 ret=(x); \
- asm ("bswapl %0" \
+# define BSWAP4(x) ({ u32 ret=(x); \
+ asm ("bswapl %0" \
: "+r"(ret)); ret; })
# elif (defined(__i386) || defined(__i386__))
-# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
- asm ("bswapl %0; bswapl %1" \
- : "+r"(hi),"+r"(lo)); \
+# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
+ asm ("bswapl %0; bswapl %1" \
+ : "+r"(hi),"+r"(lo)); \
(u64)hi<<32|lo; })
-# define BSWAP4(x) ({ u32 ret=(x); \
- asm ("bswapl %0" \
+# define BSWAP4(x) ({ u32 ret=(x); \
+ asm ("bswapl %0" \
: "+r"(ret)); ret; })
# elif (defined(__arm__) || defined(__arm)) && !defined(__STRICT_ALIGNMENT)
-# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
- asm ("rev %0,%0; rev %1,%1" \
- : "+r"(hi),"+r"(lo)); \
+# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
+ asm ("rev %0,%0; rev %1,%1" \
+ : "+r"(hi),"+r"(lo)); \
(u64)hi<<32|lo; })
-# define BSWAP4(x) ({ u32 ret; \
- asm ("rev %0,%1" \
- : "=r"(ret) : "r"((u32)(x))); \
+# define BSWAP4(x) ({ u32 ret; \
+ asm ("rev %0,%1" \
+ : "=r"(ret) : "r"((u32)(x))); \
ret; })
# endif
#endif
/* GCM definitions */
-typedef struct { u64 hi,lo; } u128;
+typedef struct {
+ u64 hi, lo;
+} u128;
#ifdef TABLE_BITS
#undef TABLE_BITS
#endif
#define	TABLE_BITS 4
struct gcm128_context {
/* Following 6 names follow names in GCM specification */
- union { u64 u[2]; u32 d[4]; u8 c[16]; size_t t[16/sizeof(size_t)]; }
- Yi,EKi,EK0,len,Xi,H;
+ union {
+ u64 u[2];
+ u32 d[4];
+ u8 c[16];
+ size_t t[16/sizeof(size_t)];
+ } Yi, EKi, EK0, len, Xi, H;
/* Relative position of Xi, H and pre-computed Htable is used
* in some assembler modules, i.e. don't change the order! */
#if TABLE_BITS==8
u128 Htable[256];
#else
u128 Htable[16];
- void (*gmult)(u64 Xi[2],const u128 Htable[16]);
- void (*ghash)(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+ void (*gmult)(u64 Xi[2], const u128 Htable[16]);
+ void (*ghash)(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
#endif
unsigned int mres, ares;
block128_f block;
	void *key;
};
struct xts128_context {
void *key1, *key2;
- block128_f block1,block2;
+ block128_f block1, block2;
};
struct ccm128_context {
- union { u64 u[2]; u8 c[16]; } nonce, cmac;
+ union {
+ u64 u[2];
+ u8 c[16];
+ } nonce, cmac;
u64 blocks;
block128_f block;
void *key;
};
-/* $OpenBSD: ofb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */
+/* $OpenBSD: ofb128.c,v 1.6 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* used. The extra state information to record how much of the
* 128bit block we have used is contained in *num;
*/
-void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const void *key,
- unsigned char ivec[16], int *num,
- block128_f block)
+void
+CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key,
+ unsigned char ivec[16], int *num,
+ block128_f block)
{
unsigned int n;
- size_t l=0;
+ size_t l = 0;
n = *num;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
- if (16%sizeof(size_t) == 0) do { /* always true actually */
- while (n && len) {
- *(out++) = *(in++) ^ ivec[n];
- --len;
- n = (n+1) % 16;
- }
+ if (16 % sizeof(size_t) == 0)
+ do { /* always true actually */
+ while (n && len) {
+ *(out++) = *(in++) ^ ivec[n];
+ --len;
+ n = (n + 1) % 16;
+ }
#ifdef __STRICT_ALIGNMENT
- if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
- break;
+ if (((size_t)in|(size_t)out|(size_t)ivec) %
+ sizeof(size_t) != 0)
+ break;
#endif
- while (len>=16) {
- (*block)(ivec, ivec, key);
- for (; n<16; n+=sizeof(size_t))
- *(size_t*)(out+n) =
- *(size_t*)(in+n) ^ *(size_t*)(ivec+n);
- len -= 16;
- out += 16;
- in += 16;
- n = 0;
- }
- if (len) {
- (*block)(ivec, ivec, key);
- while (len--) {
- out[n] = in[n] ^ ivec[n];
- ++n;
+ while (len >= 16) {
+ (*block)(ivec, ivec, key);
+ for (; n < 16; n += sizeof(size_t))
+ *(size_t *)(out + n) =
+ *(size_t *)(in + n) ^ *(size_t *)(ivec +
+ n);
+ len -= 16;
+ out += 16;
+ in += 16;
+ n = 0;
}
- }
- *num = n;
- return;
- } while(0);
+ if (len) {
+ (*block)(ivec, ivec, key);
+ while (len--) {
+ out[n] = in[n] ^ ivec[n];
+ ++n;
+ }
+ }
+ *num = n;
+ return;
+ } while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
- while (l<len) {
- if (n==0) {
+ while (l < len) {
+ if (n == 0) {
(*block)(ivec, ivec, key);
}
out[l] = in[l] ^ ivec[n];
++l;
- n = (n+1) % 16;
+ n = (n + 1) % 16;
}
- *num=n;
+ *num = n;
}
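/*
 * Illustrative sketch: OFB only XORs data with a keystream, so
 * decryption is the same operation as encryption. ofb_decrypt is a
 * hypothetical wrapper; it works on a copy of the IV because
 * CRYPTO_ofb128_encrypt advances ivec in place.
 */
#include <string.h>
#include "modes.h"

static void
ofb_decrypt(const unsigned char *ct, unsigned char *pt, size_t len,
    const void *key, const unsigned char iv[16], block128_f block)
{
	unsigned char ivec[16];
	int num = 0;

	memcpy(ivec, iv, 16);
	CRYPTO_ofb128_encrypt(ct, pt, len, key, ivec, &num, block);
}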
-/* $OpenBSD: xts128.c,v 1.10 2023/05/07 14:38:04 tb Exp $ */
+/* $OpenBSD: xts128.c,v 1.11 2023/07/08 14:55:36 beck Exp $ */
/* ====================================================================
* Copyright (c) 2011 The OpenSSL Project. All rights reserved.
*
* are met:
*
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
# endif
#endif
-int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
- const unsigned char *inp, unsigned char *out,
- size_t len, int enc)
+int
+CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
+ const unsigned char *inp, unsigned char *out,
+ size_t len, int enc)
{
- union { u64 u[2]; u32 d[4]; u8 c[16]; } tweak, scratch;
+ union {
+ u64 u[2];
+ u32 d[4];
+ u8 c[16];
+ } tweak, scratch;
unsigned int i;
- if (len<16) return -1;
+ if (len < 16)
+ return -1;
memcpy(tweak.c, iv, 16);
- (*ctx->block2)(tweak.c,tweak.c,ctx->key2);
+ (*ctx->block2)(tweak.c, tweak.c, ctx->key2);
- if (!enc && (len%16)) len-=16;
+ if (!enc && (len % 16))
+ len -= 16;
- while (len>=16) {
+ while (len >= 16) {
#ifdef __STRICT_ALIGNMENT
- memcpy(scratch.c,inp,16);
+ memcpy(scratch.c, inp, 16);
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
#else
- scratch.u[0] = ((u64*)inp)[0]^tweak.u[0];
- scratch.u[1] = ((u64*)inp)[1]^tweak.u[1];
+ scratch.u[0] = ((u64 *)inp)[0] ^ tweak.u[0];
+ scratch.u[1] = ((u64 *)inp)[1] ^ tweak.u[1];
#endif
- (*ctx->block1)(scratch.c,scratch.c,ctx->key1);
+ (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
#ifdef __STRICT_ALIGNMENT
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
- memcpy(out,scratch.c,16);
+ memcpy(out, scratch.c, 16);
#else
- ((u64*)out)[0] = scratch.u[0]^=tweak.u[0];
- ((u64*)out)[1] = scratch.u[1]^=tweak.u[1];
+ ((u64 *)out)[0] = scratch.u[0] ^= tweak.u[0];
+ ((u64 *)out)[1] = scratch.u[1] ^= tweak.u[1];
#endif
inp += 16;
out += 16;
len -= 16;
- if (len==0) return 0;
+ if (len == 0)
+ return 0;
#if BYTE_ORDER == LITTLE_ENDIAN
- unsigned int carry,res;
+ unsigned int carry, res;
- res = 0x87&(((int)tweak.d[3])>>31);
- carry = (unsigned int)(tweak.u[0]>>63);
- tweak.u[0] = (tweak.u[0]<<1)^res;
- tweak.u[1] = (tweak.u[1]<<1)|carry;
+ res = 0x87 & (((int)tweak.d[3]) >> 31);
+ carry = (unsigned int)(tweak.u[0] >> 63);
+ tweak.u[0] = (tweak.u[0] << 1) ^ res;
+ tweak.u[1] = (tweak.u[1] << 1)|carry;
#else /* BIG_ENDIAN */
size_t c;
- for (c=0,i=0;i<16;++i) {
+ for (c = 0, i = 0; i < 16; ++i) {
/*+ substitutes for |, because c is 1 bit */
- c += ((size_t)tweak.c[i])<<1;
+ c += ((size_t)tweak.c[i]) << 1;
tweak.c[i] = (u8)c;
- c = c>>8;
+ c = c >> 8;
}
- tweak.c[0] ^= (u8)(0x87&(0-c));
+ tweak.c[0] ^= (u8)(0x87 & (0 - c));
#endif
}
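/*
 * Illustrative sketch: the tweak update above multiplies the 128-bit
 * tweak by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1. Byte
 * tweak.c[0] is least significant, and a bit carried out of the top
 * is folded back in as 0x87. A byte-wise restatement (xts_double is
 * a hypothetical name):
 */
static void
xts_double(unsigned char t[16])
{
	unsigned int carry = 0, i;

	for (i = 0; i < 16; i++) {
		unsigned int msb = t[i] >> 7;

		t[i] = (unsigned char)((t[i] << 1) | carry);
		carry = msb;
	}
	if (carry)
		t[0] ^= 0x87;	/* reduce by the XTS polynomial */
}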
if (enc) {
- for (i=0;i<len;++i) {
+ for (i = 0; i < len; ++i) {
u8 ch = inp[i];
out[i] = scratch.c[i];
scratch.c[i] = ch;
}
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
- (*ctx->block1)(scratch.c,scratch.c,ctx->key1);
+ (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
- memcpy(out-16,scratch.c,16);
- }
- else {
- union { u64 u[2]; u8 c[16]; } tweak1;
+ memcpy(out - 16, scratch.c, 16);
+ } else {
+ union {
+ u64 u[2];
+ u8 c[16];
+ } tweak1;
#if BYTE_ORDER == LITTLE_ENDIAN
- unsigned int carry,res;
+ unsigned int carry, res;
- res = 0x87&(((int)tweak.d[3])>>31);
- carry = (unsigned int)(tweak.u[0]>>63);
- tweak1.u[0] = (tweak.u[0]<<1)^res;
- tweak1.u[1] = (tweak.u[1]<<1)|carry;
+ res = 0x87 & (((int)tweak.d[3]) >> 31);
+ carry = (unsigned int)(tweak.u[0] >> 63);
+ tweak1.u[0] = (tweak.u[0] << 1) ^ res;
+ tweak1.u[1] = (tweak.u[1] << 1)|carry;
#else
size_t c;
- for (c=0,i=0;i<16;++i) {
+ for (c = 0, i = 0; i < 16; ++i) {
/*+ substitutes for |, because c is 1 bit */
- c += ((size_t)tweak.c[i])<<1;
+ c += ((size_t)tweak.c[i]) << 1;
tweak1.c[i] = (u8)c;
- c = c>>8;
+ c = c >> 8;
}
- tweak1.c[0] ^= (u8)(0x87&(0-c));
+ tweak1.c[0] ^= (u8)(0x87 & (0 - c));
#endif
#ifdef __STRICT_ALIGNMENT
- memcpy(scratch.c,inp,16);
+ memcpy(scratch.c, inp, 16);
scratch.u[0] ^= tweak1.u[0];
scratch.u[1] ^= tweak1.u[1];
#else
- scratch.u[0] = ((u64*)inp)[0]^tweak1.u[0];
- scratch.u[1] = ((u64*)inp)[1]^tweak1.u[1];
+ scratch.u[0] = ((u64 *)inp)[0] ^ tweak1.u[0];
+ scratch.u[1] = ((u64 *)inp)[1] ^ tweak1.u[1];
#endif
- (*ctx->block1)(scratch.c,scratch.c,ctx->key1);
+ (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
scratch.u[0] ^= tweak1.u[0];
scratch.u[1] ^= tweak1.u[1];
- for (i=0;i<len;++i) {
- u8 ch = inp[16+i];
- out[16+i] = scratch.c[i];
+ for (i = 0; i < len; ++i) {
+ u8 ch = inp[16 + i];
+ out[16 + i] = scratch.c[i];
scratch.c[i] = ch;
}
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
- (*ctx->block1)(scratch.c,scratch.c,ctx->key1);
+ (*ctx->block1)(scratch.c, scratch.c, ctx->key1);
#ifdef __STRICT_ALIGNMENT
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
- memcpy (out,scratch.c,16);
+ memcpy(out, scratch.c, 16);
#else
- ((u64*)out)[0] = scratch.u[0]^tweak.u[0];
- ((u64*)out)[1] = scratch.u[1]^tweak.u[1];
+ ((u64 *)out)[0] = scratch.u[0] ^ tweak.u[0];
+ ((u64 *)out)[1] = scratch.u[1] ^ tweak.u[1];
#endif
}