Created July 11, 2018 09:50
aarch64 SHA1/SHA256
#include <assert.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <arm_neon.h>
#include <arm_acle.h>

namespace CryptoPP
{
    typedef unsigned char byte;
    typedef unsigned short word16;
    typedef unsigned int word32;
    typedef unsigned long long word64;

    enum ByteOrder
    {
        LITTLE_ENDIAN_ORDER = 0,
        BIG_ENDIAN_ORDER = 1
    };
}

#include "sha.h"

struct SHA1Engine
{
    static constexpr int DIGESTSIZE = 20;

    static void InitState(CryptoPP::word32* state)
    {
        state[0] = 0x67452301;
        state[1] = 0xEFCDAB89;
        state[2] = 0x98BADCFE;
        state[3] = 0x10325476;
        state[4] = 0xC3D2E1F0;
    }

    static void HashMultipleBlocks(CryptoPP::word32* state, const CryptoPP::byte* data, size_t length, CryptoPP::ByteOrder order)
    {
        using namespace CryptoPP;
        assert("SHA1_HashMultipleBlocks_ARMV8" && state != nullptr);
        assert("SHA1_HashMultipleBlocks_ARMV8" && data != nullptr);
        assert("SHA1_HashMultipleBlocks_ARMV8" && length >= SHA_BLOCKSIZE);

        uint32x4_t C0, C1, C2, C3;
        uint32x4_t ABCD, ABCD_SAVED;
        uint32x4_t MSG0, MSG1, MSG2, MSG3;
        uint32x4_t TMP0, TMP1;
        uint32_t E0, E0_SAVED, E1;

        // Load initial values
        C0 = vdupq_n_u32(0x5A827999);
        C1 = vdupq_n_u32(0x6ED9EBA1);
        C2 = vdupq_n_u32(0x8F1BBCDC);
        C3 = vdupq_n_u32(0xCA62C1D6);

        ABCD = vld1q_u32(&state[0]);
        E0 = state[4];

        while (length >= SHA_BLOCKSIZE)
        {
            // Save current hash
            ABCD_SAVED = ABCD;
            E0_SAVED = E0;

            // Load message
            MSG0 = vld1q_u32((const CryptoPP::word32*)&data[0]);
            MSG1 = vld1q_u32((const CryptoPP::word32*)&data[16]);
            MSG2 = vld1q_u32((const CryptoPP::word32*)&data[32]);
            MSG3 = vld1q_u32((const CryptoPP::word32*)&data[48]);

            if (order == BIG_ENDIAN_ORDER) // Data arrangement
            {
                MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
                MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
                MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
                MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
            }

            TMP0 = vaddq_u32(MSG0, C0);
            TMP1 = vaddq_u32(MSG1, C0);

            // Rounds 0-3
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1cq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG2, C0);
            MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

            // Rounds 4-7
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1cq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG3, C0);
            MSG0 = vsha1su1q_u32(MSG0, MSG3);
            MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

            // Rounds 8-11
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1cq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG0, C0);
            MSG1 = vsha1su1q_u32(MSG1, MSG0);
            MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

            // Rounds 12-15
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1cq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG1, C1);
            MSG2 = vsha1su1q_u32(MSG2, MSG1);
            MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

            // Rounds 16-19
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1cq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG2, C1);
            MSG3 = vsha1su1q_u32(MSG3, MSG2);
            MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

            // Rounds 20-23
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG3, C1);
            MSG0 = vsha1su1q_u32(MSG0, MSG3);
            MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

            // Rounds 24-27
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG0, C1);
            MSG1 = vsha1su1q_u32(MSG1, MSG0);
            MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

            // Rounds 28-31
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG1, C1);
            MSG2 = vsha1su1q_u32(MSG2, MSG1);
            MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

            // Rounds 32-35
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG2, C2);
            MSG3 = vsha1su1q_u32(MSG3, MSG2);
            MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

            // Rounds 36-39
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG3, C2);
            MSG0 = vsha1su1q_u32(MSG0, MSG3);
            MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

            // Rounds 40-43
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1mq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG0, C2);
            MSG1 = vsha1su1q_u32(MSG1, MSG0);
            MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

            // Rounds 44-47
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1mq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG1, C2);
            MSG2 = vsha1su1q_u32(MSG2, MSG1);
            MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

            // Rounds 48-51
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1mq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG2, C2);
            MSG3 = vsha1su1q_u32(MSG3, MSG2);
            MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

            // Rounds 52-55
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1mq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG3, C3);
            MSG0 = vsha1su1q_u32(MSG0, MSG3);
            MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

            // Rounds 56-59
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1mq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG0, C3);
            MSG1 = vsha1su1q_u32(MSG1, MSG0);
            MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

            // Rounds 60-63
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG1, C3);
            MSG2 = vsha1su1q_u32(MSG2, MSG1);
            MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

            // Rounds 64-67
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E0, TMP0);
            TMP0 = vaddq_u32(MSG2, C3);
            MSG3 = vsha1su1q_u32(MSG3, MSG2);
            MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

            // Rounds 68-71
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);
            TMP1 = vaddq_u32(MSG3, C3);
            MSG0 = vsha1su1q_u32(MSG0, MSG3);

            // Rounds 72-75
            E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E0, TMP0);

            // Rounds 76-79
            E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
            ABCD = vsha1pq_u32(ABCD, E1, TMP1);

            E0 += E0_SAVED;
            ABCD = vaddq_u32(ABCD_SAVED, ABCD);
            data += SHA_BLOCKSIZE; // data is a byte pointer, so advance by the full 64-byte block
            length -= SHA_BLOCKSIZE;
        }

        // Save state
        vst1q_u32(&state[0], ABCD);
        state[4] = E0;
    }
};

alignas(16) static const CryptoPP::word32 SHA256_K[64] =
{
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

struct SHA256Engine
{
    static constexpr int DIGESTSIZE = 32;

    static void InitState(CryptoPP::word32* state)
    {
        static const CryptoPP::word32 s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
        memcpy(state, s, sizeof(s));
    }

    static void HashMultipleBlocks(CryptoPP::word32* state, const CryptoPP::byte* data, size_t length, CryptoPP::ByteOrder order)
    {
        using namespace CryptoPP;
        assert("SHA256_HashMultipleBlocks_ARMV8" && state);
        assert("SHA256_HashMultipleBlocks_ARMV8" && data);
        assert("SHA256_HashMultipleBlocks_ARMV8" && length >= SHA_BLOCKSIZE);

        uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
        uint32x4_t MSG0, MSG1, MSG2, MSG3;
        uint32x4_t TMP0, TMP1, TMP2;

        // Load initial values
        STATE0 = vld1q_u32(&state[0]);
        STATE1 = vld1q_u32(&state[4]);

        while (length >= SHA_BLOCKSIZE)
        {
            // Save current hash
            ABEF_SAVE = STATE0;
            CDGH_SAVE = STATE1;

            // Load message
            MSG0 = vld1q_u32((const CryptoPP::word32*)&data[0]);
            MSG1 = vld1q_u32((const CryptoPP::word32*)&data[16]);
            MSG2 = vld1q_u32((const CryptoPP::word32*)&data[32]);
            MSG3 = vld1q_u32((const CryptoPP::word32*)&data[48]);

            if (order == BIG_ENDIAN_ORDER) // Data arrangement
            {
                MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
                MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
                MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
                MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
            }

            TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x00]));

            // Rounds 0-3
            MSG0 = vsha256su0q_u32(MSG0, MSG1);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x04]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

            // Rounds 4-7
            MSG1 = vsha256su0q_u32(MSG1, MSG2);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x08]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

            // Rounds 8-11
            MSG2 = vsha256su0q_u32(MSG2, MSG3);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x0c]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

            // Rounds 12-15
            MSG3 = vsha256su0q_u32(MSG3, MSG0);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x10]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

            // Rounds 16-19
            MSG0 = vsha256su0q_u32(MSG0, MSG1);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x14]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

            // Rounds 20-23
            MSG1 = vsha256su0q_u32(MSG1, MSG2);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x18]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

            // Rounds 24-27
            MSG2 = vsha256su0q_u32(MSG2, MSG3);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x1c]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

            // Rounds 28-31
            MSG3 = vsha256su0q_u32(MSG3, MSG0);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x20]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

            // Rounds 32-35
            MSG0 = vsha256su0q_u32(MSG0, MSG1);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x24]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

            // Rounds 36-39
            MSG1 = vsha256su0q_u32(MSG1, MSG2);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x28]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

            // Rounds 40-43
            MSG2 = vsha256su0q_u32(MSG2, MSG3);
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x2c]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
            MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

            // Rounds 44-47
            MSG3 = vsha256su0q_u32(MSG3, MSG0);
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x30]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
            MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

            // Rounds 48-51
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x34]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

            // Rounds 52-55
            TMP2 = STATE0;
            TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x38]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

            // Rounds 56-59
            TMP2 = STATE0;
            TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x3c]));
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

            // Rounds 60-63
            TMP2 = STATE0;
            STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
            STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

            // Add back to state
            STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
            STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
            data += SHA_BLOCKSIZE; // data is a byte pointer, so advance by the full 64-byte block
            length -= SHA_BLOCKSIZE;
        }

        // Save state
        vst1q_u32(&state[0], STATE0);
        vst1q_u32(&state[4], STATE1);
    }
};

template<typename SHAEngine>
inline void SHA_Update(SHAState* ctx, const u8* data, size_t len)
{
    for (size_t i=0; i<len; ++i)
    {
        ctx->data[ctx->dataLen] = data[i];
        ctx->dataLen++;
        if (ctx->dataLen == 64)
        {
            SHAEngine::HashMultipleBlocks(ctx->state, ctx->data, SHA_BLOCKSIZE, CryptoPP::BIG_ENDIAN_ORDER);
            ctx->bitLen += 512;
            ctx->dataLen = 0;
        }
    }
}

template<typename SHAEngine>
inline void SHA_Final(SHAState* ctx, u8* outDigest, size_t digestSize)
{
    // Pad whatever data is left in the buffer.
    size_t i = ctx->dataLen;
    if (i < (SHA_BLOCKSIZE-sizeof(ctx->bitLen)))
    {
        ctx->data[i++] = 0x80;
        while (i < (SHA_BLOCKSIZE-sizeof(ctx->bitLen)))
            ctx->data[i++] = 0x00;
    }
    else
    {
        ctx->data[i++] = 0x80;
        while (i < SHA_BLOCKSIZE)
            ctx->data[i++] = 0x00;
        SHAEngine::HashMultipleBlocks(ctx->state, ctx->data, SHA_BLOCKSIZE, CryptoPP::BIG_ENDIAN_ORDER);
        memset(ctx->data, 0, (SHA_BLOCKSIZE-sizeof(ctx->bitLen)));
    }

    // Append to the padding the total message's length in bits and transform.
    ctx->bitLen += ctx->dataLen * 8;
    ctx->data[63] = ctx->bitLen;
    ctx->data[62] = ctx->bitLen >> 8;
    ctx->data[61] = ctx->bitLen >> 16;
    ctx->data[60] = ctx->bitLen >> 24;
    ctx->data[59] = ctx->bitLen >> 32;
    ctx->data[58] = ctx->bitLen >> 40;
    ctx->data[57] = ctx->bitLen >> 48;
    ctx->data[56] = ctx->bitLen >> 56;
    SHAEngine::HashMultipleBlocks(ctx->state, ctx->data, SHA_BLOCKSIZE, CryptoPP::BIG_ENDIAN_ORDER);

    memset(outDigest, 0, digestSize);
    if (digestSize > SHAEngine::DIGESTSIZE)
        digestSize = SHAEngine::DIGESTSIZE;

    for (i=0; i<4; i++)
    {
        for (size_t j=0; j<(digestSize/sizeof(ctx->state[0])); j++)
            outDigest[i + j*sizeof(ctx->state[0])] = (ctx->state[j] >> (24 - i * 8)) & 0xFF;
    }
}

SHAState* SHA1_Init(SHAState* state)
{
    memset(state, 0, sizeof(SHAState));
    SHA1Engine::InitState(state->state);
    return state;
}

SHAState* SHA1_Update(SHAState* state, const u8* input, size_t len)
{
    SHA_Update<SHA1Engine>(state, input, len);
    return state;
}

u8* SHA1_Final(SHAState* state, u8* digest, size_t size)
{
    SHA_Final<SHA1Engine>(state, digest, size);
    return digest;
}

SHAState* SHA256_Init(SHAState* state)
{
    memset(state, 0, sizeof(SHAState));
    SHA256Engine::InitState(state->state);
    return state;
}

SHAState* SHA256_Update(SHAState* state, const u8* input, size_t len)
{
    SHA_Update<SHA256Engine>(state, input, len);
    return state;
}

u8* SHA256_Final(SHAState* state, u8* digest, size_t size)
{
    SHA_Final<SHA256Engine>(state, digest, size);
    return digest;
}
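
The vsha1*/vsha256* intrinsics above require the ARMv8 Cryptography Extensions, so this file has to be built with something like -march=armv8-a+crypto, and a caller should confirm the CPU actually implements SHA1/SHA2 before using these routines. A minimal, hypothetical check for AArch64 Linux (not part of the gist; HaveArmSha is an illustrative helper name, other platforms need their own probe) might look like this:

// Hypothetical caller-side feature check, AArch64 Linux only: reads the HWCAP bits
// exposed by the kernel via getauxval() and tests the SHA1/SHA2 capability flags.
#include <sys/auxv.h>
#include <asm/hwcap.h>

static bool HaveArmSha()
{
    const unsigned long hwcap = getauxval(AT_HWCAP);
    return (hwcap & HWCAP_SHA1) != 0 && (hwcap & HWCAP_SHA2) != 0;
}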
sha.h
#pragma once
#include "types.h"

#ifdef __cplusplus
extern "C" {
#endif

#define SHA_BLOCKSIZE (64)

struct alignas(16) SHAState
{
    alignas(16) u32 state[SHA_BLOCKSIZE/sizeof(u32)];
    alignas(16) u8 data[SHA_BLOCKSIZE];
    u64 dataLen, bitLen;
};
typedef struct SHAState SHAState;

SHAState* SHA1_Init(SHAState* state);
SHAState* SHA1_Update(SHAState* state, const u8* input, size_t len);
u8* SHA1_Final(SHAState* state, u8* digest, size_t size);

SHAState* SHA256_Init(SHAState* state);
SHAState* SHA256_Update(SHAState* state, const u8* input, size_t len);
u8* SHA256_Final(SHAState* state, u8* digest, size_t size);

#ifdef __cplusplus
}
#endif
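
A minimal usage sketch of the Init/Update/Final API declared above, compiled as C++ alongside the implementation file and assuming types.h provides the usual u8/u32/u64 fixed-width typedefs; the commented digest is the standard SHA-256 test vector for "abc":

#include <stdio.h>
#include <string.h>
#include "sha.h"

int main()
{
    SHAState st;
    u8 digest[32];
    const char* msg = "abc";

    SHA256_Init(&st);
    SHA256_Update(&st, (const u8*)msg, strlen(msg));
    SHA256_Final(&st, digest, sizeof(digest));

    // Expected output: ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
    for (size_t i = 0; i < sizeof(digest); i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}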