/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef _SHA_FAST_H_
#define _SHA_FAST_H_

#include "prlong.h"

#define SHA1_INPUT_LEN 64

#if defined(IS_64) && !defined(__sparc)
typedef PRUint64 SHA_HW_t;
#define SHA1_USING_64_BIT 1
#else
typedef PRUint32 SHA_HW_t;
#endif

struct SHA1ContextStr {
  union {
    PRUint32 w[16]; /* input buffer */
    PRUint8  b[64];
  } u;
  PRUint64 size;    /* count of hashed bytes. */
  SHA_HW_t H[22];   /* 5 state variables, 16 tmp values, 1 extra */
};

#if defined(_MSC_VER)
#include <stdlib.h> /* for the _byteswap_ulong and _lrotl intrinsics */
#if defined(IS_LITTLE_ENDIAN)
#if (_MSC_VER >= 1300)
#pragma intrinsic(_byteswap_ulong)
#define SHA_HTONL(x) _byteswap_ulong(x)
#elif defined(NSS_X86_OR_X64)
#ifndef FORCEINLINE
#if (_MSC_VER >= 1200)
#define FORCEINLINE __forceinline
#else
#define FORCEINLINE __inline
#endif /* _MSC_VER */
#endif /* !defined FORCEINLINE */
#define FASTCALL __fastcall

/* Byte-swap via the x86 bswap instruction; MSVC returns the value
 * left in EAX, so no explicit return statement is needed. */
static FORCEINLINE PRUint32 FASTCALL
swap4b(PRUint32 dwd)
{
    __asm {
        mov   eax,dwd
        bswap eax
    }
}

#define SHA_HTONL(x) swap4b(x)
#endif /* NSS_X86_OR_X64 */
#endif /* IS_LITTLE_ENDIAN */

#pragma intrinsic (_lrotr, _lrotl)
#define SHA_ROTL(x,n) _lrotl(x,n)
#define SHA_ROTL_IS_DEFINED 1
#endif /* _MSC_VER */

#if defined(__GNUC__)
/* __x86_64__ and __x86_64 are defined by GCC on x86_64 CPUs */
#if defined( SHA1_USING_64_BIT )
/* SHA-1 only ever rotates 32-bit quantities, so even with a 64-bit
 * hardware word the rotate operates on the low 32 bits. */
static __inline__ PRUint64 SHA_ROTL(PRUint64 x, PRUint32 n)
{
    PRUint32 t = (PRUint32)x;
    return ((t << n) | (t >> (32 - n)));
}
#else
static __inline__ PRUint32 SHA_ROTL(PRUint32 t, PRUint32 n)
{
    return ((t << n) | (t >> (32 - n)));
}
#endif
#define SHA_ROTL_IS_DEFINED 1

#if defined(NSS_X86_OR_X64)
static __inline__ PRUint32 swap4b(PRUint32 value)
{
    __asm__("bswap %0" : "+r" (value));
    return (value);
}
#define SHA_HTONL(x) swap4b(x)

#elif defined(__thumb2__) || \
    (!defined(__thumb__) &&  \
     (defined(__ARM_ARCH_6__) ||   \
      defined(__ARM_ARCH_6J__) ||  \
      defined(__ARM_ARCH_6K__) ||  \
      defined(__ARM_ARCH_6Z__) ||  \
      defined(__ARM_ARCH_6ZK__) || \
      defined(__ARM_ARCH_6T2__) || \
      defined(__ARM_ARCH_7__) ||   \
      defined(__ARM_ARCH_7A__) ||  \
      defined(__ARM_ARCH_7R__)))
/* ARMv6 and later provide the rev instruction for byte reversal. */
static __inline__ PRUint32 swap4b(PRUint32 value)
{
    PRUint32 ret;
    __asm__("rev %0, %1" : "=r" (ret) : "r"(value));
    return ret;
}
#define SHA_HTONL(x) swap4b(x)

#endif /* x86 family */

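/*
 * Editor's illustration, not part of the upstream NSS header: the swap4b
 * variants above all implement a 4-byte byte reversal, so that on a
 * little-endian machine SHA_HTONL(0x12345678) yields 0x78563412. The
 * portable function below shows the same operation without assembly.
 * SHA_FAST_ILLUSTRATION is a hypothetical guard, never defined by NSS,
 * so this sketch does not affect real builds.
 */
#ifdef SHA_FAST_ILLUSTRATION
static __inline__ PRUint32
swap4b_portable(PRUint32 value)
{
    /* Shift-and-mask byte swap equivalent to the bswap/rev paths above. */
    return ((value & 0x000000FFU) << 24) |
           ((value & 0x0000FF00U) <<  8) |
           ((value & 0x00FF0000U) >>  8) |
           ((value & 0xFF000000U) >> 24);
}
#endif /* SHA_FAST_ILLUSTRATION */
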
#endif /* __GNUC__ */

#if !defined(SHA_ROTL_IS_DEFINED)
#define SHA_NEED_TMP_VARIABLE 1
#define SHA_ROTL(X,n) (tmp = (X), ((tmp) << (n)) | ((tmp) >> (32-(n))))
#endif

#if defined(NSS_X86_OR_X64)
#define SHA_ALLOW_UNALIGNED_ACCESS 1
#endif

#if !defined(SHA_HTONL)
#define SHA_MASK 0x00FF00FF
#if defined(IS_LITTLE_ENDIAN)
#undef  SHA_NEED_TMP_VARIABLE
#define SHA_NEED_TMP_VARIABLE 1
#define SHA_HTONL(x) (tmp = (x), tmp = (tmp << 16) | (tmp >> 16), \
                      ((tmp & SHA_MASK) << 8) | ((tmp >> 8) & SHA_MASK))
#else
#define SHA_HTONL(x) (x)
#endif
#endif

#define SHA_BYTESWAP(x) x = SHA_HTONL(x)

#define SHA_STORE(n) ((PRUint32*)hashout)[n] = SHA_HTONL(ctx->H[n])
#if defined(SHA_ALLOW_UNALIGNED_ACCESS)
#define SHA_STORE_RESULT \
  SHA_STORE(0); \
  SHA_STORE(1); \
  SHA_STORE(2); \
  SHA_STORE(3); \
  SHA_STORE(4);

#elif defined(IS_LITTLE_ENDIAN) || defined( SHA1_USING_64_BIT )
#define SHA_STORE_RESULT \
  if (!((ptrdiff_t)hashout % sizeof(PRUint32))) { \
    SHA_STORE(0); \
    SHA_STORE(1); \
    SHA_STORE(2); \
    SHA_STORE(3); \
    SHA_STORE(4); \
  } else { \
    tmpbuf[0] = SHA_HTONL(ctx->H[0]); \
    tmpbuf[1] = SHA_HTONL(ctx->H[1]); \
    tmpbuf[2] = SHA_HTONL(ctx->H[2]); \
    tmpbuf[3] = SHA_HTONL(ctx->H[3]); \
    tmpbuf[4] = SHA_HTONL(ctx->H[4]); \
    memcpy(hashout, tmpbuf, SHA1_LENGTH); \
  }

#else
#define SHA_STORE_RESULT \
  if (!((ptrdiff_t)hashout % sizeof(PRUint32))) { \
    SHA_STORE(0); \
    SHA_STORE(1); \
    SHA_STORE(2); \
    SHA_STORE(3); \
    SHA_STORE(4); \
  } else { \
    memcpy(hashout, ctx->H, SHA1_LENGTH); \
  }
#endif

#endif /* _SHA_FAST_H_ */
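
/*
 * Editor's usage sketch, not part of the upstream NSS header: the
 * SHA_STORE_RESULT macro assumes the caller declares `ctx`, `hashout`,
 * `tmpbuf`, and (when SHA_NEED_TMP_VARIABLE is set) `tmp` in scope, as
 * sha_fast.c does. SHA_FAST_ILLUSTRATION is a hypothetical guard and
 * SHA1_LENGTH (20 bytes) normally comes from blapit.h.
 */
#ifdef SHA_FAST_ILLUSTRATION
#include <stddef.h> /* ptrdiff_t, used by the alignment check */
#include <string.h> /* memcpy, used on the unaligned fallback path */
#ifndef SHA1_LENGTH
#define SHA1_LENGTH 20 /* SHA-1 digest size; normally from blapit.h */
#endif
static void
example_store_result(struct SHA1ContextStr *ctx, unsigned char *hashout)
{
    PRUint32 tmpbuf[5]; /* staging buffer for the unaligned branch */
#ifdef SHA_NEED_TMP_VARIABLE
    PRUint32 tmp;       /* scratch word used by the macro forms of
                         * SHA_HTONL/SHA_ROTL */
#endif
    (void)tmpbuf;       /* unused on some configurations */
    SHA_STORE_RESULT;   /* emits H[0..4] to hashout in big-endian order */
}
#endif /* SHA_FAST_ILLUSTRATION */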