gfx/angle/src/third_party/murmurhash/MurmurHash3.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Sat, 03 Jan 2015 20:18:00 +0100
branch      TOR_BUG_3246
changeset   7:129ffea94266
permissions -rw-r--r--

Conditionally enable double key logic according to private browsing
mode or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts, with some
reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double key logic is disabled.
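
The double keying itself is gated in the cookie service rather than in this
file; below is a minimal sketch of the condition described above, where
PrefGetBool and the FindCookie call shape are illustrative assumptions, not
code from this changeset.

// Hypothetical sketch only -- PrefGetBool stands in for whatever
// preference API the cookie service uses; it is not part of this
// changeset, and the real change lives in the cookie service code.
bool PrefGetBool(const char * aName, bool aDefault);  // assumed helper

static bool UseDoubleKeying(bool aIsPrivateBrowsing)
{
  // Double keying applies in private browsing mode or when the
  // privacy.thirdparty.isolate preference is set.
  return aIsPrivateBrowsing ||
         PrefGetBool("privacy.thirdparty.isolate", false);
}

// FindCookie callers would then test the condition and pass a nullptr
// first-party key when double key logic is disabled, e.g.:
//
//   const char * firstParty =
//       UseDoubleKeying(isPrivate) ? baseDomain : nullptr;
//   FindCookie(host, path, firstParty, ...);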

//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

#include "MurmurHash3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
{
  return p[i];
}

FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
{
  return p[i];
}
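
// Illustrative addition (not in the original source): an endianness- and
// alignment-safe block read of the kind the comment above invites, for
// big-endian or strict-alignment targets. It assembles each little-endian
// 32-bit block from individual bytes; unused by the functions below.

FORCE_INLINE uint32_t getblock_le ( const uint8_t * p, int i )
{
  const uint8_t * b = p + i*4;
  return (uint32_t)b[0]         | ((uint32_t)b[1] << 8) |
         ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}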

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//----------

FORCE_INLINE uint64_t fmix ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}
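
// Illustration of the avalanche property (not in the original source):
// a single flipped input bit should flip about half of the output bits, e.g.
//
//   uint32_t a = fmix(0x12345678u);
//   uint32_t b = fmix(0x12345678u ^ 1u);
//   // popcount(a ^ b) is expected to land near 16 of 32 bits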

//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

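  // The switch cases below fall through intentionally; each mixes in one
  // more tail byte.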
  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t*)out = h1;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

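  // Intentional fall-through again: each case consumes one more trailing byte.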
  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

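  // As above, the cases fall through on purpose.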
  switch(len & 15)
  {
  case 15: k2 ^= uint64_t(tail[14]) << 48;
  case 14: k2 ^= uint64_t(tail[13]) << 40;
  case 13: k2 ^= uint64_t(tail[12]) << 32;
  case 12: k2 ^= uint64_t(tail[11]) << 24;
  case 11: k2 ^= uint64_t(tail[10]) << 16;
  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}

//-----------------------------------------------------------------------------
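
For reference, a minimal usage sketch of the three variants above (not part
of the upstream file). Built against MurmurHash3.h, it prints one digest per
variant; as the note at the top of the file says, the x86_128 and x64_128
digests differ for the same input.

#include <stdio.h>
#include <string.h>

#include "MurmurHash3.h"

int main ( void )
{
  const char * key = "The quick brown fox";
  const int len = (int)strlen(key);
  const uint32_t seed = 42;

  uint32_t h32;           // 32-bit digest
  uint32_t h128_x86[4];   // 128-bit digest, x86 variant
  uint64_t h128_x64[2];   // 128-bit digest, x64 variant

  MurmurHash3_x86_32 (key, len, seed, &h32);
  MurmurHash3_x86_128(key, len, seed, h128_x86);
  MurmurHash3_x64_128(key, len, seed, h128_x64);

  printf("x86_32 : %08x\n", h32);
  printf("x86_128: %08x %08x %08x %08x\n",
         h128_x86[0], h128_x86[1], h128_x86[2], h128_x86[3]);
  printf("x64_128: %016llx %016llx\n",
         (unsigned long long)h128_x64[0],
         (unsigned long long)h128_x64[1]);

  return 0;
}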
