Fri, 16 Jan 2015 18:13:44 +0100
Integrate suggestion from review to improve consistency with existing code.
// Copyright (c) 2011 Google, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
// CityHash Version 1, by Geoff Pike and Jyrki Alakuijala
//
// This file provides CityHash64() and related functions.
//
// It's probably possible to create even faster hash functions by
// writing a program that systematically explores some of the space of
// possible hash functions, by using SIMD instructions, or by
// compromising on hash quality.

#include "city.h"

#include <algorithm>

using namespace std;

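// Note: these loads assume the platform tolerates unaligned reads through a
// plain pointer dereference (true on x86/x86-64, not guaranteed everywhere).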
#define UNALIGNED_LOAD64(p) (*(const uint64*)(p))
#define UNALIGNED_LOAD32(p) (*(const uint32*)(p))

#if !defined(LIKELY)
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect(!!(x), 1))
#else
#define LIKELY(x) (x)
#endif
#endif

// Some primes between 2^63 and 2^64 for various uses.
static const uint64 k0 = 0xc3a5c85c97cb3127;
static const uint64 k1 = 0xb492b66fbe98f273;
static const uint64 k2 = 0x9ae16a3b2f90404f;
static const uint64 k3 = 0xc949d7c7509e6557;

// Bitwise right rotate. Normally this will compile to a single
// instruction, especially if the shift is a manifest constant.
static uint64 Rotate(uint64 val, int shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

// Equivalent to Rotate(), but requires the second arg to be non-zero.
// On x86-64, and probably others, it's possible for this to compile
// to a single instruction if both args are already in registers.
static uint64 RotateByAtLeast1(uint64 val, int shift) {
  return (val >> shift) | (val << (64 - shift));
}

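// XOR the value with a right-shifted copy of itself so that high-order
// entropy reaches the low-order bits.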
static uint64 ShiftMix(uint64 val) {
  return val ^ (val >> 47);
}

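// Combine two 64-bit values into one via Hash128to64() (declared in city.h).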
static uint64 HashLen16(uint64 u, uint64 v) {
  return Hash128to64(uint128(u, v));
}

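// Hash strings of 0 to 16 bytes, choosing a strategy based on length:
// two 64-bit loads, two 32-bit loads, or individual bytes.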
static uint64 HashLen0to16(const char *s, size_t len) {
  if (len > 8) {
    uint64 a = UNALIGNED_LOAD64(s);
    uint64 b = UNALIGNED_LOAD64(s + len - 8);
    return HashLen16(a, RotateByAtLeast1(b + len, len)) ^ b;
  }
  if (len >= 4) {
    uint64 a = UNALIGNED_LOAD32(s);
    return HashLen16(len + (a << 3), UNALIGNED_LOAD32(s + len - 4));
  }
  if (len > 0) {
    uint8 a = s[0];
    uint8 b = s[len >> 1];
    uint8 c = s[len - 1];
    uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
    uint32 z = len + (static_cast<uint32>(c) << 2);
    return ShiftMix(y * k2 ^ z * k3) * k2;
  }
  return k2;
}

// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
static uint64 HashLen17to32(const char *s, size_t len) {
  uint64 a = UNALIGNED_LOAD64(s) * k1;
  uint64 b = UNALIGNED_LOAD64(s + 8);
  uint64 c = UNALIGNED_LOAD64(s + len - 8) * k2;
  uint64 d = UNALIGNED_LOAD64(s + len - 16) * k0;
  return HashLen16(Rotate(a - b, 43) + Rotate(c, 30) + d,
                   a + Rotate(b ^ k3, 20) - c + len);
}

// Return a 16-byte hash for 48 bytes. Quick and dirty.
// Callers do best to use "random-looking" values for a and b.
static pair<uint64, uint64> WeakHashLen32WithSeeds(
    uint64 w, uint64 x, uint64 y, uint64 z, uint64 a, uint64 b) {
  a += w;
  b = Rotate(b + a + z, 21);
  uint64 c = a;
  a += x;
  a += y;
  b += Rotate(a, 44);
  return make_pair(a + z, b + c);
}

// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
static pair<uint64, uint64> WeakHashLen32WithSeeds(
    const char* s, uint64 a, uint64 b) {
  return WeakHashLen32WithSeeds(UNALIGNED_LOAD64(s),
                                UNALIGNED_LOAD64(s + 8),
                                UNALIGNED_LOAD64(s + 16),
                                UNALIGNED_LOAD64(s + 24),
                                a,
                                b);
}

// Return an 8-byte hash for 33 to 64 bytes.
static uint64 HashLen33to64(const char *s, size_t len) {
  uint64 z = UNALIGNED_LOAD64(s + 24);
  uint64 a = UNALIGNED_LOAD64(s) + (len + UNALIGNED_LOAD64(s + len - 16)) * k0;
  uint64 b = Rotate(a + z, 52);
  uint64 c = Rotate(a, 37);
  a += UNALIGNED_LOAD64(s + 8);
  c += Rotate(a, 7);
  a += UNALIGNED_LOAD64(s + 16);
  uint64 vf = a + z;
  uint64 vs = b + Rotate(a, 31) + c;
  a = UNALIGNED_LOAD64(s + 16) + UNALIGNED_LOAD64(s + len - 32);
  z = UNALIGNED_LOAD64(s + len - 8);
  b = Rotate(a + z, 52);
  c = Rotate(a, 37);
  a += UNALIGNED_LOAD64(s + len - 24);
  c += Rotate(a, 7);
  a += UNALIGNED_LOAD64(s + len - 16);
  uint64 wf = a + z;
  uint64 ws = b + Rotate(a, 31) + c;
  uint64 r = ShiftMix((vf + ws) * k2 + (wf + vs) * k0);
  return ShiftMix(r * k0 + vs) * k2;
}

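// Hash len bytes starting at s to a 64-bit value. Short inputs are
// dispatched to the specialized helpers above; longer inputs use a
// 64-byte-per-iteration loop over 56 bytes of running state.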
uint64 CityHash64(const char *s, size_t len) {
  if (len <= 32) {
    if (len <= 16) {
      return HashLen0to16(s, len);
    } else {
      return HashLen17to32(s, len);
    }
  } else if (len <= 64) {
    return HashLen33to64(s, len);
  }

  // For strings over 64 bytes we hash the end first, and then as we
  // loop we keep 56 bytes of state: v, w, x, y, and z.
  uint64 x = UNALIGNED_LOAD64(s);
  uint64 y = UNALIGNED_LOAD64(s + len - 16) ^ k1;
  uint64 z = UNALIGNED_LOAD64(s + len - 56) ^ k0;
  pair<uint64, uint64> v = WeakHashLen32WithSeeds(s + len - 64, len, y);
  pair<uint64, uint64> w = WeakHashLen32WithSeeds(s + len - 32, len * k1, k0);
  z += ShiftMix(v.second) * k1;
  x = Rotate(z + x, 39) * k1;
  y = Rotate(y, 33) * k1;

  // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
  len = (len - 1) & ~static_cast<size_t>(63);
  do {
    x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
    y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
    x ^= w.second;
    y ^= v.first;
    z = Rotate(z ^ w.first, 33);
    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
    std::swap(z, x);
    s += 64;
    len -= 64;
  } while (len != 0);
  return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
                   HashLen16(v.second, w.second) + x);
}

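// Hash len bytes starting at s, folding in a single 64-bit seed.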
uint64 CityHash64WithSeed(const char *s, size_t len, uint64 seed) {
  return CityHash64WithSeeds(s, len, k2, seed);
}

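// Hash len bytes starting at s, folding in two 64-bit seeds.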
uint64 CityHash64WithSeeds(const char *s, size_t len,
                           uint64 seed0, uint64 seed1) {
  return HashLen16(CityHash64(s, len) - seed0, seed1);
}

// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
// of any length representable in ssize_t. Based on City and Murmur.
static uint128 CityMurmur(const char *s, size_t len, uint128 seed) {
  uint64 a = Uint128Low64(seed);
  uint64 b = Uint128High64(seed);
  uint64 c = 0;
  uint64 d = 0;
  ssize_t l = len - 16;
  if (l <= 0) { // len <= 16
    c = b * k1 + HashLen0to16(s, len);
    d = Rotate(a + (len >= 8 ? UNALIGNED_LOAD64(s) : c), 32);
  } else { // len > 16
    c = HashLen16(UNALIGNED_LOAD64(s + len - 8) + k1, a);
    d = HashLen16(b + len, c + UNALIGNED_LOAD64(s + len - 16));
    a += d;
    do {
      a ^= ShiftMix(UNALIGNED_LOAD64(s) * k1) * k1;
      a *= k1;
      b ^= a;
      c ^= ShiftMix(UNALIGNED_LOAD64(s + 8) * k1) * k1;
      c *= k1;
      d ^= c;
      s += 16;
      l -= 16;
    } while (l > 0);
  }
  a = HashLen16(a, c);
  b = HashLen16(d, b);
  return uint128(a ^ b, HashLen16(b, a));
}

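// Hash len bytes starting at s to a 128-bit value, seeded with a 128-bit
// value. Inputs shorter than 128 bytes fall back to CityMurmur().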
uint128 CityHash128WithSeed(const char *s, size_t len, uint128 seed) {
  if (len < 128) {
    return CityMurmur(s, len, seed);
  }

  // We expect len >= 128 to be the common case. Keep 56 bytes of state:
  // v, w, x, y, and z.
  pair<uint64, uint64> v, w;
  uint64 x = Uint128Low64(seed);
  uint64 y = Uint128High64(seed);
  uint64 z = len * k1;
  v.first = Rotate(y ^ k1, 49) * k1 + UNALIGNED_LOAD64(s);
  v.second = Rotate(v.first, 42) * k1 + UNALIGNED_LOAD64(s + 8);
  w.first = Rotate(y + z, 35) * k1 + x;
  w.second = Rotate(x + UNALIGNED_LOAD64(s + 88), 53) * k1;

  // This is the same inner loop as CityHash64(), manually unrolled.
  do {
    x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
    y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
    x ^= w.second;
    y ^= v.first;
    z = Rotate(z ^ w.first, 33);
    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
    std::swap(z, x);
    s += 64;
    x = Rotate(x + y + v.first + UNALIGNED_LOAD64(s + 16), 37) * k1;
    y = Rotate(y + v.second + UNALIGNED_LOAD64(s + 48), 42) * k1;
    x ^= w.second;
    y ^= v.first;
    z = Rotate(z ^ w.first, 33);
    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y);
    std::swap(z, x);
    s += 64;
    len -= 128;
  } while (LIKELY(len >= 128));
  y += Rotate(w.first, 37) * k0 + z;
  x += Rotate(v.first + z, 49) * k0;
  // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
  for (size_t tail_done = 0; tail_done < len; ) {
    tail_done += 32;
    y = Rotate(y - x, 42) * k0 + v.second;
    w.first += UNALIGNED_LOAD64(s + len - tail_done + 16);
    x = Rotate(x, 49) * k0 + w.first;
    w.first += v.first;
    v = WeakHashLen32WithSeeds(s + len - tail_done, v.first, v.second);
  }
  // At this point our 48 bytes of state should contain more than
  // enough information for a strong 128-bit hash. We use two
  // different 48-byte-to-8-byte hashes to get a 16-byte final result.
  x = HashLen16(x, v.first);
  y = HashLen16(y, w.first);
  return uint128(HashLen16(x + v.second, w.second) + y,
                 HashLen16(x + w.second, y + v.second));
}

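// Hash len bytes starting at s to a 128-bit value. When len >= 16, the first
// 16 bytes are folded into the seed and the remainder is hashed.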
uint128 CityHash128(const char *s, size_t len) {
  if (len >= 16) {
    return CityHash128WithSeed(s + 16,
                               len - 16,
                               uint128(UNALIGNED_LOAD64(s) ^ k3,
                                       UNALIGNED_LOAD64(s + 8)));
  } else if (len >= 8) {
    return CityHash128WithSeed(NULL,
                               0,
                               uint128(UNALIGNED_LOAD64(s) ^ (len * k0),
                                       UNALIGNED_LOAD64(s + len - 8) ^ k1));
  } else {
    return CityHash128WithSeed(s, len, uint128(k0, k1));
  }
}
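
// Example usage (illustrative sketch; buffer and length are placeholders):
//
//   const char *data = ...;  // bytes to hash
//   size_t n = ...;          // length in bytes
//   uint64 h64 = CityHash64(data, n);
//   uint64 h64s = CityHash64WithSeed(data, n, 0x12345678);
//   uint128 h128 = CityHash128(data, n);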