/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkChecksum_DEFINED
#define SkChecksum_DEFINED

#include "SkTypes.h"

/**
 * Computes a 32bit checksum from a blob of 32bit aligned data. This is meant
 * to be very very fast, as it is used internally by the font cache, in
 * conjunction with the entire raw key. This algorithm does not generate
 * unique values as well as others (e.g. MD5) do, but it performs much faster.
 * Skia's use cases can survive non-unique values (since the entire key is
 * always available). Clients should use this only in circumstances where
 * speed over uniqueness is at a premium.
 */
class SkChecksum : SkNoncopyable {
private:
    /*
     * Our Rotate and Mash helpers are meant to automatically do the right
     * thing depending on whether sizeof(uintptr_t) is 4 or 8.
     */
    enum {
        ROTR = 17,
        ROTL = sizeof(uintptr_t) * 8 - ROTR,
        HALFBITS = sizeof(uintptr_t) * 4
    };

    static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
        return ((total >> ROTR) | (total << ROTL)) ^ value;
    }

public:
    /**
     * Calculate 32-bit Murmur hash (murmur3).
     * This should take 2-3x longer than SkChecksum::Compute, but is a
     * considerably better hash. See en.wikipedia.org/wiki/MurmurHash.
     *
     * @param data  Memory address of the data block to be processed. Must be 32-bit aligned.
     * @param bytes Size of the data block in bytes. Must be a multiple of 4.
     * @param seed  Initial hash seed. (optional)
     * @return hash result
     */
    static uint32_t Murmur3(const uint32_t* data, size_t bytes, uint32_t seed = 0) {
        SkASSERT(SkIsAlign4(bytes));
        const size_t words = bytes / 4;

        uint32_t hash = seed;
        for (size_t i = 0; i < words; i++) {
            uint32_t k = data[i];
            k *= 0xcc9e2d51;
            k = (k << 15) | (k >> 17);
            k *= 0x1b873593;

            hash ^= k;
            hash = (hash << 13) | (hash >> 19);
            hash *= 5;
            hash += 0xe6546b64;
        }
        hash ^= bytes;
        hash ^= hash >> 16;
        hash *= 0x85ebca6b;
        hash ^= hash >> 13;
        hash *= 0xc2b2ae35;
        hash ^= hash >> 16;
        return hash;
    }
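
    /*
     * A minimal usage sketch (illustrative only; the key layout below is
     * hypothetical, not part of Skia's API):
     *
     *   uint32_t key[2] = { width, height };
     *   uint32_t hash = SkChecksum::Murmur3(key, sizeof(key));
     *
     * Note that the finalizer mixes the byte count into the hash, so the
     * same words hashed with a different length yield a different result.
     */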
    /**
     * Compute a 32-bit checksum for a given data block
     *
     * WARNING: this algorithm is tuned for efficiency, not backward/forward
     * compatibility. It may change at any time, so a checksum generated with
     * one version of the Skia code may not match a checksum generated with
     * a different version of the Skia code.
     *
     * @param data Memory address of the data block to be processed. Must be
     *             32-bit aligned.
     * @param size Size of the data block in bytes. Must be a multiple of 4.
     * @return checksum result
     */
    static uint32_t Compute(const uint32_t* data, size_t size) {
        SkASSERT(SkIsAlign4(size));

        /*
         * We want to let the compiler use 32bit or 64bit addressing and math
         * so we use uintptr_t as our magic type. This makes the code a little
         * more obscure (we can't hard-code 32 or 64 anywhere, but have to use
         * sizeof()).
         */
        uintptr_t result = 0;
        const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(data);

        /*
         * Count the number of quad element chunks. This takes into account
         * whether we're on a 32bit or 64bit arch, since we use
         * sizeof(uintptr_t) to compute how much to shift-down the size.
         */
        size_t n4 = size / (sizeof(uintptr_t) << 2);
        for (size_t i = 0; i < n4; ++i) {
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
        }
        size &= ((sizeof(uintptr_t) << 2) - 1);

        data = reinterpret_cast<const uint32_t*>(ptr);
        const uint32_t* stop = data + (size >> 2);
        while (data < stop) {
            result = Mash(result, *data++);
        }

        /*
         * Smash us down to 32bits if we were 64. Note that when uintptr_t is
         * 32bits, this code-path should go away, but I still got a warning
         * when I wrote
         *     result ^= result >> 32;
         * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS
         * define.
         */
        if (8 == sizeof(result)) {
            result ^= result >> HALFBITS;
        }
        return static_cast<uint32_t>(result);
    }
};

#endif
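
/*
 * A minimal usage sketch for Compute (illustrative only; the struct below is
 * hypothetical, not a Skia type). Any 32-bit aligned blob whose size is a
 * multiple of 4 can be hashed the same way:
 *
 *   struct KeyRec { uint32_t fGenID; uint32_t fWidth; uint32_t fHeight; };
 *   KeyRec rec = { 7, 640, 480 };
 *   uint32_t checksum = SkChecksum::Compute(
 *           reinterpret_cast<const uint32_t*>(&rec), sizeof(rec));
 *
 * Per the WARNING above, the result is only stable within a single version
 * of Skia, so it should not be persisted or compared across builds.
 */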