// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the open-source version of Snappy.

#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <iostream>
#include <string>

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "snappy-stubs-public.h"

#if defined(__x86_64__)

// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1

#endif

// Needed by OS X, among others.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

// Pull in std::min, std::ostream, and the like. This is safe because this
// header file is never used from any public header files.
using namespace std;

// The size of an array, if known at compile-time.
// Will give unexpected results if used on a pointer.
// We undefine it first, since some compilers already have a definition.
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))

// Static branch-prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define PREDICT_FALSE(x) x
#define PREDICT_TRUE(x) x
#endif
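// For instance, a hot loop can mark its rare error path so the compiler
// lays it out off the fast path (illustrative sketch, not part of the
// Snappy sources; `ip` and `ip_limit` are hypothetical names):
//
//   if (PREDICT_FALSE(ip >= ip_limit)) {
//     return false;  // Truncated input; kept off the hot path.
//   }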
// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
  bool FLAGS_ ## flag_name = default_value
#define DECLARE_bool(flag_name) \
  extern bool FLAGS_ ## flag_name

namespace snappy {

static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);

// Logging.

#define LOG(level) LogMessage()
#define VLOG(level) true ? (void)0 : \
    snappy::LogMessageVoidify() & snappy::LogMessage()

class LogMessage {
 public:
  LogMessage() { }
  ~LogMessage() {
    cerr << endl;
  }

  LogMessage& operator<<(const std::string& msg) {
    cerr << msg;
    return *this;
  }
  LogMessage& operator<<(int x) {
    cerr << x;
    return *this;
  }
};

// Asserts: both versions that are active in debug mode only (DCHECK),
// and versions that are always active (CHECK).

#define CRASH_UNLESS(condition) \
    PREDICT_TRUE(condition) ? (void)0 : \
    snappy::LogMessageVoidify() & snappy::LogMessageCrash()

class LogMessageCrash : public LogMessage {
 public:
  LogMessageCrash() { }
  ~LogMessageCrash() {
    cerr << endl;
    abort();
  }
};

// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".

class LogMessageVoidify {
 public:
  LogMessageVoidify() { }
  // This has to be an operator with a precedence lower than << but
  // higher than ?:
  void operator&(const LogMessage&) { }
};

#define CHECK(cond)     CRASH_UNLESS(cond)
#define CHECK_LE(a, b)  CRASH_UNLESS((a) <= (b))
#define CHECK_GE(a, b)  CRASH_UNLESS((a) >= (b))
#define CHECK_EQ(a, b)  CRASH_UNLESS((a) == (b))
#define CHECK_NE(a, b)  CRASH_UNLESS((a) != (b))
#define CHECK_LT(a, b)  CRASH_UNLESS((a) < (b))
#define CHECK_GT(a, b)  CRASH_UNLESS((a) > (b))

#ifdef NDEBUG

#define DCHECK(cond)    CRASH_UNLESS(true)
#define DCHECK_LE(a, b) CRASH_UNLESS(true)
#define DCHECK_GE(a, b) CRASH_UNLESS(true)
#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
#define DCHECK_NE(a, b) CRASH_UNLESS(true)
#define DCHECK_LT(a, b) CRASH_UNLESS(true)
#define DCHECK_GT(a, b) CRASH_UNLESS(true)

#else

#define DCHECK(cond)    CHECK(cond)
#define DCHECK_LE(a, b) CHECK_LE(a, b)
#define DCHECK_GE(a, b) CHECK_GE(a, b)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
#define DCHECK_LT(a, b) CHECK_LT(a, b)
#define DCHECK_GT(a, b) CHECK_GT(a, b)

#endif
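// Example (hypothetical caller, not from the Snappy sources): a bounds
// check that must hold even in release builds uses CHECK, while a cheap
// internal invariant uses DCHECK and compiles to a no-op under NDEBUG:
//
//   CHECK_LE(bytes_written, output_capacity);  // Always enforced; aborts on failure.
//   DCHECK_EQ(ip, base + offset);              // Debug builds only.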
// Potentially unaligned loads and stores.

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)

#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#else

// These functions are provided for architectures that don't support
// unaligned loads and stores.

inline uint16 UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#endif
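// Example (illustrative only): copying a 32-bit word between byte buffers
// at arbitrary offsets, with no alignment requirement on either pointer:
//
//   char src[8], dst[8];
//   UNALIGNED_STORE32(dst + 1, UNALIGNED_LOAD32(src + 3));
//
// On x86 this compiles to a plain load/store pair; on the other
// architectures the memcpy-based fallback is used, which mainstream
// compilers typically lower to the same single instructions.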
// The following guarantees declaration of the byte swap functions.
#ifdef WORDS_BIGENDIAN

#ifdef HAVE_SYS_BYTEORDER_H
#include <sys/byteorder.h>
#endif

#ifdef HAVE_SYS_ENDIAN_H
#include <sys/endian.h>
#endif

#ifdef _MSC_VER
#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif defined(HAVE_BYTESWAP_H)
#include <byteswap.h>

#elif defined(bswap32)
// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
#define bswap_16(x) bswap16(x)
#define bswap_32(x) bswap32(x)
#define bswap_64(x) bswap64(x)

#elif defined(BSWAP_64)
// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
#define bswap_16(x) BSWAP_16(x)
#define bswap_32(x) BSWAP_32(x)
#define bswap_64(x) BSWAP_64(x)

#else

inline uint16 bswap_16(uint16 x) {
  return (x << 8) | (x >> 8);
}

inline uint32 bswap_32(uint32 x) {
  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
  return (x >> 16) | (x << 16);
}

inline uint64 bswap_64(uint64 x) {
  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
  return (x >> 32) | (x << 32);
}

#endif

#endif  // WORDS_BIGENDIAN

// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
//
//  Store values into unaligned memory converting to little endian order:
//    LittleEndian.Store16(p, x);
//
//  Load unaligned values stored in little endian converting to host order:
//    x = LittleEndian.Load16(p);
class LittleEndian {
 public:
  // Conversion functions.
#ifdef WORDS_BIGENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static bool IsLittleEndian() { return false; }

#else  // !defined(WORDS_BIGENDIAN)

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static bool IsLittleEndian() { return true; }

#endif  // !defined(WORDS_BIGENDIAN)

  // Functions to do unaligned loads and stores in little-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }
};
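// Example (illustrative only): multi-byte fields in the Snappy stream are
// stored little-endian, so readers and writers behave identically on
// every host:
//
//   char buf[4];
//   LittleEndian::Store32(buf, 0x12345678);  // buf = 78 56 34 12 on all hosts.
//   uint32 v = LittleEndian::Load32(buf);    // v == 0x12345678 on all hosts.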
// Some bit-manipulation functions.
class Bits {
 public:
  // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
  static int Log2Floor(uint32 n);

  // Return the 0-indexed position of the least significant set bit.
  // The result is undefined if n == 0.  FindLSBSetNonZero() is similar
  // to ffs() except that it's 0-indexed.
  static int FindLSBSetNonZero(uint32 n);
  static int FindLSBSetNonZero64(uint64 n);

 private:
  DISALLOW_COPY_AND_ASSIGN(Bits);
};

#ifdef HAVE_BUILTIN_CTZ

inline int Bits::Log2Floor(uint32 n) {
  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  return __builtin_ctz(n);
}

inline int Bits::FindLSBSetNonZero64(uint64 n) {
  return __builtin_ctzll(n);
}

#else  // Portable versions.

inline int Bits::Log2Floor(uint32 n) {
  if (n == 0)
    return -1;
  int log = 0;
  uint32 value = n;
  // Binary search for the highest set bit, 16 bits at a time, then 8, etc.
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    uint32 x = value >> shift;
    if (x != 0) {
      value = x;
      log += shift;
    }
  }
  assert(value == 1);
  return log;
}

inline int Bits::FindLSBSetNonZero(uint32 n) {
  int rc = 31;
  // Binary search for the lowest set bit by shifting it toward the top.
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
    const uint32 x = n << shift;
    if (x != 0) {
      n = x;
      rc -= shift;
    }
    shift >>= 1;
  }
  return rc;
}

// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
  const uint32 bottombits = static_cast<uint32>(n);
  if (bottombits == 0) {
    // Bottom bits are zero, so scan in top bits
    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
  } else {
    return FindLSBSetNonZero(bottombits);
  }
}

#endif  // End portable versions.

// Variable-length integer encoding.
class Varint {
 public:
  // Maximum lengths of varint encoding of uint32.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
  // Never reads a character at or beyond limit.  If a valid/terminated
  // varint32 was found in the range, stores it in *OUTPUT and returns a
  // pointer just past the last byte of the varint32.  Else returns NULL.
  // On success, "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
                                      uint32* OUTPUT);

  // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
  //            byte just past the last encoded byte.
  static char* Encode32(char* ptr, uint32 v);

  // EFFECTS    Appends the varint representation of "value" to "*s".
  static void Append32(string* s, uint32 value);
};

inline const char* Varint::Parse32WithLimit(const char* p,
                                            const char* l,
                                            uint32* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
  uint32 b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127;          if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
  return NULL;       // Value is too long to be a varint32
 done:
  *OUTPUT = result;
  return reinterpret_cast<const char*>(ptr);
}

inline char* Varint::Encode32(char* sptr, uint32 v) {
  // Operate on characters as unsigneds
  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
  static const int B = 128;
  if (v < (1<<7)) {
    *(ptr++) = v;
  } else if (v < (1<<14)) {
    *(ptr++) = v | B;
    *(ptr++) = v>>7;
  } else if (v < (1<<21)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = v>>14;
  } else if (v < (1<<28)) {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = v>>21;
  } else {
    *(ptr++) = v | B;
    *(ptr++) = (v>>7) | B;
    *(ptr++) = (v>>14) | B;
    *(ptr++) = (v>>21) | B;
    *(ptr++) = v>>28;
  }
  return reinterpret_cast<char*>(ptr);
}
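// Example (illustrative only): round-tripping a value through the varint
// coder.  Each byte carries 7 payload bits; the high bit marks "more bytes
// follow".  300 (binary 10 0101100) therefore encodes as 0xAC 0x02:
//
//   char buf[Varint::kMax32];
//   char* end = Varint::Encode32(buf, 300);  // Writes 0xAC, 0x02.
//   uint32 decoded;
//   const char* p = Varint::Parse32WithLimit(buf, end, &decoded);
//   assert(p == end && decoded == 300);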
// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
  s->resize(new_size);
}

// Return a mutable char* pointing to a string's internal buffer,
// which may not be null-terminated. Writing through this pointer will
// modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// As of 2006-04, there is no standard-blessed way of getting a
// mutable reference to a string's internal buffer. However, issue 530
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
inline char* string_as_array(string* str) {
  return str->empty() ? NULL : &*str->begin();
}

}  // namespace snappy

#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_