other-licenses/snappy/src/snappy.cc

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Tue, 06 Jan 2015 21:39:09 +0100
branch      TOR_BUG_9701
changeset   8:97036ab72558
permissions -rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 // Copyright 2005 Google Inc. All Rights Reserved.
michael@0 2 //
michael@0 3 // Redistribution and use in source and binary forms, with or without
michael@0 4 // modification, are permitted provided that the following conditions are
michael@0 5 // met:
michael@0 6 //
michael@0 7 // * Redistributions of source code must retain the above copyright
michael@0 8 // notice, this list of conditions and the following disclaimer.
michael@0 9 // * Redistributions in binary form must reproduce the above
michael@0 10 // copyright notice, this list of conditions and the following disclaimer
michael@0 11 // in the documentation and/or other materials provided with the
michael@0 12 // distribution.
michael@0 13 // * Neither the name of Google Inc. nor the names of its
michael@0 14 // contributors may be used to endorse or promote products derived from
michael@0 15 // this software without specific prior written permission.
michael@0 16 //
michael@0 17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
michael@0 18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
michael@0 19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
michael@0 20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
michael@0 21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
michael@0 22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
michael@0 23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
michael@0 27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 28
michael@0 29 #include "snappy.h"
michael@0 30 #include "snappy-internal.h"
michael@0 31 #include "snappy-sinksource.h"
michael@0 32
michael@0 33 #include <stdio.h>
michael@0 34
michael@0 35 #include <algorithm>
michael@0 36 #include <string>
michael@0 37 #include <vector>
michael@0 38
michael@0 39
michael@0 40 namespace snappy {
michael@0 41
michael@0 42 // Any hash function will produce a valid compressed bitstream, but a good
michael@0 43 // hash function reduces the number of collisions and thus yields better
michael@0 44 // compression for compressible input, and more speed for incompressible
michael@0 45 // input. Of course, it doesn't hurt if the hash function is reasonably fast
michael@0 46 // either, as it gets called a lot.
michael@0 47 static inline uint32 HashBytes(uint32 bytes, int shift) {
michael@0 48 uint32 kMul = 0x1e35a7bd;
michael@0 49 return (bytes * kMul) >> shift;
michael@0 50 }
michael@0 51 static inline uint32 Hash(const char* p, int shift) {
michael@0 52 return HashBytes(UNALIGNED_LOAD32(p), shift);
michael@0 53 }
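// Illustrative note: the "shift" argument comes from CompressFragment() below,
// where shift = 32 - Bits::Log2Floor(table_size). For a 16384-entry (2^14)
// table the shift is 18, so only the top 14 bits of the 32-bit product survive
// and every hash value lands in [0, table_size - 1].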
michael@0 54
michael@0 55 size_t MaxCompressedLength(size_t source_len) {
michael@0 56 // Compressed data can be defined as:
michael@0 57 // compressed := item* literal*
michael@0 58 // item := literal* copy
michael@0 59 //
michael@0 60 // The trailing literal sequence has a space blowup of at most 62/60
michael@0 61 // since a literal of length 60 needs one tag byte + one extra byte
michael@0 62 // for length information.
michael@0 63 //
michael@0 64 // Item blowup is trickier to measure. Suppose the "copy" op copies
michael@0 65 // 4 bytes of data. Because of a special check in the encoding code,
michael@0 66 // we produce a 4-byte copy only if the offset is < 65536. Therefore
michael@0 67 // the copy op takes 3 bytes to encode, and this type of item leads
michael@0 68 // to at most the 62/60 blowup for representing literals.
michael@0 69 //
michael@0 70 // Suppose the "copy" op copies 5 bytes of data. If the offset is big
michael@0 71 // enough, it will take 5 bytes to encode the copy op. Therefore the
michael@0 72 // worst case here is a one-byte literal followed by a five-byte copy.
michael@0 73 // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
michael@0 74 //
michael@0 75 // This last factor dominates the blowup, so the final estimate is:
michael@0 76 return 32 + source_len + source_len/6;
michael@0 77 }
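// Worked example: for source_len = 60000 this bound is
// 32 + 60000 + 60000/6 = 70032 bytes, i.e. roughly a 17% worst-case
// expansion plus a 32-byte constant.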
michael@0 78
michael@0 79 enum {
michael@0 80 LITERAL = 0,
michael@0 81 COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
michael@0 82 COPY_2_BYTE_OFFSET = 2,
michael@0 83 COPY_4_BYTE_OFFSET = 3
michael@0 84 };
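// The low two bits of every tag byte select one of these four element types;
// the remaining bits carry length information and, for COPY_1_BYTE_OFFSET,
// part of the offset. For example (see EmitLiteral() and EmitCopyLessThan64()
// below), a 5-byte literal is tagged LITERAL | ((5-1) << 2) = 0x10, and a
// 64-byte copy with a two-byte offset is tagged
// COPY_2_BYTE_OFFSET | ((64-1) << 2) = 0xfe.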
michael@0 85
michael@0 86 // Copy "len" bytes from "src" to "op", one byte at a time. Used for
michael@0 87 // handling COPY operations where the input and output regions may
michael@0 88 // overlap. For example, suppose:
michael@0 89 // src == "ab"
michael@0 90 // op == src + 2
michael@0 91 // len == 20
michael@0 92 // After IncrementalCopy(src, op, len), the result will have
michael@0 93 // eleven copies of "ab"
michael@0 94 // ababababababababababab
michael@0 95 // Note that this does not match the semantics of either memcpy()
michael@0 96 // or memmove().
michael@0 97 static inline void IncrementalCopy(const char* src, char* op, int len) {
michael@0 98 DCHECK_GT(len, 0);
michael@0 99 do {
michael@0 100 *op++ = *src++;
michael@0 101 } while (--len > 0);
michael@0 102 }
michael@0 103
michael@0 104 // Equivalent to IncrementalCopy except that it can write up to ten extra
michael@0 105 // bytes after the end of the copy, and that it is faster.
michael@0 106 //
michael@0 107 // The main part of this loop is a simple copy of eight bytes at a time until
michael@0 108 // we've copied (at least) the requested amount of bytes. However, if op and
michael@0 109 // src are less than eight bytes apart (indicating a repeating pattern of
michael@0 110 // length < 8), we first need to expand the pattern in order to get the correct
michael@0 111 // results. For instance, if the buffer looks like this, with the eight-byte
michael@0 112 // <src> and <op> patterns marked as intervals:
michael@0 113 //
michael@0 114 // abxxxxxxxxxxxx
michael@0 115 // [------] src
michael@0 116 // [------] op
michael@0 117 //
michael@0 118 // a single eight-byte copy from <src> to <op> will repeat the pattern once,
michael@0 119 // after which we can move <op> two bytes without moving <src>:
michael@0 120 //
michael@0 121 // ababxxxxxxxxxx
michael@0 122 // [------] src
michael@0 123 // [------] op
michael@0 124 //
michael@0 125 // and repeat the exercise until the two no longer overlap.
michael@0 126 //
michael@0 127 // This allows us to do very well in the special case of one single byte
michael@0 128 // repeated many times, without taking a big hit for more general cases.
michael@0 129 //
michael@0 130 // The worst case of extra writing past the end of the match occurs when
michael@0 131 // op - src == 1 and len == 1; the last copy will read from byte positions
michael@0 132 // [0..7] and write to [4..11], whereas it was only supposed to write to
michael@0 133 // position 1. Thus, ten excess bytes.
michael@0 134
michael@0 135 namespace {
michael@0 136
michael@0 137 const int kMaxIncrementCopyOverflow = 10;
michael@0 138
michael@0 139 } // namespace
michael@0 140
michael@0 141 static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
michael@0 142 while (op - src < 8) {
michael@0 143 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
michael@0 144 len -= op - src;
michael@0 145 op += op - src;
michael@0 146 }
michael@0 147 while (len > 0) {
michael@0 148 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
michael@0 149 src += 8;
michael@0 150 op += 8;
michael@0 151 len -= 8;
michael@0 152 }
michael@0 153 }
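// Illustrative trace: with op - src == 2 and len == 6, the first eight-byte
// store doubles the two-byte pattern and advances op by 2 (len 4, distance 4);
// the second store advances op by 4 (len 0, distance 8), leaving nothing for
// the bulk loop. As noted above, up to kMaxIncrementCopyOverflow bytes past
// the requested end may be overwritten in the process.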
michael@0 154
michael@0 155 static inline char* EmitLiteral(char* op,
michael@0 156 const char* literal,
michael@0 157 int len,
michael@0 158 bool allow_fast_path) {
michael@0 159 int n = len - 1; // Zero-length literals are disallowed
michael@0 160 if (n < 60) {
michael@0 161 // Fits in tag byte
michael@0 162 *op++ = LITERAL | (n << 2);
michael@0 163
michael@0 164 // The vast majority of copies are below 16 bytes, for which a
michael@0 165 // call to memcpy is overkill. This fast path can sometimes
michael@0 166 // copy up to 15 bytes too much, but that is okay in the
michael@0 167 // main loop, since we have a bit to go on for both sides:
michael@0 168 //
michael@0 169 // - The input will always have kInputMarginBytes = 15 extra
michael@0 170 // available bytes, as long as we're in the main loop, and
michael@0 171 // if not, allow_fast_path = false.
michael@0 172 // - The output will always have 32 spare bytes (see
michael@0 173 // MaxCompressedLength).
michael@0 174 if (allow_fast_path && len <= 16) {
michael@0 175 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
michael@0 176 UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
michael@0 177 return op + len;
michael@0 178 }
michael@0 179 } else {
michael@0 180 // Encode in upcoming bytes
michael@0 181 char* base = op;
michael@0 182 int count = 0;
michael@0 183 op++;
michael@0 184 while (n > 0) {
michael@0 185 *op++ = n & 0xff;
michael@0 186 n >>= 8;
michael@0 187 count++;
michael@0 188 }
michael@0 189 assert(count >= 1);
michael@0 190 assert(count <= 4);
michael@0 191 *base = LITERAL | ((59+count) << 2);
michael@0 192 }
michael@0 193 memcpy(op, literal, len);
michael@0 194 return op + len;
michael@0 195 }
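// Worked example: a 100-byte literal takes the multi-byte form. n = 99 fits
// in a single extra byte, so count == 1 and the emitted encoding is the tag
// byte LITERAL | ((59 + 1) << 2) = 0xf0, the length byte 0x63 (99), and then
// the 100 literal bytes themselves.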
michael@0 196
michael@0 197 static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
michael@0 198 DCHECK_LE(len, 64);
michael@0 199 DCHECK_GE(len, 4);
michael@0 200 DCHECK_LT(offset, 65536);
michael@0 201
michael@0 202 if ((len < 12) && (offset < 2048)) {
michael@0 203 size_t len_minus_4 = len - 4;
michael@0 204 assert(len_minus_4 < 8); // Must fit in 3 bits
michael@0 205 *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
michael@0 206 *op++ = offset & 0xff;
michael@0 207 } else {
michael@0 208 *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
michael@0 209 LittleEndian::Store16(op, offset);
michael@0 210 op += 2;
michael@0 211 }
michael@0 212 return op;
michael@0 213 }
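// Worked examples: a copy of length 8 at offset 1000 meets the short-form
// condition (len < 12, offset < 2048) and is emitted as the tag byte
// COPY_1_BYTE_OFFSET | ((8-4) << 2) | ((1000 >> 8) << 5) = 0x71 followed by
// 1000 & 0xff = 0xe8. The same length at offset 3000 takes the two-byte form:
// tag COPY_2_BYTE_OFFSET | ((8-1) << 2) = 0x1e followed by the little-endian
// offset bytes 0xb8 0x0b.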
michael@0 214
michael@0 215 static inline char* EmitCopy(char* op, size_t offset, int len) {
michael@0 216 // Emit 64 byte copies but make sure to keep at least four bytes reserved
michael@0 217 while (len >= 68) {
michael@0 218 op = EmitCopyLessThan64(op, offset, 64);
michael@0 219 len -= 64;
michael@0 220 }
michael@0 221
michael@0 222 // Emit an extra 60 byte copy if we have too much data to fit in one copy
michael@0 223 if (len > 64) {
michael@0 224 op = EmitCopyLessThan64(op, offset, 60);
michael@0 225 len -= 60;
michael@0 226 }
michael@0 227
michael@0 228 // Emit remainder
michael@0 229 op = EmitCopyLessThan64(op, offset, len);
michael@0 230 return op;
michael@0 231 }
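// Worked example of the split above: len == 67 skips the first loop
// (67 < 68), takes the len > 64 branch and emits a 60-byte copy, leaving a
// 7-byte remainder. Emitting 64 bytes instead would have left only 3, below
// the 4-byte minimum EmitCopyLessThan64 requires.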
michael@0 232
michael@0 233
michael@0 234 bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
michael@0 235 uint32 v = 0;
michael@0 236 const char* limit = start + n;
michael@0 237 if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
michael@0 238 *result = v;
michael@0 239 return true;
michael@0 240 } else {
michael@0 241 return false;
michael@0 242 }
michael@0 243 }
michael@0 244
michael@0 245 namespace internal {
michael@0 246 uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
michael@0 247 // Use smaller hash table when input.size() is smaller, since we
michael@0 248 // fill the table, incurring O(hash table size) overhead for
michael@0 249 // compression, and if the input is short, we won't need that
michael@0 250 // many hash table entries anyway.
michael@0 251 assert(kMaxHashTableSize >= 256);
michael@0 252 size_t htsize = 256;
michael@0 253 while (htsize < kMaxHashTableSize && htsize < input_size) {
michael@0 254 htsize <<= 1;
michael@0 255 }
michael@0 256 CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
michael@0 257 CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
michael@0 258
michael@0 259 uint16* table;
michael@0 260 if (htsize <= ARRAYSIZE(small_table_)) {
michael@0 261 table = small_table_;
michael@0 262 } else {
michael@0 263 if (large_table_ == NULL) {
michael@0 264 large_table_ = new uint16[kMaxHashTableSize];
michael@0 265 }
michael@0 266 table = large_table_;
michael@0 267 }
michael@0 268
michael@0 269 *table_size = htsize;
michael@0 270 memset(table, 0, htsize * sizeof(*table));
michael@0 271 return table;
michael@0 272 }
michael@0 273 } // end namespace internal
michael@0 274
michael@0 275 // For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
michael@0 276 // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
michael@0 277 // empirically found that overlapping loads such as
michael@0 278 // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
michael@0 279 // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
michael@0 280 static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
michael@0 281 DCHECK(0 <= offset && offset <= 4) << offset;
michael@0 282 return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
michael@0 283 }
michael@0 284
michael@0 285 // Flat array compression that does not emit the "uncompressed length"
michael@0 286 // prefix. Compresses "input" string to the "*op" buffer.
michael@0 287 //
michael@0 288 // REQUIRES: "input" is at most "kBlockSize" bytes long.
michael@0 289 // REQUIRES: "op" points to an array of memory that is at least
michael@0 290 // "MaxCompressedLength(input.size())" in size.
michael@0 291 // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
michael@0 292 // REQUIRES: "table_size" is a power of two
michael@0 293 //
michael@0 294 // Returns an "end" pointer into "op" buffer.
michael@0 295 // "end - op" is the compressed size of "input".
michael@0 296 namespace internal {
michael@0 297 char* CompressFragment(const char* input,
michael@0 298 size_t input_size,
michael@0 299 char* op,
michael@0 300 uint16* table,
michael@0 301 const int table_size) {
michael@0 302 // "ip" is the input pointer, and "op" is the output pointer.
michael@0 303 const char* ip = input;
michael@0 304 CHECK_LE(input_size, kBlockSize);
michael@0 305 CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
michael@0 306 const int shift = 32 - Bits::Log2Floor(table_size);
michael@0 307 DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
michael@0 308 const char* ip_end = input + input_size;
michael@0 309 const char* base_ip = ip;
michael@0 310 // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
michael@0 311 // [next_emit, ip_end) after the main loop.
michael@0 312 const char* next_emit = ip;
michael@0 313
michael@0 314 const size_t kInputMarginBytes = 15;
michael@0 315 if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
michael@0 316 const char* ip_limit = input + input_size - kInputMarginBytes;
michael@0 317
michael@0 318 for (uint32 next_hash = Hash(++ip, shift); ; ) {
michael@0 319 DCHECK_LT(next_emit, ip);
michael@0 320 // The body of this loop calls EmitLiteral once and then EmitCopy one or
michael@0 321 // more times. (The exception is that when we're close to exhausting
michael@0 322 // the input we goto emit_remainder.)
michael@0 323 //
michael@0 324 // In the first iteration of this loop we're just starting, so
michael@0 325 // there's nothing to copy, so calling EmitLiteral once is
michael@0 326 // necessary. And we only start a new iteration when the
michael@0 327 // current iteration has determined that a call to EmitLiteral will
michael@0 328 // precede the next call to EmitCopy (if any).
michael@0 329 //
michael@0 330 // Step 1: Scan forward in the input looking for a 4-byte-long match.
michael@0 331 // If we get close to exhausting the input then goto emit_remainder.
michael@0 332 //
michael@0 333 // Heuristic match skipping: If 32 bytes are scanned with no matches
michael@0 334 // found, start looking only at every other byte. If 32 more bytes are
michael@0 335 // scanned, look at every third byte, etc.. When a match is found,
michael@0 336 // immediately go back to looking at every byte. This is a small loss
michael@0 337 // (~5% performance, ~0.1% density) for compressible data due to more
michael@0 338 // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
michael@0 339 // win since the compressor quickly "realizes" the data is incompressible
michael@0 340 // and doesn't bother looking for matches everywhere.
michael@0 341 //
michael@0 342 // The "skip" variable keeps track of how many bytes there are since the
michael@0 343 // last match; dividing it by 32 (ie. right-shifting by five) gives the
michael@0 344 // number of bytes to move ahead for each iteration.
michael@0 345 uint32 skip = 32;
michael@0 346
michael@0 347 const char* next_ip = ip;
michael@0 348 const char* candidate;
michael@0 349 do {
michael@0 350 ip = next_ip;
michael@0 351 uint32 hash = next_hash;
michael@0 352 DCHECK_EQ(hash, Hash(ip, shift));
michael@0 353 uint32 bytes_between_hash_lookups = skip++ >> 5;
michael@0 354 next_ip = ip + bytes_between_hash_lookups;
michael@0 355 if (PREDICT_FALSE(next_ip > ip_limit)) {
michael@0 356 goto emit_remainder;
michael@0 357 }
michael@0 358 next_hash = Hash(next_ip, shift);
michael@0 359 candidate = base_ip + table[hash];
michael@0 360 DCHECK_GE(candidate, base_ip);
michael@0 361 DCHECK_LT(candidate, ip);
michael@0 362
michael@0 363 table[hash] = ip - base_ip;
michael@0 364 } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
michael@0 365 UNALIGNED_LOAD32(candidate)));
michael@0 366
michael@0 367 // Step 2: A 4-byte match has been found. We'll later see if more
michael@0 368 // than 4 bytes match. But, prior to the match, input
michael@0 369 // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
michael@0 370 DCHECK_LE(next_emit + 16, ip_end);
michael@0 371 op = EmitLiteral(op, next_emit, ip - next_emit, true);
michael@0 372
michael@0 373 // Step 3: Call EmitCopy, and then see if another EmitCopy could
michael@0 374 // be our next move. Repeat until we find no match for the
michael@0 375 // input immediately after what was consumed by the last EmitCopy call.
michael@0 376 //
michael@0 377 // If we exit this loop normally then we need to call EmitLiteral next,
michael@0 378 // though we don't yet know how big the literal will be. We handle that
michael@0 379 // by proceeding to the next iteration of the main loop. We also can exit
michael@0 380 // this loop via goto if we get close to exhausting the input.
michael@0 381 uint64 input_bytes = 0;
michael@0 382 uint32 candidate_bytes = 0;
michael@0 383
michael@0 384 do {
michael@0 385 // We have a 4-byte match at ip, and no need to emit any
michael@0 386 // "literal bytes" prior to ip.
michael@0 387 const char* base = ip;
michael@0 388 int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
michael@0 389 ip += matched;
michael@0 390 size_t offset = base - candidate;
michael@0 391 DCHECK_EQ(0, memcmp(base, candidate, matched));
michael@0 392 op = EmitCopy(op, offset, matched);
michael@0 393 // We could immediately start working at ip now, but to improve
michael@0 394 // compression we first update table[Hash(ip - 1, ...)].
michael@0 395 const char* insert_tail = ip - 1;
michael@0 396 next_emit = ip;
michael@0 397 if (PREDICT_FALSE(ip >= ip_limit)) {
michael@0 398 goto emit_remainder;
michael@0 399 }
michael@0 400 input_bytes = UNALIGNED_LOAD64(insert_tail);
michael@0 401 uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
michael@0 402 table[prev_hash] = ip - base_ip - 1;
michael@0 403 uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
michael@0 404 candidate = base_ip + table[cur_hash];
michael@0 405 candidate_bytes = UNALIGNED_LOAD32(candidate);
michael@0 406 table[cur_hash] = ip - base_ip;
michael@0 407 } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
michael@0 408
michael@0 409 next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
michael@0 410 ++ip;
michael@0 411 }
michael@0 412 }
michael@0 413
michael@0 414 emit_remainder:
michael@0 415 // Emit the remaining bytes as a literal
michael@0 416 if (next_emit < ip_end) {
michael@0 417 op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
michael@0 418 }
michael@0 419
michael@0 420 return op;
michael@0 421 }
michael@0 422 } // end namespace internal
michael@0 423
michael@0 424 // Signature of output types needed by decompression code.
michael@0 425 // The decompression code is templatized on a type that obeys this
michael@0 426 // signature so that we do not pay virtual function call overhead in
michael@0 427 // the middle of a tight decompression loop.
michael@0 428 //
michael@0 429 // class DecompressionWriter {
michael@0 430 // public:
michael@0 431 // // Called before decompression
michael@0 432 // void SetExpectedLength(size_t length);
michael@0 433 //
michael@0 434 // // Called after decompression
michael@0 435 // bool CheckLength() const;
michael@0 436 //
michael@0 437 // // Called repeatedly during decompression
michael@0 438 // bool Append(const char* ip, size_t length);
michael@0 439 // bool AppendFromSelf(uint32 offset, size_t length);
michael@0 440 //
michael@0 441 // // The difference between TryFastAppend and Append is that TryFastAppend
michael@0 442 // // is allowed to read up to <available> bytes from the input buffer,
michael@0 443 // // whereas Append is allowed to read <length>.
michael@0 444 // //
michael@0 445 // // Also, TryFastAppend is allowed to return false, declining the append,
michael@0 446 // // without it being a fatal error -- just "return false" would be
michael@0 447 // // a perfectly legal implementation of TryFastAppend. The intention
michael@0 448 // // is for TryFastAppend to allow a fast path in the common case of
michael@0 449 // // a small append.
michael@0 450 // //
michael@0 451 // // NOTE(user): TryFastAppend must always decline (return false)
michael@0 452 // // if <length> is 61 or more, as in this case the literal length is not
michael@0 453 // // decoded fully. In practice, this should not be a big problem,
michael@0 454 // // as it is unlikely that one would implement a fast path accepting
michael@0 455 // // this much data.
michael@0 456 // bool TryFastAppend(const char* ip, size_t available, size_t length);
michael@0 457 // };
michael@0 458
michael@0 459 // -----------------------------------------------------------------------
michael@0 460 // Lookup table for decompression code. Generated by ComputeTable() below.
michael@0 461 // -----------------------------------------------------------------------
michael@0 462
michael@0 463 // Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
michael@0 464 static const uint32 wordmask[] = {
michael@0 465 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
michael@0 466 };
michael@0 467
michael@0 468 // Data stored per entry in lookup table:
michael@0 469 // Range Bits-used Description
michael@0 470 // ------------------------------------
michael@0 471 // 1..64 0..7 Literal/copy length encoded in opcode byte
michael@0 472 // 0..7 8..10 Copy offset encoded in opcode byte / 256
michael@0 473 // 0..4 11..13 Extra bytes after opcode
michael@0 474 //
michael@0 475 // For efficiency reasons we use eight bits for the length even though 7
michael@0 476 // would have sufficed:
michael@0 477 // (1) Extracting a byte is faster than a bit-field
michael@0 478 // (2) It properly aligns copy offset so we do not need a <<8
michael@0 479 static const uint16 char_table[256] = {
michael@0 480 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
michael@0 481 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
michael@0 482 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
michael@0 483 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
michael@0 484 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
michael@0 485 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
michael@0 486 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
michael@0 487 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
michael@0 488 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
michael@0 489 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
michael@0 490 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
michael@0 491 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
michael@0 492 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
michael@0 493 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
michael@0 494 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
michael@0 495 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
michael@0 496 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
michael@0 497 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
michael@0 498 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
michael@0 499 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
michael@0 500 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
michael@0 501 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
michael@0 502 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
michael@0 503 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
michael@0 504 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
michael@0 505 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
michael@0 506 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
michael@0 507 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
michael@0 508 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
michael@0 509 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
michael@0 510 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
michael@0 511 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
michael@0 512 };
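// Spot checks against MakeEntry() in the debug code below: char_table[0x10]
// == 0x0005, a plain 5-byte literal (length 5, no extra bytes), and
// char_table[0x71] == 0x0b08, an 8-byte COPY_1_BYTE_OFFSET whose tag already
// carries copy_offset/256 == 3 and which is followed by one extra offset byte.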
michael@0 513
michael@0 514 // In debug mode, allow optional computation of the table at startup.
michael@0 515 // Also, check that the decompression table is correct.
michael@0 516 #ifndef NDEBUG
michael@0 517 DEFINE_bool(snappy_dump_decompression_table, false,
michael@0 518 "If true, we print the decompression table at startup.");
michael@0 519
michael@0 520 static uint16 MakeEntry(unsigned int extra,
michael@0 521 unsigned int len,
michael@0 522 unsigned int copy_offset) {
michael@0 523 // Check that all of the fields fit within the allocated space
michael@0 524 DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
michael@0 525 DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
michael@0 526 DCHECK_EQ(len, len & 0x7f); // At most 7 bits
michael@0 527 return len | (copy_offset << 8) | (extra << 11);
michael@0 528 }
michael@0 529
michael@0 530 static void ComputeTable() {
michael@0 531 uint16 dst[256];
michael@0 532
michael@0 533 // Place invalid entries in all places to detect missing initialization
michael@0 534 int assigned = 0;
michael@0 535 for (int i = 0; i < 256; i++) {
michael@0 536 dst[i] = 0xffff;
michael@0 537 }
michael@0 538
michael@0 539 // Small LITERAL entries. We store (len-1) in the top 6 bits.
michael@0 540 for (unsigned int len = 1; len <= 60; len++) {
michael@0 541 dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
michael@0 542 assigned++;
michael@0 543 }
michael@0 544
michael@0 545 // Large LITERAL entries. We use 60..63 in the high 6 bits to
michael@0 546 // encode the number of bytes of length info that follow the opcode.
michael@0 547 for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
michael@0 548 // We set the length field in the lookup table to 1 because extra
michael@0 549 // bytes encode len-1.
michael@0 550 dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
michael@0 551 assigned++;
michael@0 552 }
michael@0 553
michael@0 554 // COPY_1_BYTE_OFFSET.
michael@0 555 //
michael@0 556 // The tag byte in the compressed data stores len-4 in 3 bits, and
michael@0 557 // offset/256 in 5 bits. offset%256 is stored in the next byte.
michael@0 558 //
michael@0 559 // This format is used for length in range [4..11] and offset in
michael@0 560 // range [0..2047]
michael@0 561 for (unsigned int len = 4; len < 12; len++) {
michael@0 562 for (unsigned int offset = 0; offset < 2048; offset += 256) {
michael@0 563 dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
michael@0 564 MakeEntry(1, len, offset>>8);
michael@0 565 assigned++;
michael@0 566 }
michael@0 567 }
michael@0 568
michael@0 569 // COPY_2_BYTE_OFFSET.
michael@0 570 // Tag contains len-1 in top 6 bits, and offset in next two bytes.
michael@0 571 for (unsigned int len = 1; len <= 64; len++) {
michael@0 572 dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
michael@0 573 assigned++;
michael@0 574 }
michael@0 575
michael@0 576 // COPY_4_BYTE_OFFSET.
michael@0 577 // Tag contains len-1 in top 6 bits, and offset in next four bytes.
michael@0 578 for (unsigned int len = 1; len <= 64; len++) {
michael@0 579 dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
michael@0 580 assigned++;
michael@0 581 }
michael@0 582
michael@0 583 // Check that each entry was initialized exactly once.
michael@0 584 CHECK_EQ(assigned, 256);
michael@0 585 for (int i = 0; i < 256; i++) {
michael@0 586 CHECK_NE(dst[i], 0xffff);
michael@0 587 }
michael@0 588
michael@0 589 if (FLAGS_snappy_dump_decompression_table) {
michael@0 590 printf("static const uint16 char_table[256] = {\n ");
michael@0 591 for (int i = 0; i < 256; i++) {
michael@0 592 printf("0x%04x%s",
michael@0 593 dst[i],
michael@0 594 ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));
michael@0 595 }
michael@0 596 printf("};\n");
michael@0 597 }
michael@0 598
michael@0 599 // Check that computed table matched recorded table
michael@0 600 for (int i = 0; i < 256; i++) {
michael@0 601 CHECK_EQ(dst[i], char_table[i]);
michael@0 602 }
michael@0 603 }
michael@0 604 #endif /* !NDEBUG */
michael@0 605
michael@0 606 // Helper class for decompression
michael@0 607 class SnappyDecompressor {
michael@0 608 private:
michael@0 609 Source* reader_; // Underlying source of bytes to decompress
michael@0 610 const char* ip_; // Points to next buffered byte
michael@0 611 const char* ip_limit_; // Points just past buffered bytes
michael@0 612 uint32 peeked_; // Bytes peeked from reader (need to skip)
michael@0 613 bool eof_; // Hit end of input without an error?
michael@0 614 char scratch_[5]; // Temporary buffer used by RefillTag() at fragment boundaries
michael@0 615
michael@0 616 // Ensure that all of the tag metadata for the next tag is available
michael@0 617 // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
michael@0 618 // if (ip_limit_ - ip_ < 5).
michael@0 619 //
michael@0 620 // Returns true on success, false on error or end of input.
michael@0 621 bool RefillTag();
michael@0 622
michael@0 623 public:
michael@0 624 explicit SnappyDecompressor(Source* reader)
michael@0 625 : reader_(reader),
michael@0 626 ip_(NULL),
michael@0 627 ip_limit_(NULL),
michael@0 628 peeked_(0),
michael@0 629 eof_(false) {
michael@0 630 }
michael@0 631
michael@0 632 ~SnappyDecompressor() {
michael@0 633 // Advance past any bytes we peeked at from the reader
michael@0 634 reader_->Skip(peeked_);
michael@0 635 }
michael@0 636
michael@0 637 // Returns true iff we have hit the end of the input without an error.
michael@0 638 bool eof() const {
michael@0 639 return eof_;
michael@0 640 }
michael@0 641
michael@0 642 // Read the uncompressed length stored at the start of the compressed data.
michael@0 643 // On success, stores the length in *result and returns true.
michael@0 644 // On failure, returns false.
michael@0 645 bool ReadUncompressedLength(uint32* result) {
michael@0 646 DCHECK(ip_ == NULL); // Must not have read anything yet
michael@0 647 // Length is encoded in 1..5 bytes
michael@0 648 *result = 0;
michael@0 649 uint32 shift = 0;
michael@0 650 while (true) {
michael@0 651 if (shift >= 32) return false;
michael@0 652 size_t n;
michael@0 653 const char* ip = reader_->Peek(&n);
michael@0 654 if (n == 0) return false;
michael@0 655 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
michael@0 656 reader_->Skip(1);
michael@0 657 *result |= static_cast<uint32>(c & 0x7f) << shift;
michael@0 658 if (c < 128) {
michael@0 659 break;
michael@0 660 }
michael@0 661 shift += 7;
michael@0 662 }
michael@0 663 return true;
michael@0 664 }
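// Worked example: an uncompressed length of 50000 arrives as the varint
// bytes 0xd0 0x86 0x03, decoded here as 0x50 + (0x06 << 7) + (0x03 << 14)
// = 50000; the high bit of every byte except the last marks "more to come".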
michael@0 665
michael@0 666 // Process all of the tags found in the input, passing the decoded data
michael@0 667 // to *writer. Returns when the input is exhausted, an error occurs, or
michael@0 668 // the writer declines an append.
michael@0 668 template <class Writer>
michael@0 669 void DecompressAllTags(Writer* writer) {
michael@0 670 const char* ip = ip_;
michael@0 671
michael@0 672 // We could have put this refill fragment only at the beginning of the loop.
michael@0 673 // However, duplicating it at the end of each branch gives the compiler more
michael@0 674 // scope to optimize the <ip_limit_ - ip> expression based on the local
michael@0 675 // context, which overall increases speed.
michael@0 676 #define MAYBE_REFILL() \
michael@0 677 if (ip_limit_ - ip < 5) { \
michael@0 678 ip_ = ip; \
michael@0 679 if (!RefillTag()) return; \
michael@0 680 ip = ip_; \
michael@0 681 }
michael@0 682
michael@0 683 MAYBE_REFILL();
michael@0 684 for ( ;; ) {
michael@0 685 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
michael@0 686
michael@0 687 if ((c & 0x3) == LITERAL) {
michael@0 688 size_t literal_length = (c >> 2) + 1u;
michael@0 689 if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
michael@0 690 DCHECK_LT(literal_length, 61);
michael@0 691 ip += literal_length;
michael@0 692 MAYBE_REFILL();
michael@0 693 continue;
michael@0 694 }
michael@0 695 if (PREDICT_FALSE(literal_length >= 61)) {
michael@0 696 // Long literal.
michael@0 697 const size_t literal_length_length = literal_length - 60;
michael@0 698 literal_length =
michael@0 699 (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
michael@0 700 ip += literal_length_length;
michael@0 701 }
michael@0 702
michael@0 703 size_t avail = ip_limit_ - ip;
michael@0 704 while (avail < literal_length) {
michael@0 705 if (!writer->Append(ip, avail)) return;
michael@0 706 literal_length -= avail;
michael@0 707 reader_->Skip(peeked_);
michael@0 708 size_t n;
michael@0 709 ip = reader_->Peek(&n);
michael@0 710 avail = n;
michael@0 711 peeked_ = avail;
michael@0 712 if (avail == 0) return; // Premature end of input
michael@0 713 ip_limit_ = ip + avail;
michael@0 714 }
michael@0 715 if (!writer->Append(ip, literal_length)) {
michael@0 716 return;
michael@0 717 }
michael@0 718 ip += literal_length;
michael@0 719 MAYBE_REFILL();
michael@0 720 } else {
michael@0 721 const uint32 entry = char_table[c];
michael@0 722 const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
michael@0 723 const uint32 length = entry & 0xff;
michael@0 724 ip += entry >> 11;
michael@0 725
michael@0 726 // copy_offset/256 is encoded in bits 8..10. By just fetching
michael@0 727 // those bits, we get copy_offset (since the bit-field starts at
michael@0 728 // bit 8).
michael@0 729 const uint32 copy_offset = entry & 0x700;
michael@0 730 if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
michael@0 731 return;
michael@0 732 }
michael@0 733 MAYBE_REFILL();
michael@0 734 }
michael@0 735 }
michael@0 736
michael@0 737 #undef MAYBE_REFILL
michael@0 738 }
michael@0 739 };
michael@0 740
michael@0 741 bool SnappyDecompressor::RefillTag() {
michael@0 742 const char* ip = ip_;
michael@0 743 if (ip == ip_limit_) {
michael@0 744 // Fetch a new fragment from the reader
michael@0 745 reader_->Skip(peeked_); // All peeked bytes are used up
michael@0 746 size_t n;
michael@0 747 ip = reader_->Peek(&n);
michael@0 748 peeked_ = n;
michael@0 749 if (n == 0) {
michael@0 750 eof_ = true;
michael@0 751 return false;
michael@0 752 }
michael@0 753 ip_limit_ = ip + n;
michael@0 754 }
michael@0 755
michael@0 756 // Read the tag character
michael@0 757 DCHECK_LT(ip, ip_limit_);
michael@0 758 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
michael@0 759 const uint32 entry = char_table[c];
michael@0 760 const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
michael@0 761 DCHECK_LE(needed, sizeof(scratch_));
michael@0 762
michael@0 763 // Read more bytes from reader if needed
michael@0 764 uint32 nbuf = ip_limit_ - ip;
michael@0 765 if (nbuf < needed) {
michael@0 766 // Stitch together bytes from ip and reader to form the word
michael@0 767 // contents. We store the needed bytes in "scratch_". They
michael@0 768 // will be consumed immediately by the caller since we do not
michael@0 769 // read more than we need.
michael@0 770 memmove(scratch_, ip, nbuf);
michael@0 771 reader_->Skip(peeked_); // All peeked bytes are used up
michael@0 772 peeked_ = 0;
michael@0 773 while (nbuf < needed) {
michael@0 774 size_t length;
michael@0 775 const char* src = reader_->Peek(&length);
michael@0 776 if (length == 0) return false;
michael@0 777 uint32 to_add = min<uint32>(needed - nbuf, length);
michael@0 778 memcpy(scratch_ + nbuf, src, to_add);
michael@0 779 nbuf += to_add;
michael@0 780 reader_->Skip(to_add);
michael@0 781 }
michael@0 782 DCHECK_EQ(nbuf, needed);
michael@0 783 ip_ = scratch_;
michael@0 784 ip_limit_ = scratch_ + needed;
michael@0 785 } else if (nbuf < 5) {
michael@0 786 // Have enough bytes, but move into scratch_ so that we do not
michael@0 787 // read past end of input
michael@0 788 memmove(scratch_, ip, nbuf);
michael@0 789 reader_->Skip(peeked_); // All peeked bytes are used up
michael@0 790 peeked_ = 0;
michael@0 791 ip_ = scratch_;
michael@0 792 ip_limit_ = scratch_ + nbuf;
michael@0 793 } else {
michael@0 794 // Pass pointer to buffer returned by reader_.
michael@0 795 ip_ = ip;
michael@0 796 }
michael@0 797 return true;
michael@0 798 }
michael@0 799
michael@0 800 template <typename Writer>
michael@0 801 static bool InternalUncompress(Source* r,
michael@0 802 Writer* writer,
michael@0 803 uint32 max_len) {
michael@0 804 // Read the uncompressed length from the front of the compressed input
michael@0 805 SnappyDecompressor decompressor(r);
michael@0 806 uint32 uncompressed_len = 0;
michael@0 807 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
michael@0 808 // Protect against possible DoS attack
michael@0 809 if (static_cast<uint64>(uncompressed_len) > max_len) {
michael@0 810 return false;
michael@0 811 }
michael@0 812
michael@0 813 writer->SetExpectedLength(uncompressed_len);
michael@0 814
michael@0 815 // Process the entire input
michael@0 816 decompressor.DecompressAllTags(writer);
michael@0 817 return (decompressor.eof() && writer->CheckLength());
michael@0 818 }
michael@0 819
michael@0 820 bool GetUncompressedLength(Source* source, uint32* result) {
michael@0 821 SnappyDecompressor decompressor(source);
michael@0 822 return decompressor.ReadUncompressedLength(result);
michael@0 823 }
michael@0 824
michael@0 825 size_t Compress(Source* reader, Sink* writer) {
michael@0 826 size_t written = 0;
michael@0 827 size_t N = reader->Available();
michael@0 828 char ulength[Varint::kMax32];
michael@0 829 char* p = Varint::Encode32(ulength, N);
michael@0 830 writer->Append(ulength, p-ulength);
michael@0 831 written += (p - ulength);
michael@0 832
michael@0 833 internal::WorkingMemory wmem;
michael@0 834 char* scratch = NULL;
michael@0 835 char* scratch_output = NULL;
michael@0 836
michael@0 837 while (N > 0) {
michael@0 838 // Get next block to compress (without copying if possible)
michael@0 839 size_t fragment_size;
michael@0 840 const char* fragment = reader->Peek(&fragment_size);
michael@0 841 DCHECK_NE(fragment_size, 0) << ": premature end of input";
michael@0 842 const size_t num_to_read = min(N, kBlockSize);
michael@0 843 size_t bytes_read = fragment_size;
michael@0 844
michael@0 845 size_t pending_advance = 0;
michael@0 846 if (bytes_read >= num_to_read) {
michael@0 847 // Buffer returned by reader is large enough
michael@0 848 pending_advance = num_to_read;
michael@0 849 fragment_size = num_to_read;
michael@0 850 } else {
michael@0 851 // Read into scratch buffer
michael@0 852 if (scratch == NULL) {
michael@0 853 // If this is the last iteration, we want to allocate N bytes
michael@0 854 // of space, otherwise the max possible kBlockSize space.
michael@0 855 // num_to_read contains exactly the correct value
michael@0 856 scratch = new char[num_to_read];
michael@0 857 }
michael@0 858 memcpy(scratch, fragment, bytes_read);
michael@0 859 reader->Skip(bytes_read);
michael@0 860
michael@0 861 while (bytes_read < num_to_read) {
michael@0 862 fragment = reader->Peek(&fragment_size);
michael@0 863 size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
michael@0 864 memcpy(scratch + bytes_read, fragment, n);
michael@0 865 bytes_read += n;
michael@0 866 reader->Skip(n);
michael@0 867 }
michael@0 868 DCHECK_EQ(bytes_read, num_to_read);
michael@0 869 fragment = scratch;
michael@0 870 fragment_size = num_to_read;
michael@0 871 }
michael@0 872 DCHECK_EQ(fragment_size, num_to_read);
michael@0 873
michael@0 874 // Get encoding table for compression
michael@0 875 int table_size;
michael@0 876 uint16* table = wmem.GetHashTable(num_to_read, &table_size);
michael@0 877
michael@0 878 // Compress input_fragment and append to dest
michael@0 879 const int max_output = MaxCompressedLength(num_to_read);
michael@0 880
michael@0 881 // Need a scratch buffer for the output, in case the byte sink doesn't
michael@0 882 // have room for us directly.
michael@0 883 if (scratch_output == NULL) {
michael@0 884 scratch_output = new char[max_output];
michael@0 885 } else {
michael@0 886 // Since we encode kBlockSize regions followed by a region
michael@0 887 // which is <= kBlockSize in length, a previously allocated
michael@0 888 // scratch_output[] region is big enough for this iteration.
michael@0 889 }
michael@0 890 char* dest = writer->GetAppendBuffer(max_output, scratch_output);
michael@0 891 char* end = internal::CompressFragment(fragment, fragment_size,
michael@0 892 dest, table, table_size);
michael@0 893 writer->Append(dest, end - dest);
michael@0 894 written += (end - dest);
michael@0 895
michael@0 896 N -= num_to_read;
michael@0 897 reader->Skip(pending_advance);
michael@0 898 }
michael@0 899
michael@0 900 delete[] scratch;
michael@0 901 delete[] scratch_output;
michael@0 902
michael@0 903 return written;
michael@0 904 }
michael@0 905
michael@0 906 // -----------------------------------------------------------------------
michael@0 907 // Flat array interfaces
michael@0 908 // -----------------------------------------------------------------------
michael@0 909
michael@0 910 // A type that writes to a flat array.
michael@0 911 // Note that this is not a "ByteSink", but a type that matches the
michael@0 912 // Writer template argument to SnappyDecompressor::DecompressAllTags().
michael@0 913 class SnappyArrayWriter {
michael@0 914 private:
michael@0 915 char* base_;
michael@0 916 char* op_;
michael@0 917 char* op_limit_;
michael@0 918
michael@0 919 public:
michael@0 920 inline explicit SnappyArrayWriter(char* dst)
michael@0 921 : base_(dst),
michael@0 922 op_(dst) {
michael@0 923 }
michael@0 924
michael@0 925 inline void SetExpectedLength(size_t len) {
michael@0 926 op_limit_ = op_ + len;
michael@0 927 }
michael@0 928
michael@0 929 inline bool CheckLength() const {
michael@0 930 return op_ == op_limit_;
michael@0 931 }
michael@0 932
michael@0 933 inline bool Append(const char* ip, size_t len) {
michael@0 934 char* op = op_;
michael@0 935 const size_t space_left = op_limit_ - op;
michael@0 936 if (space_left < len) {
michael@0 937 return false;
michael@0 938 }
michael@0 939 memcpy(op, ip, len);
michael@0 940 op_ = op + len;
michael@0 941 return true;
michael@0 942 }
michael@0 943
michael@0 944 inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
michael@0 945 char* op = op_;
michael@0 946 const size_t space_left = op_limit_ - op;
michael@0 947 if (len <= 16 && available >= 16 && space_left >= 16) {
michael@0 948 // Fast path, used for the majority (about 95%) of invocations.
michael@0 949 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
michael@0 950 UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
michael@0 951 op_ = op + len;
michael@0 952 return true;
michael@0 953 } else {
michael@0 954 return false;
michael@0 955 }
michael@0 956 }
michael@0 957
michael@0 958 inline bool AppendFromSelf(size_t offset, size_t len) {
michael@0 959 char* op = op_;
michael@0 960 const size_t space_left = op_limit_ - op;
michael@0 961
michael@0 962 if (op - base_ <= offset - 1u) { // -1u catches offset==0
michael@0 963 return false;
michael@0 964 }
michael@0 965 if (len <= 16 && offset >= 8 && space_left >= 16) {
michael@0 966 // Fast path, used for the majority (70-80%) of dynamic invocations.
michael@0 967 UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
michael@0 968 UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
michael@0 969 } else {
michael@0 970 if (space_left >= len + kMaxIncrementCopyOverflow) {
michael@0 971 IncrementalCopyFastPath(op - offset, op, len);
michael@0 972 } else {
michael@0 973 if (space_left < len) {
michael@0 974 return false;
michael@0 975 }
michael@0 976 IncrementalCopy(op - offset, op, len);
michael@0 977 }
michael@0 978 }
michael@0 979
michael@0 980 op_ = op + len;
michael@0 981 return true;
michael@0 982 }
michael@0 983 };
michael@0 984
michael@0 985 bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
michael@0 986 ByteArraySource reader(compressed, n);
michael@0 987 return RawUncompress(&reader, uncompressed);
michael@0 988 }
michael@0 989
michael@0 990 bool RawUncompress(Source* compressed, char* uncompressed) {
michael@0 991 SnappyArrayWriter output(uncompressed);
michael@0 992 return InternalUncompress(compressed, &output, kuint32max);
michael@0 993 }
michael@0 994
michael@0 995 bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
michael@0 996 size_t ulength;
michael@0 997 if (!GetUncompressedLength(compressed, n, &ulength)) {
michael@0 998 return false;
michael@0 999 }
michael@0 1000 // Protect against possible DoS attack
michael@0 1001 if ((static_cast<uint64>(ulength) + uncompressed->size()) >
michael@0 1002 uncompressed->max_size()) {
michael@0 1003 return false;
michael@0 1004 }
michael@0 1005 STLStringResizeUninitialized(uncompressed, ulength);
michael@0 1006 return RawUncompress(compressed, n, string_as_array(uncompressed));
michael@0 1007 }
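// A minimal round trip through the string-based interfaces in this file
// (illustrative sketch, error handling mostly omitted):
//
//   std::string original = ..., packed, unpacked;
//   snappy::Compress(original.data(), original.size(), &packed);
//   if (snappy::Uncompress(packed.data(), packed.size(), &unpacked)) {
//     assert(unpacked == original);
//   }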
michael@0 1008
michael@0 1009
michael@0 1010 // A Writer that drops everything on the floor and just does validation
michael@0 1011 class SnappyDecompressionValidator {
michael@0 1012 private:
michael@0 1013 size_t expected_;
michael@0 1014 size_t produced_;
michael@0 1015
michael@0 1016 public:
michael@0 1017 inline SnappyDecompressionValidator() : produced_(0) { }
michael@0 1018 inline void SetExpectedLength(size_t len) {
michael@0 1019 expected_ = len;
michael@0 1020 }
michael@0 1021 inline bool CheckLength() const {
michael@0 1022 return expected_ == produced_;
michael@0 1023 }
michael@0 1024 inline bool Append(const char* ip, size_t len) {
michael@0 1025 produced_ += len;
michael@0 1026 return produced_ <= expected_;
michael@0 1027 }
michael@0 1028 inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
michael@0 1029 return false;
michael@0 1030 }
michael@0 1031 inline bool AppendFromSelf(size_t offset, size_t len) {
michael@0 1032 if (produced_ <= offset - 1u) return false; // -1u catches offset==0
michael@0 1033 produced_ += len;
michael@0 1034 return produced_ <= expected_;
michael@0 1035 }
michael@0 1036 };
michael@0 1037
michael@0 1038 bool IsValidCompressedBuffer(const char* compressed, size_t n) {
michael@0 1039 ByteArraySource reader(compressed, n);
michael@0 1040 SnappyDecompressionValidator writer;
michael@0 1041 return InternalUncompress(&reader, &writer, kuint32max);
michael@0 1042 }
michael@0 1043
michael@0 1044 void RawCompress(const char* input,
michael@0 1045 size_t input_length,
michael@0 1046 char* compressed,
michael@0 1047 size_t* compressed_length) {
michael@0 1048 ByteArraySource reader(input, input_length);
michael@0 1049 UncheckedByteArraySink writer(compressed);
michael@0 1050 Compress(&reader, &writer);
michael@0 1051
michael@0 1052 // Compute how many bytes were added
michael@0 1053 *compressed_length = (writer.CurrentDestination() - compressed);
michael@0 1054 }
michael@0 1055
michael@0 1056 size_t Compress(const char* input, size_t input_length, string* compressed) {
michael@0 1057 // Pre-grow the buffer to the max length of the compressed output
michael@0 1058 compressed->resize(MaxCompressedLength(input_length));
michael@0 1059
michael@0 1060 size_t compressed_length;
michael@0 1061 RawCompress(input, input_length, string_as_array(compressed),
michael@0 1062 &compressed_length);
michael@0 1063 compressed->resize(compressed_length);
michael@0 1064 return compressed_length;
michael@0 1065 }
michael@0 1066
michael@0 1067
michael@0 1068 } // end namespace snappy
michael@0 1069
