other-licenses/snappy/src/snappy.cc

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Tue, 06 Jan 2015 21:39:09 +0100
branch       TOR_BUG_9701
changeset    8:97036ab72558
permissions  -rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate;
this solves Tor bug #9701, complying with the disk-avoidance requirement
documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

// Copyright 2005 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "snappy.h"
#include "snappy-internal.h"
#include "snappy-sinksource.h"

#include <stdio.h>

#include <algorithm>
#include <string>
#include <vector>

namespace snappy {

// Any hash function will produce a valid compressed bitstream, but a good
// hash function reduces the number of collisions and thus yields better
// compression for compressible input, and more speed for incompressible
// input. Of course, it doesn't hurt if the hash function is reasonably fast
// either, as it gets called a lot.
static inline uint32 HashBytes(uint32 bytes, int shift) {
  uint32 kMul = 0x1e35a7bd;
  return (bytes * kMul) >> shift;
}
static inline uint32 Hash(const char* p, int shift) {
  return HashBytes(UNALIGNED_LOAD32(p), shift);
}

size_t MaxCompressedLength(size_t source_len) {
  // Compressed data can be defined as:
  //    compressed := item* literal*
  //    item       := literal* copy
  //
  // The trailing literal sequence has a space blowup of at most 62/60
  // since a literal of length 60 needs one tag byte + one extra byte
  // for length information.
  //
  // Item blowup is trickier to measure.  Suppose the "copy" op copies
  // 4 bytes of data.  Because of a special check in the encoding code,
  // we produce a 4-byte copy only if the offset is < 65536.  Therefore
  // the copy op takes 3 bytes to encode, and this type of item leads
  // to at most the 62/60 blowup for representing literals.
  //
  // Suppose the "copy" op copies 5 bytes of data.  If the offset is big
  // enough, it will take 5 bytes to encode the copy op.  Therefore the
  // worst case here is a one-byte literal followed by a five-byte copy.
  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
  //
  // This last factor dominates the blowup, so the final estimate is:
  return 32 + source_len + source_len/6;
}
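
// Worked example of the bound above (illustrative, not part of the original
// file): for a 4096-byte input, MaxCompressedLength returns
// 32 + 4096 + 4096/6 = 32 + 4096 + 682 = 4810, so a caller must supply at
// least 4810 bytes of output space before compressing such a block.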
enum {
  LITERAL = 0,
  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
  COPY_2_BYTE_OFFSET = 2,
  COPY_4_BYTE_OFFSET = 3
};
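
// Illustrative note on the tag-byte layout implied by this enum (not part
// of the original file): the low two bits of every tag byte select one of
// the four element types above, and the upper six bits carry type-specific
// data.  For example, a 12-byte literal is emitted by EmitLiteral below as
// the tag byte LITERAL | ((12 - 1) << 2) == 0x2c.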
// Copy "len" bytes from "src" to "op", one byte at a time.  Used for
// handling COPY operations where the input and output regions may
// overlap.  For example, suppose:
//    src    == "ab"
//    op     == src + 2
//    len    == 20
// After IncrementalCopy(src, op, len), the result will have
// eleven copies of "ab"
//    ababababababababababab
// Note that this does not match the semantics of either memcpy()
// or memmove().
static inline void IncrementalCopy(const char* src, char* op, int len) {
  DCHECK_GT(len, 0);
  do {
    *op++ = *src++;
  } while (--len > 0);
}
// Equivalent to IncrementalCopy except that it can write up to ten extra
// bytes after the end of the copy, and that it is faster.
//
// The main part of this loop is a simple copy of eight bytes at a time until
// we've copied (at least) the requested amount of bytes.  However, if op and
// src are less than eight bytes apart (indicating a repeating pattern of
// length < 8), we first need to expand the pattern in order to get the correct
// results. For instance, if the buffer looks like this, with the eight-byte
// <src> and <op> patterns marked as intervals:
//
//    abxxxxxxxxxxxx
//    [------]           src
//      [------]         op
//
// a single eight-byte copy from <src> to <op> will repeat the pattern once,
// after which we can move <op> two bytes without moving <src>:
//
//    ababxxxxxxxxxx
//    [------]           src
//        [------]       op
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// op - src == 1 and len == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.

namespace {

const int kMaxIncrementCopyOverflow = 10;

}  // namespace

static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
  while (op - src < 8) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    len -= op - src;
    op += op - src;
  }
  while (len > 0) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    src += 8;
    op += 8;
    len -= 8;
  }
}
static inline char* EmitLiteral(char* op,
                                const char* literal,
                                int len,
                                bool allow_fast_path) {
  int n = len - 1;      // Zero-length literals are disallowed
  if (n < 60) {
    // Fits in tag byte
    *op++ = LITERAL | (n << 2);

    // The vast majority of copies are below 16 bytes, for which a
    // call to memcpy is overkill. This fast path can sometimes
    // copy up to 15 bytes too much, but that is okay in the
    // main loop, since we have a bit to go on for both sides:
    //
    //   - The input will always have kInputMarginBytes = 15 extra
    //     available bytes, as long as we're in the main loop, and
    //     if not, allow_fast_path = false.
    //   - The output will always have 32 spare bytes (see
    //     MaxCompressedLength).
    if (allow_fast_path && len <= 16) {
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
      return op + len;
    }
  } else {
    // Encode in upcoming bytes
    char* base = op;
    int count = 0;
    op++;
    while (n > 0) {
      *op++ = n & 0xff;
      n >>= 8;
      count++;
    }
    assert(count >= 1);
    assert(count <= 4);
    *base = LITERAL | ((59+count) << 2);
  }
  memcpy(op, literal, len);
  return op + len;
}
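
// Two worked EmitLiteral encodings (illustrative, not part of the original
// file): a 5-byte literal fits in the tag byte, giving
// LITERAL | ((5 - 1) << 2) == 0x10 followed by the 5 payload bytes; a
// 100-byte literal takes the long form, giving LITERAL | ((59 + 1) << 2)
// == 0xf0, then the single length byte 99 (0x63), then the 100 payload
// bytes.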
static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
  DCHECK_LE(len, 64);
  DCHECK_GE(len, 4);
  DCHECK_LT(offset, 65536);

  if ((len < 12) && (offset < 2048)) {
    size_t len_minus_4 = len - 4;
    assert(len_minus_4 < 8);            // Must fit in 3 bits
    *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
    *op++ = offset & 0xff;
  } else {
    *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
    LittleEndian::Store16(op, offset);
    op += 2;
  }
  return op;
}
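
// Worked EmitCopyLessThan64 encodings (illustrative, not part of the
// original file): len == 7 with offset == 100 qualifies for the
// one-byte-offset form, giving the tag
// COPY_1_BYTE_OFFSET | ((7 - 4) << 2) | ((100 >> 8) << 5) == 0x0d followed
// by the offset byte 0x64; len == 40 with offset == 3000 falls through to
// the two-byte-offset form, giving the tag
// COPY_2_BYTE_OFFSET | ((40 - 1) << 2) == 0x9e followed by the
// little-endian offset bytes 0xb8 0x0b.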
static inline char* EmitCopy(char* op, size_t offset, int len) {
  // Emit 64-byte copies but make sure to keep at least four bytes reserved
  while (len >= 68) {
    op = EmitCopyLessThan64(op, offset, 64);
    len -= 64;
  }

  // Emit an extra 60-byte copy if we have too much data to fit in one copy
  if (len > 64) {
    op = EmitCopyLessThan64(op, offset, 60);
    len -= 60;
  }

  // Emit remainder
  op = EmitCopyLessThan64(op, offset, len);
  return op;
}
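
// Worked example of the splitting logic above (illustrative, not part of
// the original file): len == 150 is emitted as copies of 64 + 64 + 22,
// while len == 67 skips the loop (67 < 68) and is emitted as 60 + 7.  The
// 68 threshold and the 60-byte fallback both exist to keep the final
// remainder at least 4, which EmitCopyLessThan64 requires.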
bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
  uint32 v = 0;
  const char* limit = start + n;
  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
    *result = v;
    return true;
  } else {
    return false;
  }
}
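
// Illustrative varint preamble examples (not part of the original file):
// the single byte 0x64 decodes to an uncompressed length of 100, while the
// byte sequence 0x80 0x80 0x04 decodes to 65536; each byte contributes its
// low seven bits, and a set high bit means another byte follows.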
namespace internal {
uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
  // Use smaller hash table when input.size() is smaller, since we
  // fill the table, incurring O(hash table size) overhead for
  // compression, and if the input is short, we won't need that
  // many hash table entries anyway.
  assert(kMaxHashTableSize >= 256);
  size_t htsize = 256;
  while (htsize < kMaxHashTableSize && htsize < input_size) {
    htsize <<= 1;
  }
  CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
  CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";

  uint16* table;
  if (htsize <= ARRAYSIZE(small_table_)) {
    table = small_table_;
  } else {
    if (large_table_ == NULL) {
      large_table_ = new uint16[kMaxHashTableSize];
    }
    table = large_table_;
  }

  *table_size = htsize;
  memset(table, 0, htsize * sizeof(*table));
  return table;
}
}  // end namespace internal
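
// Worked sizing example for GetHashTable above (illustrative, and assuming
// kMaxHashTableSize, defined in snappy-internal.h, is at least 8192): an
// input of 5000 bytes doubles htsize from 256 up to 8192, the first power
// of two >= the input size, so short inputs pay only a proportionally
// small table-initialization cost.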
// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
// equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
// empirically found that overlapping loads such as
//  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
  DCHECK(0 <= offset && offset <= 4) << offset;
  return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
}
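
// Illustrative example (not part of the original file): if p points at the
// bytes "abcdefgh", then on a little-endian machine
// GetUint32AtOffset(UNALIGNED_LOAD64(p), 2) == UNALIGNED_LOAD32(p + 2),
// i.e. the 32-bit value formed by the bytes 'c' 'd' 'e' 'f', obtained with
// one 64-bit load plus a shift instead of a second memory access.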
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input" is at most "kBlockSize" bytes long.
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input.size())" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
namespace internal {
char* CompressFragment(const char* input,
                       size_t input_size,
                       char* op,
                       uint16* table,
                       const int table_size) {
  // "ip" is the input pointer, and "op" is the output pointer.
  const char* ip = input;
  CHECK_LE(input_size, kBlockSize);
  CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
  const int shift = 32 - Bits::Log2Floor(table_size);
  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
  const char* ip_end = input + input_size;
  const char* base_ip = ip;
  // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
  // [next_emit, ip_end) after the main loop.
  const char* next_emit = ip;

  const size_t kInputMarginBytes = 15;
  if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
    const char* ip_limit = input + input_size - kInputMarginBytes;

    for (uint32 next_hash = Hash(++ip, shift); ; ) {
      DCHECK_LT(next_emit, ip);
      // The body of this loop calls EmitLiteral once and then EmitCopy one or
      // more times.  (The exception is that when we're close to exhausting
      // the input we goto emit_remainder.)
      //
      // In the first iteration of this loop we're just starting, so
      // there's nothing to copy, so calling EmitLiteral once is
      // necessary.  And we only start a new iteration when the
      // current iteration has determined that a call to EmitLiteral will
      // precede the next call to EmitCopy (if any).
      //
      // Step 1: Scan forward in the input looking for a 4-byte-long match.
      // If we get close to exhausting the input then goto emit_remainder.
      //
      // Heuristic match skipping: If 32 bytes are scanned with no matches
      // found, start looking only at every other byte. If 32 more bytes are
      // scanned, look at every third byte, etc.. When a match is found,
      // immediately go back to looking at every byte. This is a small loss
      // (~5% performance, ~0.1% density) for compressible data due to more
      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
      // win since the compressor quickly "realizes" the data is incompressible
      // and doesn't bother looking for matches everywhere.
      //
      // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (ie. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
      uint32 skip = 32;
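      // Worked numbers for the skip heuristic (illustrative, not part of
      // the original file): skip starts at 32 and bytes_between_hash_lookups
      // is skip++ >> 5, so the first 32 misses advance ip by 1 byte each,
      // the next 32 misses by 2 bytes each, the next 32 by 3, and so on.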
      const char* next_ip = ip;
      const char* candidate;
      do {
        ip = next_ip;
        uint32 hash = next_hash;
        DCHECK_EQ(hash, Hash(ip, shift));
        uint32 bytes_between_hash_lookups = skip++ >> 5;
        next_ip = ip + bytes_between_hash_lookups;
        if (PREDICT_FALSE(next_ip > ip_limit)) {
          goto emit_remainder;
        }
        next_hash = Hash(next_ip, shift);
        candidate = base_ip + table[hash];
        DCHECK_GE(candidate, base_ip);
        DCHECK_LT(candidate, ip);

        table[hash] = ip - base_ip;
      } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
                            UNALIGNED_LOAD32(candidate)));

      // Step 2: A 4-byte match has been found.  We'll later see if more
      // than 4 bytes match.  But, prior to the match, input
      // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
      DCHECK_LE(next_emit + 16, ip_end);
      op = EmitLiteral(op, next_emit, ip - next_emit, true);

      // Step 3: Call EmitCopy, and then see if another EmitCopy could
      // be our next move.  Repeat until we find no match for the
      // input immediately after what was consumed by the last EmitCopy call.
      //
      // If we exit this loop normally then we need to call EmitLiteral next,
      // though we don't yet know how big the literal will be.  We handle that
      // by proceeding to the next iteration of the main loop.  We also can exit
      // this loop via goto if we get close to exhausting the input.
      uint64 input_bytes = 0;
      uint32 candidate_bytes = 0;

      do {
        // We have a 4-byte match at ip, and no need to emit any
        // "literal bytes" prior to ip.
        const char* base = ip;
        int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
        ip += matched;
        size_t offset = base - candidate;
        DCHECK_EQ(0, memcmp(base, candidate, matched));
        op = EmitCopy(op, offset, matched);
        // We could immediately start working at ip now, but to improve
        // compression we first update table[Hash(ip - 1, ...)].
        const char* insert_tail = ip - 1;
        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        input_bytes = UNALIGNED_LOAD64(insert_tail);
        uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
        table[prev_hash] = ip - base_ip - 1;
        uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
        candidate = base_ip + table[cur_hash];
        candidate_bytes = UNALIGNED_LOAD32(candidate);
        table[cur_hash] = ip - base_ip;
      } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);

      next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
      ++ip;
    }
  }

 emit_remainder:
  // Emit the remaining bytes as a literal
  if (next_emit < ip_end) {
    op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
  }

  return op;
}
}  // end namespace internal
// Signature of output types needed by decompression code.
// The decompression code is templatized on a type that obeys this
// signature so that we do not pay virtual function call overhead in
// the middle of a tight decompression loop.
//
// class DecompressionWriter {
//  public:
//   // Called before decompression
//   void SetExpectedLength(size_t length);
//
//   // Called after decompression
//   bool CheckLength() const;
//
//   // Called repeatedly during decompression
//   bool Append(const char* ip, size_t length);
//   bool AppendFromSelf(uint32 offset, size_t length);
//
//   // The difference between TryFastAppend and Append is that TryFastAppend
//   // is allowed to read up to <available> bytes from the input buffer,
//   // whereas Append is allowed to read <length>.
//   //
//   // Also, TryFastAppend is allowed to return false, declining the append,
//   // without it being a fatal error -- just "return false" would be
//   // a perfectly legal implementation of TryFastAppend. The intention
//   // is for TryFastAppend to allow a fast path in the common case of
//   // a small append.
//   //
//   // NOTE(user): TryFastAppend must always decline (return false)
//   // if <length> is 61 or more, as in this case the literal length is not
//   // decoded fully. In practice, this should not be a big problem,
//   // as it is unlikely that one would implement a fast path accepting
//   // this much data.
//   bool TryFastAppend(const char* ip, size_t available, size_t length);
// };
// -----------------------------------------------------------------------
// Lookup table for decompression code.  Generated by ComputeTable() below.
// -----------------------------------------------------------------------

// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
static const uint32 wordmask[] = {
  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
};
// Data stored per entry in lookup table:
//      Range   Bits-used       Description
//      ------------------------------------
//      1..64   0..7            Literal/copy length encoded in opcode byte
//      0..7    8..10           Copy offset encoded in opcode byte / 256
//      0..4    11..13          Extra bytes after opcode
//
// We use eight bits for the length even though 7 would have sufficed
// because of efficiency reasons:
//      (1) Extracting a byte is faster than a bit-field
//      (2) It properly aligns copy offset so we do not need a <<8
static const uint16 char_table[256] = {
  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
};
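
// Worked table-entry example (illustrative, not part of the original
// file): opcode byte 0x01 is COPY_1_BYTE_OFFSET with len - 4 == 0 and
// offset/256 == 0, and its entry char_table[1] == 0x0804 decodes
// accordingly: length 4 in bits 0..7, copy offset / 256 == 0 in bits
// 8..10, and one extra opcode-trailing byte in bits 11..13.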
// In debug mode, allow optional computation of the table at startup.
// Also, check that the decompression table is correct.
#ifndef NDEBUG
DEFINE_bool(snappy_dump_decompression_table, false,
            "If true, we print the decompression table at startup.");

static uint16 MakeEntry(unsigned int extra,
                        unsigned int len,
                        unsigned int copy_offset) {
  // Check that all of the fields fit within the allocated space
  DCHECK_EQ(extra,       extra & 0x7);          // At most 3 bits
  DCHECK_EQ(copy_offset, copy_offset & 0x7);    // At most 3 bits
  DCHECK_EQ(len,         len & 0x7f);           // At most 7 bits
  return len | (copy_offset << 8) | (extra << 11);
}

static void ComputeTable() {
  uint16 dst[256];

  // Place invalid entries in all places to detect missing initialization
  int assigned = 0;
  for (int i = 0; i < 256; i++) {
    dst[i] = 0xffff;
  }

  // Small LITERAL entries.  We store (len-1) in the top 6 bits.
  for (unsigned int len = 1; len <= 60; len++) {
    dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
    assigned++;
  }

  // Large LITERAL entries.  We use 60..63 in the high 6 bits to
  // encode the number of bytes of length info that follow the opcode.
  for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
    // We set the length field in the lookup table to 1 because extra
    // bytes encode len-1.
    dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
    assigned++;
  }

  // COPY_1_BYTE_OFFSET.
  //
  // The tag byte in the compressed data stores len-4 in 3 bits, and
  // offset/256 in 5 bits.  offset%256 is stored in the next byte.
  //
  // This format is used for length in range [4..11] and offset in
  // range [0..2047]
  for (unsigned int len = 4; len < 12; len++) {
    for (unsigned int offset = 0; offset < 2048; offset += 256) {
      dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
        MakeEntry(1, len, offset>>8);
      assigned++;
    }
  }

  // COPY_2_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next two bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
    assigned++;
  }
  // COPY_4_BYTE_OFFSET.
  // Tag contains len-1 in top 6 bits, and offset in next four bytes.
  for (unsigned int len = 1; len <= 64; len++) {
    dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
    assigned++;
  }

  // Check that each entry was initialized exactly once.
  CHECK_EQ(assigned, 256);
  for (int i = 0; i < 256; i++) {
    CHECK_NE(dst[i], 0xffff);
  }

  if (FLAGS_snappy_dump_decompression_table) {
    printf("static const uint16 char_table[256] = {\n  ");
    for (int i = 0; i < 256; i++) {
      printf("0x%04x%s",
             dst[i],
             ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n  " : ", ")));
    }
    printf("};\n");
  }

  // Check that the computed table matches the recorded table
  for (int i = 0; i < 256; i++) {
    CHECK_EQ(dst[i], char_table[i]);
  }
}
#endif /* !NDEBUG */
// Helper class for decompression
class SnappyDecompressor {
 private:
  Source*       reader_;         // Underlying source of bytes to decompress
  const char*   ip_;             // Points to next buffered byte
  const char*   ip_limit_;       // Points just past buffered bytes
  uint32        peeked_;         // Bytes peeked from reader (need to skip)
  bool          eof_;            // Hit end of input without an error?
  char          scratch_[5];     // Temporary buffer for PeekFast() boundaries

  // Ensure that all of the tag metadata for the next tag is available
  // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
  // if (ip_limit_ - ip_ < 5).
  //
  // Returns true on success, false on error or end of input.
  bool RefillTag();

 public:
  explicit SnappyDecompressor(Source* reader)
      : reader_(reader),
        ip_(NULL),
        ip_limit_(NULL),
        peeked_(0),
        eof_(false) {
  }

  ~SnappyDecompressor() {
    // Advance past any bytes we peeked at from the reader
    reader_->Skip(peeked_);
  }

  // Returns true iff we have hit the end of the input without an error.
  bool eof() const {
    return eof_;
  }
  // Read the uncompressed length stored at the start of the compressed data.
  // On success, stores the length in *result and returns true.
  // On failure, returns false.
  bool ReadUncompressedLength(uint32* result) {
    DCHECK(ip_ == NULL);       // Must not have read anything yet
    // Length is encoded in 1..5 bytes
    *result = 0;
    uint32 shift = 0;
    while (true) {
      if (shift >= 32) return false;
      size_t n;
      const char* ip = reader_->Peek(&n);
      if (n == 0) return false;
      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
      reader_->Skip(1);
      *result |= static_cast<uint32>(c & 0x7f) << shift;
      if (c < 128) {
        break;
      }
      shift += 7;
    }
    return true;
  }
  // Process all items found in the input.
  // Returns when the input is exhausted or an error occurs; callers should
  // check eof() and the writer's CheckLength() afterwards.
  template <class Writer>
  void DecompressAllTags(Writer* writer) {
    const char* ip = ip_;

    // We could have put this refill fragment only at the beginning of the loop.
    // However, duplicating it at the end of each branch gives the compiler more
    // scope to optimize the <ip_limit_ - ip> expression based on the local
    // context, which overall increases speed.
    #define MAYBE_REFILL() \
        if (ip_limit_ - ip < 5) { \
          ip_ = ip; \
          if (!RefillTag()) return; \
          ip = ip_; \
        }

    MAYBE_REFILL();
    for ( ;; ) {
      const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));

      if ((c & 0x3) == LITERAL) {
        size_t literal_length = (c >> 2) + 1u;
        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
          DCHECK_LT(literal_length, 61);
          ip += literal_length;
          MAYBE_REFILL();
          continue;
        }
        if (PREDICT_FALSE(literal_length >= 61)) {
          // Long literal.
          const size_t literal_length_length = literal_length - 60;
          literal_length =
              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
          ip += literal_length_length;
        }

        size_t avail = ip_limit_ - ip;
        while (avail < literal_length) {
          if (!writer->Append(ip, avail)) return;
          literal_length -= avail;
          reader_->Skip(peeked_);
          size_t n;
          ip = reader_->Peek(&n);
          avail = n;
          peeked_ = avail;
          if (avail == 0) return;  // Premature end of input
          ip_limit_ = ip + avail;
        }
        if (!writer->Append(ip, literal_length)) {
          return;
        }
        ip += literal_length;
        MAYBE_REFILL();
      } else {
        const uint32 entry = char_table[c];
        const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
        const uint32 length = entry & 0xff;
        ip += entry >> 11;

        // copy_offset/256 is encoded in bits 8..10.  By just fetching
        // those bits, we get copy_offset (since the bit-field starts at
        // bit 8).
        const uint32 copy_offset = entry & 0x700;
        if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
          return;
        }
        MAYBE_REFILL();
      }
    }

#undef MAYBE_REFILL
  }
};
bool SnappyDecompressor::RefillTag() {
  const char* ip = ip_;
  if (ip == ip_limit_) {
    // Fetch a new fragment from the reader
    reader_->Skip(peeked_);   // All peeked bytes are used up
    size_t n;
    ip = reader_->Peek(&n);
    peeked_ = n;
    if (n == 0) {
      eof_ = true;
      return false;
    }
    ip_limit_ = ip + n;
  }

  // Read the tag character
  DCHECK_LT(ip, ip_limit_);
  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
  const uint32 entry = char_table[c];
  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
  DCHECK_LE(needed, sizeof(scratch_));

  // Read more bytes from reader if needed
  uint32 nbuf = ip_limit_ - ip;
  if (nbuf < needed) {
    // Stitch together bytes from ip and reader to form the word
    // contents.  We store the needed bytes in "scratch_".  They
    // will be consumed immediately by the caller since we do not
    // read more than we need.
    memmove(scratch_, ip, nbuf);
    reader_->Skip(peeked_);  // All peeked bytes are used up
    peeked_ = 0;
    while (nbuf < needed) {
      size_t length;
      const char* src = reader_->Peek(&length);
      if (length == 0) return false;
      uint32 to_add = min<uint32>(needed - nbuf, length);
      memcpy(scratch_ + nbuf, src, to_add);
      nbuf += to_add;
      reader_->Skip(to_add);
    }
    DCHECK_EQ(nbuf, needed);
    ip_ = scratch_;
    ip_limit_ = scratch_ + needed;
  } else if (nbuf < 5) {
    // Have enough bytes, but move into scratch_ so that we do not
    // read past end of input
    memmove(scratch_, ip, nbuf);
    reader_->Skip(peeked_);  // All peeked bytes are used up
    peeked_ = 0;
    ip_ = scratch_;
    ip_limit_ = scratch_ + nbuf;
  } else {
    // Pass pointer to buffer returned by reader_.
    ip_ = ip;
  }
  return true;
}
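
// Worked RefillTag example (illustrative, not part of the original file):
// suppose the next opcode is a COPY_2_BYTE_OFFSET tag, so needed == 3 (the
// tag byte plus two offset bytes), but only the tag byte remains in the
// current fragment.  The one buffered byte is moved into scratch_ and the
// loop above pulls the remaining two bytes from the reader, so the caller
// can parse the whole tag from one contiguous buffer.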
template <typename Writer>
static bool InternalUncompress(Source* r,
                               Writer* writer,
                               uint32 max_len) {
  // Read the uncompressed length from the front of the compressed input
  SnappyDecompressor decompressor(r);
  uint32 uncompressed_len = 0;
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
  // Protect against possible DoS attack
  if (static_cast<uint64>(uncompressed_len) > max_len) {
    return false;
  }

  writer->SetExpectedLength(uncompressed_len);

  // Process the entire input
  decompressor.DecompressAllTags(writer);
  return (decompressor.eof() && writer->CheckLength());
}

bool GetUncompressedLength(Source* source, uint32* result) {
  SnappyDecompressor decompressor(source);
  return decompressor.ReadUncompressedLength(result);
}
size_t Compress(Source* reader, Sink* writer) {
  size_t written = 0;
  size_t N = reader->Available();
  char ulength[Varint::kMax32];
  char* p = Varint::Encode32(ulength, N);
  writer->Append(ulength, p-ulength);
  written += (p - ulength);

  internal::WorkingMemory wmem;
  char* scratch = NULL;
  char* scratch_output = NULL;

  while (N > 0) {
    // Get next block to compress (without copying if possible)
    size_t fragment_size;
    const char* fragment = reader->Peek(&fragment_size);
    DCHECK_NE(fragment_size, 0) << ": premature end of input";
    const size_t num_to_read = min(N, kBlockSize);
    size_t bytes_read = fragment_size;

    size_t pending_advance = 0;
    if (bytes_read >= num_to_read) {
      // Buffer returned by reader is large enough
      pending_advance = num_to_read;
      fragment_size = num_to_read;
    } else {
      // Read into scratch buffer
      if (scratch == NULL) {
        // If this is the last iteration, we want to allocate N bytes
        // of space, otherwise the max possible kBlockSize space.
        // num_to_read contains exactly the correct value
        scratch = new char[num_to_read];
      }
      memcpy(scratch, fragment, bytes_read);
      reader->Skip(bytes_read);

      while (bytes_read < num_to_read) {
        fragment = reader->Peek(&fragment_size);
        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
        memcpy(scratch + bytes_read, fragment, n);
        bytes_read += n;
        reader->Skip(n);
      }
      DCHECK_EQ(bytes_read, num_to_read);
      fragment = scratch;
      fragment_size = num_to_read;
    }
    DCHECK_EQ(fragment_size, num_to_read);

    // Get encoding table for compression
    int table_size;
    uint16* table = wmem.GetHashTable(num_to_read, &table_size);

    // Compress input_fragment and append to dest
    const int max_output = MaxCompressedLength(num_to_read);

    // Need a scratch buffer for the output, in case the byte sink doesn't
    // have room for us directly.
    if (scratch_output == NULL) {
      scratch_output = new char[max_output];
    } else {
      // Since we encode kBlockSize regions followed by a region
      // which is <= kBlockSize in length, a previously allocated
      // scratch_output[] region is big enough for this iteration.
    }
    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
    char* end = internal::CompressFragment(fragment, fragment_size,
                                           dest, table, table_size);
    writer->Append(dest, end - dest);
    written += (end - dest);

    N -= num_to_read;
    reader->Skip(pending_advance);
  }

  delete[] scratch;
  delete[] scratch_output;

  return written;
}
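
// Worked block-loop example (illustrative, and assuming kBlockSize, defined
// in snappy.h, is 32768 at this revision): a 100000-byte input is consumed
// as fragments of 32768, 32768, 32768, and 1696 bytes, each compressed
// independently with its own hash table sized via GetHashTable, and each
// appended to the sink after the varint-encoded total length.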
// -----------------------------------------------------------------------
// Flat array interfaces
// -----------------------------------------------------------------------

// A type that writes to a flat array.
// Note that this is not a "ByteSink", but a type that matches the
// Writer template argument to SnappyDecompressor::DecompressAllTags().
class SnappyArrayWriter {
 private:
  char* base_;
  char* op_;
  char* op_limit_;

 public:
  inline explicit SnappyArrayWriter(char* dst)
      : base_(dst),
        op_(dst) {
  }

  inline void SetExpectedLength(size_t len) {
    op_limit_ = op_ + len;
  }

  inline bool CheckLength() const {
    return op_ == op_limit_;
  }

  inline bool Append(const char* ip, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;
    if (space_left < len) {
      return false;
    }
    memcpy(op, ip, len);
    op_ = op + len;
    return true;
  }

  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;
    if (len <= 16 && available >= 16 && space_left >= 16) {
      // Fast path, used for the majority (about 95%) of invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
      op_ = op + len;
      return true;
    } else {
      return false;
    }
  }

  inline bool AppendFromSelf(size_t offset, size_t len) {
    char* op = op_;
    const size_t space_left = op_limit_ - op;

    if (op - base_ <= offset - 1u) {  // -1u catches offset==0
      return false;
    }
    if (len <= 16 && offset >= 8 && space_left >= 16) {
      // Fast path, used for the majority (70-80%) of dynamic invocations.
      UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
      UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
    } else {
      if (space_left >= len + kMaxIncrementCopyOverflow) {
        IncrementalCopyFastPath(op - offset, op, len);
      } else {
        if (space_left < len) {
          return false;
        }
        IncrementalCopy(op - offset, op, len);
      }
    }

    op_ = op + len;
    return true;
  }
};
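
// Note on the "offset - 1u" guard in AppendFromSelf above (illustrative,
// not part of the original file): the subtraction is unsigned, so
// offset == 0 wraps to the maximum size_t value and the append is always
// rejected.  Likewise, with op - base_ == 5 and offset == 6 we get 5 <= 5,
// rejecting a copy that would read from before the start of the buffer,
// while offset == 5 (5 <= 4 is false) is allowed and reads from base_
// exactly.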
bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
  ByteArraySource reader(compressed, n);
  return RawUncompress(&reader, uncompressed);
}

bool RawUncompress(Source* compressed, char* uncompressed) {
  SnappyArrayWriter output(uncompressed);
  return InternalUncompress(compressed, &output, kuint32max);
}
bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
  size_t ulength;
  if (!GetUncompressedLength(compressed, n, &ulength)) {
    return false;
  }
  // Protect against possible DoS attack
  if ((static_cast<uint64>(ulength) + uncompressed->size()) >
      uncompressed->max_size()) {
    return false;
  }
  STLStringResizeUninitialized(uncompressed, ulength);
  return RawUncompress(compressed, n, string_as_array(uncompressed));
}
// A Writer that drops everything on the floor and just does validation
class SnappyDecompressionValidator {
 private:
  size_t expected_;
  size_t produced_;

 public:
  inline SnappyDecompressionValidator() : produced_(0) { }
  inline void SetExpectedLength(size_t len) {
    expected_ = len;
  }
  inline bool CheckLength() const {
    return expected_ == produced_;
  }
  inline bool Append(const char* ip, size_t len) {
    produced_ += len;
    return produced_ <= expected_;
  }
  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
    return false;
  }
  inline bool AppendFromSelf(size_t offset, size_t len) {
    if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
    produced_ += len;
    return produced_ <= expected_;
  }
};
bool IsValidCompressedBuffer(const char* compressed, size_t n) {
  ByteArraySource reader(compressed, n);
  SnappyDecompressionValidator writer;
  return InternalUncompress(&reader, &writer, kuint32max);
}

void RawCompress(const char* input,
                 size_t input_length,
                 char* compressed,
                 size_t* compressed_length) {
  ByteArraySource reader(input, input_length);
  UncheckedByteArraySink writer(compressed);
  Compress(&reader, &writer);

  // Compute how many bytes were added
  *compressed_length = (writer.CurrentDestination() - compressed);
}

size_t Compress(const char* input, size_t input_length, string* compressed) {
  // Pre-grow the buffer to the max length of the compressed output
  compressed->resize(MaxCompressedLength(input_length));

  size_t compressed_length;
  RawCompress(input, input_length, string_as_array(compressed),
              &compressed_length);
  compressed->resize(compressed_length);
  return compressed_length;
}

} // end namespace snappy
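
// Illustrative round-trip usage of the string API defined above (a sketch,
// not part of the original file; LoadSomeData is a hypothetical helper and
// error handling is omitted):
//
//   std::string original = LoadSomeData();
//   std::string compressed, restored;
//   snappy::Compress(original.data(), original.size(), &compressed);
//   if (snappy::Uncompress(compressed.data(), compressed.size(), &restored))
//     assert(restored == original);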
