other-licenses/snappy/src/snappy-test.cc

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Tue, 06 Jan 2015 21:39:09 +0100
branch:      TOR_BUG_9701
changeset:   8:97036ab72558
permissions: -rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate;
this solves Tor bug #9701, complying with the disk-avoidance requirement
documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.

#include "snappy-test.h"

#ifdef HAVE_WINDOWS_H
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif

#include <algorithm>

DEFINE_bool(run_microbenchmarks, true,
            "Run microbenchmarks before doing anything else.");

namespace snappy {

string ReadTestDataFile(const string& base) {
  string contents;
  const char* srcdir = getenv("srcdir");  // This is set by Automake.
  if (srcdir) {
    File::ReadFileToStringOrDie(
        string(srcdir) + "/testdata/" + base, &contents);
  } else {
    File::ReadFileToStringOrDie("testdata/" + base, &contents);
  }
  return contents;
}
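
// Note: vsnprintf silently truncates anything longer than the 4096-byte
// stack buffer below; callers in this test harness only format short
// strings, so a fixed buffer is sufficient here.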
string StringPrintf(const char* format, ...) {
  char buf[4096];
  va_list ap;
  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);
  return buf;
}

bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;

void ResetBenchmarkTiming() {
  benchmark_real_time_us = 0;
  benchmark_cpu_time_us = 0;
}
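
// Snapshots of wall-clock and CPU time taken by StartBenchmarkTiming()
// and consumed by StopBenchmarkTiming(). Windows uses
// QueryPerformanceCounter/GetProcessTimes; POSIX uses
// gettimeofday/getrusage.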
#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else  // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif  // WIN32

void StartBenchmarkTiming() {
#ifdef WIN32
  QueryPerformanceCounter(&benchmark_start_real);
  FILETIME dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
  gettimeofday(&benchmark_start_real, NULL);
  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
#endif
  benchmark_running = true;
}

void StopBenchmarkTiming() {
  if (!benchmark_running) {
    return;
  }

#ifdef WIN32
  LARGE_INTEGER benchmark_stop_real;
  LARGE_INTEGER benchmark_frequency;
  QueryPerformanceCounter(&benchmark_stop_real);
  QueryPerformanceFrequency(&benchmark_frequency);

  double elapsed_real = static_cast<double>(
      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
      benchmark_frequency.QuadPart;
  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;

  FILETIME benchmark_stop_cpu, dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));

  ULARGE_INTEGER start_ulargeint;
  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;

  ULARGE_INTEGER stop_ulargeint;
  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
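
  // FILETIME counts 100-nanosecond intervals; adding 5 before dividing
  // by 10 rounds the difference to the nearest microsecond.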
  benchmark_cpu_time_us +=
      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else  // WIN32
  struct timeval benchmark_stop_real;
  gettimeofday(&benchmark_stop_real, NULL);
  benchmark_real_time_us +=
      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
  benchmark_real_time_us +=
      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);

  struct rusage benchmark_stop_cpu;
  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
                                      benchmark_start_cpu.ru_utime.tv_sec);
  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
                            benchmark_start_cpu.ru_utime.tv_usec);
#endif  // WIN32

  benchmark_running = false;
}

void SetBenchmarkLabel(const string& str) {
  if (benchmark_label) {
    delete benchmark_label;
  }
  benchmark_label = new string(str);
}

void SetBenchmarkBytesProcessed(int64 bytes) {
  benchmark_bytes_processed = bytes;
}

struct BenchmarkRun {
  int64 real_time_us;
  int64 cpu_time_us;
};

struct BenchmarkCompareCPUTime {
  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
    return a.cpu_time_us < b.cpu_time_us;
  }
};

void Benchmark::Run() {
  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
    // Run a few iterations first to find out approximately how fast
    // the benchmark is.
    const int kCalibrateIterations = 100;
    ResetBenchmarkTiming();
    StartBenchmarkTiming();
    (*function_)(kCalibrateIterations, test_case_num);
    StopBenchmarkTiming();

    // Let each test case run for about 200ms, but at least as many
    // as we used to calibrate.
    // Run five times and pick the median.
    const int kNumRuns = 5;
    const int kMedianPos = kNumRuns / 2;
    int num_iterations = 0;
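    // benchmark_real_time_us now holds the wall time of the calibration
    // pass, so this scales kCalibrateIterations up to roughly 200 ms
    // (200000 us) worth of work.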
    if (benchmark_real_time_us > 0) {
      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
    }
    num_iterations = max(num_iterations, kCalibrateIterations);
    BenchmarkRun benchmark_runs[kNumRuns];

    for (int run = 0; run < kNumRuns; ++run) {
      ResetBenchmarkTiming();
      StartBenchmarkTiming();
      (*function_)(num_iterations, test_case_num);
      StopBenchmarkTiming();

      benchmark_runs[run].real_time_us = benchmark_real_time_us;
      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
    }
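
    // nth_element only partially sorts the runs, but that is enough to
    // leave the median (by CPU time) at kMedianPos.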
    nth_element(benchmark_runs,
                benchmark_runs + kMedianPos,
                benchmark_runs + kNumRuns,
                BenchmarkCompareCPUTime());
    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
    int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;

    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
    string human_readable_speed;
    if (bytes_per_second < 1024) {
      // bytes_per_second is int64 but the format uses %d; the value is
      // below 1024 in this branch, so narrowing is safe.
      human_readable_speed = StringPrintf(
          "%dB/s", static_cast<int>(bytes_per_second));
    } else if (bytes_per_second < 1024 * 1024) {
      human_readable_speed = StringPrintf(
          "%.1fkB/s", bytes_per_second / 1024.0f);
    } else if (bytes_per_second < 1024 * 1024 * 1024) {
      human_readable_speed = StringPrintf(
          "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
    } else {
      human_readable_speed = StringPrintf(
          "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
    }

    fprintf(stderr,
#ifdef WIN32
            "%-18s %10I64d %10I64d %10d %s  %s\n",
#else
            "%-18s %10lld %10lld %10d %s  %s\n",
#endif
            heading.c_str(),
            static_cast<long long>(real_time_us * 1000 / num_iterations),
            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
            num_iterations,
            human_readable_speed.c_str(),
            benchmark_label->c_str());
  }
}

#ifdef HAVE_LIBZ

ZLib::ZLib()
    : comp_init_(false),
      uncomp_init_(false) {
  Reinit();
}

ZLib::~ZLib() {
  if (comp_init_)   { deflateEnd(&comp_stream_); }
  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}

void ZLib::Reinit() {
  compression_level_ = Z_DEFAULT_COMPRESSION;
  window_bits_ = MAX_WBITS;
  mem_level_ =  8;  // DEF_MEM_LEVEL
  if (comp_init_) {
    deflateEnd(&comp_stream_);
    comp_init_ = false;
  }
  if (uncomp_init_) {
    inflateEnd(&uncomp_stream_);
    uncomp_init_ = false;
  }
  first_chunk_ = true;
}

void ZLib::Reset() {
  first_chunk_ = true;
}

// --------- COMPRESS MODE

// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
  deflateEnd(&comp_stream_);
  comp_init_ = false;
  Reset();
}

int ZLib::DeflateInit() {
  return deflateInit2(&comp_stream_,
                      compression_level_,
                      Z_DEFLATED,
                      window_bits_,
                      mem_level_,
                      Z_DEFAULT_STRATEGY);
}

int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
                       const Bytef *source, uLong *sourceLen) {
  int err;

  comp_stream_.next_in = (Bytef*)source;
  comp_stream_.avail_in = (uInt)*sourceLen;
  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
  comp_stream_.next_out = dest;
  comp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (comp_init_) {      // we've already initted it
    err = deflateReset(&comp_stream_);
    if (err != Z_OK) {
      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
      deflateEnd(&comp_stream_);
      comp_init_ = false;
    }
  }
  if (!comp_init_) {     // first use
    comp_stream_.zalloc = (alloc_func)0;
    comp_stream_.zfree = (free_func)0;
    comp_stream_.opaque = (voidpf)0;
    err = DeflateInit();
    if (err != Z_OK) return err;
    comp_init_ = true;
  }
  return Z_OK;
}

// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress().  Alas, we
// want to do chunked compression on our webserver.  In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer.  Thus we need to
// use the chunked compression features of zlib.
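//
// A minimal chunked-use sketch (the buffer and length names here are
// illustrative, not part of this file):
//
//   ZLib z;
//   uLongf out_len = sizeof(out_buf);
//   z.CompressChunkOrAll(out_buf, &out_len, header, header_len,
//                        Z_FULL_FLUSH);   // chunk emitted, stream stays open
//   ...
//   out_len = sizeof(out_buf);
//   z.CompressChunkOrAll(out_buf, &out_len, footer, footer_len,
//                        Z_FINISH);       // final chunk terminates the stream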
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                              const Bytef *source, uLong *sourceLen,
                              int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  int err;

  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
    return err;

  // This is used to figure out how many bytes we wrote *this chunk*
  int compressed_size = comp_stream_.total_out;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;
  }

  // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
  // compression.
  err = deflate(&comp_stream_, flush_mode);

  *sourceLen = comp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)
      && comp_stream_.avail_in == 0
      && comp_stream_.avail_out != 0 ) {
    // we processed everything ok and the output buffer was large enough.
    ;
  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
    return Z_BUF_ERROR;                            // should never happen
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    CompressErrorInit();
    return err;
  } else if (comp_stream_.avail_out == 0) {     // not enough space
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
  if (err == Z_STREAM_END)
    err = Z_OK;

  // update the crc and other metadata
  compressed_size = comp_stream_.total_out - compressed_size;  // delta
  *destLen = compressed_size;

  return err;
}

int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
                             const Bytef *source, uLong sourceLen,
                             int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  const int ret =
    CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    CompressErrorInit();
  return ret;
}

// This routine only initializes the compression stream once.  Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
                   const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
                               Z_FINISH)) != Z_OK )
    return err;
  Reset();         // reset for next call to Compress

  return Z_OK;
}


// --------- UNCOMPRESS MODE

int ZLib::InflateInit() {
  return inflateInit2(&uncomp_stream_, MAX_WBITS);
}

// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
  inflateEnd(&uncomp_stream_);
  uncomp_init_ = false;
  Reset();
}

int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
                         const Bytef *source, uLong *sourceLen) {
  int err;

  uncomp_stream_.next_in = (Bytef*)source;
  uncomp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;

  uncomp_stream_.next_out = dest;
  uncomp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (uncomp_init_) {    // we've already initted it
    err = inflateReset(&uncomp_stream_);
    if (err != Z_OK) {
      LOG(WARNING)
        << "ERROR: Can't reset uncompress object; creating a new one";
      UncompressErrorInit();
    }
  }
  if (!uncomp_init_) {
    uncomp_stream_.zalloc = (alloc_func)0;
    uncomp_stream_.zfree = (free_func)0;
    uncomp_stream_.opaque = (voidpf)0;
    err = InflateInit();
    if (err != Z_OK) return err;
    uncomp_init_ = true;
  }
  return Z_OK;
}

// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                                const Bytef *source, uLong *sourceLen,
                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  int err = Z_OK;

  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
                 << *sourceLen;
    return err;
  }

  // This is used to figure out how many output bytes we wrote *this chunk*:
  const uLong old_total_out = uncomp_stream_.total_out;

  // This is used to figure out how many input bytes we read *this chunk*:
  const uLong old_total_in = uncomp_stream_.total_in;

  // Some setup happens only for the first chunk we uncompress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;                          // so we don't do this again

    // For the first chunk *only* (to avoid infinite troubles), we let
    // there be no actual data to uncompress.  This sometimes triggers
    // when the input is only the gzip header, say.
    if ( *sourceLen == 0 ) {
      *destLen = 0;
      return Z_OK;
    }
  }

  // We'll uncompress as much as we can.  If we end OK great, otherwise
  // if we get an error that seems to be the gzip footer, we store the
  // gzip footer and return OK, otherwise we return the error.

  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
  err = inflate(&uncomp_stream_, flush_mode);

  // Figure out how many bytes of the input zlib slurped up:
  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
  CHECK_LE(source + bytes_read, source + *sourceLen);
  *sourceLen = uncomp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
             && uncomp_stream_.avail_in == 0) {    // and we read it all
    ;
  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
    LOG(WARNING)
      << "UncompressChunkOrAll: Received some extra data, bytes total: "
      << uncomp_stream_.avail_in << " bytes: "
      << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
                min(int(uncomp_stream_.avail_in), 20));
    UncompressErrorInit();
    return Z_DATA_ERROR;       // what's the extra data for?
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
                 << " avail_out: " << uncomp_stream_.avail_out;
    UncompressErrorInit();
    return err;
  } else if (uncomp_stream_.avail_out == 0) {
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
  if (err == Z_STREAM_END)
    err = Z_OK;

  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call

  return err;
}

int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
                               const Bytef *source, uLong sourceLen,
                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  const int ret =
    UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    UncompressErrorInit();
  return ret;
}

int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
                           const Bytef *source, uLong *sourceLen) {
  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}

// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary.  In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks.  We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
  assert(!first_chunk_ && uncomp_init_);
  // Make sure we're at the end-of-compressed-data point.  This means
  // if we call inflate with Z_FINISH we won't consume any input or
  // write any output
  Bytef dummyin, dummyout;
  uLongf dummylen = 0;
  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
       != Z_OK ) {
    return false;
  }

  // Make sure that when we exit, we can start a new round of chunks later
  Reset();

  return true;
}

// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once.  Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
                     const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
                                 Z_FINISH)) != Z_OK ) {
    Reset();                           // let us try to compress again
    return err;
  }
  if ( !UncompressChunkDone() )        // calls Reset()
    return Z_DATA_ERROR;
  return Z_OK;  // stream_end is ok
}
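
// A minimal one-shot round trip using the wrappers above (the buffer
// names and sizes here are illustrative, not part of this file):
//
//   ZLib z;
//   uLongf clen = sizeof(cbuf);
//   CHECK_EQ(Z_OK, z.Compress(cbuf, &clen, src, src_len));
//   uLongf ulen = sizeof(ubuf);   // ubuf must be able to hold the output
//   CHECK_EQ(Z_OK, z.Uncompress(ubuf, &ulen, cbuf, clen));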

#endif  // HAVE_LIBZ

}  // namespace snappy
