// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.

#include "snappy-test.h"

#ifdef HAVE_WINDOWS_H
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif

#include <algorithm>

DEFINE_bool(run_microbenchmarks, true,
            "Run microbenchmarks before doing anything else.");

namespace snappy {

string ReadTestDataFile(const string& base) {
  string contents;
  const char* srcdir = getenv("srcdir");  // This is set by Automake.
  if (srcdir) {
    File::ReadFileToStringOrDie(
        string(srcdir) + "/testdata/" + base, &contents);
  } else {
    File::ReadFileToStringOrDie("testdata/" + base, &contents);
  }
  return contents;
}

string StringPrintf(const char* format, ...) {
  char buf[4096];
  va_list ap;
  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);
  return buf;
}

bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;

void ResetBenchmarkTiming() {
  benchmark_real_time_us = 0;
  benchmark_cpu_time_us = 0;
}

#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else  // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif  // WIN32

void StartBenchmarkTiming() {
#ifdef WIN32
  QueryPerformanceCounter(&benchmark_start_real);
  FILETIME dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
  gettimeofday(&benchmark_start_real, NULL);
  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
#endif
  benchmark_running = true;
}

void StopBenchmarkTiming() {
  if (!benchmark_running) {
    return;
  }

#ifdef WIN32
  LARGE_INTEGER benchmark_stop_real;
  LARGE_INTEGER benchmark_frequency;
  QueryPerformanceCounter(&benchmark_stop_real);
  QueryPerformanceFrequency(&benchmark_frequency);

  double elapsed_real = static_cast<double>(
      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
      benchmark_frequency.QuadPart;
  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;

  FILETIME benchmark_stop_cpu, dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));

  ULARGE_INTEGER start_ulargeint;
  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;

  ULARGE_INTEGER stop_ulargeint;
  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;

  benchmark_cpu_time_us +=
      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else  // WIN32
  struct timeval benchmark_stop_real;
  gettimeofday(&benchmark_stop_real, NULL);
  benchmark_real_time_us +=
      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
  benchmark_real_time_us +=
      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);

  struct rusage benchmark_stop_cpu;
  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
                                      benchmark_start_cpu.ru_utime.tv_sec);
  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
                            benchmark_start_cpu.ru_utime.tv_usec);
#endif  // WIN32

  benchmark_running = false;
}

void SetBenchmarkLabel(const string& str) {
  if (benchmark_label) {
    delete benchmark_label;
  }
  benchmark_label = new string(str);
}

void SetBenchmarkBytesProcessed(int64 bytes) {
  benchmark_bytes_processed = bytes;
}

struct BenchmarkRun {
  int64 real_time_us;
  int64 cpu_time_us;
};

struct BenchmarkCompareCPUTime {
  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
    return a.cpu_time_us < b.cpu_time_us;
  }
};

void Benchmark::Run() {
  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
    // Run a few iterations first to find out approximately how fast
    // the benchmark is.
    const int kCalibrateIterations = 100;
    ResetBenchmarkTiming();
    StartBenchmarkTiming();
    (*function_)(kCalibrateIterations, test_case_num);
    StopBenchmarkTiming();

    // Let each test case run for about 200ms, but at least as many
    // iterations as we used to calibrate.
    // Run five times and pick the median.
    const int kNumRuns = 5;
    const int kMedianPos = kNumRuns / 2;
    int num_iterations = 0;
    if (benchmark_real_time_us > 0) {
      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
    }
    num_iterations = max(num_iterations, kCalibrateIterations);
    BenchmarkRun benchmark_runs[kNumRuns];

    for (int run = 0; run < kNumRuns; ++run) {
      ResetBenchmarkTiming();
      StartBenchmarkTiming();
      (*function_)(num_iterations, test_case_num);
      StopBenchmarkTiming();

      benchmark_runs[run].real_time_us = benchmark_real_time_us;
      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
    }

    nth_element(benchmark_runs,
                benchmark_runs + kMedianPos,
                benchmark_runs + kNumRuns,
                BenchmarkCompareCPUTime());
    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
    int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;

    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
    string human_readable_speed;
    if (bytes_per_second < 1024) {
      // The value fits in an int here; cast explicitly so it matches %d
      // in the varargs call.
      human_readable_speed = StringPrintf(
          "%dB/s", static_cast<int>(bytes_per_second));
    } else if (bytes_per_second < 1024 * 1024) {
      human_readable_speed = StringPrintf(
          "%.1fkB/s", bytes_per_second / 1024.0f);
    } else if (bytes_per_second < 1024 * 1024 * 1024) {
      human_readable_speed = StringPrintf(
          "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
    } else {
      human_readable_speed = StringPrintf(
          "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
    }

    fprintf(stderr,
#ifdef WIN32
            "%-18s %10I64d %10I64d %10d %s %s\n",
#else
            "%-18s %10lld %10lld %10d %s %s\n",
#endif
            heading.c_str(),
            static_cast<long long>(real_time_us * 1000 / num_iterations),
            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
            num_iterations,
            human_readable_speed.c_str(),
            benchmark_label->c_str());
  }
}

#ifdef HAVE_LIBZ

ZLib::ZLib()
    : comp_init_(false),
      uncomp_init_(false) {
  Reinit();
}

ZLib::~ZLib() {
  if (comp_init_) { deflateEnd(&comp_stream_); }
  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}

void ZLib::Reinit() {
  compression_level_ = Z_DEFAULT_COMPRESSION;
  window_bits_ = MAX_WBITS;
  mem_level_ = 8;  // DEF_MEM_LEVEL
  if (comp_init_) {
    deflateEnd(&comp_stream_);
    comp_init_ = false;
  }
  if (uncomp_init_) {
    inflateEnd(&uncomp_stream_);
    uncomp_init_ = false;
  }
  first_chunk_ = true;
}

void ZLib::Reset() {
  first_chunk_ = true;
}

// --------- COMPRESS MODE

// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
  deflateEnd(&comp_stream_);
  comp_init_ = false;
  Reset();
}

int ZLib::DeflateInit() {
  return deflateInit2(&comp_stream_,
                      compression_level_,
                      Z_DEFLATED,
                      window_bits_,
                      mem_level_,
                      Z_DEFAULT_STRATEGY);
}

int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
                       const Bytef *source, uLong *sourceLen) {
  int err;

  comp_stream_.next_in = (Bytef*)source;
  comp_stream_.avail_in = (uInt)*sourceLen;
  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
  comp_stream_.next_out = dest;
  comp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (comp_init_) {      // we've already initted it
    err = deflateReset(&comp_stream_);
    if (err != Z_OK) {
      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
      deflateEnd(&comp_stream_);
      comp_init_ = false;
    }
  }
  if (!comp_init_) {     // first use
    comp_stream_.zalloc = (alloc_func)0;
    comp_stream_.zfree = (free_func)0;
    comp_stream_.opaque = (voidpf)0;
    err = DeflateInit();
    if (err != Z_OK) return err;
    comp_init_ = true;
  }
  return Z_OK;
}

// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress(). Alas, we
// want to do chunked compression on our webserver. In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer. Thus we need to
// use the chunked compression features of zlib.
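//
// For orientation only, a rough sketch of how a caller might drive that
// chunked path. The buffer names out/out_len/chunk/chunk_len/last_chunk/
// last_len are hypothetical placeholders; CompressChunkOrAll and the flush
// modes are the ones defined below, and compressBound() is standard zlib:
//
//   ZLib z;
//   uLongf out_len = compressBound(chunk_len);   // worst-case output size
//   z.CompressChunkOrAll(out, &out_len, chunk, chunk_len,
//                        Z_FULL_FLUSH);          // every chunk but the last
//   ...
//   z.CompressChunkOrAll(out, &out_len, last_chunk, last_len,
//                        Z_FINISH);              // final chunk ends the stream
//   // After each call, out_len holds the number of bytes actually written.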
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                              const Bytef *source, uLong *sourceLen,
                              int flush_mode) {  // Z_FULL_FLUSH or Z_FINISH
  int err;

  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
    return err;

  // This is used to figure out how many bytes we wrote *this chunk*
  int compressed_size = comp_stream_.total_out;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;
  }

  // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
  // compression.
  err = deflate(&comp_stream_, flush_mode);

  *sourceLen = comp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)
      && comp_stream_.avail_in == 0
      && comp_stream_.avail_out != 0 ) {
    // we processed everything ok and the output buffer was large enough.
    ;
  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
    return Z_BUF_ERROR;                            // should never happen
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    CompressErrorInit();
    return err;
  } else if (comp_stream_.avail_out == 0) {        // not enough space
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
  if (err == Z_STREAM_END)
    err = Z_OK;

  // update the crc and other metadata
  compressed_size = comp_stream_.total_out - compressed_size;  // delta
  *destLen = compressed_size;

  return err;
}

int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
                             const Bytef *source, uLong sourceLen,
                             int flush_mode) {  // Z_FULL_FLUSH or Z_FINISH
  const int ret =
      CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    CompressErrorInit();
  return ret;
}

// This routine only initializes the compression stream once. Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
                   const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
                               Z_FINISH)) != Z_OK )
    return err;
  Reset();  // reset for next call to Compress

  return Z_OK;
}


// --------- UNCOMPRESS MODE

int ZLib::InflateInit() {
  return inflateInit2(&uncomp_stream_, MAX_WBITS);
}

// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
  inflateEnd(&uncomp_stream_);
  uncomp_init_ = false;
  Reset();
}

int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
                         const Bytef *source, uLong *sourceLen) {
  int err;

  uncomp_stream_.next_in = (Bytef*)source;
  uncomp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;

  uncomp_stream_.next_out = dest;
  uncomp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (uncomp_init_) {    // we've already initted it
    err = inflateReset(&uncomp_stream_);
    if (err != Z_OK) {
      LOG(WARNING)
          << "ERROR: Can't reset uncompress object; creating a new one";
      UncompressErrorInit();
    }
  }
  if (!uncomp_init_) {
    uncomp_stream_.zalloc = (alloc_func)0;
    uncomp_stream_.zfree = (free_func)0;
    uncomp_stream_.opaque = (voidpf)0;
    err = InflateInit();
    if (err != Z_OK) return err;
    uncomp_init_ = true;
  }
  return Z_OK;
}

// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                                const Bytef *source, uLong *sourceLen,
                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  int err = Z_OK;

  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
                 << *sourceLen;
    return err;
  }

  // This is used to figure out how many output bytes we wrote *this chunk*:
  const uLong old_total_out = uncomp_stream_.total_out;

  // This is used to figure out how many input bytes we read *this chunk*:
  const uLong old_total_in = uncomp_stream_.total_in;

  // Some setup happens only for the first chunk we uncompress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;  // so we don't do this again

    // For the first chunk *only* (to avoid infinite troubles), we let
    // there be no actual data to uncompress. This sometimes triggers
    // when the input is only the gzip header, say.
    if ( *sourceLen == 0 ) {
      *destLen = 0;
      return Z_OK;
    }
  }

  // We'll uncompress as much as we can. If we end OK great, otherwise
  // if we get an error that seems to be the gzip footer, we store the
  // gzip footer and return OK, otherwise we return the error.

  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
  err = inflate(&uncomp_stream_, flush_mode);

  // Figure out how many bytes of the input zlib slurped up:
  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
  CHECK_LE(source + bytes_read, source + *sourceLen);
  *sourceLen = uncomp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
      && uncomp_stream_.avail_in == 0) {    // and we read it all
    ;
  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
    LOG(WARNING)
        << "UncompressChunkOrAll: Received some extra data, bytes total: "
        << uncomp_stream_.avail_in << " bytes: "
        << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
                  min(int(uncomp_stream_.avail_in), 20));
    UncompressErrorInit();
    return Z_DATA_ERROR;  // what's the extra data for?
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
                 << " avail_out: " << uncomp_stream_.avail_out;
    UncompressErrorInit();
    return err;
  } else if (uncomp_stream_.avail_out == 0) {
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
  if (err == Z_STREAM_END)
    err = Z_OK;

  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call

  return err;
}

int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
                               const Bytef *source, uLong sourceLen,
                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  const int ret =
      UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    UncompressErrorInit();
  return ret;
}

int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
                           const Bytef *source, uLong *sourceLen) {
  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}

// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks. We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
  assert(!first_chunk_ && uncomp_init_);
  // Make sure we're at the end-of-compressed-data point. This means
  // if we call inflate with Z_FINISH we won't consume any input or
  // write any output
  Bytef dummyin, dummyout;
  uLongf dummylen = 0;
  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
       != Z_OK ) {
    return false;
  }

  // Make sure that when we exit, we can start a new round of chunks later
  Reset();

  return true;
}
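// A rough usage sketch of the chunked uncompression protocol above. The
// names in/in_len/out/out_len are hypothetical placeholders; the calls and
// flush modes are the ones defined in this file:
//
//   ZLib z;
//   uLongf out_len = sizeof(out);
//   z.UncompressChunkOrAll(out, &out_len, in, in_len,
//                          Z_SYNC_FLUSH);       // once per incoming chunk
//   ...
//   CHECK(z.UncompressChunkDone());             // verifies end-of-stream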
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once. Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
                     const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
                                 Z_FINISH)) != Z_OK ) {
    Reset();  // let us try to uncompress again
    return err;
  }
  if ( !UncompressChunkDone() )  // calls Reset()
    return Z_DATA_ERROR;
  return Z_OK;  // stream_end is ok
}

#endif  // HAVE_LIBZ

}  // namespace snappy