Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | |
michael@0 | 2 | /* |
michael@0 | 3 | * Copyright 2010 The Android Open Source Project |
michael@0 | 4 | * |
michael@0 | 5 | * Use of this source code is governed by a BSD-style license that can be |
michael@0 | 6 | * found in the LICENSE file. |
michael@0 | 7 | */ |
michael@0 | 8 | |
michael@0 | 9 | |
michael@0 | 10 | #include "SkData.h" |
michael@0 | 11 | #include "SkFlate.h" |
michael@0 | 12 | #include "SkStream.h" |
michael@0 | 13 | |
#ifndef SK_HAS_ZLIB
// zlib is not available in this build: report that flate is unsupported and
// make every compression/decompression entry point fail cleanly by
// returning false.
bool SkFlate::HaveFlate() { return false; }
bool SkFlate::Deflate(SkStream*, SkWStream*) { return false; }
bool SkFlate::Deflate(const void*, size_t, SkWStream*) { return false; }
bool SkFlate::Deflate(const SkData*, SkWStream*) { return false; }
bool SkFlate::Inflate(SkStream*, SkWStream*) { return false; }
#else
michael@0 | 21 | |
michael@0 | 22 | // static |
michael@0 | 23 | bool SkFlate::HaveFlate() { |
michael@0 | 24 | return true; |
michael@0 | 25 | } |
michael@0 | 26 | |
namespace {

// NOTE(review): the zlib header is included inside this anonymous namespace,
// so its declarations become namespace-qualified in this translation unit —
// presumably intentional to keep them file-local; confirm before moving.
#ifdef SK_SYSTEM_ZLIB
#include <zlib.h>
#else
#include SK_ZLIB_INCLUDE
#endif

// static
// Size in bytes of the stack buffers used to pump data through zlib.
const size_t kBufferSize = 1024;
michael@0 | 38 | bool doFlate(bool compress, SkStream* src, SkWStream* dst) { |
michael@0 | 39 | uint8_t inputBuffer[kBufferSize]; |
michael@0 | 40 | uint8_t outputBuffer[kBufferSize]; |
michael@0 | 41 | z_stream flateData; |
michael@0 | 42 | flateData.zalloc = NULL; |
michael@0 | 43 | flateData.zfree = NULL; |
michael@0 | 44 | flateData.next_in = NULL; |
michael@0 | 45 | flateData.avail_in = 0; |
michael@0 | 46 | flateData.next_out = outputBuffer; |
michael@0 | 47 | flateData.avail_out = kBufferSize; |
michael@0 | 48 | int rc; |
michael@0 | 49 | if (compress) |
michael@0 | 50 | rc = deflateInit(&flateData, Z_DEFAULT_COMPRESSION); |
michael@0 | 51 | else |
michael@0 | 52 | rc = inflateInit(&flateData); |
michael@0 | 53 | if (rc != Z_OK) |
michael@0 | 54 | return false; |
michael@0 | 55 | |
michael@0 | 56 | uint8_t* input = (uint8_t*)src->getMemoryBase(); |
michael@0 | 57 | size_t inputLength = src->getLength(); |
michael@0 | 58 | if (input == NULL || inputLength == 0) { |
michael@0 | 59 | input = NULL; |
michael@0 | 60 | flateData.next_in = inputBuffer; |
michael@0 | 61 | flateData.avail_in = 0; |
michael@0 | 62 | } else { |
michael@0 | 63 | flateData.next_in = input; |
michael@0 | 64 | flateData.avail_in = SkToUInt(inputLength); |
michael@0 | 65 | } |
michael@0 | 66 | |
michael@0 | 67 | rc = Z_OK; |
michael@0 | 68 | while (true) { |
michael@0 | 69 | if (flateData.avail_out < kBufferSize) { |
michael@0 | 70 | if (!dst->write(outputBuffer, kBufferSize - flateData.avail_out)) { |
michael@0 | 71 | rc = Z_BUF_ERROR; |
michael@0 | 72 | break; |
michael@0 | 73 | } |
michael@0 | 74 | flateData.next_out = outputBuffer; |
michael@0 | 75 | flateData.avail_out = kBufferSize; |
michael@0 | 76 | } |
michael@0 | 77 | if (rc != Z_OK) |
michael@0 | 78 | break; |
michael@0 | 79 | if (flateData.avail_in == 0) { |
michael@0 | 80 | if (input != NULL) |
michael@0 | 81 | break; |
michael@0 | 82 | size_t read = src->read(&inputBuffer, kBufferSize); |
michael@0 | 83 | if (read == 0) |
michael@0 | 84 | break; |
michael@0 | 85 | flateData.next_in = inputBuffer; |
michael@0 | 86 | flateData.avail_in = SkToUInt(read); |
michael@0 | 87 | } |
michael@0 | 88 | if (compress) |
michael@0 | 89 | rc = deflate(&flateData, Z_NO_FLUSH); |
michael@0 | 90 | else |
michael@0 | 91 | rc = inflate(&flateData, Z_NO_FLUSH); |
michael@0 | 92 | } |
michael@0 | 93 | while (rc == Z_OK) { |
michael@0 | 94 | if (compress) |
michael@0 | 95 | rc = deflate(&flateData, Z_FINISH); |
michael@0 | 96 | else |
michael@0 | 97 | rc = inflate(&flateData, Z_FINISH); |
michael@0 | 98 | if (flateData.avail_out < kBufferSize) { |
michael@0 | 99 | if (!dst->write(outputBuffer, kBufferSize - flateData.avail_out)) |
michael@0 | 100 | return false; |
michael@0 | 101 | flateData.next_out = outputBuffer; |
michael@0 | 102 | flateData.avail_out = kBufferSize; |
michael@0 | 103 | } |
michael@0 | 104 | } |
michael@0 | 105 | |
michael@0 | 106 | if (compress) |
michael@0 | 107 | deflateEnd(&flateData); |
michael@0 | 108 | else |
michael@0 | 109 | inflateEnd(&flateData); |
michael@0 | 110 | if (rc == Z_STREAM_END) |
michael@0 | 111 | return true; |
michael@0 | 112 | return false; |
michael@0 | 113 | } |
michael@0 | 114 | |
michael@0 | 115 | } |
michael@0 | 116 | |
michael@0 | 117 | // static |
michael@0 | 118 | bool SkFlate::Deflate(SkStream* src, SkWStream* dst) { |
michael@0 | 119 | return doFlate(true, src, dst); |
michael@0 | 120 | } |
michael@0 | 121 | |
michael@0 | 122 | bool SkFlate::Deflate(const void* ptr, size_t len, SkWStream* dst) { |
michael@0 | 123 | SkMemoryStream stream(ptr, len); |
michael@0 | 124 | return doFlate(true, &stream, dst); |
michael@0 | 125 | } |
michael@0 | 126 | |
michael@0 | 127 | bool SkFlate::Deflate(const SkData* data, SkWStream* dst) { |
michael@0 | 128 | if (data) { |
michael@0 | 129 | SkMemoryStream stream(data->data(), data->size()); |
michael@0 | 130 | return doFlate(true, &stream, dst); |
michael@0 | 131 | } |
michael@0 | 132 | return false; |
michael@0 | 133 | } |
michael@0 | 134 | |
michael@0 | 135 | // static |
michael@0 | 136 | bool SkFlate::Inflate(SkStream* src, SkWStream* dst) { |
michael@0 | 137 | return doFlate(false, src, dst); |
michael@0 | 138 | } |
michael@0 | 139 | |
michael@0 | 140 | #endif |