mfbt/lz4.c

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Tue, 06 Jan 2015 21:39:09 +0100
branch      TOR_BUG_9701
changeset   8:97036ab72558
permissions -rw-r--r--

Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with the disk avoidance design documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

michael@0 1 /*
michael@0 2 LZ4 - Fast LZ compression algorithm
michael@0 3 Copyright (C) 2011-2014, Yann Collet.
michael@0 4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
michael@0 5
michael@0 6 Redistribution and use in source and binary forms, with or without
michael@0 7 modification, are permitted provided that the following conditions are
michael@0 8 met:
michael@0 9
michael@0 10 * Redistributions of source code must retain the above copyright
michael@0 11 notice, this list of conditions and the following disclaimer.
michael@0 12 * Redistributions in binary form must reproduce the above
michael@0 13 copyright notice, this list of conditions and the following disclaimer
michael@0 14 in the documentation and/or other materials provided with the
michael@0 15 distribution.
michael@0 16
michael@0 17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
michael@0 18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
michael@0 19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
michael@0 20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
michael@0 21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
michael@0 22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
michael@0 23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
michael@0 27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 28
michael@0 29 You can contact the author at :
michael@0 30 - LZ4 source repository : http://code.google.com/p/lz4/
michael@0 31 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
michael@0 32 */
michael@0 33
michael@0 34 /**************************************
michael@0 35 Tuning parameters
michael@0 36 **************************************/
michael@0 37 /*
michael@0 38 * HEAPMODE :
michael@0 39 * Select how the default compression functions allocate memory for their hash table :
michael@0 40 * on the stack (0 : default, fastest) or on the heap (1 : requires allocation via malloc()).
michael@0 41 */
michael@0 42 #define HEAPMODE 0
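/* For scale : assuming the default LZ4_MEMORY_USAGE of 14 from lz4.h, the hash
   table is 16 KB. HEAPMODE=1 trades one calloc()/free() pair per call for a
   correspondingly smaller stack frame. */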
michael@0 43
michael@0 44
michael@0 45 /**************************************
michael@0 46 CPU Feature Detection
michael@0 47 **************************************/
michael@0 48 /* 32 or 64 bits ? */
michael@0 49 #if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
michael@0 50 || defined(__powerpc64__) || defined(__powerpc64le__) \
michael@0 51 || defined(__ppc64__) || defined(__ppc64le__) \
michael@0 52 || defined(__PPC64__) || defined(__PPC64LE__) \
michael@0 53 || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) /* Detects 64 bits mode */
michael@0 54 # define LZ4_ARCH64 1
michael@0 55 #else
michael@0 56 # define LZ4_ARCH64 0
michael@0 57 #endif
michael@0 58
michael@0 59 /*
michael@0 60 * Little Endian or Big Endian ?
michael@0 61 * Override the #define below if you know your architecture's endianness
michael@0 62 */
michael@0 63 #include <stdlib.h> /* Apparently required to detect endianness */
michael@0 64 #if defined (__GLIBC__)
michael@0 65 # include <endian.h>
michael@0 66 # if (__BYTE_ORDER == __BIG_ENDIAN)
michael@0 67 # define LZ4_BIG_ENDIAN 1
michael@0 68 # endif
michael@0 69 #elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
michael@0 70 # define LZ4_BIG_ENDIAN 1
michael@0 71 #elif defined(__sparc) || defined(__sparc__) \
michael@0 72 || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
michael@0 73 || defined(__hpux) || defined(__hppa) \
michael@0 74 || defined(_MIPSEB) || defined(__s390__)
michael@0 75 # define LZ4_BIG_ENDIAN 1
michael@0 76 #else
michael@0 77 /* Little Endian assumed. PDP Endian and other very rare endian formats are unsupported. */
michael@0 78 #endif
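/*
 * For reference, a runtime probe of byte order (illustrative sketch only,
 * not used by this file) : on a little endian CPU the least significant
 * byte of an integer is stored first.
 */
#if 0
static int LZ4_isLittleEndian_probe(void)
{
    const union { unsigned u; unsigned char c[sizeof(unsigned)]; } one = { 1 };
    return one.c[0] == 1;   /* 1 on little endian, 0 on big endian */
}
#endif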
michael@0 79
michael@0 80 /*
michael@0 81 * Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
michael@0 82 * For other CPUs, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to guarantee aligned accesses.
michael@0 83 * If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
michael@0 84 */
michael@0 85 #if defined(__ARM_FEATURE_UNALIGNED)
michael@0 86 # define LZ4_FORCE_UNALIGNED_ACCESS 1
michael@0 87 #endif
michael@0 88
michael@0 89 /* Define this parameter if your target system or compiler does not support hardware bit count */
michael@0 90 #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
michael@0 91 # define LZ4_FORCE_SW_BITCOUNT
michael@0 92 #endif
michael@0 93
michael@0 94 /*
michael@0 95 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
michael@0 96 * This option may provide a modest performance boost on some big endian CPUs.
michael@0 97 * You may set this option to 1 if data will remain within a closed environment.
michael@0 98 * This option is useless on little endian CPUs (such as x86).
michael@0 99 */
michael@0 100
michael@0 101 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
michael@0 102
michael@0 103
michael@0 104 /**************************************
michael@0 105 Compiler Options
michael@0 106 **************************************/
michael@0 107 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
michael@0 108 /* "restrict" is a known keyword */
michael@0 109 #else
michael@0 110 # define restrict /* Disable restrict */
michael@0 111 #endif
michael@0 112
michael@0 113 #ifdef _MSC_VER /* Visual Studio */
michael@0 114 # define FORCE_INLINE static __forceinline
michael@0 115 # include <intrin.h> /* For Visual 2005 */
michael@0 116 # if LZ4_ARCH64 /* 64-bits */
michael@0 117 # pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
michael@0 118 # pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
michael@0 119 # else /* 32-bits */
michael@0 120 # pragma intrinsic(_BitScanForward) /* For Visual 2005 */
michael@0 121 # pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
michael@0 122 # endif
michael@0 123 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
michael@0 124 #else
michael@0 125 # ifdef __GNUC__
michael@0 126 # define FORCE_INLINE static inline __attribute__((always_inline))
michael@0 127 # else
michael@0 128 # define FORCE_INLINE static inline
michael@0 129 # endif
michael@0 130 #endif
michael@0 131
michael@0 132 #ifdef _MSC_VER /* Visual Studio */
michael@0 133 # define lz4_bswap16(x) _byteswap_ushort(x)
michael@0 134 #else
michael@0 135 # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
michael@0 136 #endif
michael@0 137
michael@0 138 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
michael@0 139
michael@0 140 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
michael@0 141 # define expect(expr,value) (__builtin_expect ((expr),(value)) )
michael@0 142 #else
michael@0 143 # define expect(expr,value) (expr)
michael@0 144 #endif
michael@0 145
michael@0 146 #define likely(expr) expect((expr) != 0, 1)
michael@0 147 #define unlikely(expr) expect((expr) != 0, 0)
michael@0 148
michael@0 149
michael@0 150 /**************************************
michael@0 151 Memory routines
michael@0 152 **************************************/
michael@0 153 #include <stdlib.h> /* malloc, calloc, free */
michael@0 154 #define ALLOCATOR(n,s) calloc(n,s)
michael@0 155 #define FREEMEM free
michael@0 156 #include <string.h> /* memset, memcpy */
michael@0 157 #define MEM_INIT memset
michael@0 158
michael@0 159
michael@0 160 /**************************************
michael@0 161 Includes
michael@0 162 **************************************/
michael@0 163 #include "lz4.h"
michael@0 164
michael@0 165
michael@0 166 /**************************************
michael@0 167 Basic Types
michael@0 168 **************************************/
michael@0 169 #if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
michael@0 170 # include <stdint.h>
michael@0 171 typedef uint8_t BYTE;
michael@0 172 typedef uint16_t U16;
michael@0 173 typedef uint32_t U32;
michael@0 174 typedef int32_t S32;
michael@0 175 typedef uint64_t U64;
michael@0 176 #else
michael@0 177 typedef unsigned char BYTE;
michael@0 178 typedef unsigned short U16;
michael@0 179 typedef unsigned int U32;
michael@0 180 typedef signed int S32;
michael@0 181 typedef unsigned long long U64;
michael@0 182 #endif
michael@0 183
michael@0 184 #if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
michael@0 185 # define _PACKED __attribute__ ((packed))
michael@0 186 #else
michael@0 187 # define _PACKED
michael@0 188 #endif
michael@0 189
michael@0 190 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
michael@0 191 # if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
michael@0 192 # pragma pack(1)
michael@0 193 # else
michael@0 194 # pragma pack(push, 1)
michael@0 195 # endif
michael@0 196 #endif
michael@0 197
michael@0 198 typedef struct { U16 v; } _PACKED U16_S;
michael@0 199 typedef struct { U32 v; } _PACKED U32_S;
michael@0 200 typedef struct { U64 v; } _PACKED U64_S;
michael@0 201 typedef struct {size_t v;} _PACKED size_t_S;
michael@0 202
michael@0 203 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
michael@0 204 # if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
michael@0 205 # pragma pack(0)
michael@0 206 # else
michael@0 207 # pragma pack(pop)
michael@0 208 # endif
michael@0 209 #endif
michael@0 210
michael@0 211 #define A16(x) (((U16_S *)(x))->v)
michael@0 212 #define A32(x) (((U32_S *)(x))->v)
michael@0 213 #define A64(x) (((U64_S *)(x))->v)
michael@0 214 #define AARCH(x) (((size_t_S *)(x))->v)
michael@0 215
michael@0 216
michael@0 217 /**************************************
michael@0 218 Constants
michael@0 219 **************************************/
michael@0 220 #define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
michael@0 221 #define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
michael@0 222 #define HASH_SIZE_U32 (1 << LZ4_HASHLOG)
michael@0 223
michael@0 224 #define MINMATCH 4
michael@0 225
michael@0 226 #define COPYLENGTH 8
michael@0 227 #define LASTLITERALS 5
michael@0 228 #define MFLIMIT (COPYLENGTH+MINMATCH)
michael@0 229 static const int LZ4_minLength = (MFLIMIT+1);
michael@0 230
michael@0 231 #define KB *(1U<<10)
michael@0 232 #define MB *(1U<<20)
michael@0 233 #define GB *(1U<<30)
michael@0 234
michael@0 235 #define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
michael@0 236 #define SKIPSTRENGTH 6 /* Increasing this value will make the compression run slower on incompressible data */
michael@0 237
michael@0 238 #define MAXD_LOG 16
michael@0 239 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
michael@0 240
michael@0 241 #define ML_BITS 4
michael@0 242 #define ML_MASK ((1U<<ML_BITS)-1)
michael@0 243 #define RUN_BITS (8-ML_BITS)
michael@0 244 #define RUN_MASK ((1U<<RUN_BITS)-1)
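/*
 * Sequence token layout (LZ4 block format) : the high ML_BITS of a token hold
 * the literal run length, the low ML_BITS hold the match length minus MINMATCH.
 * A field equal to its mask (RUN_MASK / ML_MASK) is extended by additional
 * bytes of value 255, until a byte < 255 terminates the length.
 */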
michael@0 245
michael@0 246
michael@0 247 /**************************************
michael@0 248 Structures and local types
michael@0 249 **************************************/
michael@0 250 typedef struct {
michael@0 251 U32 hashTable[HASH_SIZE_U32];
michael@0 252 U32 currentOffset;
michael@0 253 U32 initCheck;
michael@0 254 const BYTE* dictionary;
michael@0 255 const BYTE* bufferStart;
michael@0 256 U32 dictSize;
michael@0 257 } LZ4_stream_t_internal;
michael@0 258
michael@0 259 typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
michael@0 260 typedef enum { byPtr, byU32, byU16 } tableType_t;
michael@0 261
michael@0 262 typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
michael@0 263 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
michael@0 264
michael@0 265 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
michael@0 266 typedef enum { full = 0, partial = 1 } earlyEnd_directive;
michael@0 267
michael@0 268
michael@0 269 /**************************************
michael@0 270 Architecture-specific macros
michael@0 271 **************************************/
michael@0 272 #define STEPSIZE sizeof(size_t)
michael@0 273 #define LZ4_COPYSTEP(d,s) { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
michael@0 274 #define LZ4_COPY8(d,s) { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
michael@0 275
michael@0 276 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
michael@0 277 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
michael@0 278 # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
michael@0 279 #else /* Little Endian */
michael@0 280 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
michael@0 281 # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
michael@0 282 #endif
michael@0 283
michael@0 284
michael@0 285 /**************************************
michael@0 286 Macros
michael@0 287 **************************************/
michael@0 288 #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
michael@0 289 #if LZ4_ARCH64 || !defined(__GNUC__)
michael@0 290 # define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } /* at the end, d>=e; */
michael@0 291 #else
michael@0 292 # define LZ4_WILDCOPY(d,s,e) { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
michael@0 293 #endif
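/* Note : LZ4_WILDCOPY copies 8 bytes at a time and stops only once d >= e,
   so it may write up to 7 bytes beyond 'e'; callers rely on the COPYLENGTH /
   LASTLITERALS margins to keep that slack inside the output buffer. */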
michael@0 294
michael@0 295
michael@0 296 /****************************
michael@0 297 Private local functions
michael@0 298 ****************************/
michael@0 299 #if LZ4_ARCH64
michael@0 300
michael@0 301 int LZ4_NbCommonBytes (register U64 val)
michael@0 302 {
michael@0 303 # if defined(LZ4_BIG_ENDIAN)
michael@0 304 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 305 unsigned long r = 0;
michael@0 306 _BitScanReverse64( &r, val );
michael@0 307 return (int)(r>>3);
michael@0 308 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 309 return (__builtin_clzll(val) >> 3);
michael@0 310 # else
michael@0 311 int r;
michael@0 312 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
michael@0 313 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
michael@0 314 r += (!val);
michael@0 315 return r;
michael@0 316 # endif
michael@0 317 # else
michael@0 318 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 319 unsigned long r = 0;
michael@0 320 _BitScanForward64( &r, val );
michael@0 321 return (int)(r>>3);
michael@0 322 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 323 return (__builtin_ctzll(val) >> 3);
michael@0 324 # else
michael@0 325 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
michael@0 326 return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
michael@0 327 # endif
michael@0 328 # endif
michael@0 329 }
michael@0 330
michael@0 331 #else
michael@0 332
michael@0 333 int LZ4_NbCommonBytes (register U32 val)
michael@0 334 {
michael@0 335 # if defined(LZ4_BIG_ENDIAN)
michael@0 336 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 337 unsigned long r = 0;
michael@0 338 _BitScanReverse( &r, val );
michael@0 339 return (int)(r>>3);
michael@0 340 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 341 return (__builtin_clz(val) >> 3);
michael@0 342 # else
michael@0 343 int r;
michael@0 344 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
michael@0 345 r += (!val);
michael@0 346 return r;
michael@0 347 # endif
michael@0 348 # else
michael@0 349 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 350 unsigned long r;
michael@0 351 _BitScanForward( &r, val );
michael@0 352 return (int)(r>>3);
michael@0 353 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
michael@0 354 return (__builtin_ctz(val) >> 3);
michael@0 355 # else
michael@0 356 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
michael@0 357 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
michael@0 358 # endif
michael@0 359 # endif
michael@0 360 }
michael@0 361
michael@0 362 #endif
michael@0 363
michael@0 364
michael@0 365 /********************************
michael@0 366 Compression functions
michael@0 367 ********************************/
michael@0 368 int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
michael@0 369
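/* Multiplicative hash : 2654435761U is Knuth's golden-ratio prime (close to
   2^32 / phi), so the high bits of the product mix all four input bytes;
   the shift keeps the top LZ4_HASHLOG (or LZ4_HASHLOG+1) bits as the index. */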
michael@0 370 static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
michael@0 371 {
michael@0 372 if (tableType == byU16)
michael@0 373 return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
michael@0 374 else
michael@0 375 return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
michael@0 376 }
michael@0 377
michael@0 378 static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
michael@0 379
michael@0 380 static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
michael@0 381 {
michael@0 382 switch (tableType)
michael@0 383 {
michael@0 384 case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
michael@0 385 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
michael@0 386 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
michael@0 387 }
michael@0 388 }
michael@0 389
michael@0 390 static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
michael@0 391 {
michael@0 392 U32 h = LZ4_hashPosition(p, tableType);
michael@0 393 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
michael@0 394 }
michael@0 395
michael@0 396 static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
michael@0 397 {
michael@0 398 if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
michael@0 399 if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
michael@0 400 { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
michael@0 401 }
michael@0 402
michael@0 403 static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
michael@0 404 {
michael@0 405 U32 h = LZ4_hashPosition(p, tableType);
michael@0 406 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
michael@0 407 }
michael@0 408
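/* Counts the number of bytes common to pIn and pRef, reading STEPSIZE bytes
   at a time : the XOR of the two words is zero while they match, and
   LZ4_NbCommonBytes() locates the first differing byte inside the last word. */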
michael@0 409 static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
michael@0 410 {
michael@0 411 const BYTE* const pStart = pIn;
michael@0 412
michael@0 413 while (likely(pIn<pInLimit-(STEPSIZE-1)))
michael@0 414 {
michael@0 415 size_t diff = AARCH(pRef) ^ AARCH(pIn);
michael@0 416 if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
michael@0 417 pIn += LZ4_NbCommonBytes(diff);
michael@0 418 return (unsigned)(pIn - pStart);
michael@0 419 }
michael@0 420 if (sizeof(void*)==8) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
michael@0 421 if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
michael@0 422 if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;
michael@0 423
michael@0 424 return (unsigned)(pIn - pStart);
michael@0 425 }
michael@0 426
michael@0 427
michael@0 428 static int LZ4_compress_generic(
michael@0 429 void* ctx,
michael@0 430 const char* source,
michael@0 431 char* dest,
michael@0 432 int inputSize,
michael@0 433 int maxOutputSize,
michael@0 434
michael@0 435 limitedOutput_directive outputLimited,
michael@0 436 tableType_t tableType,
michael@0 437 dict_directive dict,
michael@0 438 dictIssue_directive dictIssue)
michael@0 439 {
michael@0 440 LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;
michael@0 441
michael@0 442 const BYTE* ip = (const BYTE*) source;
michael@0 443 const BYTE* base;
michael@0 444 const BYTE* lowLimit;
michael@0 445 const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
michael@0 446 const BYTE* const dictionary = dictPtr->dictionary;
michael@0 447 const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
michael@0 448 const size_t dictDelta = dictEnd - (const BYTE*)source;
michael@0 449 const BYTE* anchor = (const BYTE*) source;
michael@0 450 const BYTE* const iend = ip + inputSize;
michael@0 451 const BYTE* const mflimit = iend - MFLIMIT;
michael@0 452 const BYTE* const matchlimit = iend - LASTLITERALS;
michael@0 453
michael@0 454 BYTE* op = (BYTE*) dest;
michael@0 455 BYTE* const olimit = op + maxOutputSize;
michael@0 456
michael@0 457 const int skipStrength = SKIPSTRENGTH;
michael@0 458 U32 forwardH;
michael@0 459 size_t refDelta=0;
michael@0 460
michael@0 461 /* Init conditions */
michael@0 462 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
michael@0 463 switch(dict)
michael@0 464 {
michael@0 465 case noDict:
michael@0 466 default:
michael@0 467 base = (const BYTE*)source;
michael@0 468 lowLimit = (const BYTE*)source;
michael@0 469 break;
michael@0 470 case withPrefix64k:
michael@0 471 base = (const BYTE*)source - dictPtr->currentOffset;
michael@0 472 lowLimit = (const BYTE*)source - dictPtr->dictSize;
michael@0 473 break;
michael@0 474 case usingExtDict:
michael@0 475 base = (const BYTE*)source - dictPtr->currentOffset;
michael@0 476 lowLimit = (const BYTE*)source;
michael@0 477 break;
michael@0 478 }
michael@0 479 if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0; /* Size too large (not within 64K limit) */
michael@0 480 if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
michael@0 481
michael@0 482 /* First Byte */
michael@0 483 LZ4_putPosition(ip, ctx, tableType, base);
michael@0 484 ip++; forwardH = LZ4_hashPosition(ip, tableType);
michael@0 485
michael@0 486 /* Main Loop */
michael@0 487 for ( ; ; )
michael@0 488 {
michael@0 489 const BYTE* ref;
michael@0 490 BYTE* token;
michael@0 491 {
michael@0 492 const BYTE* forwardIp = ip;
michael@0 493 unsigned step=1;
michael@0 494 unsigned searchMatchNb = (1U << skipStrength);
michael@0 495
michael@0 496 /* Find a match */
michael@0 497 do {
michael@0 498 U32 h = forwardH;
michael@0 499 ip = forwardIp;
michael@0 500 forwardIp += step;
michael@0 501 step = searchMatchNb++ >> skipStrength;
michael@0 502 //if (step>8) step=8; // required for valid forwardIp ; slows down uncompressible data a bit
michael@0 503
michael@0 504 if (unlikely(forwardIp > mflimit)) goto _last_literals;
michael@0 505
michael@0 506 ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
michael@0 507 if (dict==usingExtDict)
michael@0 508 {
michael@0 509 if (ref<(const BYTE*)source)
michael@0 510 {
michael@0 511 refDelta = dictDelta;
michael@0 512 lowLimit = dictionary;
michael@0 513 }
michael@0 514 else
michael@0 515 {
michael@0 516 refDelta = 0;
michael@0 517 lowLimit = (const BYTE*)source;
michael@0 518 }
michael@0 519 }
michael@0 520 forwardH = LZ4_hashPosition(forwardIp, tableType);
michael@0 521 LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
michael@0 522
michael@0 523 } while ( ((dictIssue==dictSmall) ? (ref < lowRefLimit) : 0)
michael@0 524 || ((tableType==byU16) ? 0 : (ref + MAX_DISTANCE < ip))
michael@0 525 || (A32(ref+refDelta) != A32(ip)) );
michael@0 526 }
michael@0 527
michael@0 528 /* Catch up */
michael@0 529 while ((ip>anchor) && (ref+refDelta > lowLimit) && (unlikely(ip[-1]==ref[refDelta-1]))) { ip--; ref--; }
michael@0 530
michael@0 531 {
michael@0 532 /* Encode Literal length */
michael@0 533 unsigned litLength = (unsigned)(ip - anchor);
michael@0 534 token = op++;
michael@0 535 if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
michael@0 536 return 0; /* Check output limit */
michael@0 537 if (litLength>=RUN_MASK)
michael@0 538 {
michael@0 539 int len = (int)litLength-RUN_MASK;
michael@0 540 *token=(RUN_MASK<<ML_BITS);
michael@0 541 for(; len >= 255 ; len-=255) *op++ = 255;
michael@0 542 *op++ = (BYTE)len;
michael@0 543 }
michael@0 544 else *token = (BYTE)(litLength<<ML_BITS);
michael@0 545
michael@0 546 /* Copy Literals */
michael@0 547 { BYTE* end = op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
michael@0 548 }
michael@0 549
michael@0 550 _next_match:
michael@0 551 /* Encode Offset */
michael@0 552 LZ4_WRITE_LITTLEENDIAN_16(op, (U16)(ip-ref));
michael@0 553
michael@0 554 /* Encode MatchLength */
michael@0 555 {
michael@0 556 unsigned matchLength;
michael@0 557
michael@0 558 if ((dict==usingExtDict) && (lowLimit==dictionary))
michael@0 559 {
michael@0 560 const BYTE* limit;
michael@0 561 ref += refDelta;
michael@0 562 limit = ip + (dictEnd-ref);
michael@0 563 if (limit > matchlimit) limit = matchlimit;
michael@0 564 matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, limit);
michael@0 565 ip += MINMATCH + matchLength;
michael@0 566 if (ip==limit)
michael@0 567 {
michael@0 568 unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
michael@0 569 matchLength += more;
michael@0 570 ip += more;
michael@0 571 }
michael@0 572 }
michael@0 573 else
michael@0 574 {
michael@0 575 matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, matchlimit);
michael@0 576 ip += MINMATCH + matchLength;
michael@0 577 }
michael@0 578
michael@0 579 if (matchLength>=ML_MASK)
michael@0 580 {
michael@0 581 if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
michael@0 582 return 0; /* Check output limit */
michael@0 583 *token += ML_MASK;
michael@0 584 matchLength -= ML_MASK;
michael@0 585 for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
michael@0 586 if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
michael@0 587 *op++ = (BYTE)matchLength;
michael@0 588 }
michael@0 589 else *token += (BYTE)(matchLength);
michael@0 590 }
michael@0 591
michael@0 592 anchor = ip;
michael@0 593
michael@0 594 /* Test end of chunk */
michael@0 595 if (ip > mflimit) break;
michael@0 596
michael@0 597 /* Fill table */
michael@0 598 LZ4_putPosition(ip-2, ctx, tableType, base);
michael@0 599
michael@0 600 /* Test next position */
michael@0 601 ref = LZ4_getPosition(ip, ctx, tableType, base);
michael@0 602 if (dict==usingExtDict)
michael@0 603 {
michael@0 604 if (ref<(const BYTE*)source)
michael@0 605 {
michael@0 606 refDelta = dictDelta;
michael@0 607 lowLimit = dictionary;
michael@0 608 }
michael@0 609 else
michael@0 610 {
michael@0 611 refDelta = 0;
michael@0 612 lowLimit = (const BYTE*)source;
michael@0 613 }
michael@0 614 }
michael@0 615 LZ4_putPosition(ip, ctx, tableType, base);
michael@0 616 if ( ((dictIssue==dictSmall) ? (ref>=lowRefLimit) : 1)
michael@0 617 && (ref+MAX_DISTANCE>=ip)
michael@0 618 && (A32(ref+refDelta)==A32(ip)) )
michael@0 619 { token=op++; *token=0; goto _next_match; }
michael@0 620
michael@0 621 /* Prepare next loop */
michael@0 622 forwardH = LZ4_hashPosition(++ip, tableType);
michael@0 623 }
michael@0 624
michael@0 625 _last_literals:
michael@0 626 /* Encode Last Literals */
michael@0 627 {
michael@0 628 int lastRun = (int)(iend - anchor);
michael@0 629 if ((outputLimited) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
michael@0 630 return 0; /* Check output limit */
michael@0 631 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
michael@0 632 else *op++ = (BYTE)(lastRun<<ML_BITS);
michael@0 633 memcpy(op, anchor, iend - anchor);
michael@0 634 op += iend-anchor;
michael@0 635 }
michael@0 636
michael@0 637 /* End */
michael@0 638 return (int) (((char*)op)-dest);
michael@0 639 }
michael@0 640
michael@0 641
michael@0 642 int LZ4_compress(const char* source, char* dest, int inputSize)
michael@0 643 {
michael@0 644 #if (HEAPMODE)
michael@0 645 void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
michael@0 646 #else
michael@0 647 U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
michael@0 648 #endif
michael@0 649 int result;
michael@0 650
michael@0 651 if (inputSize < (int)LZ4_64KLIMIT)
michael@0 652 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
michael@0 653 else
michael@0 654 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
michael@0 655
michael@0 656 #if (HEAPMODE)
michael@0 657 FREEMEM(ctx);
michael@0 658 #endif
michael@0 659 return result;
michael@0 660 }
michael@0 661
michael@0 662 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
michael@0 663 {
michael@0 664 #if (HEAPMODE)
michael@0 665 void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4); /* Aligned on 4-bytes boundaries */
michael@0 666 #else
michael@0 667 U32 ctx[LZ4_STREAMSIZE_U32] = {0}; /* Ensure data is aligned on 4-bytes boundaries */
michael@0 668 #endif
michael@0 669 int result;
michael@0 670
michael@0 671 if (inputSize < (int)LZ4_64KLIMIT)
michael@0 672 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
michael@0 673 else
michael@0 674 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
michael@0 675
michael@0 676 #if (HEAPMODE)
michael@0 677 FREEMEM(ctx);
michael@0 678 #endif
michael@0 679 return result;
michael@0 680 }
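/*
 * Usage sketch (illustrative only, not part of this file) : a one-shot round
 * trip through a buffer sized with LZ4_compressBound(), then decompression
 * with bound checking. Error handling is reduced to return-value checks.
 */
#if 0
static int LZ4_example_roundtrip(const char* src, int srcSize)
{
    int ok = 0;
    int bound = LZ4_compressBound(srcSize);            /* worst-case output size */
    char* compressed = (char*)malloc((size_t)bound);
    char* restored = (char*)malloc((size_t)srcSize);
    if (compressed && restored)
    {
        int csize = LZ4_compress_limitedOutput(src, compressed, srcSize, bound);
        if (csize > 0)                                 /* 0 means compression failed */
            ok = (LZ4_decompress_safe(compressed, restored, csize, srcSize) == srcSize);
    }
    free(compressed);
    free(restored);
    return ok;
}
#endif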
michael@0 681
michael@0 682
michael@0 683 /*****************************************
michael@0 684 Experimental : Streaming functions
michael@0 685 *****************************************/
michael@0 686
michael@0 687 void* LZ4_createStream()
michael@0 688 {
michael@0 689 void* lz4s = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
michael@0 690 MEM_INIT(lz4s, 0, LZ4_STREAMSIZE);
michael@0 691 return lz4s;
michael@0 692 }
michael@0 693
michael@0 694 int LZ4_free (void* LZ4_stream)
michael@0 695 {
michael@0 696 FREEMEM(LZ4_stream);
michael@0 697 return (0);
michael@0 698 }
michael@0 699
michael@0 700
michael@0 701 int LZ4_loadDict (void* LZ4_dict, const char* dictionary, int dictSize)
michael@0 702 {
michael@0 703 LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
michael@0 704 const BYTE* p = (const BYTE*)dictionary;
michael@0 705 const BYTE* const dictEnd = p + dictSize;
michael@0 706 const BYTE* base;
michael@0 707
michael@0 708 LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
michael@0 709 if (dict->initCheck) MEM_INIT(dict, 0, sizeof(LZ4_stream_t_internal)); /* Uninitialized structure detected */
michael@0 710
michael@0 711 if (dictSize < MINMATCH)
michael@0 712 {
michael@0 713 dict->dictionary = NULL;
michael@0 714 dict->dictSize = 0;
michael@0 715 return 1;
michael@0 716 }
michael@0 717
michael@0 718 if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB;
michael@0 719 base = p - dict->currentOffset;
michael@0 720 dict->dictionary = p;
michael@0 721 dict->dictSize = (U32)(dictEnd - p);
michael@0 722 dict->currentOffset += dict->dictSize;
michael@0 723
michael@0 724 while (p <= dictEnd-MINMATCH)
michael@0 725 {
michael@0 726 LZ4_putPosition(p, dict, byU32, base);
michael@0 727 p+=3;
michael@0 728 }
michael@0 729
michael@0 730 return 1;
michael@0 731 }
michael@0 732
michael@0 733
michael@0 734 void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
michael@0 735 {
michael@0 736 if ((LZ4_dict->currentOffset > 0x80000000) ||
michael@0 737 ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */
michael@0 738 {
michael@0 739 /* rescale hash table */
michael@0 740 U32 delta = LZ4_dict->currentOffset - 64 KB;
michael@0 741 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
michael@0 742 int i;
michael@0 743 for (i=0; i<HASH_SIZE_U32; i++)
michael@0 744 {
michael@0 745 if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
michael@0 746 else LZ4_dict->hashTable[i] -= delta;
michael@0 747 }
michael@0 748 LZ4_dict->currentOffset = 64 KB;
michael@0 749 if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
michael@0 750 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
michael@0 751 }
michael@0 752 }
michael@0 753
michael@0 754
michael@0 755 FORCE_INLINE int LZ4_compress_continue_generic (void* LZ4_stream, const char* source, char* dest, int inputSize,
michael@0 756 int maxOutputSize, limitedOutput_directive limit)
michael@0 757 {
michael@0 758 LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
michael@0 759 const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
michael@0 760
michael@0 761 const BYTE* smallest = (const BYTE*) source;
michael@0 762 if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
michael@0 763 if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
michael@0 764 LZ4_renormDictT(streamPtr, smallest);
michael@0 765
michael@0 766 /* Check overlapping input/dictionary space */
michael@0 767 {
michael@0 768 const BYTE* sourceEnd = (const BYTE*) source + inputSize;
michael@0 769 if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
michael@0 770 {
michael@0 771 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
michael@0 772 if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
michael@0 773 if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
michael@0 774 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
michael@0 775 }
michael@0 776 }
michael@0 777
michael@0 778 /* prefix mode : source data follows dictionary */
michael@0 779 if (dictEnd == (const BYTE*)source)
michael@0 780 {
michael@0 781 int result;
michael@0 782 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
michael@0 783 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
michael@0 784 else
michael@0 785 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
michael@0 786 streamPtr->dictSize += (U32)inputSize;
michael@0 787 streamPtr->currentOffset += (U32)inputSize;
michael@0 788 return result;
michael@0 789 }
michael@0 790
michael@0 791 /* external dictionary mode */
michael@0 792 {
michael@0 793 int result;
michael@0 794 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
michael@0 795 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
michael@0 796 else
michael@0 797 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
michael@0 798 streamPtr->dictionary = (const BYTE*)source;
michael@0 799 streamPtr->dictSize = (U32)inputSize;
michael@0 800 streamPtr->currentOffset += (U32)inputSize;
michael@0 801 return result;
michael@0 802 }
michael@0 803 }
michael@0 804
michael@0 805
michael@0 806 int LZ4_compress_continue (void* LZ4_stream, const char* source, char* dest, int inputSize)
michael@0 807 {
michael@0 808 return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
michael@0 809 }
michael@0 810
michael@0 811 int LZ4_compress_limitedOutput_continue (void* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize)
michael@0 812 {
michael@0 813 return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
michael@0 814 }
michael@0 815
michael@0 816
michael@0 817 // Hidden debug function, to force separate dictionary mode
michael@0 818 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
michael@0 819 {
michael@0 820 LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
michael@0 821 int result;
michael@0 822 const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
michael@0 823
michael@0 824 const BYTE* smallest = dictEnd;
michael@0 825 if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
michael@0 826 LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);
michael@0 827
michael@0 828 result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);
michael@0 829
michael@0 830 streamPtr->dictionary = (const BYTE*)source;
michael@0 831 streamPtr->dictSize = (U32)inputSize;
michael@0 832 streamPtr->currentOffset += (U32)inputSize;
michael@0 833
michael@0 834 return result;
michael@0 835 }
michael@0 836
michael@0 837
michael@0 838 int LZ4_saveDict (void* LZ4_dict, char* safeBuffer, int dictSize)
michael@0 839 {
michael@0 840 LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
michael@0 841 const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;
michael@0 842
michael@0 843 if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
michael@0 844 if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
michael@0 845
michael@0 846 memcpy(safeBuffer, previousDictEnd - dictSize, dictSize);
michael@0 847
michael@0 848 dict->dictionary = (const BYTE*)safeBuffer;
michael@0 849 dict->dictSize = (U32)dictSize;
michael@0 850
michael@0 851 return 1;
michael@0 852 }
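/*
 * Streaming sketch (illustrative only; buffer sizes are example assumptions) :
 * consecutive blocks are compressed with LZ4_compress_limitedOutput_continue(),
 * each block using the previous ones as dictionary. Earlier blocks must remain
 * readable at their original address, or be preserved with LZ4_saveDict().
 */
#if 0
static void LZ4_example_stream(const char* block1, const char* block2,
                               int blockSize, char* out, int outCapacity)
{
    void* stream = LZ4_createStream();
    int c1 = LZ4_compress_limitedOutput_continue(stream, block1, out, blockSize, outCapacity);
    if (c1 > 0)   /* second block references the first one as dictionary */
        (void)LZ4_compress_limitedOutput_continue(stream, block2, out + c1, blockSize, outCapacity - c1);
    LZ4_free(stream);
}
#endif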
michael@0 853
michael@0 854
michael@0 855
michael@0 856 /****************************
michael@0 857 Decompression functions
michael@0 858 ****************************/
michael@0 859 /*
michael@0 860 * This generic decompression function covers all use cases.
michael@0 861 * It is instantiated several times, using different sets of directives.
michael@0 862 * Note that it is essential that this generic function is really inlined,
michael@0 863 * in order to remove useless branches during compilation optimisation.
michael@0 864 */
michael@0 865 FORCE_INLINE int LZ4_decompress_generic(
michael@0 866 const char* source,
michael@0 867 char* dest,
michael@0 868 int inputSize,
michael@0 869 int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
michael@0 870
michael@0 871 int endOnInput, /* endOnOutputSize, endOnInputSize */
michael@0 872 int partialDecoding, /* full, partial */
michael@0 873 int targetOutputSize, /* only used if partialDecoding==partial */
michael@0 874 int dict, /* noDict, withPrefix64k, usingExtDict */
michael@0 875 const char* dictStart, /* only if dict==usingExtDict */
michael@0 876 int dictSize /* note : = 0 if noDict */
michael@0 877 )
michael@0 878 {
michael@0 879 /* Local Variables */
michael@0 880 const BYTE* restrict ip = (const BYTE*) source;
michael@0 881 const BYTE* ref;
michael@0 882 const BYTE* const iend = ip + inputSize;
michael@0 883
michael@0 884 BYTE* op = (BYTE*) dest;
michael@0 885 BYTE* const oend = op + outputSize;
michael@0 886 BYTE* cpy;
michael@0 887 BYTE* oexit = op + targetOutputSize;
michael@0 888 const BYTE* const lowLimit = (const BYTE*)dest - dictSize;
michael@0 889
michael@0 890 const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
michael@0 891 //#define OLD
michael@0 892 #ifdef OLD
michael@0 893 const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
michael@0 894 #else
michael@0 895 const size_t dec32table[] = {4-0, 4-3, 4-2, 4-3, 4-0, 4-0, 4-0, 4-0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
michael@0 896 #endif
michael@0 897 static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
michael@0 898
michael@0 899 const int checkOffset = (endOnInput) && (dictSize < (int)(64 KB));
michael@0 900
michael@0 901
michael@0 902 /* Special cases */
michael@0 903 if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
michael@0 904 if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
michael@0 905 if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
michael@0 906
michael@0 907
michael@0 908 /* Main Loop */
michael@0 909 while (1)
michael@0 910 {
michael@0 911 unsigned token;
michael@0 912 size_t length;
michael@0 913
michael@0 914 /* get runlength */
michael@0 915 token = *ip++;
michael@0 916 if ((length=(token>>ML_BITS)) == RUN_MASK)
michael@0 917 {
michael@0 918 unsigned s;
michael@0 919 do
michael@0 920 {
michael@0 921 s = *ip++;
michael@0 922 length += s;
michael@0 923 }
michael@0 924 while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
michael@0 925 //if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
michael@0 926 if ((sizeof(void*)==4) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* quickfix issue 134 */
michael@0 927 if ((endOnInput) && (sizeof(void*)==4) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* quickfix issue 134 */
michael@0 928 }
michael@0 929
michael@0 930 /* copy literals */
michael@0 931 cpy = op+length;
michael@0 932 if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
michael@0 933 || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
michael@0 934 {
michael@0 935 if (partialDecoding)
michael@0 936 {
michael@0 937 if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
michael@0 938 if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
michael@0 939 }
michael@0 940 else
michael@0 941 {
michael@0 942 if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
michael@0 943 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
michael@0 944 }
michael@0 945 memcpy(op, ip, length);
michael@0 946 ip += length;
michael@0 947 op += length;
michael@0 948 break; /* Necessarily EOF, due to parsing restrictions */
michael@0 949 }
michael@0 950 LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;
michael@0 951
michael@0 952 /* get offset */
michael@0 953 LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
michael@0 954 if ((checkOffset) && (unlikely(ref < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */
michael@0 955
michael@0 956 /* get matchlength */
michael@0 957 if ((length=(token&ML_MASK)) == ML_MASK)
michael@0 958 {
michael@0 959 unsigned s;
michael@0 960 do
michael@0 961 {
michael@0 962 if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
michael@0 963 s = *ip++;
michael@0 964 length += s;
michael@0 965 } while (s==255);
michael@0 966 //if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
michael@0 967 if ((sizeof(void*)==4) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* quickfix issue 134 */
michael@0 968 }
michael@0 969
michael@0 970 /* check external dictionary */
michael@0 971 if ((dict==usingExtDict) && (ref < (BYTE* const)dest))
michael@0 972 {
michael@0 973 if (unlikely(op+length+MINMATCH > oend-LASTLITERALS)) goto _output_error;
michael@0 974
michael@0 975 if (length+MINMATCH <= (size_t)(dest-(char*)ref))
michael@0 976 {
michael@0 977 ref = dictEnd - (dest-(char*)ref);
michael@0 978 memcpy(op, ref, length+MINMATCH);
michael@0 979 op += length+MINMATCH;
michael@0 980 }
michael@0 981 else
michael@0 982 {
michael@0 983 size_t copySize = (size_t)(dest-(char*)ref);
michael@0 984 memcpy(op, dictEnd - copySize, copySize);
michael@0 985 op += copySize;
michael@0 986 copySize = length+MINMATCH - copySize;
michael@0 987 if (copySize > (size_t)((char*)op-dest)) /* overlap */
michael@0 988 {
michael@0 989 BYTE* const cpy = op + copySize;
michael@0 990 const BYTE* ref = (BYTE*)dest;
michael@0 991 while (op < cpy) *op++ = *ref++;
michael@0 992 }
michael@0 993 else
michael@0 994 {
michael@0 995 memcpy(op, dest, copySize);
michael@0 996 op += copySize;
michael@0 997 }
michael@0 998 }
michael@0 999 continue;
michael@0 1000 }
michael@0 1001
michael@0 1002 /* copy repeated sequence */
michael@0 1003 if (unlikely((op-ref)<(int)STEPSIZE))
michael@0 1004 {
michael@0 1005 const size_t dec64 = dec64table[(sizeof(void*)==4) ? 0 : op-ref];
michael@0 1006 op[0] = ref[0];
michael@0 1007 op[1] = ref[1];
michael@0 1008 op[2] = ref[2];
michael@0 1009 op[3] = ref[3];
michael@0 1010 #ifdef OLD
michael@0 1011 op += 4, ref += 4; ref -= dec32table[op-ref];
michael@0 1012 A32(op) = A32(ref);
michael@0 1013 op += STEPSIZE-4; ref -= dec64;
michael@0 1014 #else
michael@0 1015 ref += dec32table[op-ref];
michael@0 1016 A32(op+4) = A32(ref);
michael@0 1017 op += STEPSIZE; ref -= dec64;
michael@0 1018 #endif
michael@0 1019 } else { LZ4_COPYSTEP(op,ref); }
michael@0 1020 cpy = op + length - (STEPSIZE-4);
michael@0 1021
michael@0 1022 if (unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4)))
michael@0 1023 {
michael@0 1024 if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last 5 bytes must be literals */
michael@0 1025 if (op<oend-COPYLENGTH) LZ4_WILDCOPY(op, ref, (oend-COPYLENGTH));
michael@0 1026 while(op<cpy) *op++=*ref++;
michael@0 1027 op=cpy;
michael@0 1028 continue;
michael@0 1029 }
michael@0 1030 LZ4_WILDCOPY(op, ref, cpy);
michael@0 1031 op=cpy; /* correction */
michael@0 1032 }
michael@0 1033
michael@0 1034 /* end of decoding */
michael@0 1035 if (endOnInput)
michael@0 1036 return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
michael@0 1037 else
michael@0 1038 return (int) (((char*)ip)-source); /* Nb of input bytes read */
michael@0 1039
michael@0 1040 /* Overflow error detected */
michael@0 1041 _output_error:
michael@0 1042 return (int) (-(((char*)ip)-source))-1;
michael@0 1043 }
michael@0 1044
michael@0 1045
michael@0 1046 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxOutputSize)
michael@0 1047 {
michael@0 1048 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, noDict, NULL, 0);
michael@0 1049 }
michael@0 1050
michael@0 1051 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxOutputSize)
michael@0 1052 {
michael@0 1053 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, partial, targetOutputSize, noDict, NULL, 0);
michael@0 1054 }
michael@0 1055
michael@0 1056 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
michael@0 1057 {
michael@0 1058 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, NULL, 0);
michael@0 1059 }
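/*
 * Note : LZ4_decompress_safe() validates both input and output bounds and is
 * the right choice for untrusted data. LZ4_decompress_fast() trusts the
 * supplied originalSize and does not bound its reads of 'source', so it must
 * never be fed malicious or corrupted input.
 */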
michael@0 1060
michael@0 1061 /* streaming decompression functions */
michael@0 1062
michael@0 1063 //#define LZ4_STREAMDECODESIZE_U32 4
michael@0 1064 //#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U32 * sizeof(unsigned int))
michael@0 1065 //typedef struct { unsigned int table[LZ4_STREAMDECODESIZE_U32]; } LZ4_streamDecode_t;
michael@0 1066 typedef struct
michael@0 1067 {
michael@0 1068 const char* dictionary;
michael@0 1069 int dictSize;
michael@0 1070 } LZ4_streamDecode_t_internal;
michael@0 1071
michael@0 1072 /*
michael@0 1073 * If you prefer dynamic allocation methods,
michael@0 1074 * LZ4_createStreamDecode()
michael@0 1075 * provides a pointer (void*) to an initialized LZ4_streamDecode_t structure.
michael@0 1076 */
michael@0 1077 void* LZ4_createStreamDecode()
michael@0 1078 {
michael@0 1079 void* lz4s = ALLOCATOR(sizeof(U32), LZ4_STREAMDECODESIZE_U32);
michael@0 1080 MEM_INIT(lz4s, 0, LZ4_STREAMDECODESIZE);
michael@0 1081 return lz4s;
michael@0 1082 }
michael@0 1083
michael@0 1084 /*
michael@0 1085 * LZ4_setDictDecode
michael@0 1086 * Use this function to indicate where the dictionary is located.
michael@0 1087 * This function is not necessary if previous data is still available where it was decoded.
michael@0 1088 * Loading a size of 0 is allowed (same effect as no dictionary).
michael@0 1089 * Return : 1 if OK, 0 if error
michael@0 1090 */
michael@0 1091 int LZ4_setDictDecode (void* LZ4_streamDecode, const char* dictionary, int dictSize)
michael@0 1092 {
michael@0 1093 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
michael@0 1094 lz4sd->dictionary = dictionary;
michael@0 1095 lz4sd->dictSize = dictSize;
michael@0 1096 return 1;
michael@0 1097 }
michael@0 1098
michael@0 1099 /*
michael@0 1100 *_continue() :
michael@0 1101 These decoding functions allow decompression of multiple blocks in "streaming" mode.
michael@0 1102 Previously decoded blocks must still be available at the memory position where they were decoded.
michael@0 1103 If that is not possible, save the relevant part of the decoded data into a safe buffer,
michael@0 1104 and indicate where it stands using LZ4_setDictDecode()
michael@0 1105 */
michael@0 1106 int LZ4_decompress_safe_continue (void* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
michael@0 1107 {
michael@0 1108 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
michael@0 1109 int result;
michael@0 1110
michael@0 1111 result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
michael@0 1112 if (result <= 0) return result;
michael@0 1113 if (lz4sd->dictionary + lz4sd->dictSize == dest)
michael@0 1114 {
michael@0 1115 lz4sd->dictSize += result;
michael@0 1116 }
michael@0 1117 else
michael@0 1118 {
michael@0 1119 lz4sd->dictionary = dest;
michael@0 1120 lz4sd->dictSize = result;
michael@0 1121 }
michael@0 1122
michael@0 1123 return result;
michael@0 1124 }
michael@0 1125
michael@0 1126 int LZ4_decompress_fast_continue (void* LZ4_streamDecode, const char* source, char* dest, int originalSize)
michael@0 1127 {
michael@0 1128 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
michael@0 1129 int result;
michael@0 1130
michael@0 1131 result = LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
michael@0 1132 if (result <= 0) return result;
michael@0 1133 if (lz4sd->dictionary + lz4sd->dictSize == dest)
michael@0 1134 {
michael@0 1135 lz4sd->dictSize += result;
michael@0 1136 }
michael@0 1137 else
michael@0 1138 {
michael@0 1139 lz4sd->dictionary = dest;
michael@0 1140 lz4sd->dictSize = result;
michael@0 1141 }
michael@0 1142
michael@0 1143 return result;
michael@0 1144 }
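/*
 * Streaming decode sketch (illustrative only; sizes are example assumptions) :
 * blocks are decoded back to back into one buffer, so each decoded block stays
 * addressable and serves as dictionary for the next one.
 */
#if 0
static int LZ4_example_decode_stream(void* streamDecode,
                                     const char* c1, int s1,
                                     const char* c2, int s2,
                                     char* dst, int dstCapacity)
{
    int d1 = LZ4_decompress_safe_continue(streamDecode, c1, dst, s1, dstCapacity);
    int d2;
    if (d1 < 0) return d1;   /* negative result signals a decoding error */
    d2 = LZ4_decompress_safe_continue(streamDecode, c2, dst + d1, s2, dstCapacity - d1);
    return (d2 < 0) ? d2 : d1 + d2;
}
#endif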
michael@0 1145
michael@0 1146
michael@0 1147 /*
michael@0 1148 Advanced decoding functions :
michael@0 1149 *_usingDict() :
michael@0 1150 These decoding functions work the same as the "_continue" ones,
michael@0 1151 except that the dictionary must be provided explicitly as a parameter.
michael@0 1152 */
michael@0 1153
michael@0 1154 int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
michael@0 1155 {
michael@0 1156 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, dictStart, dictSize);
michael@0 1157 }
michael@0 1158
michael@0 1159 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
michael@0 1160 {
michael@0 1161 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, dictStart, dictSize);
michael@0 1162 }
