1 /*
2    Original C comment:
3    
4    LZ4 - Fast LZ compression algorithm
5    Copyright (C) 2011-2015, Yann Collet.
6    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 
8    Redistribution and use in source and binary forms, with or without
9    modification, are permitted provided that the following conditions are
10    met:
11 
12 	   * Redistributions of source code must retain the above copyright
13    notice, this list of conditions and the following disclaimer.
14 	   * Redistributions in binary form must reproduce the above
15    copyright notice, this list of conditions and the following disclaimer
16    in the documentation and/or other materials provided with the
17    distribution.
18 
19    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31    You can contact the author at :
32    - LZ4 source repository : http://code.google.com/p/lz4
33    - LZ4 source mirror : https://github.com/Cyan4973/lz4
34    - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
35 */
36 module gamut.codecs.lz4;
37 
38 nothrow @nogc:
39 
40 private import core.stdc.stdlib;
41 private import core.stdc.string;
42 private import std.system;
43 private import std.bitmanip;
44 private import gamut.codecs.ctypes;
45 
/// Version constants
enum int LZ4_VERSION_MAJOR   =   1;    /* for breaking interface changes  */
/// ditto
enum int LZ4_VERSION_MINOR   =   5;    /* for new (non-breaking) interface capabilities */
/// ditto
enum int LZ4_VERSION_RELEASE =   0;    /* for tweaks, bug-fixes, or development */
/// Combined version as a single integer: major*10000 + minor*100 + release.
enum int LZ4_VERSION_NUMBER  = (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE);

/// Tuning constant: the compressor's hash table occupies 2^LZ4_MEMORY_USAGE bytes.
enum int LZ4_MEMORY_USAGE    =  cLZ4_MEMORY_USAGE;
/// Largest input size accepted by the compressor; bigger (or negative) sizes are rejected.
enum int LZ4_MAX_INPUT_SIZE  =  0x7E000000;   /* 2 113 929 216 bytes */
59 /// -
/// Worst-case compressed size for an input of `isize` bytes
/// (incompressible data expands slightly); 0 when `isize` exceeds the limit.
uint LZ4_COMPRESSBOUND(uint isize)
{
	if (isize > LZ4_MAX_INPUT_SIZE)
		return 0;
	return isize + isize / 255 + 16;
}
/// Streaming constants: size of LZ4_stream_t, counted in 8-byte slots.
enum int LZ4_STREAMSIZE_U64 =  ((1 << (LZ4_MEMORY_USAGE-3)) + 4);
/// ditto (same size, in bytes)
enum int LZ4_STREAMSIZE     =  (LZ4_STREAMSIZE_U64 * 8);
/// Size of LZ4_streamDecode_t, counted in 8-byte slots.
enum int LZ4_STREAMDECODESIZE_U64 =  4;
/// ditto (same size, in bytes)
enum int LZ4_STREAMDECODESIZE     =  (LZ4_STREAMDECODESIZE_U64 * 8);
72 /// -
73 struct LZ4_stream_t
74 {
75 	long[LZ4_STREAMSIZE_U64] table;
76 }
77 /// -
78 struct LZ4_streamDecode_t
79 {
80 	long[LZ4_STREAMDECODESIZE_U64] table;
81 }
82 
83 //**********************************************************
84 
version(LDC)
{
    // GP: When measured, did not make a difference tbh.
    // Branch-prediction hints, lowered to llvm.expect under LDC.
    import ldc.intrinsics;
    bool likely(bool b) { return llvm_expect!bool(b, true); }
    bool unlikely(bool b) { return llvm_expect!bool(b, false); }
}
else
{
    // Other compilers: identity fallbacks with the same signatures.
    bool likely(bool b) { return b; }
    bool unlikely(bool b) { return b; }
}
97 
98 /* *************************************
99    Reading and writing into memory
100 **************************************/
101 
/// True when compiled for a 64-bit target (size_t is 8 bytes wide).
private bool LZ4_64bits()
{
    enum targetBits = size_t.sizeof * 8;
    return targetBits == 64;
}
106 
/// True when the target stores multi-byte integers least-significant byte first.
private bool LZ4_isLittleEndian()
{
	// std.system.endian is a compile-time constant for the target.
	return endian == Endian.littleEndian;
}
114 
115 
116 // FUTURE: use gamut.utils functions
117 
/// Reads a 16-bit little-endian value from a (possibly unaligned) address.
private ushort LZ4_readLE16(const(void)* memPtr)
{
	version(LittleEndian)
	{
		// Native order already matches: plain load.
		return( cast(ushort*)(memPtr))[0];
	}
	else
	{
		// Big-endian: assemble from individual bytes.
		// (The previous code read a whole native-order ushort here, which both
		// failed to compile -- void* does not implicitly convert to ubyte* in D --
		// and double-counted the high byte.)
		const(ubyte)* p = cast(const(ubyte)*)memPtr;
		return cast(ushort)(p[0] | (p[1] << 8));
	}
}
130 
/// Writes `value` as 16-bit little-endian to a (possibly unaligned) address.
private void LZ4_writeLE16(void* memPtr, ushort value)
{
	version(LittleEndian)
	{
		// Native order already matches: plain store.
		(cast(ushort*)(memPtr))[0] = value;
	}
	else
	{
		// Explicit cast required: void* does not implicitly convert to ubyte* in D
		// (the previous implicit conversion did not compile on big-endian targets).
		ubyte* p = cast(ubyte*)memPtr;
		p[0] = cast(ubyte) value;
		p[1] = cast(ubyte)(value>>8);
	}
}
144 
145 
/// Reads a native-order 16-bit value from memory (unaligned access assumed OK on target).
private ushort LZ4_read16(const(void)* memPtr)
{
	return (cast(const(ushort)*)(memPtr))[0];
}
150 
/// Reads a native-order 32-bit value from memory (unaligned access assumed OK on target).
private uint LZ4_read32(const(void)* memPtr)
{
	return (cast(const(uint)*)(memPtr))[0];
}
155 
/// Reads a native-order 64-bit value from memory (unaligned access assumed OK on target).
private ulong LZ4_read64(const(void)* memPtr)
{
	return (cast(const(ulong)*)(memPtr))[0];
}
160 
/// Reads one machine word (size_t-sized) from memory; used by LZ4_count to
/// compare STEPSIZE bytes at a time.
/// NOTE(review): this dereferences a potentially unaligned pointer; fine on
/// x86/x64 and with mainstream compilers on arm64, but confirm for targets
/// that trap on unaligned word loads.
private size_t LZ4_read_ARCH(const(void)* p)
{
	static if (size_t.sizeof == 8)
	{
		return cast(size_t)LZ4_read64(p);
	}
	else
	{
		return cast(size_t)LZ4_read32(p);
	}
}
172 
173 
/// Copies exactly 4 bytes; regions must not overlap.
private void LZ4_copy4(void* dstPtr, const(void)* srcPtr)
{
	(cast(ubyte*)dstPtr)[0..4] = (cast(const(ubyte)*)srcPtr)[0..4];
}
178 
/// Copies exactly 8 bytes; regions must not overlap.
private void LZ4_copy8(void* dstPtr, const(void)* srcPtr)
{
	(cast(ubyte*)dstPtr)[0..8] = (cast(const(ubyte)*)srcPtr)[0..8];
}
183 
184 /* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
185 private void LZ4_wildCopy(void* dstPtr, const(void)* srcPtr, void* dstEnd)
186 {
187 	ubyte* d = cast(ubyte*)dstPtr;
188 	const(ubyte)* s = cast(const(ubyte)*)srcPtr;
189 	ubyte* e = cast(ubyte*)dstEnd;
190 	do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
191 }
192 
193 /**************************************/
194 
/**
 * Given the XOR of two words that differ (`val != 0`), returns how many
 * leading bytes (in memory order) the two words had in common.
 *
 * On little-endian targets the first differing byte holds the lowest set
 * bit (count trailing zeros); on big-endian it holds the highest set bit
 * (count leading zeros). The previous code used bsf unconditionally, which
 * is only correct for little-endian.
 */
public uint LZ4_NbCommonBytes (size_t val)
{
    import core.bitop: bsf, bsr;
    assert(val != 0);
    version(LittleEndian)
    {
        return bsf(val) >> 3;
    }
    else
    {
        // leading-zero count, expressed via bsr (index of highest set bit)
        return cast(uint)((size_t.sizeof * 8 - 1 - bsr(val)) >> 3);
    }
}
unittest
{
    version(LittleEndian)
    {
        assert(LZ4_NbCommonBytes(1) == 0);
        assert(LZ4_NbCommonBytes(4) == 0);
        assert(LZ4_NbCommonBytes(256) == 1);
        assert(LZ4_NbCommonBytes(65534) == 0);
        assert(LZ4_NbCommonBytes(0xffffff) == 0);
        assert(LZ4_NbCommonBytes(0x1000000) == 3);
    }
}
210 
211 
212 /********************************
213    Common functions
214 ********************************/
215 
/// Returns the length of the run of identical bytes starting at `pIn` and
/// `pMatch`, never reading at or past `pInLimit`. Compares one machine word
/// at a time, then narrows to 4/2/1-byte tail compares.
private uint LZ4_count(const(ubyte)* pIn, const(ubyte)* pMatch, const(ubyte)* pInLimit)
{
	const(ubyte)* pStart = pIn;

	// Word-at-a-time loop: XOR locates the first differing byte, if any.
	while (likely(pIn<pInLimit-(STEPSIZE-1)))
	{
		size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
		if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
		pIn += LZ4_NbCommonBytes(diff);
		return cast(uint)(pIn - pStart);
	}

	// Tail: on 64-bit builds try a 4-byte compare first, then 2, then 1.
	static if (size_t.sizeof == 8) 
	{
		if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) 
		{ 
			pIn+=4; 
			pMatch+=4; 
		}
	}
	if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
	if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
	return cast(uint)(pIn - pStart);
}
240 
241 /* *************************************
242    Local Utils
243 **************************************/
/// Returns the library version as one integer: major*10000 + minor*100 + release.
int LZ4_versionNumber () { return LZ4_VERSION_NUMBER; }
/// Worst-case compressed size for `isize` input bytes (0 if input too large).
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
246 
247 
248 /* *************************************
249    Local Structures and types
250 **************************************/
private
{
// Real layout behind the opaque LZ4_stream_t.
struct LZ4_stream_t_internal {
	uint[HASH_SIZE_U32] hashTable;   // position table, indexed by sequence hash
	uint currentOffset;              // cumulative offset of current input vs. `base`
	uint initCheck;                  // non-zero means the structure was never reset
	const(ubyte)* dictionary;        // start of the external dictionary, if any
	const(ubyte)* bufferStart;
	uint dictSize;                   // dictionary length in bytes
}

// Compile-time directives threaded through the generic (de)compressors;
// plain ints so that constant-folding removes dead branches.
enum : int { notLimited = 0, limitedOutput = 1 }
alias int limitedOutput_directive;
// Hash-table cell encoding: raw pointer, 32-bit offset, or 16-bit offset.
enum : int { byPtr, byU32, byU16 }
alias int tableType_t;

enum : int { noDict = 0, withPrefix64k, usingExtDict }
alias int dict_directive;
enum : int { noDictIssue = 0, dictSmall }
alias int dictIssue_directive;

enum : int { endOnOutputSize = 0, endOnInputSize = 1 }
alias int endCondition_directive;
enum : int { full = 0, partial = 1 }
alias int earlyEnd_directive;

}
278 
279 /* *******************************
280    Compression functions
281 ********************************/
282 
/// Hashes a 4-byte sequence into a table index using Knuth's multiplicative
/// constant 2654435761. byU16 tables are twice as large, so they keep one
/// extra bit of the hash.
private uint LZ4_hashSequence(uint sequence, tableType_t tableType)
{
	immutable int hashLog = (tableType == byU16) ? (LZ4_HASHLOG + 1) : LZ4_HASHLOG;
	return (sequence * 2654435761U) >> ((MINMATCH * 8) - hashLog);
}
290 
/// Hashes the 4 bytes located at `p`.
private uint LZ4_hashPosition(const(ubyte)* p, tableType_t tableType) { return LZ4_hashSequence(LZ4_read32(p), tableType); }
292 
/// Stores position `p` into slot `h` of the hash table, using the encoding
/// selected by `tableType` (raw pointer, or 32/16-bit offset from srcBase).
private void LZ4_putPositionOnHash(const(ubyte)* p, uint h, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	if (tableType == byPtr)
	{
		const(ubyte)** hashTable = cast(const(ubyte)**)tableBase;
		hashTable[h] = p;
	}
	else if (tableType == byU32)
	{
		uint* hashTable = cast(uint*)tableBase;
		hashTable[h] = cast(uint)(p - srcBase);
	}
	else if (tableType == byU16)
	{
		ushort* hashTable = cast(ushort*)tableBase;
		hashTable[h] = cast(ushort)(p - srcBase);
	}
	else
		assert(0);
}
303 
/// Hashes the 4 bytes at `p` and records `p` under that hash.
private void LZ4_putPosition(const(ubyte)* p, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	LZ4_putPositionOnHash(p, LZ4_hashPosition(p, tableType), tableBase, tableType, srcBase);
}
309 
/// Fetches the position stored in slot `h`, decoding it per `tableType`.
private const(ubyte)* LZ4_getPositionOnHash(uint h, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	switch (tableType)
	{
	case byPtr:
		return (cast(const(ubyte)**)tableBase)[h];
	case byU32:
		return (cast(uint*)tableBase)[h] + srcBase;
	default: /* byU16 -- default branch guarantees a return */
		return (cast(ushort*)tableBase)[h] + srcBase;
	}
}
316 
/// Hashes the 4 bytes at `p` and looks up the previously stored position.
private const(ubyte)* LZ4_getPosition(const(ubyte)* p, void* tableBase, tableType_t tableType, const(ubyte)* srcBase)
{
	return LZ4_getPositionOnHash(LZ4_hashPosition(p, tableType), tableBase, tableType, srcBase);
}
322 
/**
 * Generic LZ4 block compressor; all public compression entry points funnel here.
 * The directive parameters are compile-time-constant ints so the optimizer can
 * strip dead branches per instantiation.
 *
 * Params:
 *   ctx           = hash-table state (LZ4_stream_t_internal layout)
 *   source        = input bytes
 *   dest          = output buffer
 *   inputSize     = number of bytes to compress
 *   maxOutputSize = dest capacity (honoured only when outputLimited)
 *   outputLimited = notLimited / limitedOutput
 *   tableType     = byPtr / byU32 / byU16 hash-table cell encoding
 *   dict          = noDict / withPrefix64k / usingExtDict
 *   dictIssue     = noDictIssue / dictSmall
 *
 * Returns: bytes written to dest, or 0 on failure (input too large, or
 * output would exceed maxOutputSize).
 */
private int LZ4_compress_generic(
				 void* ctx,
				 const(char)* source,
				 char* dest,
				 int inputSize,
				 int maxOutputSize,
				 limitedOutput_directive outputLimited,
				 tableType_t tableType,
				 dict_directive dict,
				 dictIssue_directive dictIssue)
{
	LZ4_stream_t_internal* dictPtr = cast(LZ4_stream_t_internal*)ctx;

	const(ubyte)* ip = cast(const(ubyte)*) source;
	const(ubyte)* base;
	const(ubyte)* lowLimit;
	const(ubyte)* lowRefLimit = ip - dictPtr.dictSize;
	const(ubyte)* dictionary = dictPtr.dictionary;
	const(ubyte)* dictEnd = dictionary + dictPtr.dictSize;
	const(size_t) dictDelta = dictEnd - cast(const(ubyte)*)source;
	const(ubyte)* anchor = cast(const(ubyte)*) source;   // start of pending literals
	const(ubyte)* iend = ip + inputSize;
	const(ubyte)* mflimit = iend - MFLIMIT;
	const(ubyte)* matchlimit = iend - LASTLITERALS;

	ubyte* op = cast(ubyte*) dest;
	ubyte* olimit = op + maxOutputSize;

	uint forwardH;
	size_t refDelta=0;   // added to match positions that live in the ext dictionary

	/* Init conditions */
	if (cast(uint)inputSize > cast(uint)LZ4_MAX_INPUT_SIZE) return 0;          /* Unsupported input size, too large (or negative) */
	switch(dict)
	{
	case noDict:
		base = cast(const(ubyte)*)source;
		lowLimit = cast(const(ubyte)*)source;
		break;
	case withPrefix64k:
		base = cast(const(ubyte)*)source - dictPtr.currentOffset;
		lowLimit = cast(const(ubyte)*)source - dictPtr.dictSize;
		break;
	case usingExtDict:
		base = cast(const(ubyte)*)source - dictPtr.currentOffset;
		lowLimit = cast(const(ubyte)*)source;
		break;
	default:
		base = cast(const(ubyte)*)source;
		lowLimit = cast(const(ubyte)*)source;
		break;
	}
	if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
	if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

	/* First ubyte */
	LZ4_putPosition(ip, ctx, tableType, base);
	ip++; forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; )
	{
		const(ubyte)* match;
		ubyte* token;
		{
			const(ubyte)* forwardIp = ip;
			uint step=1;
			// Search acceleration: after each failed probe the step grows,
			// so incompressible regions are skipped faster.
			uint searchMatchNb = (1U << LZ4_skipTrigger);

			/* Find a match */
			do {
				uint h = forwardH;
				ip = forwardIp;
				forwardIp += step;
				step = searchMatchNb++ >> LZ4_skipTrigger;

				if (unlikely(forwardIp > mflimit)) goto _last_literals;

				match = LZ4_getPositionOnHash(h, ctx, tableType, base);
				if (dict==usingExtDict)
				{
					if (match<cast(const(ubyte)*)source)
					{
						refDelta = dictDelta;
						lowLimit = dictionary;
					}
					else
					{
						refDelta = 0;
						lowLimit = cast(const(ubyte)*)source;
					}
				}
				forwardH = LZ4_hashPosition(forwardIp, tableType);
				LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

			} while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
				|| ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
		}

		/* Catch up: extend the match backwards over equal preceding bytes */
		while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

		{
			/* Encode Literal length */
			uint litLength = cast(uint)(ip - anchor);
			token = op++;
			if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
				return 0;   /* Check output limit */
			if (litLength>=RUN_MASK)
			{
				int len = cast(int)litLength-RUN_MASK;
				*token=(RUN_MASK<<ML_BITS);
				for(; len >= 255 ; len-=255) *op++ = 255;
				*op++ = cast(ubyte)len;
			}
			else *token = cast(ubyte)(litLength<<ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op+litLength);
			op+=litLength;
		}

_next_match:
		/* Encode Offset (2 bytes, little-endian) */
		LZ4_writeLE16(op, cast(ushort)(ip-match)); op+=2;

		/* Encode MatchLength */
		{
			uint matchLength;

			if ((dict==usingExtDict) && (lowLimit==dictionary))
			{
				// Match starts inside the external dictionary; it may continue
				// into the current segment, hence the second LZ4_count below.
				const(ubyte)* limit;
				match += refDelta;
				limit = ip + (dictEnd-match);
				if (limit > matchlimit) limit = matchlimit;
				matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
				ip += MINMATCH + matchLength;
				if (ip==limit)
				{
					uint more = LZ4_count(ip, cast(const(ubyte)*)source, matchlimit);
					matchLength += more;
					ip += more;
				}
			}
			else
			{
				matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
				ip += MINMATCH + matchLength;
			}

			if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
				return 0;    /* Check output limit */
			if (matchLength>=ML_MASK)
			{
				*token += ML_MASK;
				matchLength -= ML_MASK;
				for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
				if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
				*op++ = cast(ubyte)matchLength;
			}
			else *token += cast(ubyte)(matchLength);
		}

		anchor = ip;

		/* Test end of chunk */
		if (ip > mflimit) break;

		/* Fill table */
		LZ4_putPosition(ip-2, ctx, tableType, base);

		/* Test next position: an immediate match avoids re-entering the search */
		match = LZ4_getPosition(ip, ctx, tableType, base);
		if (dict==usingExtDict)
		{
			if (match<cast(const(ubyte)*)source)
			{
				refDelta = dictDelta;
				lowLimit = dictionary;
			}
			else
			{
				refDelta = 0;
				lowLimit = cast(const(ubyte)*)source;
			}
		}
		LZ4_putPosition(ip, ctx, tableType, base);
		if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
			&& (match+MAX_DISTANCE>=ip)
			&& (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
		{ token=op++; *token=0; goto _next_match; }   // zero-literal token, straight to match encoding

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals (the block must end with plain literals) */
	{
		int lastRun = cast(int)(iend - anchor);
		if ((outputLimited) && ((cast(char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > cast(uint)maxOutputSize))
			return 0;   /* Check output limit */
		if (lastRun>=cast(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = cast(ubyte) lastRun; }
		else *op++ = cast(ubyte)(lastRun<<ML_BITS);
		memcpy(op, anchor, iend - anchor);
		op += iend-anchor;
	}

	/* End */
	return cast(int) ((cast(char*)op)-dest);
}
536 
537 /// -
538 int LZ4_compress(const(char)* source, char* dest, int inputSize)
539 {
540 	ulong[LZ4_STREAMSIZE_U64] ctx;
541 	int result;
542 
543 	if (inputSize < LZ4_64Klimit)
544 		result = LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
545 	else
546 		result = LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
547 	return result;
548 }
549 /// -
550 int LZ4_compress_limitedOutput(const(char)* source, char* dest, int inputSize, int maxOutputSize)
551 {
552 	ulong[LZ4_STREAMSIZE_U64] ctx;
553 	int result;
554 
555 	if (inputSize < LZ4_64Klimit)
556 		result = LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
557 	else
558 		result = LZ4_compress_generic(cast(void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue);
559 	return result;
560 }
561 
562 
563 /* ****************************************
564    Experimental : Streaming functions
565 *****************************************/
566 
567 /**
568  * LZ4_initStream
569  * Use this function once, to init a newly allocated LZ4_stream_t structure
570  * Return : 1 if OK, 0 if error
571  */
572 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
573 {
574 	MEM_INIT(LZ4_stream, 0, LZ4_stream_t.sizeof);
575 }
576 /// -
577 LZ4_stream_t* LZ4_createStream()
578 {
579 	LZ4_stream_t* lz4s = cast(LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
580 	static assert(LZ4_STREAMSIZE >= LZ4_stream_t_internal.sizeof);    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
581 	LZ4_resetStream(lz4s);
582 	return lz4s;
583 }
584 /// -
585 int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
586 {
587 	FREEMEM(LZ4_stream);
588 	return (0);
589 }
590 
591 /// -
592 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const(char)* dictionary, int dictSize)
593 {
594 	LZ4_stream_t_internal* dict = cast(LZ4_stream_t_internal*) LZ4_dict;
595 	const(ubyte)* p = cast(const(ubyte)*)dictionary;
596 	const(ubyte)* dictEnd = p + dictSize;
597 	const(ubyte)* base;
598 
599 	if (dict.initCheck) LZ4_resetStream(LZ4_dict);                         /* Uninitialized structure detected */
600 
601 	if (dictSize < MINMATCH)
602 	{
603 		dict.dictionary = null;
604 		dict.dictSize = 0;
605 		return 0;
606 	}
607 
608 	if (p <= dictEnd - 64*KB) p = dictEnd - 64*KB;
609 	base = p - dict.currentOffset;
610 	dict.dictionary = p;
611 	dict.dictSize = cast(uint)(dictEnd - p);
612 	dict.currentOffset += dict.dictSize;
613 
614 	while (p <= dictEnd-MINMATCH)
615 	{
616 		LZ4_putPosition(p, dict, byU32, base);
617 		p+=3;
618 	}
619 
620 	return dict.dictSize;
621 }
622 
623 
/// Rescales the stream's hash table when currentOffset approaches overflow
/// (or would wrap relative to the new source address), so stored 32-bit
/// offsets stay valid. After renorm, currentOffset restarts at 64KB.
private void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const(ubyte)* src)
{
	if ((LZ4_dict.currentOffset > 0x80000000) ||
		(cast(size_t)LZ4_dict.currentOffset > cast(size_t)src))   /* address space overflow */
	{
		/* rescale hash table */
		uint delta = LZ4_dict.currentOffset - 64*KB;
		const(ubyte)* dictEnd = LZ4_dict.dictionary + LZ4_dict.dictSize;
		int i;
		for (i=0; i<HASH_SIZE_U32; i++)
		{
			// Entries older than the window are dropped; the rest are shifted.
			if (LZ4_dict.hashTable[i] < delta) LZ4_dict.hashTable[i]=0;
			else LZ4_dict.hashTable[i] -= delta;
		}
		LZ4_dict.currentOffset = 64*KB;
		if (LZ4_dict.dictSize > 64*KB) LZ4_dict.dictSize = 64*KB;
		LZ4_dict.dictionary = dictEnd - LZ4_dict.dictSize;
	}
}
643 
644 /// -
645 int LZ4_compress_continue_generic (void* LZ4_stream, const(char)* source, char* dest, int inputSize,
646 												int maxOutputSize, limitedOutput_directive limit)
647 {
648 	LZ4_stream_t_internal* streamPtr = cast(LZ4_stream_t_internal*)LZ4_stream;
649 	const(ubyte)* dictEnd = streamPtr.dictionary + streamPtr.dictSize;
650 
651 	const(ubyte)* smallest = cast(const(ubyte)*) source;
652 	if (streamPtr.initCheck) return 0;   /* Uninitialized structure detected */
653 	if ((streamPtr.dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
654 	LZ4_renormDictT(streamPtr, smallest);
655 
656 	/* Check overlapping input/dictionary space */
657 	{
658 		const(ubyte)* sourceEnd = cast(const(ubyte)*) source + inputSize;
659 		if ((sourceEnd > streamPtr.dictionary) && (sourceEnd < dictEnd))
660 		{
661 			streamPtr.dictSize = cast(uint)(dictEnd - sourceEnd);
662 			if (streamPtr.dictSize > 64*KB) streamPtr.dictSize = 64*KB;
663 			if (streamPtr.dictSize < 4) streamPtr.dictSize = 0;
664 			streamPtr.dictionary = dictEnd - streamPtr.dictSize;
665 		}
666 	}
667 
668 	/* prefix mode : source data follows dictionary */
669 	if (dictEnd == cast(const(ubyte)*)source)
670 	{
671 		int result;
672 		if ((streamPtr.dictSize < 64*KB) && (streamPtr.dictSize < streamPtr.currentOffset))
673 			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
674 		else
675 			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
676 		streamPtr.dictSize += cast(uint)inputSize;
677 		streamPtr.currentOffset += cast(uint)inputSize;
678 		return result;
679 	}
680 
681 	/* external dictionary mode */
682 	{
683 		int result;
684 		if ((streamPtr.dictSize < 64*KB) && (streamPtr.dictSize < streamPtr.currentOffset))
685 			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
686 		else
687 			result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
688 		streamPtr.dictionary = cast(const(ubyte)*)source;
689 		streamPtr.dictSize = cast(uint)inputSize;
690 		streamPtr.currentOffset += cast(uint)inputSize;
691 		return result;
692 	}
693 }
694 /// -
695 int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const(char)* source, char* dest, int inputSize)
696 {
697 	return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
698 }
699 /// -
700 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const(char)* source, char* dest, int inputSize, int maxOutputSize)
701 {
702 	return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
703 }
704 
705 
/** Hidden debug function, to force separate (external) dictionary mode even
 *  when prefix mode would apply; used to exercise the usingExtDict code path. */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const(char)* source, char* dest, int inputSize)
{
	LZ4_stream_t_internal* streamPtr = cast(LZ4_stream_t_internal*)LZ4_dict;
	int result;
	const(ubyte)* dictEnd = streamPtr.dictionary + streamPtr.dictSize;

	const(ubyte)* smallest = dictEnd;
	if (smallest > cast(const(ubyte)*) source) smallest = cast(const(ubyte)*) source;
	LZ4_renormDictT(cast(LZ4_stream_t_internal*)LZ4_dict, smallest);

	result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);

	// The chunk just compressed becomes the dictionary for the next one.
	streamPtr.dictionary = cast(const(ubyte)*)source;
	streamPtr.dictSize = cast(uint)inputSize;
	streamPtr.currentOffset += cast(uint)inputSize;

	return result;
}
725 
726 /// -
727 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
728 {
729 	LZ4_stream_t_internal* dict = cast(LZ4_stream_t_internal*) LZ4_dict;
730 	const(ubyte)* previousDictEnd = dict.dictionary + dict.dictSize;
731 
732 	if (cast(uint)dictSize > 64*KB) dictSize = 64*KB;   /* useless to define a dictionary > 64*KB */
733 	if (cast(uint)dictSize > dict.dictSize) dictSize = dict.dictSize;
734 
735 	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
736 
737 	dict.dictionary = cast(const(ubyte)*)safeBuffer;
738 	dict.dictSize = cast(uint)dictSize;
739 
740 	return dictSize;
741 }
742 
743 
744 
745 /* ***************************
746    Decompression functions
747 ****************************/
748 /**
749  * This generic decompression function cover all use cases.
750  * It shall be instantiated several times, using different sets of directives
751  * Note that it is essential this generic function is really inlined,
752  * in order to remove useless branches during compilation optimization.
753  */
754 int LZ4_decompress_generic(
755 				 const(char)* source,
756 				 char* dest,
757 				 int inputSize,
758 				 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
759 
760 				 int endOnInput,         /* endOnOutputSize, endOnInputSize */
761 				 int partialDecoding,    /* full, partial */
762 				 int targetOutputSize,   /* only used if partialDecoding==partial */
763 				 int dict,               /* noDict, withPrefix64k, usingExtDict */
764 				 const(ubyte)* lowPrefix,  /* == dest if dict == noDict */
765 				 const(ubyte)* dictStart,  /* only if dict==usingExtDict */
766 				 const size_t dictSize         /* note : = 0 if noDict */
767 				 )
768 {
769 	/* Local Variables */
770 	const(ubyte)*  ip = cast(const(ubyte)*) source;
771 	const(ubyte)* iend = ip + inputSize;
772 
773 	ubyte* op = cast(ubyte*) dest;
774 	ubyte* oend = op + outputSize;
775 	ubyte* cpy;
776 	ubyte* oexit = op + targetOutputSize;
777 	const(ubyte)* lowLimit = lowPrefix - dictSize;
778 
779 	const(ubyte)* dictEnd = cast(const(ubyte)*)dictStart + dictSize;
780 	const size_t[8] dec32table = [4, 1, 2, 1, 4, 4, 4, 4];
781 	const size_t[8] dec64table = [0, 0, 0, cast(size_t)-1, 0, 1, 2, 3];
782 
783 	const int safeDecode = (endOnInput==endOnInputSize);
784 	const int checkOffset = ((safeDecode) && (dictSize < cast(int)(64*KB)));
785 
786 
787 	/* Special cases */
788 	if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                         /* targetOutputSize too high => decode everything */
789 	if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
790 	if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
791 
792 
793 	/* Main Loop */
794 	while (true)
795 	{
796 		uint token;
797 		size_t length;
798 		const(ubyte)* match;
799 
800 		/* get literal length */
801 		token = *ip++;
802 		if ((length=(token>>ML_BITS)) == RUN_MASK)
803 		{
804 			uint s;
805 			do
806 			{
807 				s = *ip++;
808 				length += s;
809 			}
810 			while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
811 			if ((safeDecode) && unlikely(cast(size_t)(op+length)<cast(size_t)(op)))
812             {
813                 goto _output_error;   /* overflow detection */
814             }
815 			if ((safeDecode) && unlikely(cast(size_t)(ip+length)<cast(size_t)(ip))) 
816             {
817                 goto _output_error;   /* overflow detection */
818             }
819 		}
820 
821 		/* copy literals */
822 		cpy = op+length;
823 		if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
824 			|| ((!endOnInput) && (cpy>oend-COPYLENGTH)))
825 		{
826 			if (partialDecoding)
827 			{
828 				if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
829 				if ((endOnInput) && (ip+length > iend)) 
830                 {
831                     goto _output_error;   /* Error : read attempt beyond end of input buffer */
832                 }
833 			}
834 			else
835 			{
836 				if ((!endOnInput) && (cpy != oend))
837                 {
838                     goto _output_error;       /* Error : block decoding must stop exactly there */
839                 }
840 				if ((endOnInput) && ((ip+length != iend) || (cpy > oend)))
841                 {
842                     goto _output_error;   /* Error : input must be consumed */
843                 }
844 			}
845 			memcpy(op, ip, length);
846 			ip += length;
847 			op += length;
848 			break;     /* Necessarily EOF, due to parsing restrictions */
849 		}
850 		LZ4_wildCopy(op, ip, cpy);
851 		ip += length; op = cpy;
852 
853 		/* get offset */
854 		match = cpy - LZ4_readLE16(ip); ip+=2;
855 		if ((checkOffset) && (unlikely(match < lowLimit)))
856         {
857             goto _output_error;   /* Error : offset outside destination buffer */
858         }
859 
860 		/* get matchlength */
861 		length = token & ML_MASK;
862 		if (length == ML_MASK)
863 		{
864 			uint s;
865 			do
866 			{
867 				if ((endOnInput) && (ip > iend-LASTLITERALS))
868                 {
869                     goto _output_error;
870                 }
871 				s = *ip++;
872 				length += s;
873 			} while (s==255);
874 			if ((safeDecode) && unlikely(cast(size_t)(op+length)<cast(size_t)op)) goto _output_error;   /* overflow detection */
875 		}
876 		length += MINMATCH;
877 
878 		/* check external dictionary */
879 		if ((dict==usingExtDict) && (match < lowPrefix))
880 		{
881 			if (unlikely(op+length > oend-LASTLITERALS))
882             {
883                 goto _output_error;   /* doesn't respect parsing restriction */
884             }
885 
886 			if (length <= cast(size_t)(lowPrefix-match))
887 			{
888 				/* match can be copied as a single segment from external dictionary */
889 				match = dictEnd - (lowPrefix-match);
890 				memcpy(op, match, length);
891 				op += length;
892 			}
893 			else
894 			{
895 				/* match encompass external dictionary and current segment */
896 				size_t copySize = cast(size_t)(lowPrefix-match);
897 				memcpy(op, dictEnd - copySize, copySize);
898 				op += copySize;
899 				copySize = length - copySize;
900 				if (copySize > cast(size_t)(op-lowPrefix))   /* overlap within current segment */
901 				{
902 					ubyte* endOfMatch = op + copySize;
903 					const(ubyte)* copyFrom = lowPrefix;
904 					while (op < endOfMatch) *op++ = *copyFrom++;
905 				}
906 				else
907 				{
908 					memcpy(op, lowPrefix, copySize);
909 					op += copySize;
910 				}
911 			}
912 			continue;
913 		}
914 
915 		/* copy repeated sequence */
916 		cpy = op + length;
917 		if (unlikely((op-match)<8))
918 		{
919 			const size_t dec64 = dec64table[op-match];
920 			op[0] = match[0];
921 			op[1] = match[1];
922 			op[2] = match[2];
923 			op[3] = match[3];
924 			match += dec32table[op-match];
925 			LZ4_copy4(op+4, match);
926 			op += 8; match -= dec64;
927 		} else { LZ4_copy8(op, match); op+=8; match+=8; }
928 
929 		if (unlikely(cpy>oend-12))
930 		{
931 			if (cpy > oend-LASTLITERALS)
932             {
933                 goto _output_error;    /* Error : last LASTLITERALS bytes must be literals */
934             }
935 			if (op < oend-8)
936 			{
937 				LZ4_wildCopy(op, match, oend-8);
938 				match += (oend-8) - op;
939 				op = oend-8;
940 			}
941 			while (op<cpy) *op++ = *match++;
942 		}
943 		else
944 			LZ4_wildCopy(op, match, cpy);
945 		op=cpy;   /* correction */
946 	}
947 
948 	/* end of decoding */
949 	if (endOnInput)
950 	   return cast(int) ((cast(char*)op)-dest);     /* Nb of output bytes decoded */
951 	else
952 	   return cast(int) ((cast(char*)ip)-source);   /* Nb of input bytes read */
953 
954 	/* Overflow error detected */
955 _output_error:
956 	return cast(int) (-((cast(char*)ip)-source))-1;
957 }
958 
959 /// -
int LZ4_decompress_safe(const(char)* source, char* dest, int compressedSize, int maxDecompressedSize)
{
	// Safe variant: bounds-checks both input (compressedSize) and output
	// (maxDecompressedSize); decodes the full block with no dictionary.
	// Returns the number of bytes written to dest, or a negative value on error.
	ubyte* lowPrefix = cast(ubyte*) dest;
	return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
	                              endOnInputSize, full, 0, noDict, lowPrefix, null, 0);
}
964 /// -
int LZ4_decompress_safe_partial(const(char)* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
	// Like LZ4_decompress_safe, but may stop once targetOutputSize bytes have
	// been produced ('partial' mode). Returns bytes written, or < 0 on error.
	ubyte* lowPrefix = cast(ubyte*) dest;
	return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
	                              endOnInputSize, partial, targetOutputSize, noDict, lowPrefix, null, 0);
}
969 /// -
int LZ4_decompress_fast(const(char)* source, char* dest, int originalSize)
{
	// Fast variant: trusts the caller-supplied originalSize and performs no
	// input-size check; assumes up to 64 KB of already-decoded data may sit
	// directly before dest (withPrefix64k). Returns bytes read from source,
	// or a negative value on error.
	ubyte* lowPrefix = cast(ubyte*)(dest - 64*KB);
	return LZ4_decompress_generic(source, dest, 0, originalSize,
	                              endOnOutputSize, full, 0, withPrefix64k, lowPrefix, null, 64*KB);
}
974 /* streaming decompression functions */
/// Internal state for streaming decompression. Tracks the run of bytes already
/// decoded into the current buffer (the "prefix") plus, optionally, older data
/// kept in a separate buffer (the "external dictionary").
private struct LZ4_streamDecode_t_internal
{
	ubyte* externalDict;   // start of dictionary data held outside the current buffer (null if none)
	size_t extDictSize;    // number of bytes available at externalDict
	ubyte* prefixEnd;      // one past the last byte decoded into the current buffer
	size_t prefixSize;     // length of the contiguous decoded run ending at prefixEnd
}
982 
983 /**
984  * If you prefer dynamic allocation methods,
985  * LZ4_createStreamDecode()
986  * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
987  */
LZ4_streamDecode_t* LZ4_createStreamDecode()
{
	// Allocate the opaque streaming-decode state as LZ4_STREAMDECODESIZE_U64
	// ulong-sized units. May return null if ALLOCATOR fails.
	return cast(LZ4_streamDecode_t*) ALLOCATOR(ulong.sizeof, LZ4_STREAMDECODESIZE_U64);
}
993 ///ditto
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
	// Release a state obtained from LZ4_createStreamDecode(). Always reports success.
	FREEMEM(LZ4_stream);
	return 0;
}
999 
1000 /**
1001  * LZ4_setStreamDecode
1002  * Use this function to instruct where to find the dictionary
1003  * This function is not necessary if previous data is still available where it was decoded.
1004  * Loading a size of 0 is allowed (same effect as no dictionary).
1005  * Return : 1 if OK, 0 if error
1006  */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* dictionary, int dictSize)
{
	auto state = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;

	// The supplied dictionary becomes the decoded prefix; any previously
	// registered external dictionary is forgotten.
	state.prefixSize   = cast(size_t) dictSize;
	state.prefixEnd    = cast(ubyte*) dictionary + dictSize;
	state.externalDict = null;
	state.extDictSize  = 0;
	return 1;   // always succeeds, per the documented contract
}
1016 
1017 /**
1018 *_continue() :
1019 	These decoding functions allow decompression of multiple blocks in "streaming" mode.
1020 	Previously decoded blocks must still be available at the memory position where they were decoded.
1021 	If it's not possible, save the relevant part of decoded data into a safe buffer,
1022 	and indicate where it stands using LZ4_setStreamDecode()
1023 */
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* source, char* dest, int compressedSize, int maxOutputSize)
{
	auto state = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;
	int decoded;

	if (cast(ubyte*)dest != state.prefixEnd)
	{
		// Output moved to a new location: the previous prefix now acts as an
		// external dictionary, and a fresh prefix starts at dest.
		state.extDictSize  = state.prefixSize;
		state.externalDict = state.prefixEnd - state.extDictSize;
		decoded = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
		                                 endOnInputSize, full, 0,
		                                 usingExtDict, cast(ubyte*)dest, state.externalDict, state.extDictSize);
		if (decoded <= 0) return decoded;
		state.prefixSize = decoded;
		state.prefixEnd  = cast(ubyte*)dest + decoded;
	}
	else
	{
		// Output is contiguous with the previous block: simply extend the prefix.
		decoded = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
		                                 endOnInputSize, full, 0,
		                                 usingExtDict, state.prefixEnd - state.prefixSize, state.externalDict, state.extDictSize);
		if (decoded <= 0) return decoded;
		state.prefixSize += decoded;
		state.prefixEnd  += decoded;
	}

	return decoded;
}
1052 ///ditto
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const(char)* source, char* dest, int originalSize)
{
	auto state = cast(LZ4_streamDecode_t_internal*) LZ4_streamDecode;
	int decoded;

	if (cast(ubyte*)dest != state.prefixEnd)
	{
		// Output moved: previous decoded data (which ended at dest in the old
		// buffer layout) becomes the external dictionary for this block.
		state.extDictSize  = state.prefixSize;
		state.externalDict = cast(ubyte*)dest - state.extDictSize;
		decoded = LZ4_decompress_generic(source, dest, 0, originalSize,
		                                 endOnOutputSize, full, 0,
		                                 usingExtDict, cast(ubyte*)dest, state.externalDict, state.extDictSize);
		if (decoded <= 0) return decoded;
		state.prefixSize = originalSize;
		state.prefixEnd  = cast(ubyte*)dest + originalSize;
	}
	else
	{
		// Output is contiguous with the previous block: extend the prefix by
		// the caller-declared originalSize (fast mode trusts this value).
		decoded = LZ4_decompress_generic(source, dest, 0, originalSize,
		                                 endOnOutputSize, full, 0,
		                                 usingExtDict, state.prefixEnd - state.prefixSize, state.externalDict, state.extDictSize);
		if (decoded <= 0) return decoded;
		state.prefixSize += originalSize;
		state.prefixEnd  += originalSize;
	}

	return decoded;
}
1081 
1082 
1083 /**
1084 Advanced decoding functions :
1085 *_usingDict() :
1086 	These decoding functions work the same as "_continue" ones,
1087 	the dictionary must be explicitly provided within parameters
1088 */
1089 
int LZ4_decompress_usingDict_generic(const(char)* source, char* dest, int compressedSize, int maxOutputSize, int safe, const(char)* dictStart, int dictSize)
{
	// Dispatch to the fastest decoding mode the dictionary placement allows.
	ubyte* destPtr = cast(ubyte*) dest;

	if (dictSize == 0)
		return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, destPtr, null, 0);

	if (dictStart + dictSize == dest)
	{
		// Dictionary sits immediately before the output: treat it as a prefix.
		if (dictSize >= cast(int)(64*KB - 1))
			return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, destPtr - 64*KB, null, 0);
		return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, destPtr - dictSize, null, 0);
	}

	// Dictionary lives in a separate buffer: use the external-dictionary path.
	return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, destPtr, cast(ubyte*)dictStart, dictSize);
}
1102 ///ditto
int LZ4_decompress_safe_usingDict(const(char)* source, char* dest, int compressedSize, int maxOutputSize, const(char)* dictStart, int dictSize)
{
	// Bounds-checked decode with an explicit dictionary (safe == 1).
	return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize,
	                                        1, dictStart, dictSize);
}
1107 ///ditto
int LZ4_decompress_fast_usingDict(const(char)* source, char* dest, int originalSize, const(char)* dictStart, int dictSize)
{
	// Trusting decode with an explicit dictionary (safe == 0, size given as output size).
	return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize,
	                                        0, dictStart, dictSize);
}
1112 
/** Debug helper: always decodes through the external-dictionary path,
 *  even when the dictionary placement would permit a faster mode. */
int LZ4_decompress_safe_forceExtDict(const(char)* source, char* dest, int compressedSize, int maxOutputSize, const(char)* dictStart, int dictSize)
{
	return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
	                              endOnInputSize, full, 0,
	                              usingExtDict, cast(ubyte*)dest, cast(ubyte*)dictStart, dictSize);
}