deflate.c
1/* deflate.c -- compress data using the deflation algorithm
2 * Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h
4 */
5
6/*
7 * ALGORITHM
8 *
9 * The "deflation" process depends on being able to identify portions
10 * of the input text which are identical to earlier input (within a
11 * sliding window trailing behind the input currently being processed).
12 *
13 * The most straightforward technique turns out to be the fastest for
14 * most input files: try all possible matches and select the longest.
15 * The key feature of this algorithm is that insertions into the string
16 * dictionary are very simple and thus fast, and deletions are avoided
17 * completely. Insertions are performed at each input character, whereas
18 * string matches are performed only when the previous match ends. So it
19 * is preferable to spend more time in matches to allow very fast string
20 * insertions and avoid deletions. The matching algorithm for small
21 * strings is inspired from that of Rabin & Karp. A brute force approach
22 * is used to find longer strings when a small match has been found.
23 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24 * (by Leonid Broukhis).
25 * A previous version of this file used a more sophisticated algorithm
26 * (by Fiala and Greene) which is guaranteed to run in linear amortized
27 * time, but has a larger average cost, uses more memory and is patented.
28 * However the F&G algorithm may be faster for some highly redundant
29 * files if the parameter max_chain_length (described below) is too large.
30 *
31 * ACKNOWLEDGEMENTS
32 *
33 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34 * I found it in 'freeze' written by Leonid Broukhis.
35 * Thanks to many people for bug reports and testing.
36 *
37 * REFERENCES
38 *
39 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
40 * Available in http://tools.ietf.org/html/rfc1951
41 *
42 * A description of the Rabin and Karp algorithm is given in the book
43 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
44 *
45 * Fiala,E.R., and Greene,D.H.
46 * Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989) 490-505
47 *
48 */
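/* Illustrative sketch (added; not part of zlib): the matching scheme described
 * above, reduced to its core. A hypothetical 3-byte hash indexes ex_head[],
 * and ex_prev[] chains older positions with the same hash; insertion is just
 * two stores and deletion never happens, which is the property the ALGORITHM
 * note relies on. Window sliding, lazy evaluation and all tuning are omitted.
 *
 *   #define EX_HSIZE 32768U
 *   #define EX_WSIZE 32768U
 *   static unsigned ex_head[EX_HSIZE];
 *   static unsigned ex_prev[EX_WSIZE];
 *
 *   static unsigned ex_insert(const unsigned char *buf, unsigned pos) {
 *       unsigned h = ((buf[pos] << 10) ^ (buf[pos+1] << 5) ^ buf[pos+2])
 *                    % EX_HSIZE;
 *       unsigned candidate = ex_head[h];     // most recent string, same hash
 *       ex_prev[pos % EX_WSIZE] = candidate; // chain it for later lookups
 *       ex_head[h] = pos;                    // "insertion" is two stores
 *       return candidate;                    // caller extends it by brute force
 *   }
 */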
49
50
51#include "deflate.h"
52
53const char deflate_copyright[] =
54 " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
55/*
56 If you use the zlib library in a product, an acknowledgment is welcome
57 in the documentation of your product. If for some reason you cannot
58 include such an acknowledgment, I would appreciate that you keep this
59 copyright string in the executable of your product.
60 */
61
62/* ===========================================================================
63 * Function prototypes.
64 */
65typedef enum {
66 need_more, /* block not completed, need more input or more output */
67 block_done, /* block flush performed */
68 finish_started, /* finish started, need only more output at next deflate */
69 finish_done /* finish done, accept no more input or output */
70} block_state;
71
72typedef block_state (*compress_func) OF((deflate_state *s, int flush));
73/* Compression function. Returns the block state after the call. */
74
75local int deflateStateCheck OF((z_streamp strm));
76local void slide_hash OF((deflate_state *s));
77local void fill_window OF((deflate_state *s));
78local block_state deflate_stored OF((deflate_state *s, int flush));
79local block_state deflate_fast OF((deflate_state *s, int flush));
80#ifndef FASTEST
81local block_state deflate_slow OF((deflate_state *s, int flush));
82#endif
83local block_state deflate_rle OF((deflate_state *s, int flush));
84local block_state deflate_huff OF((deflate_state *s, int flush));
85local void lm_init OF((deflate_state *s));
86local void putShortMSB OF((deflate_state *s, uInt b));
87local void flush_pending OF((z_streamp strm));
88local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
89#ifdef ASMV
90# pragma message("Assembler code may have bugs -- use at your own risk")
91 void match_init OF((void)); /* asm code initialization */
92 uInt longest_match OF((deflate_state *s, IPos cur_match));
93#else
94local uInt longest_match OF((deflate_state *s, IPos cur_match));
95#endif
96
97#ifdef ZLIB_DEBUG
98local void check_match OF((deflate_state *s, IPos start, IPos match,
99 int length));
100#endif
101
102/* ===========================================================================
103 * Local data
104 */
105
106#define NIL 0
107/* Tail of hash chains */
108
109#ifndef TOO_FAR
110# define TOO_FAR 4096
111#endif
112/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
113
114/* Values for max_lazy_match, good_match and max_chain_length, depending on
115 * the desired pack level (0..9). The values given below have been tuned to
116 * exclude worst case performance for pathological files. Better values may be
117 * found for specific files.
118 */
119typedef struct config_s {
120 ush good_length; /* reduce lazy search above this match length */
121 ush max_lazy; /* do not perform lazy search above this match length */
122 ush nice_length; /* quit search above this match length */
123 ush max_chain;
124 compress_func func;
125} config;
126
127#ifdef FASTEST
128local const config configuration_table[2] = {
129/* good lazy nice chain */
130/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
131/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
132#else
133local const config configuration_table[10] = {
134/* good lazy nice chain */
135/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
136/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
137/* 2 */ {4, 5, 16, 8, deflate_fast},
138/* 3 */ {4, 6, 32, 32, deflate_fast},
139
140/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
141/* 5 */ {8, 16, 32, 32, deflate_slow},
142/* 6 */ {8, 16, 128, 128, deflate_slow},
143/* 7 */ {8, 32, 128, 256, deflate_slow},
144/* 8 */ {32, 128, 258, 1024, deflate_slow},
145/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
146#endif
147
148/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
149 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
150 * meaning.
151 */
152
153/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
154#define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))
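/* Worked example (added for clarity): with the flush values from zlib.h
 * (Z_NO_FLUSH=0, Z_PARTIAL_FLUSH=1, Z_SYNC_FLUSH=2, Z_FULL_FLUSH=3, Z_FINISH=4,
 * Z_BLOCK=5), RANK() yields 0, 2, 4, 6, 8 and 1 respectively, so Z_BLOCK ranks
 * between Z_NO_FLUSH and Z_PARTIAL_FLUSH as intended.
 */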
155
156/* ===========================================================================
157 * Update a hash value with the given input byte
158 * IN assertion: all calls to UPDATE_HASH are made with consecutive input
159 * characters, so that a running hash key can be computed from the previous
160 * key instead of complete recalculation each time.
161 */
162#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
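/* Worked example (added for clarity): with the default memLevel of 8,
 * hash_bits = 8+7 = 15, hash_shift = (15+MIN_MATCH-1)/MIN_MATCH = 5 and
 * hash_mask = 0x7fff. After MIN_MATCH (= 3) consecutive calls the oldest byte
 * has been shifted left by 3*5 = 15 bits and is removed by the mask, so the
 * running key depends on exactly the last MIN_MATCH input bytes.
 */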
163
164
165/* ===========================================================================
166 * Insert string str in the dictionary and set match_head to the previous head
167 * of the hash chain (the most recent string with same hash key). Return
168 * the previous length of the hash chain.
169 * If this file is compiled with -DFASTEST, the compression level is forced
170 * to 1, and no hash chains are maintained.
171 * IN assertion: all calls to INSERT_STRING are made with consecutive input
172 * characters and the first MIN_MATCH bytes of str are valid (except for
173 * the last MIN_MATCH-1 bytes of the input file).
174 */
175#ifdef FASTEST
176#define INSERT_STRING(s, str, match_head) \
177 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
178 match_head = s->head[s->ins_h], \
179 s->head[s->ins_h] = (Pos)(str))
180#else
181#define INSERT_STRING(s, str, match_head) \
182 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
183 match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
184 s->head[s->ins_h] = (Pos)(str))
185#endif
186
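/* Illustrative sketch (added; not part of zlib): how the chain produced by
 * INSERT_STRING is consumed. longest_match() below starts at the returned
 * match_head and walks s->prev[] toward older positions, roughly:
 *
 *   IPos cur = match_head;
 *   while (cur > limit && --chain_length != 0) {
 *       // compare s->window + cur against s->window + s->strstart here
 *       cur = s->prev[cur & s->w_mask];   // next older string with same hash
 *   }
 */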
187/* ===========================================================================
188 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
189 * prev[] will be initialized on the fly.
190 */
191#define CLEAR_HASH(s) \
192 do { \
193 s->head[s->hash_size-1] = NIL; \
194 zmemzero((Bytef *)s->head, \
195 (unsigned)(s->hash_size-1)*sizeof(*s->head)); \
196 } while (0)
197
198/* ===========================================================================
199 * Slide the hash table when sliding the window down (could be avoided with 32
200 * bit values at the expense of memory usage). We slide even when level == 0 to
201 * keep the hash table consistent if we switch back to level > 0 later.
202 */
203local void slide_hash(s)
204 deflate_state *s;
205{
206 unsigned n, m;
207 Posf *p;
208 uInt wsize = s->w_size;
209
210 n = s->hash_size;
211 p = &s->head[n];
212 do {
213 m = *--p;
214 *p = (Pos)(m >= wsize ? m - wsize : NIL);
215 } while (--n);
216 n = wsize;
217#ifndef FASTEST
218 p = &s->prev[n];
219 do {
220 m = *--p;
221 *p = (Pos)(m >= wsize ? m - wsize : NIL);
222 /* If n is not on any hash chain, prev[n] is garbage but
223 * its value will never be used.
224 */
225 } while (--n);
226#endif
227}
228
229/* ========================================================================= */
230int ZEXPORT deflateInit_(strm, level, version, stream_size)
231 z_streamp strm;
232 int level;
233 const char *version;
234 int stream_size;
235{
236 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
237 Z_DEFAULT_STRATEGY, version, stream_size);
238 /* To do: ignore strm->next_in if we use it as window */
239}
240
241/* ========================================================================= */
242int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
243 version, stream_size)
244 z_streamp strm;
245 int level;
246 int method;
247 int windowBits;
248 int memLevel;
249 int strategy;
250 const char *version;
251 int stream_size;
252{
253 deflate_state *s;
254 int wrap = 1;
255 static const char my_version[] = ZLIB_VERSION;
256
257 if (version == Z_NULL || version[0] != my_version[0] ||
258 stream_size != sizeof(z_stream)) {
259 return Z_VERSION_ERROR;
260 }
261 if (strm == Z_NULL) return Z_STREAM_ERROR;
262
263 strm->msg = Z_NULL;
264 if (strm->zalloc == (alloc_func)0) {
265#ifdef Z_SOLO
266 return Z_STREAM_ERROR;
267#else
268 strm->zalloc = zcalloc;
269 strm->opaque = (voidpf)0;
270#endif
271 }
272 if (strm->zfree == (free_func)0)
273#ifdef Z_SOLO
274 return Z_STREAM_ERROR;
275#else
276 strm->zfree = zcfree;
277#endif
278
279#ifdef FASTEST
280 if (level != 0) level = 1;
281#else
282 if (level == Z_DEFAULT_COMPRESSION) level = 6;
283#endif
284
285 if (windowBits < 0) { /* suppress zlib wrapper */
286 wrap = 0;
287 windowBits = -windowBits;
288 }
289#ifdef GZIP
290 else if (windowBits > 15) {
291 wrap = 2; /* write gzip wrapper instead */
292 windowBits -= 16;
293 }
294#endif
295 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
296 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
297 strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) {
298 return Z_STREAM_ERROR;
299 }
300 if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
301 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
302 if (s == Z_NULL) return Z_MEM_ERROR;
303 strm->state = (struct internal_state FAR *)s;
304 s->strm = strm;
305 s->status = INIT_STATE; /* to pass state test in deflateReset() */
306
307 s->wrap = wrap;
308 s->gzhead = Z_NULL;
309 s->w_bits = (uInt)windowBits;
310 s->w_size = 1 << s->w_bits;
311 s->w_mask = s->w_size - 1;
312
313 s->hash_bits = (uInt)memLevel + 7;
314 s->hash_size = 1 << s->hash_bits;
315 s->hash_mask = s->hash_size - 1;
316 s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
317
318 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
319 s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
320 /* Avoid use of uninitialized value, see:
321 * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360
322 */
323 zmemzero(s->prev, s->w_size * sizeof(Pos));
324 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
325
326 s->high_water = 0; /* nothing written to s->window yet */
327
328 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
329
330 /* We overlay pending_buf and sym_buf. This works since the average size
331 * for length/distance pairs over any compressed block is assured to be 31
332 * bits or less.
333 *
334 * Analysis: The longest fixed codes are a length code of 8 bits plus 5
335 * extra bits, for lengths 131 to 257. The longest fixed distance codes are
336 * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
337 * possible fixed-codes length/distance pair is then 31 bits total.
338 *
339 * sym_buf starts one-fourth of the way into pending_buf. So there are
340 * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
341 * in sym_buf is three bytes -- two for the distance and one for the
342 * literal/length. As each symbol is consumed, the pointer to the next
343 * sym_buf value to read moves forward three bytes. From that symbol, up to
344 * 31 bits are written to pending_buf. The closest the written pending_buf
345 * bits gets to the next sym_buf symbol to read is just before the last
346 * code is written. At that time, 31*(n-2) bits have been written, just
347 * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
348 * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
349 * symbols are written.) The closest the writing gets to what is unread is
350 * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
351 * can range from 128 to 32768.
352 *
353 * Therefore, at a minimum, there are 142 bits of space between what is
354 * written and what is read in the overlain buffers, so the symbols cannot
355 * be overwritten by the compressed data. That space is actually 139 bits,
356 * due to the three-bit fixed-code block header.
357 *
358 * That covers the case where either Z_FIXED is specified, forcing fixed
359 * codes, or when the use of fixed codes is chosen, because that choice
360 * results in a smaller compressed block than dynamic codes. That latter
361 * condition then assures that the above analysis also covers all dynamic
362 * blocks. A dynamic-code block will only be chosen to be emitted if it has
363 * fewer bits than a fixed-code block would for the same set of symbols.
364 * Therefore its average symbol length is assured to be less than 31. So
365 * the compressed data for a dynamic block also cannot overwrite the
366 * symbols from which it is being constructed.
367 */
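 /* Worked example (added for clarity): with the default memLevel of 8,
  * lit_bufsize = 1 << (8+6) = 16384, pending_buf is 4*16384 = 65536 bytes and
  * sym_buf starts 16384 bytes in. The symbol area thus holds 16383 three-byte
  * distance/length/literal entries, matching the sym_end limit set below.
  */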
368
369 s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
370 s->pending_buf_size = (ulg)s->lit_bufsize * 4;
371
372 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
373 s->pending_buf == Z_NULL) {
374 s->status = FINISH_STATE;
375 strm->msg = ERR_MSG(Z_MEM_ERROR);
376 deflateEnd (strm);
377 return Z_MEM_ERROR;
378 }
379 s->sym_buf = s->pending_buf + s->lit_bufsize;
380 s->sym_end = (s->lit_bufsize - 1) * 3;
381 /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
382 * on 16 bit machines and because stored blocks are restricted to
383 * 64K-1 bytes.
384 */
385
386 s->level = level;
387 s->strategy = strategy;
388 s->method = (Byte)method;
389
390 return deflateReset(strm);
391}
392
393/* =========================================================================
394 * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
395 */
396local int deflateStateCheck (strm)
397 z_streamp strm;
398{
399 deflate_state *s;
400 if (strm == Z_NULL ||
401 strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
402 return 1;
403 s = strm->state;
404 if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
405#ifdef GZIP
406 s->status != GZIP_STATE &&
407#endif
408 s->status != EXTRA_STATE &&
409 s->status != NAME_STATE &&
410 s->status != COMMENT_STATE &&
411 s->status != HCRC_STATE &&
412 s->status != BUSY_STATE &&
413 s->status != FINISH_STATE))
414 return 1;
415 return 0;
416}
417
418/* ========================================================================= */
419int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
420 z_streamp strm;
421 const Bytef *dictionary;
422 uInt dictLength;
423{
424 deflate_state *s;
425 uInt str, n;
426 int wrap;
427 unsigned avail;
428 z_const unsigned char *next;
429
430 if (deflateStateCheck(strm) || dictionary == Z_NULL)
431 return Z_STREAM_ERROR;
432 s = strm->state;
433 wrap = s->wrap;
434 if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
435 return Z_STREAM_ERROR;
436
437 /* when using zlib wrappers, compute Adler-32 for provided dictionary */
438 if (wrap == 1)
439 strm->adler = adler32(strm->adler, dictionary, dictLength);
440 s->wrap = 0; /* avoid computing Adler-32 in read_buf */
441
442 /* if dictionary would fill window, just replace the history */
443 if (dictLength >= s->w_size) {
444 if (wrap == 0) { /* already empty otherwise */
445 CLEAR_HASH(s);
446 s->strstart = 0;
447 s->block_start = 0L;
448 s->insert = 0;
449 }
450 dictionary += dictLength - s->w_size; /* use the tail */
451 dictLength = s->w_size;
452 }
453
454 /* insert dictionary into window and hash */
455 avail = strm->avail_in;
456 next = strm->next_in;
457 strm->avail_in = dictLength;
458 strm->next_in = (z_const Bytef *)dictionary;
459 fill_window(s);
460 while (s->lookahead >= MIN_MATCH) {
461 str = s->strstart;
462 n = s->lookahead - (MIN_MATCH-1);
463 do {
464 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
465#ifndef FASTEST
466 s->prev[str & s->w_mask] = s->head[s->ins_h];
467#endif
468 s->head[s->ins_h] = (Pos)str;
469 str++;
470 } while (--n);
471 s->strstart = str;
472 s->lookahead = MIN_MATCH-1;
473 fill_window(s);
474 }
475 s->strstart += s->lookahead;
476 s->block_start = (long)s->strstart;
477 s->insert = s->lookahead;
478 s->lookahead = 0;
479 s->match_length = s->prev_length = MIN_MATCH-1;
480 s->match_available = 0;
481 strm->next_in = next;
482 strm->avail_in = avail;
483 s->wrap = wrap;
484 return Z_OK;
485}
486
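/* Usage sketch (added; not part of zlib): a preset dictionary is installed on
 * a freshly initialized stream, before any data is compressed; the decoder
 * must later supply the same bytes via inflateSetDictionary(). dict/dictLen
 * are placeholders for the application's buffer.
 *
 *   z_stream strm;
 *   memset(&strm, 0, sizeof(strm));
 *   if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) return -1;
 *   if (deflateSetDictionary(&strm, dict, dictLen) != Z_OK) return -1;
 *   ... compress with deflate() as usual, then deflateEnd(&strm); ...
 */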
487/* ========================================================================= */
488int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
489 z_streamp strm;
490 Bytef *dictionary;
491 uInt *dictLength;
492{
493 deflate_state *s;
494 uInt len;
495
496 if (deflateStateCheck(strm))
497 return Z_STREAM_ERROR;
498 s = strm->state;
499 len = s->strstart + s->lookahead;
500 if (len > s->w_size)
501 len = s->w_size;
502 if (dictionary != Z_NULL && len)
503 zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
504 if (dictLength != Z_NULL)
505 *dictLength = len;
506 return Z_OK;
507}
508
509/* ========================================================================= */
510int ZEXPORT deflateResetKeep (strm)
511 z_streamp strm;
512{
513 deflate_state *s;
514
515 if (deflateStateCheck(strm)) {
516 return Z_STREAM_ERROR;
517 }
518
519 strm->total_in = strm->total_out = 0;
520 strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
521 strm->data_type = Z_UNKNOWN;
522
523 s = (deflate_state *)strm->state;
524 s->pending = 0;
525 s->pending_out = s->pending_buf;
526
527 if (s->wrap < 0) {
528 s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
529 }
530 s->status =
531#ifdef GZIP
532 s->wrap == 2 ? GZIP_STATE :
533#endif
534 INIT_STATE;
535 strm->adler =
536#ifdef GZIP
537 s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
538#endif
539 adler32(0L, Z_NULL, 0);
540 s->last_flush = -2;
541
542 _tr_init(s);
543
544 return Z_OK;
545}
546
547/* ========================================================================= */
548int ZEXPORT deflateReset (strm)
549 z_streamp strm;
550{
551 int ret;
552
553 ret = deflateResetKeep(strm);
554 if (ret == Z_OK)
555 lm_init(strm->state);
556 return ret;
557}
558
559/* ========================================================================= */
560int ZEXPORT deflateSetHeader (strm, head)
561 z_streamp strm;
562 gz_headerp head;
563{
564 if (deflateStateCheck(strm) || strm->state->wrap != 2)
565 return Z_STREAM_ERROR;
566 strm->state->gzhead = head;
567 return Z_OK;
568}
569
570/* ========================================================================= */
571int ZEXPORT deflatePending (strm, pending, bits)
572 unsigned *pending;
573 int *bits;
574 z_streamp strm;
575{
576 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
577 if (pending != Z_NULL)
578 *pending = strm->state->pending;
579 if (bits != Z_NULL)
580 *bits = strm->state->bi_valid;
581 return Z_OK;
582}
583
584/* ========================================================================= */
585int ZEXPORT deflatePrime (strm, bits, value)
586 z_streamp strm;
587 int bits;
588 int value;
589{
590 deflate_state *s;
591 int put;
592
593 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
594 s = strm->state;
595 if (bits < 0 || bits > 16 ||
596 s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
597 return Z_BUF_ERROR;
598 do {
599 put = Buf_size - s->bi_valid;
600 if (put > bits)
601 put = bits;
602 s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
603 s->bi_valid += put;
604 _tr_flush_bits(s);
605 value >>= put;
606 bits -= put;
607 } while (bits);
608 return Z_OK;
609}
610
611/* ========================================================================= */
612int ZEXPORT deflateParams(strm, level, strategy)
613 z_streamp strm;
614 int level;
615 int strategy;
616{
617 deflate_state *s;
618 compress_func func;
619
620 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
621 s = strm->state;
622
623#ifdef FASTEST
624 if (level != 0) level = 1;
625#else
626 if (level == Z_DEFAULT_COMPRESSION) level = 6;
627#endif
628 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
629 return Z_STREAM_ERROR;
630 }
631 func = configuration_table[s->level].func;
632
633 if ((strategy != s->strategy || func != configuration_table[level].func) &&
634 s->last_flush != -2) {
635 /* Flush the last buffer: */
636 int err = deflate(strm, Z_BLOCK);
637 if (err == Z_STREAM_ERROR)
638 return err;
639 if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
640 return Z_BUF_ERROR;
641 }
642 if (s->level != level) {
643 if (s->level == 0 && s->matches != 0) {
644 if (s->matches == 1)
645 slide_hash(s);
646 else
647 CLEAR_HASH(s);
648 s->matches = 0;
649 }
650 s->level = level;
651 s->max_lazy_match = configuration_table[level].max_lazy;
652 s->good_match = configuration_table[level].good_length;
653 s->nice_match = configuration_table[level].nice_length;
654 s->max_chain_length = configuration_table[level].max_chain;
655 }
656 s->strategy = strategy;
657 return Z_OK;
658}
659
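/* Usage sketch (added; not part of zlib): deflateParams() may be called
 * between deflate() calls to change level or strategy for the remaining input;
 * data already buffered is first compressed with the old settings.
 *
 *   deflate(&strm, Z_NO_FLUSH);                       // some data at level 6
 *   if (deflateParams(&strm, 1, Z_DEFAULT_STRATEGY) != Z_OK)
 *       ;                                             // handle Z_BUF_ERROR etc.
 *   deflate(&strm, Z_FINISH);                         // the rest at level 1
 */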
660/* ========================================================================= */
661int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
662 z_streamp strm;
663 int good_length;
664 int max_lazy;
665 int nice_length;
666 int max_chain;
667{
668 deflate_state *s;
669
670 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
671 s = strm->state;
672 s->good_match = (uInt)good_length;
673 s->max_lazy_match = (uInt)max_lazy;
674 s->nice_match = nice_length;
675 s->max_chain_length = (uInt)max_chain;
676 return Z_OK;
677}
678
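/* Usage sketch (added; not part of zlib): deflateTune() overrides the four
 * per-level values from configuration_table[] above for unusual inputs, e.g.:
 *
 *   deflateInit(&strm, 6);
 *   deflateTune(&strm, 8, 16, 128, 1024);   // good, lazy, nice, chain
 */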
679/* =========================================================================
680 * For the default windowBits of 15 and memLevel of 8, this function returns
681 * a close to exact, as well as small, upper bound on the compressed size.
682 * They are coded as constants here for a reason--if the #define's are
683 * changed, then this function needs to be changed as well. The return
684 * value for 15 and 8 only works for those exact settings.
685 *
686 * For any setting other than those defaults for windowBits and memLevel,
687 * the value returned is a conservative worst case for the maximum expansion
688 * resulting from using fixed blocks instead of stored blocks, which deflate
689 * can emit on compressed data for some combinations of the parameters.
690 *
691 * This function could be more sophisticated to provide closer upper bounds for
692 * every combination of windowBits and memLevel. But even the conservative
693 * upper bound of about 14% expansion does not seem onerous for output buffer
694 * allocation.
695 */
696uLong ZEXPORT deflateBound(strm, sourceLen)
697 z_streamp strm;
698 uLong sourceLen;
699{
700 deflate_state *s;
701 uLong complen, wraplen;
702
703 /* conservative upper bound for compressed data */
704 complen = sourceLen +
705 ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
706
707 /* if can't get parameters, return conservative bound plus zlib wrapper */
708 if (deflateStateCheck(strm))
709 return complen + 6;
710
711 /* compute wrapper length */
712 s = strm->state;
713 switch (s->wrap) {
714 case 0: /* raw deflate */
715 wraplen = 0;
716 break;
717 case 1: /* zlib wrapper */
718 wraplen = 6 + (s->strstart ? 4 : 0);
719 break;
720#ifdef GZIP
721 case 2: /* gzip wrapper */
722 wraplen = 18;
723 if (s->gzhead != Z_NULL) { /* user-supplied gzip header */
724 Bytef *str;
725 if (s->gzhead->extra != Z_NULL)
726 wraplen += 2 + s->gzhead->extra_len;
727 str = s->gzhead->name;
728 if (str != Z_NULL)
729 do {
730 wraplen++;
731 } while (*str++);
732 str = s->gzhead->comment;
733 if (str != Z_NULL)
734 do {
735 wraplen++;
736 } while (*str++);
737 if (s->gzhead->hcrc)
738 wraplen += 2;
739 }
740 break;
741#endif
742 default: /* for compiler happiness */
743 wraplen = 6;
744 }
745
746 /* if not default parameters, return conservative bound */
747 if (s->w_bits != 15 || s->hash_bits != 8 + 7)
748 return complen + wraplen;
749
750 /* default settings: return tight bound for that case */
751 return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
752 (sourceLen >> 25) + 13 - 6 + wraplen;
753}
754
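/* Usage sketch (added; not part of zlib): deflateBound() sizes an output
 * buffer large enough for the whole stream, so a single call with Z_FINISH
 * can compress src/srcLen (placeholders) in one pass.
 *
 *   uLong bound = deflateBound(&strm, srcLen);
 *   Bytef *out = (Bytef *)malloc(bound);
 *   strm.next_in  = src;  strm.avail_in  = (uInt)srcLen;
 *   strm.next_out = out;  strm.avail_out = (uInt)bound;
 *   int ret = deflate(&strm, Z_FINISH);   // expect Z_STREAM_END here
 */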
755/* =========================================================================
756 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
757 * IN assertion: the stream state is correct and there is enough room in
758 * pending_buf.
759 */
760local void putShortMSB (s, b)
761 deflate_state *s;
762 uInt b;
763{
764 put_byte(s, (Byte)(b >> 8));
765 put_byte(s, (Byte)(b & 0xff));
766}
767
768/* =========================================================================
769 * Flush as much pending output as possible. All deflate() output, except for
770 * some deflate_stored() output, goes through this function so some
771 * applications may wish to modify it to avoid allocating a large
772 * strm->next_out buffer and copying into it. (See also read_buf()).
773 */
774local void flush_pending(strm)
775 z_streamp strm;
776{
777 unsigned len;
778 deflate_state *s = strm->state;
779
780 _tr_flush_bits(s);
781 len = s->pending;
782 if (len > strm->avail_out) len = strm->avail_out;
783 if (len == 0) return;
784
785 zmemcpy(strm->next_out, s->pending_out, len);
786 strm->next_out += len;
787 s->pending_out += len;
788 strm->total_out += len;
789 strm->avail_out -= len;
790 s->pending -= len;
791 if (s->pending == 0) {
792 s->pending_out = s->pending_buf;
793 }
794}
795
796/* ===========================================================================
797 * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
798 */
799#define HCRC_UPDATE(beg) \
800 do { \
801 if (s->gzhead->hcrc && s->pending > (beg)) \
802 strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
803 s->pending - (beg)); \
804 } while (0)
805
806/* ========================================================================= */
807int ZEXPORT deflate (strm, flush)
808 z_streamp strm;
809 int flush;
810{
811 int old_flush; /* value of flush param for previous deflate call */
812 deflate_state *s;
813
814 if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
815 return Z_STREAM_ERROR;
816 }
817 s = strm->state;
818
819 if (strm->next_out == Z_NULL ||
820 (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
821 (s->status == FINISH_STATE && flush != Z_FINISH)) {
822 ERR_RETURN(strm, Z_STREAM_ERROR);
823 }
824 if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
825
826 old_flush = s->last_flush;
827 s->last_flush = flush;
828
829 /* Flush as much pending output as possible */
830 if (s->pending != 0) {
831 flush_pending(strm);
832 if (strm->avail_out == 0) {
833 /* Since avail_out is 0, deflate will be called again with
834 * more output space, but possibly with both pending and
835 * avail_in equal to zero. There won't be anything to do,
836 * but this is not an error situation so make sure we
837 * return OK instead of BUF_ERROR at next call of deflate:
838 */
839 s->last_flush = -1;
840 return Z_OK;
841 }
842
843 /* Make sure there is something to do and avoid duplicate consecutive
844 * flushes. For repeated and useless calls with Z_FINISH, we keep
845 * returning Z_STREAM_END instead of Z_BUF_ERROR.
846 */
847 } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
848 flush != Z_FINISH) {
849 ERR_RETURN(strm, Z_BUF_ERROR);
850 }
851
852 /* User must not provide more input after the first FINISH: */
853 if (s->status == FINISH_STATE && strm->avail_in != 0) {
854 ERR_RETURN(strm, Z_BUF_ERROR);
855 }
856
857 /* Write the header */
858 if (s->status == INIT_STATE && s->wrap == 0)
859 s->status = BUSY_STATE;
860 if (s->status == INIT_STATE) {
861 /* zlib header */
862 uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
863 uInt level_flags;
864
865 if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
866 level_flags = 0;
867 else if (s->level < 6)
868 level_flags = 1;
869 else if (s->level == 6)
870 level_flags = 2;
871 else
872 level_flags = 3;
873 header |= (level_flags << 6);
874 if (s->strstart != 0) header |= PRESET_DICT;
875 header += 31 - (header % 31);
876
877 putShortMSB(s, header);
878
879 /* Save the adler32 of the preset dictionary: */
880 if (s->strstart != 0) {
881 putShortMSB(s, (uInt)(strm->adler >> 16));
882 putShortMSB(s, (uInt)(strm->adler & 0xffff));
883 }
884 strm->adler = adler32(0L, Z_NULL, 0);
885 s->status = BUSY_STATE;
886
887 /* Compression must start with an empty pending buffer */
888 flush_pending(strm);
889 if (s->pending != 0) {
890 s->last_flush = -1;
891 return Z_OK;
892 }
893 }
894#ifdef GZIP
895 if (s->status == GZIP_STATE) {
896 /* gzip header */
897 strm->adler = crc32(0L, Z_NULL, 0);
898 put_byte(s, 31);
899 put_byte(s, 139);
900 put_byte(s, 8);
901 if (s->gzhead == Z_NULL) {
902 put_byte(s, 0);
903 put_byte(s, 0);
904 put_byte(s, 0);
905 put_byte(s, 0);
906 put_byte(s, 0);
907 put_byte(s, s->level == 9 ? 2 :
908 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
909 4 : 0));
910 put_byte(s, OS_CODE);
911 s->status = BUSY_STATE;
912
913 /* Compression must start with an empty pending buffer */
914 flush_pending(strm);
915 if (s->pending != 0) {
916 s->last_flush = -1;
917 return Z_OK;
918 }
919 }
920 else {
921 put_byte(s, (s->gzhead->text ? 1 : 0) +
922 (s->gzhead->hcrc ? 2 : 0) +
923 (s->gzhead->extra == Z_NULL ? 0 : 4) +
924 (s->gzhead->name == Z_NULL ? 0 : 8) +
925 (s->gzhead->comment == Z_NULL ? 0 : 16)
926 );
927 put_byte(s, (Byte)(s->gzhead->time & 0xff));
928 put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
929 put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
930 put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
931 put_byte(s, s->level == 9 ? 2 :
932 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
933 4 : 0));
934 put_byte(s, s->gzhead->os & 0xff);
935 if (s->gzhead->extra != Z_NULL) {
936 put_byte(s, s->gzhead->extra_len & 0xff);
937 put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
938 }
939 if (s->gzhead->hcrc)
940 strm->adler = crc32(strm->adler, s->pending_buf,
941 s->pending);
942 s->gzindex = 0;
943 s->status = EXTRA_STATE;
944 }
945 }
946 if (s->status == EXTRA_STATE) {
947 if (s->gzhead->extra != Z_NULL) {
948 ulg beg = s->pending; /* start of bytes to update crc */
949 uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
950 while (s->pending + left > s->pending_buf_size) {
951 uInt copy = s->pending_buf_size - s->pending;
952 zmemcpy(s->pending_buf + s->pending,
953 s->gzhead->extra + s->gzindex, copy);
954 s->pending = s->pending_buf_size;
955 HCRC_UPDATE(beg);
956 s->gzindex += copy;
957 flush_pending(strm);
958 if (s->pending != 0) {
959 s->last_flush = -1;
960 return Z_OK;
961 }
962 beg = 0;
963 left -= copy;
964 }
965 zmemcpy(s->pending_buf + s->pending,
966 s->gzhead->extra + s->gzindex, left);
967 s->pending += left;
968 HCRC_UPDATE(beg);
969 s->gzindex = 0;
970 }
971 s->status = NAME_STATE;
972 }
973 if (s->status == NAME_STATE) {
974 if (s->gzhead->name != Z_NULL) {
975 ulg beg = s->pending; /* start of bytes to update crc */
976 int val;
977 do {
978 if (s->pending == s->pending_buf_size) {
979 HCRC_UPDATE(beg);
980 flush_pending(strm);
981 if (s->pending != 0) {
982 s->last_flush = -1;
983 return Z_OK;
984 }
985 beg = 0;
986 }
987 val = s->gzhead->name[s->gzindex++];
988 put_byte(s, val);
989 } while (val != 0);
990 HCRC_UPDATE(beg);
991 s->gzindex = 0;
992 }
993 s->status = COMMENT_STATE;
994 }
995 if (s->status == COMMENT_STATE) {
996 if (s->gzhead->comment != Z_NULL) {
997 ulg beg = s->pending; /* start of bytes to update crc */
998 int val;
999 do {
1000 if (s->pending == s->pending_buf_size) {
1001 HCRC_UPDATE(beg);
1002 flush_pending(strm);
1003 if (s->pending != 0) {
1004 s->last_flush = -1;
1005 return Z_OK;
1006 }
1007 beg = 0;
1008 }
1009 val = s->gzhead->comment[s->gzindex++];
1010 put_byte(s, val);
1011 } while (val != 0);
1012 HCRC_UPDATE(beg);
1013 }
1014 s->status = HCRC_STATE;
1015 }
1016 if (s->status == HCRC_STATE) {
1017 if (s->gzhead->hcrc) {
1018 if (s->pending + 2 > s->pending_buf_size) {
1019 flush_pending(strm);
1020 if (s->pending != 0) {
1021 s->last_flush = -1;
1022 return Z_OK;
1023 }
1024 }
1025 put_byte(s, (Byte)(strm->adler & 0xff));
1026 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1027 strm->adler = crc32(0L, Z_NULL, 0);
1028 }
1029 s->status = BUSY_STATE;
1030
1031 /* Compression must start with an empty pending buffer */
1032 flush_pending(strm);
1033 if (s->pending != 0) {
1034 s->last_flush = -1;
1035 return Z_OK;
1036 }
1037 }
1038#endif
1039
1040 /* Start a new block or continue the current one.
1041 */
1042 if (strm->avail_in != 0 || s->lookahead != 0 ||
1043 (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
1044 block_state bstate;
1045
1046 bstate = s->level == 0 ? deflate_stored(s, flush) :
1047 s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
1048 s->strategy == Z_RLE ? deflate_rle(s, flush) :
1049 (*(configuration_table[s->level].func))(s, flush);
1050
1051 if (bstate == finish_started || bstate == finish_done) {
1052 s->status = FINISH_STATE;
1053 }
1054 if (bstate == need_more || bstate == finish_started) {
1055 if (strm->avail_out == 0) {
1056 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
1057 }
1058 return Z_OK;
1059 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
1060 * of deflate should use the same flush parameter to make sure
1061 * that the flush is complete. So we don't have to output an
1062 * empty block here, this will be done at next call. This also
1063 * ensures that for a very small output buffer, we emit at most
1064 * one empty block.
1065 */
1066 }
1067 if (bstate == block_done) {
1068 if (flush == Z_PARTIAL_FLUSH) {
1069 _tr_align(s);
1070 } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
1071 _tr_stored_block(s, (char*)0, 0L, 0);
1072 /* For a full flush, this empty block will be recognized
1073 * as a special marker by inflate_sync().
1074 */
1075 if (flush == Z_FULL_FLUSH) {
1076 CLEAR_HASH(s); /* forget history */
1077 if (s->lookahead == 0) {
1078 s->strstart = 0;
1079 s->block_start = 0L;
1080 s->insert = 0;
1081 }
1082 }
1083 }
1084 flush_pending(strm);
1085 if (strm->avail_out == 0) {
1086 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
1087 return Z_OK;
1088 }
1089 }
1090 }
1091
1092 if (flush != Z_FINISH) return Z_OK;
1093 if (s->wrap <= 0) return Z_STREAM_END;
1094
1095 /* Write the trailer */
1096#ifdef GZIP
1097 if (s->wrap == 2) {
1098 put_byte(s, (Byte)(strm->adler & 0xff));
1099 put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
1100 put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
1101 put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
1102 put_byte(s, (Byte)(strm->total_in & 0xff));
1103 put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
1104 put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
1105 put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
1106 }
1107 else
1108#endif
1109 {
1110 putShortMSB(s, (uInt)(strm->adler >> 16));
1111 putShortMSB(s, (uInt)(strm->adler & 0xffff));
1112 }
1113 flush_pending(strm);
1114 /* If avail_out is zero, the application will call deflate again
1115 * to flush the rest.
1116 */
1117 if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
1118 return s->pending != 0 ? Z_OK : Z_STREAM_END;
1119}
1120
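/* Usage sketch (added; not part of zlib): the canonical deflate() loop, in the
 * spirit of the zlib.h usage example. Input arrives in chunks, output is
 * drained whenever avail_out reaches zero, and Z_FINISH is repeated until
 * Z_STREAM_END. in_buf, in_len, last_chunk and write_out are placeholders for
 * the application's I/O.
 *
 *   int ret;
 *   do {
 *       strm.next_in = in_buf;  strm.avail_in = in_len;      // next chunk
 *       int flush = last_chunk ? Z_FINISH : Z_NO_FLUSH;
 *       do {
 *           strm.next_out = out_buf;  strm.avail_out = sizeof(out_buf);
 *           ret = deflate(&strm, flush);       // Z_STREAM_ERROR not expected
 *           write_out(out_buf, sizeof(out_buf) - strm.avail_out);
 *       } while (strm.avail_out == 0);
 *   } while (ret != Z_STREAM_END);
 */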
1121/* ========================================================================= */
1122int ZEXPORT deflateEnd (strm)
1123 z_streamp strm;
1124{
1125 int status;
1126
1127 if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
1128
1129 status = strm->state->status;
1130
1131 /* Deallocate in reverse order of allocations: */
1132 TRY_FREE(strm, strm->state->pending_buf);
1133 TRY_FREE(strm, strm->state->head);
1134 TRY_FREE(strm, strm->state->prev);
1135 TRY_FREE(strm, strm->state->window);
1136
1137 ZFREE(strm, strm->state);
1138 strm->state = Z_NULL;
1139
1140 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
1141}
1142
1143/* =========================================================================
1144 * Copy the source state to the destination state.
1145 * To simplify the source, this is not supported for 16-bit MSDOS (which
1146 * doesn't have enough memory anyway to duplicate compression states).
1147 */
1148int ZEXPORT deflateCopy (dest, source)
1149 z_streamp dest;
1150 z_streamp source;
1151{
1152#ifdef MAXSEG_64K
1153 return Z_STREAM_ERROR;
1154#else
1155 deflate_state *ds;
1156 deflate_state *ss;
1157
1158
1159 if (deflateStateCheck(source) || dest == Z_NULL) {
1160 return Z_STREAM_ERROR;
1161 }
1162
1163 ss = source->state;
1164
1165 zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
1166
1167 ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
1168 if (ds == Z_NULL) return Z_MEM_ERROR;
1169 dest->state = (struct internal_state FAR *) ds;
1170 zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
1171 ds->strm = dest;
1172
1173 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
1174 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
1175 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
1176 ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
1177
1178 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
1179 ds->pending_buf == Z_NULL) {
1180 deflateEnd (dest);
1181 return Z_MEM_ERROR;
1182 }
1183 /* following zmemcpy do not work for 16-bit MSDOS */
1184 zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
1185 zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
1186 zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
1187 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
1188
1189 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
1190 ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
1191
1192 ds->l_desc.dyn_tree = ds->dyn_ltree;
1193 ds->d_desc.dyn_tree = ds->dyn_dtree;
1194 ds->bl_desc.dyn_tree = ds->bl_tree;
1195
1196 return Z_OK;
1197#endif /* MAXSEG_64K */
1198}
1199
1200/* ===========================================================================
1201 * Read a new buffer from the current input stream, update the adler32
1202 * and total number of bytes read. All deflate() input goes through
1203 * this function so some applications may wish to modify it to avoid
1204 * allocating a large strm->next_in buffer and copying from it.
1205 * (See also flush_pending()).
1206 */
1207local unsigned read_buf(strm, buf, size)
1208 z_streamp strm;
1209 Bytef *buf;
1210 unsigned size;
1211{
1212 unsigned len = strm->avail_in;
1213
1214 if (len > size) len = size;
1215 if (len == 0) return 0;
1216
1217 strm->avail_in -= len;
1218
1219 zmemcpy(buf, strm->next_in, len);
1220 if (strm->state->wrap == 1) {
1221 strm->adler = adler32(strm->adler, buf, len);
1222 }
1223#ifdef GZIP
1224 else if (strm->state->wrap == 2) {
1225 strm->adler = crc32(strm->adler, buf, len);
1226 }
1227#endif
1228 strm->next_in += len;
1229 strm->total_in += len;
1230
1231 return len;
1232}
1233
1234/* ===========================================================================
1235 * Initialize the "longest match" routines for a new zlib stream
1236 */
1237local void lm_init (s)
1238 deflate_state *s;
1239{
1240 s->window_size = (ulg)2L*s->w_size;
1241
1242 CLEAR_HASH(s);
1243
1244 /* Set the default configuration parameters:
1245 */
1246 s->max_lazy_match = configuration_table[s->level].max_lazy;
1247 s->good_match = configuration_table[s->level].good_length;
1248 s->nice_match = configuration_table[s->level].nice_length;
1249 s->max_chain_length = configuration_table[s->level].max_chain;
1250
1251 s->strstart = 0;
1252 s->block_start = 0L;
1253 s->lookahead = 0;
1254 s->insert = 0;
1255 s->match_length = s->prev_length = MIN_MATCH-1;
1256 s->match_available = 0;
1257 s->ins_h = 0;
1258#ifndef FASTEST
1259#ifdef ASMV
1260 match_init(); /* initialize the asm code */
1261#endif
1262#endif
1263}
1264
1265#ifndef FASTEST
1266/* ===========================================================================
1267 * Set match_start to the longest match starting at the given string and
1268 * return its length. Matches shorter or equal to prev_length are discarded,
1269 * in which case the result is equal to prev_length and match_start is
1270 * garbage.
1271 * IN assertions: cur_match is the head of the hash chain for the current
1272 * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
1273 * OUT assertion: the match length is not greater than s->lookahead.
1274 */
1275#ifndef ASMV
1276/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
1277 * match.S. The code will be functionally equivalent.
1278 */
1279local uInt longest_match(s, cur_match)
1280 deflate_state *s;
1281 IPos cur_match; /* current match */
1282{
1283 unsigned chain_length = s->max_chain_length;/* max hash chain length */
1284 register Bytef *scan = s->window + s->strstart; /* current string */
1285 register Bytef *match; /* matched string */
1286 register int len; /* length of current match */
1287 int best_len = (int)s->prev_length; /* best match length so far */
1288 int nice_match = s->nice_match; /* stop if match long enough */
1289 IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
1290 s->strstart - (IPos)MAX_DIST(s) : NIL;
1291 /* Stop when cur_match becomes <= limit. To simplify the code,
1292 * we prevent matches with the string of window index 0.
1293 */
1294 Posf *prev = s->prev;
1295 uInt wmask = s->w_mask;
1296
1297#ifdef UNALIGNED_OK
1298 /* Compare two bytes at a time. Note: this is not always beneficial.
1299 * Try with and without -DUNALIGNED_OK to check.
1300 */
1301 register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
1302 register ush scan_start = *(ushf*)scan;
1303 register ush scan_end = *(ushf*)(scan+best_len-1);
1304#else
1305 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1306 register Byte scan_end1 = scan[best_len-1];
1307 register Byte scan_end = scan[best_len];
1308#endif
1309
1310 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1311 * It is easy to get rid of this optimization if necessary.
1312 */
1313 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1314
1315 /* Do not waste too much time if we already have a good match: */
1316 if (s->prev_length >= s->good_match) {
1317 chain_length >>= 2;
1318 }
1319 /* Do not look for matches beyond the end of the input. This is necessary
1320 * to make deflate deterministic.
1321 */
1322 if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
1323
1324 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1325
1326 do {
1327 Assert(cur_match < s->strstart, "no future");
1328 match = s->window + cur_match;
1329
1330 /* Skip to next match if the match length cannot increase
1331 * or if the match length is less than 2. Note that the checks below
1332 * for insufficient lookahead only occur occasionally for performance
1333 * reasons. Therefore uninitialized memory will be accessed, and
1334 * conditional jumps will be made that depend on those values.
1335 * However the length of the match is limited to the lookahead, so
1336 * the output of deflate is not affected by the uninitialized values.
1337 */
1338#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
1339 /* This code assumes sizeof(unsigned short) == 2. Do not use
1340 * UNALIGNED_OK if your compiler uses a different size.
1341 */
1342 if (*(ushf*)(match+best_len-1) != scan_end ||
1343 *(ushf*)match != scan_start) continue;
1344
1345 /* It is not necessary to compare scan[2] and match[2] since they are
1346 * always equal when the other bytes match, given that the hash keys
1347 * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
1348 * strstart+3, +5, ... up to strstart+257. We check for insufficient
1349 * lookahead only every 4th comparison; the 128th check will be made
1350 * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
1351 * necessary to put more guard bytes at the end of the window, or
1352 * to check more often for insufficient lookahead.
1353 */
1354 Assert(scan[2] == match[2], "scan[2]?");
1355 scan++, match++;
1356 do {
1357 } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1358 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1359 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1360 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
1361 scan < strend);
1362 /* The funny "do {}" generates better code on most compilers */
1363
1364 /* Here, scan <= window+strstart+257 */
1365 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1366 if (*scan == *match) scan++;
1367
1368 len = (MAX_MATCH - 1) - (int)(strend-scan);
1369 scan = strend - (MAX_MATCH-1);
1370
1371#else /* UNALIGNED_OK */
1372
1373 if (match[best_len] != scan_end ||
1374 match[best_len-1] != scan_end1 ||
1375 *match != *scan ||
1376 *++match != scan[1]) continue;
1377
1378 /* The check at best_len-1 can be removed because it will be made
1379 * again later. (This heuristic is not always a win.)
1380 * It is not necessary to compare scan[2] and match[2] since they
1381 * are always equal when the other bytes match, given that
1382 * the hash keys are equal and that HASH_BITS >= 8.
1383 */
1384 scan += 2, match++;
1385 Assert(*scan == *match, "match[2]?");
1386
1387 /* We check for insufficient lookahead only every 8th comparison;
1388 * the 256th check will be made at strstart+258.
1389 */
1390 do {
1391 } while (*++scan == *++match && *++scan == *++match &&
1392 *++scan == *++match && *++scan == *++match &&
1393 *++scan == *++match && *++scan == *++match &&
1394 *++scan == *++match && *++scan == *++match &&
1395 scan < strend);
1396
1397 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1398
1399 len = MAX_MATCH - (int)(strend - scan);
1400 scan = strend - MAX_MATCH;
1401
1402#endif /* UNALIGNED_OK */
1403
1404 if (len > best_len) {
1405 s->match_start = cur_match;
1406 best_len = len;
1407 if (len >= nice_match) break;
1408#ifdef UNALIGNED_OK
1409 scan_end = *(ushf*)(scan+best_len-1);
1410#else
1411 scan_end1 = scan[best_len-1];
1412 scan_end = scan[best_len];
1413#endif
1414 }
1415 } while ((cur_match = prev[cur_match & wmask]) > limit
1416 && --chain_length != 0);
1417
1418 if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
1419 return s->lookahead;
1420}
1421#endif /* ASMV */
1422
1423#else /* FASTEST */
1424
1425/* ---------------------------------------------------------------------------
1426 * Optimized version for FASTEST only
1427 */
1428local uInt longest_match(s, cur_match)
1429 deflate_state *s;
1430 IPos cur_match; /* current match */
1431{
1432 register Bytef *scan = s->window + s->strstart; /* current string */
1433 register Bytef *match; /* matched string */
1434 register int len; /* length of current match */
1435 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1436
1437 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1438 * It is easy to get rid of this optimization if necessary.
1439 */
1440 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1441
1442 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1443
1444 Assert(cur_match < s->strstart, "no future");
1445
1446 match = s->window + cur_match;
1447
1448 /* Return failure if the match length is less than 2:
1449 */
1450 if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1451
1452 /* The check at best_len-1 can be removed because it will be made
1453 * again later. (This heuristic is not always a win.)
1454 * It is not necessary to compare scan[2] and match[2] since they
1455 * are always equal when the other bytes match, given that
1456 * the hash keys are equal and that HASH_BITS >= 8.
1457 */
1458 scan += 2, match += 2;
1459 Assert(*scan == *match, "match[2]?");
1460
1461 /* We check for insufficient lookahead only every 8th comparison;
1462 * the 256th check will be made at strstart+258.
1463 */
1464 do {
1465 } while (*++scan == *++match && *++scan == *++match &&
1466 *++scan == *++match && *++scan == *++match &&
1467 *++scan == *++match && *++scan == *++match &&
1468 *++scan == *++match && *++scan == *++match &&
1469 scan < strend);
1470
1471 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1472
1473 len = MAX_MATCH - (int)(strend - scan);
1474
1475 if (len < MIN_MATCH) return MIN_MATCH - 1;
1476
1477 s->match_start = cur_match;
1478 return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1479}
1480
1481#endif /* FASTEST */
1482
1483#ifdef ZLIB_DEBUG
1484
1485#define EQUAL 0
1486/* result of memcmp for equal strings */
1487
1488/* ===========================================================================
1489 * Check that the match at match_start is indeed a match.
1490 */
1491local void check_match(s, start, match, length)
1492 deflate_state *s;
1493 IPos start, match;
1494 int length;
1495{
1496 /* check that the match is indeed a match */
1497 if (zmemcmp(s->window + match,
1498 s->window + start, length) != EQUAL) {
1499 fprintf(stderr, " start %u, match %u, length %d\n",
1500 start, match, length);
1501 do {
1502 fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1503 } while (--length != 0);
1504 z_error("invalid match");
1505 }
1506 if (z_verbose > 1) {
1507 fprintf(stderr,"\\[%d,%d]", start-match, length);
1508 do { putc(s->window[start++], stderr); } while (--length != 0);
1509 }
1510}
1511#else
1512# define check_match(s, start, match, length)
1513#endif /* ZLIB_DEBUG */
1514
1515/* ===========================================================================
1516 * Fill the window when the lookahead becomes insufficient.
1517 * Updates strstart and lookahead.
1518 *
1519 * IN assertion: lookahead < MIN_LOOKAHEAD
1520 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1521 * At least one byte has been read, or avail_in == 0; reads are
1522 * performed for at least two bytes (required for the zip translate_eol
1523 * option -- not supported here).
1524 */
1525local void fill_window(s)
1526 deflate_state *s;
1527{
1528 unsigned n;
1529 unsigned more; /* Amount of free space at the end of the window. */
1530 uInt wsize = s->w_size;
1531
1532 Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
1533
1534 do {
1535 more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
1536
1537 /* Deal with !@#$% 64K limit: */
1538 if (sizeof(int) <= 2) {
1539 if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
1540 more = wsize;
1541
1542 } else if (more == (unsigned)(-1)) {
1543 /* Very unlikely, but possible on 16 bit machine if
1544 * strstart == 0 && lookahead == 1 (input done a byte at time)
1545 */
1546 more--;
1547 }
1548 }
1549
1550 /* If the window is almost full and there is insufficient lookahead,
1551 * move the upper half to the lower one to make room in the upper half.
1552 */
1553 if (s->strstart >= wsize+MAX_DIST(s)) {
1554
1555 zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
1556 s->match_start -= wsize;
1557 s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
1558 s->block_start -= (long) wsize;
1559 if (s->insert > s->strstart)
1560 s->insert = s->strstart;
1561 slide_hash(s);
1562 more += wsize;
1563 }
1564 if (s->strm->avail_in == 0) break;
1565
1566 /* If there was no sliding:
1567 * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1568 * more == window_size - lookahead - strstart
1569 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1570 * => more >= window_size - 2*WSIZE + 2
1571 * In the BIG_MEM or MMAP case (not yet supported),
1572 * window_size == input_size + MIN_LOOKAHEAD &&
1573 * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1574 * Otherwise, window_size == 2*WSIZE so more >= 2.
1575 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1576 */
1577 Assert(more >= 2, "more < 2");
1578
1579 n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1580 s->lookahead += n;
1581
1582 /* Initialize the hash value now that we have some input: */
1583 if (s->lookahead + s->insert >= MIN_MATCH) {
1584 uInt str = s->strstart - s->insert;
1585 s->ins_h = s->window[str];
1586 UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
1587#if MIN_MATCH != 3
1588 Call UPDATE_HASH() MIN_MATCH-3 more times
1589#endif
1590 while (s->insert) {
1591 UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
1592#ifndef FASTEST
1593 s->prev[str & s->w_mask] = s->head[s->ins_h];
1594#endif
1595 s->head[s->ins_h] = (Pos)str;
1596 str++;
1597 s->insert--;
1598 if (s->lookahead + s->insert < MIN_MATCH)
1599 break;
1600 }
1601 }
1602 /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1603 * but this is not important since only literal bytes will be emitted.
1604 */
1605
1606 } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1607
1608 /* If the WIN_INIT bytes after the end of the current data have never been
1609 * written, then zero those bytes in order to avoid memory check reports of
1610 * the use of uninitialized (or uninitialised as Julian writes) bytes by
1611 * the longest match routines. Update the high water mark for the next
1612 * time through here. WIN_INIT is set to MAX_MATCH since the longest match
1613 * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
1614 */
1615 if (s->high_water < s->window_size) {
1616 ulg curr = s->strstart + (ulg)(s->lookahead);
1617 ulg init;
1618
1619 if (s->high_water < curr) {
1620 /* Previous high water mark below current data -- zero WIN_INIT
1621 * bytes or up to end of window, whichever is less.
1622 */
1623 init = s->window_size - curr;
1624 if (init > WIN_INIT)
1625 init = WIN_INIT;
1626 zmemzero(s->window + curr, (unsigned)init);
1627 s->high_water = curr + init;
1628 }
1629 else if (s->high_water < (ulg)curr + WIN_INIT) {
1630 /* High water mark at or above current data, but below current data
1631 * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
1632 * to end of window, whichever is less.
1633 */
1634 init = (ulg)curr + WIN_INIT - s->high_water;
1635 if (init > s->window_size - s->high_water)
1636 init = s->window_size - s->high_water;
1637 zmemzero(s->window + s->high_water, (unsigned)init);
1638 s->high_water += init;
1639 }
1640 }
1641
1642 Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
1643 "not enough room for search");
1644}
1645
1646/* ===========================================================================
1647 * Flush the current block, with given end-of-file flag.
1648 * IN assertion: strstart is set to the end of the current match.
1649 */
1650#define FLUSH_BLOCK_ONLY(s, last) { \
1651 _tr_flush_block(s, (s->block_start >= 0L ? \
1652 (charf *)&s->window[(unsigned)s->block_start] : \
1653 (charf *)Z_NULL), \
1654 (ulg)((long)s->strstart - s->block_start), \
1655 (last)); \
1656 s->block_start = s->strstart; \
1657 flush_pending(s->strm); \
1658 Tracev((stderr,"[FLUSH]")); \
1659}
1660
1661/* Same but force premature exit if necessary. */
1662#define FLUSH_BLOCK(s, last) { \
1663 FLUSH_BLOCK_ONLY(s, last); \
1664 if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
1665}
1666
1667/* Maximum stored block length in deflate format (not including header). */
1668#define MAX_STORED 65535
1669
1670/* Minimum of a and b. */
1671#define MIN(a, b) ((a) > (b) ? (b) : (a))
1672
1673/* ===========================================================================
1674 * Copy without compression as much as possible from the input stream, return
1675 * the current block state.
1676 *
1677 * In case deflateParams() is used to later switch to a non-zero compression
1678 * level, s->matches (otherwise unused when storing) keeps track of the number
1679 * of hash table slides to perform. If s->matches is 1, then one hash table
1680 * slide will be done when switching. If s->matches is 2, the maximum value
1681 * allowed here, then the hash table will be cleared, since two or more slides
1682 * is the same as a clear.
1683 *
1684 * deflate_stored() is written to minimize the number of times an input byte is
1685 * copied. It is most efficient with large input and output buffers, which
1686 * maximizes the opportunities to have a single copy from next_in to next_out.
1687 */
1688local block_state deflate_stored(s, flush)
1689 deflate_state *s;
1690 int flush;
1691{
1692 /* Smallest worthy block size when not flushing or finishing. By default
1693 * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
1694 * large input and output buffers, the stored block size will be larger.
1695 */
1696 unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
1697
1698 /* Copy as many min_block or larger stored blocks directly to next_out as
1699 * possible. If flushing, copy the remaining available input to next_out as
1700 * stored blocks, if there is enough space.
1701 */
1702 unsigned len, left, have, last = 0;
1703 unsigned used = s->strm->avail_in;
1704 do {
1705 /* Set len to the maximum size block that we can copy directly with the
1706 * available input data and output space. Set left to how much of that
1707 * would be copied from what's left in the window.
1708 */
1709 len = MAX_STORED; /* maximum deflate stored block length */
1710 have = (s->bi_valid + 42) >> 3; /* number of header bytes */
1711 if (s->strm->avail_out < have) /* need room for header */
1712 break;
1713 /* maximum stored block length that will fit in avail_out: */
1714 have = s->strm->avail_out - have;
1715 left = s->strstart - s->block_start; /* bytes left in window */
1716 if (len > (ulg)left + s->strm->avail_in)
1717 len = left + s->strm->avail_in; /* limit len to the input */
1718 if (len > have)
1719 len = have; /* limit len to the output */
1720
1721 /* If the stored block would be less than min_block in length, or if
1722 * unable to copy all of the available input when flushing, then try
1723 * copying to the window and the pending buffer instead. Also don't
1724 * write an empty block when flushing -- deflate() does that.
1725 */
1726 if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
1727 flush == Z_NO_FLUSH ||
1728 len != left + s->strm->avail_in))
1729 break;
1730
1731 /* Make a dummy stored block in pending to get the header bytes,
1732 * including any pending bits. This also updates the debugging counts.
1733 */
1734 last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
1735 _tr_stored_block(s, (char *)0, 0L, last);
1736
1737 /* Replace the lengths in the dummy stored block with len. */
1738 s->pending_buf[s->pending - 4] = len;
1739 s->pending_buf[s->pending - 3] = len >> 8;
1740 s->pending_buf[s->pending - 2] = ~len;
1741 s->pending_buf[s->pending - 1] = ~len >> 8;
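        /* Illustrative example: len == 1000 (0x03e8) stores the four bytes
         * e8 03 17 fc -- LEN then NLEN = ~LEN, each least significant byte
         * first.
         */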
1742
1743 /* Write the stored block header bytes. */
1744 flush_pending(s->strm);
1745
1746#ifdef ZLIB_DEBUG
1747 /* Update debugging counts for the data about to be copied. */
1748 s->compressed_len += len << 3;
1749 s->bits_sent += len << 3;
1750#endif
1751
1752 /* Copy uncompressed bytes from the window to next_out. */
1753 if (left) {
1754 if (left > len)
1755 left = len;
1756 zmemcpy(s->strm->next_out, s->window + s->block_start, left);
1757 s->strm->next_out += left;
1758 s->strm->avail_out -= left;
1759 s->strm->total_out += left;
1760 s->block_start += left;
1761 len -= left;
1762 }
1763
1764 /* Copy uncompressed bytes directly from next_in to next_out, updating
1765 * the check value.
1766 */
1767 if (len) {
1768 read_buf(s->strm, s->strm->next_out, len);
1769 s->strm->next_out += len;
1770 s->strm->avail_out -= len;
1771 s->strm->total_out += len;
1772 }
1773 } while (last == 0);
1774
1775 /* Update the sliding window with the last s->w_size bytes of the copied
1776 * data, or append all of the copied data to the existing window if less
1777 * than s->w_size bytes were copied. Also update the number of bytes to
1778 * insert in the hash tables, in the event that deflateParams() switches to
1779 * a non-zero compression level.
1780 */
1781 used -= s->strm->avail_in; /* number of input bytes directly copied */
1782 if (used) {
1783 /* If any input was used, then no unused input remains in the window,
1784 * therefore s->block_start == s->strstart.
1785 */
1786 if (used >= s->w_size) { /* supplant the previous history */
1787 s->matches = 2; /* clear hash */
1788 zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
1789 s->strstart = s->w_size;
1790 s->insert = s->strstart;
1791 }
1792 else {
1793 if (s->window_size - s->strstart <= used) {
1794 /* Slide the window down. */
1795 s->strstart -= s->w_size;
1796 zmemcpy(s->window, s->window + s->w_size, s->strstart);
1797 if (s->matches < 2)
1798 s->matches++; /* add a pending slide_hash() */
1799 if (s->insert > s->strstart)
1800 s->insert = s->strstart;
1801 }
1802 zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
1803 s->strstart += used;
1804 s->insert += MIN(used, s->w_size - s->insert);
1805 }
1806 s->block_start = s->strstart;
1807 }
1808 if (s->high_water < s->strstart)
1809 s->high_water = s->strstart;
1810
1811 /* If the last block was written to next_out, then done. */
1812 if (last)
1813 return finish_done;
1814
1815 /* If flushing and all input has been consumed, then done. */
1816 if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
1817 s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
1818 return block_done;
1819
1820 /* Fill the window with any remaining input. */
1821 have = s->window_size - s->strstart;
1822 if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
1823 /* Slide the window down. */
1824 s->block_start -= s->w_size;
1825 s->strstart -= s->w_size;
1826 zmemcpy(s->window, s->window + s->w_size, s->strstart);
1827 if (s->matches < 2)
1828 s->matches++; /* add a pending slide_hash() */
1829 have += s->w_size; /* more space now */
1830 if (s->insert > s->strstart)
1831 s->insert = s->strstart;
1832 }
1833 if (have > s->strm->avail_in)
1834 have = s->strm->avail_in;
1835 if (have) {
1836 read_buf(s->strm, s->window + s->strstart, have);
1837 s->strstart += have;
1838 s->insert += MIN(have, s->w_size - s->insert);
1839 }
1840 if (s->high_water < s->strstart)
1841 s->high_water = s->strstart;
1842
1843 /* There was not enough avail_out to write a complete worthy or flushed
1844 * stored block to next_out. Write a stored block to pending instead, if we
1845 * have enough input for a worthy block, or if flushing and there is enough
1846 * room for the remaining input as a stored block in the pending buffer.
1847 */
1848 have = (s->bi_valid + 42) >> 3; /* number of header bytes */
1849 /* maximum stored block length that will fit in pending: */
1850 have = MIN(s->pending_buf_size - have, MAX_STORED);
1851 min_block = MIN(have, s->w_size);
1852 left = s->strstart - s->block_start;
1853 if (left >= min_block ||
1854 ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
1855 s->strm->avail_in == 0 && left <= have)) {
1856 len = MIN(left, have);
1857 last = flush == Z_FINISH && s->strm->avail_in == 0 &&
1858 len == left ? 1 : 0;
1859 _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
1860 s->block_start += len;
1861 flush_pending(s->strm);
1862 }
1863
1864 /* We've done all we can with the available input and output. */
1865 return last ? finish_started : need_more;
1866}
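
/*
 * A minimal usage sketch, not part of deflate.c itself, assuming only the
 * public zlib.h API (the function name and buffer parameters are
 * placeholders, error handling is omitted): compression level 0 selects
 * deflate_stored() in the configuration table, and deflateParams() can later
 * raise the level mid-stream, which is what the s->matches slide/clear
 * bookkeeping above supports.
 */
#include "zlib.h"   /* public API; already reachable via deflate.h in this file */

static unsigned long store_then_compress_sketch(unsigned char *in, unsigned in_len,
                                                unsigned char *out, unsigned out_len)
{
    z_stream strm;
    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit(&strm, 0) != Z_OK)                 /* level 0 => stored blocks */
        return 0;
    strm.next_in   = in;
    strm.avail_in  = in_len / 2;
    strm.next_out  = out;
    strm.avail_out = out_len;
    (void)deflate(&strm, Z_NO_FLUSH);                  /* first half copied as stored blocks */
    (void)deflateParams(&strm, 6, Z_DEFAULT_STRATEGY); /* switch level mid-stream */
    strm.next_in  = in + in_len / 2;
    strm.avail_in = in_len - in_len / 2;
    (void)deflate(&strm, Z_FINISH);                    /* second half actually compressed */
    (void)deflateEnd(&strm);
    return strm.total_out;
}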
1867
1868/* ===========================================================================
1869 * Compress as much as possible from the input stream, return the current
1870 * block state.
1871 * This function does not perform lazy evaluation of matches and inserts
1872 * new strings in the dictionary only for unmatched strings or for short
1873 * matches. It is used only for the fast compression options.
1874 */
1875local block_state deflate_fast(s, flush)
1876 deflate_state *s;
1877 int flush;
1878{
1879 IPos hash_head; /* head of the hash chain */
1880 int bflush; /* set if current block must be flushed */
1881
1882 for (;;) {
1883 /* Make sure that we always have enough lookahead, except
1884 * at the end of the input file. We need MAX_MATCH bytes
1885 * for the next match, plus MIN_MATCH bytes to insert the
1886 * string following the next match.
1887 */
1888 if (s->lookahead < MIN_LOOKAHEAD) {
1889 fill_window(s);
1890 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1891 return need_more;
1892 }
1893 if (s->lookahead == 0) break; /* flush the current block */
1894 }
1895
1896 /* Insert the string window[strstart .. strstart+2] in the
1897 * dictionary, and set hash_head to the head of the hash chain:
1898 */
1899 hash_head = NIL;
1900 if (s->lookahead >= MIN_MATCH) {
1901 INSERT_STRING(s, s->strstart, hash_head);
1902 }
1903
1904 /* Find the longest match, discarding those <= prev_length.
1905 * At this point we have always match_length < MIN_MATCH
1906 */
1907 if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1908 /* To simplify the code, we prevent matches with the string
1909 * of window index 0 (in particular we have to avoid a match
1910 * of the string with itself at the start of the input file).
1911 */
1912 s->match_length = longest_match (s, hash_head);
1913 /* longest_match() sets match_start */
1914 }
1915 if (s->match_length >= MIN_MATCH) {
1916 check_match(s, s->strstart, s->match_start, s->match_length);
1917
1918 _tr_tally_dist(s, s->strstart - s->match_start,
1919 s->match_length - MIN_MATCH, bflush);
1920
1921 s->lookahead -= s->match_length;
1922
1923 /* Insert new strings in the hash table only if the match length
1924 * is not too large. This saves time but degrades compression.
1925 */
1926#ifndef FASTEST
1927 if (s->match_length <= s->max_insert_length &&
1928 s->lookahead >= MIN_MATCH) {
1929 s->match_length--; /* string at strstart already in table */
1930 do {
1931 s->strstart++;
1932 INSERT_STRING(s, s->strstart, hash_head);
1933 /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1934 * always MIN_MATCH bytes ahead.
1935 */
1936 } while (--s->match_length != 0);
1937 s->strstart++;
1938 } else
1939#endif
1940 {
1941 s->strstart += s->match_length;
1942 s->match_length = 0;
1943 s->ins_h = s->window[s->strstart];
1944 UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1945#if MIN_MATCH != 3
1946 Call UPDATE_HASH() MIN_MATCH-3 more times
1947#endif
1948 /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1949 * matter since it will be recomputed at next deflate call.
1950 */
1951 }
1952 } else {
1953 /* No match, output a literal byte */
1954 Tracevv((stderr,"%c", s->window[s->strstart]));
1955 _tr_tally_lit (s, s->window[s->strstart], bflush);
1956 s->lookahead--;
1957 s->strstart++;
1958 }
1959 if (bflush) FLUSH_BLOCK(s, 0);
1960 }
1961 s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
1962 if (flush == Z_FINISH) {
1963 FLUSH_BLOCK(s, 1);
1964 return finish_done;
1965 }
1966 if (s->sym_next)
1967 FLUSH_BLOCK(s, 0);
1968 return block_done;
1969}
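
/*
 * A minimal sketch, not part of deflate.c itself, assuming the default
 * (non-FASTEST) configuration table of this file: levels 1-3 use
 * deflate_fast(), levels 4-9 use deflate_slow(), and level 0 uses
 * deflate_stored(). The per-level heuristics (good_length, max_lazy,
 * nice_length, max_chain) can also be overridden through the public API;
 * the numeric values and the function name below are illustrative only.
 */
#include "zlib.h"

static int init_fast_level_sketch(z_stream *strm)
{
    strm->zalloc = Z_NULL;
    strm->zfree  = Z_NULL;
    strm->opaque = Z_NULL;
    if (deflateInit2(strm, 2, Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY) != Z_OK)
        return Z_STREAM_ERROR;                   /* level 2 => deflate_fast() */
    return deflateTune(strm, 8, 16, 32, 32);     /* good, lazy, nice, chain */
}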
1970
1971#ifndef FASTEST
1972/* ===========================================================================
1973 * Same as above, but achieves better compression. We use a lazy
1974 * evaluation for matches: a match is finally adopted only if there is
1975 * no better match at the next window position.
1976 */
1977local block_state deflate_slow(s, flush)
1978 deflate_state *s;
1979 int flush;
1980{
1981 IPos hash_head; /* head of hash chain */
1982 int bflush; /* set if current block must be flushed */
1983
1984 /* Process the input block. */
1985 for (;;) {
1986 /* Make sure that we always have enough lookahead, except
1987 * at the end of the input file. We need MAX_MATCH bytes
1988 * for the next match, plus MIN_MATCH bytes to insert the
1989 * string following the next match.
1990 */
1991 if (s->lookahead < MIN_LOOKAHEAD) {
1992 fill_window(s);
1993 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1994 return need_more;
1995 }
1996 if (s->lookahead == 0) break; /* flush the current block */
1997 }
1998
1999 /* Insert the string window[strstart .. strstart+2] in the
2000 * dictionary, and set hash_head to the head of the hash chain:
2001 */
2002 hash_head = NIL;
2003 if (s->lookahead >= MIN_MATCH) {
2004 INSERT_STRING(s, s->strstart, hash_head);
2005 }
2006
2007 /* Find the longest match, discarding those <= prev_length.
2008 */
2009 s->prev_length = s->match_length, s->prev_match = s->match_start;
2010 s->match_length = MIN_MATCH-1;
2011
2012 if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
2013 s->strstart - hash_head <= MAX_DIST(s)) {
2014 /* To simplify the code, we prevent matches with the string
2015 * of window index 0 (in particular we have to avoid a match
2016 * of the string with itself at the start of the input file).
2017 */
2018 s->match_length = longest_match (s, hash_head);
2019 /* longest_match() sets match_start */
2020
2021 if (s->match_length <= 5 && (s->strategy == Z_FILTERED
2022#if TOO_FAR <= 32767
2023 || (s->match_length == MIN_MATCH &&
2024 s->strstart - s->match_start > TOO_FAR)
2025#endif
2026 )) {
2027
2028 /* If prev_match is also MIN_MATCH, match_start is garbage
2029 * but we will ignore the current match anyway.
2030 */
2031 s->match_length = MIN_MATCH-1;
2032 }
2033 }
2034 /* If there was a match at the previous step and the current
2035 * match is not better, output the previous match:
2036 */
2037 if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
2038 uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
2039 /* Do not insert strings in hash table beyond this. */
2040
2041 check_match(s, s->strstart-1, s->prev_match, s->prev_length);
2042
2043 _tr_tally_dist(s, s->strstart -1 - s->prev_match,
2044 s->prev_length - MIN_MATCH, bflush);
2045
2046 /* Insert in hash table all strings up to the end of the match.
2047 * strstart-1 and strstart are already inserted. If there is not
2048 * enough lookahead, the last two strings are not inserted in
2049 * the hash table.
2050 */
2051 s->lookahead -= s->prev_length-1;
2052 s->prev_length -= 2;
2053 do {
2054 if (++s->strstart <= max_insert) {
2055 INSERT_STRING(s, s->strstart, hash_head);
2056 }
2057 } while (--s->prev_length != 0);
2058 s->match_available = 0;
2059 s->match_length = MIN_MATCH-1;
2060 s->strstart++;
2061
2062 if (bflush) FLUSH_BLOCK(s, 0);
2063
2064 } else if (s->match_available) {
2065 /* If there was no match at the previous position, output a
2066 * single literal. If there was a match but the current match
2067 * is longer, truncate the previous match to a single literal.
2068 */
2069 Tracevv((stderr,"%c", s->window[s->strstart-1]));
2070 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
2071 if (bflush) {
2072 FLUSH_BLOCK_ONLY(s, 0);
2073 }
2074 s->strstart++;
2075 s->lookahead--;
2076 if (s->strm->avail_out == 0) return need_more;
2077 } else {
2078 /* There is no previous match to compare with, wait for
2079 * the next step to decide.
2080 */
2081 s->match_available = 1;
2082 s->strstart++;
2083 s->lookahead--;
2084 }
2085 }
2086 Assert (flush != Z_NO_FLUSH, "no flush?");
2087 if (s->match_available) {
2088 Tracevv((stderr,"%c", s->window[s->strstart-1]));
2089 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
2090 s->match_available = 0;
2091 }
2092 s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
2093 if (flush == Z_FINISH) {
2094 FLUSH_BLOCK(s, 1);
2095 return finish_done;
2096 }
2097 if (s->sym_next)
2098 FLUSH_BLOCK(s, 0);
2099 return block_done;
2100}
2101#endif /* FASTEST */
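
/*
 * A toy sketch of the lazy-evaluation rule used by deflate_slow(), not part
 * of deflate.c itself: a match found at position pos is emitted only if the
 * match starting at pos + 1 is not longer; otherwise a single literal is
 * emitted and the decision is deferred. The brute-force matcher and all names
 * below are illustrative (no hash chains, no window).
 */
#include <stdio.h>
#include <string.h>

static unsigned toy_longest_match(const unsigned char *buf, unsigned pos, unsigned n)
{
    unsigned best = 0, cand, len;
    for (cand = 0; cand < pos; cand++) {        /* try every earlier position */
        for (len = 0; pos + len < n && buf[cand + len] == buf[pos + len]; len++)
            ;
        if (len > best) best = len;
    }
    return best;
}

int main(void)
{
    const unsigned char buf[] = "abc.bcde.abcde";
    unsigned n = (unsigned)strlen((const char *)buf);
    unsigned pos = 9;                                     /* start of the final "abcde" */
    unsigned here = toy_longest_match(buf, pos, n);       /* matches "abc",  length 3 */
    unsigned next = toy_longest_match(buf, pos + 1, n);   /* matches "bcde", length 4 */
    if (next > here)
        printf("emit literal '%c', then take the length-%u match at %u\n",
               buf[pos], next, pos + 1);
    else
        printf("take the length-%u match at %u\n", here, pos);
    return 0;
}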
2102
2103/* ===========================================================================
2104 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
2105 * one. Do not maintain a hash table. (It will be regenerated if this run of
2106 * deflate switches away from Z_RLE.)
2107 */
2108local block_state deflate_rle(s, flush)
2109 deflate_state *s;
2110 int flush;
2111{
2112 int bflush; /* set if current block must be flushed */
2113 uInt prev; /* byte at distance one to match */
2114 Bytef *scan, *strend; /* scan goes up to strend for length of run */
2115
2116 for (;;) {
2117 /* Make sure that we always have enough lookahead, except
2118 * at the end of the input file. We need MAX_MATCH bytes
2119 * for the longest run, plus one for the unrolled loop.
2120 */
2121 if (s->lookahead <= MAX_MATCH) {
2122 fill_window(s);
2123 if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
2124 return need_more;
2125 }
2126 if (s->lookahead == 0) break; /* flush the current block */
2127 }
2128
2129 /* See how many times the previous byte repeats */
2130 s->match_length = 0;
2131 if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
2132 scan = s->window + s->strstart - 1;
2133 prev = *scan;
2134 if (prev == *++scan && prev == *++scan && prev == *++scan) {
2135 strend = s->window + s->strstart + MAX_MATCH;
2136 do {
2137 } while (prev == *++scan && prev == *++scan &&
2138 prev == *++scan && prev == *++scan &&
2139 prev == *++scan && prev == *++scan &&
2140 prev == *++scan && prev == *++scan &&
2141 scan < strend);
2142 s->match_length = MAX_MATCH - (uInt)(strend - scan);
2143 if (s->match_length > s->lookahead)
2144 s->match_length = s->lookahead;
2145 }
2146 Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
2147 }
2148
2149 /* Emit match if have run of MIN_MATCH or longer, else emit literal */
2150 if (s->match_length >= MIN_MATCH) {
2151 check_match(s, s->strstart, s->strstart - 1, s->match_length);
2152
2153 _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);
2154
2155 s->lookahead -= s->match_length;
2156 s->strstart += s->match_length;
2157 s->match_length = 0;
2158 } else {
2159 /* No match, output a literal byte */
2160 Tracevv((stderr,"%c", s->window[s->strstart]));
2161 _tr_tally_lit (s, s->window[s->strstart], bflush);
2162 s->lookahead--;
2163 s->strstart++;
2164 }
2165 if (bflush) FLUSH_BLOCK(s, 0);
2166 }
2167 s->insert = 0;
2168 if (flush == Z_FINISH) {
2169 FLUSH_BLOCK(s, 1);
2170 return finish_done;
2171 }
2172 if (s->sym_next)
2173 FLUSH_BLOCK(s, 0);
2174 return block_done;
2175}
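
/*
 * A minimal usage sketch, not part of deflate.c itself, assuming only the
 * public zlib.h API (function name and buffers are placeholders, error
 * handling omitted): selecting the Z_RLE strategy routes compression through
 * deflate_rle() above. Z_RLE keeps Huffman coding but limits matches to
 * distance one, a fast strategy that the zlib documentation aims at PNG-style
 * image data.
 */
#include "zlib.h"

static unsigned long compress_rle_sketch(unsigned char *in, unsigned in_len,
                                         unsigned char *out, unsigned out_len)
{
    z_stream strm;
    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     15, 8, Z_RLE) != Z_OK)
        return 0;
    strm.next_in   = in;
    strm.avail_in  = in_len;
    strm.next_out  = out;
    strm.avail_out = out_len;
    (void)deflate(&strm, Z_FINISH);    /* single shot; assumes out is large enough */
    (void)deflateEnd(&strm);
    return strm.total_out;
}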
2176
2177/* ===========================================================================
2178 * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
2179 * (It will be regenerated if this run of deflate switches away from Huffman.)
2180 */
2181local block_state deflate_huff(s, flush)
2182 deflate_state *s;
2183 int flush;
2184{
2185 int bflush; /* set if current block must be flushed */
2186
2187 for (;;) {
2188 /* Make sure that we have a literal to write. */
2189 if (s->lookahead == 0) {
2190 fill_window(s);
2191 if (s->lookahead == 0) {
2192 if (flush == Z_NO_FLUSH)
2193 return need_more;
2194 break; /* flush the current block */
2195 }
2196 }
2197
2198 /* Output a literal byte */
2199 s->match_length = 0;
2200 Tracevv((stderr,"%c", s->window[s->strstart]));
2201 _tr_tally_lit (s, s->window[s->strstart], bflush);
2202 s->lookahead--;
2203 s->strstart++;
2204 if (bflush) FLUSH_BLOCK(s, 0);
2205 }
2206 s->insert = 0;
2207 if (flush == Z_FINISH) {
2208 FLUSH_BLOCK(s, 1);
2209 return finish_done;
2210 }
2211 if (s->sym_next)
2212 FLUSH_BLOCK(s, 0);
2213 return block_done;
2214}
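
/*
 * Illustrative note, assuming only the public zlib.h API: Z_HUFFMAN_ONLY is
 * selected the same way as the other strategies, e.g.
 *     deflateInit2(&strm, 6, Z_DEFLATED, 15, 8, Z_HUFFMAN_ONLY);
 * or switched to mid-stream with deflateParams(&strm, 6, Z_HUFFMAN_ONLY);
 * deflate() then runs deflate_huff() above, coding every byte as a literal
 * with no match search at all.
 */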