dbf-halloween2015
diff libs/zlib/deflate.c @ 1:c3f5c32cb210
barfed all the libraries in the source tree to make porting easier
author | John Tsiombikas <nuclear@member.fsf.org>
---|---
date | Sun, 01 Nov 2015 00:36:56 +0200
parents |
children |
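
For porting context: the file added below implements zlib's public deflate API (deflateInit, deflate, deflateEnd). A minimal, hypothetical single-shot compression sketch, not part of this changeset, with buffer sizing and error handling simplified:

```c
/* Hypothetical usage sketch, not part of this changeset: compress one buffer
 * in a single deflate() call. Assumes "out" is large enough (e.g. sized with
 * deflateBound()); real code loops while deflate() fills the output buffer. */
#include <string.h>
#include "zlib.h"

static int compress_buffer(const unsigned char *in, unsigned long in_len,
                           unsigned char *out, unsigned long *out_len)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));      /* zalloc/zfree/opaque left as defaults */
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK) return ret;

    strm.next_in = (Bytef *)in;          /* cast: next_in is not const in this zlib */
    strm.avail_in = (uInt)in_len;
    strm.next_out = out;
    strm.avail_out = (uInt)*out_len;

    ret = deflate(&strm, Z_FINISH);      /* hand over all input and finish the stream */
    *out_len = strm.total_out;

    deflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : Z_BUF_ERROR;
}
```

For streaming use, deflate() is instead called repeatedly with Z_NO_FLUSH while input remains and the output buffer is drained, then with Z_FINISH until it returns Z_STREAM_END.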
line diff
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/libs/zlib/deflate.c Sun Nov 01 00:36:56 2015 +0200
@@ -0,0 +1,1736 @@
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-2005 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in http://www.ietf.org/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
+ *
+ */
+
+/* @(#) $Id$ */
+
+#include "deflate.h"
+
+const char deflate_copyright[] =
+   " deflate 1.2.3 Copyright 1995-2005 Jean-loup Gailly ";
+/*
+  If you use the zlib library in a product, an acknowledgment is welcome
+  in the documentation of your product. If for some reason you cannot
+  include such an acknowledgment, I would appreciate that you keep this
+  copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+    need_more,      /* block not completed, need more input or more output */
+    block_done,     /* block flush performed */
+    finish_started, /* finish started, need only more output at next deflate */
+    finish_done     /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+#ifndef FASTEST
+local block_state deflate_slow OF((deflate_state *s, int flush));
+#endif
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+#ifndef FASTEST
+#ifdef ASMV
+      void match_init OF((void)); /* asm code initialization */
+      uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+#endif
+local uInt longest_match_fast OF((deflate_state *s, IPos cur_match));
+
+#ifdef DEBUG
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+                           int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+#  define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+   ush good_length; /* reduce lazy search above this match length */
+   ush max_lazy;    /* do not perform lazy search above this match length */
+   ush nice_length; /* quit search above this match length */
+   ush max_chain;
+   compress_func func;
+} config;
+
+#ifdef FASTEST
+local const config configuration_table[2] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored},  /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}};   /* max speed, no lazy matches */
+#else
+local const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored},  /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast},    /* max speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow},  /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
+#endif
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+#ifndef NO_DUMMY_DECL
+struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
+#endif
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to to UPDATE_HASH are made with consecutive
+ *    input characters, so that a running hash key can be computed from the
+ *    previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * If this file is compiled with -DFASTEST, the compression level is forced
+ * to 1, and no hash chains are maintained.
+ * IN assertion: all calls to to INSERT_STRING are made with consecutive
+ *    input characters and the first MIN_MATCH bytes of str are valid
+ *    (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#ifdef FASTEST
+#define INSERT_STRING(s, str, match_head) \
+   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+    match_head = s->head[s->ins_h], \
+    s->head[s->ins_h] = (Pos)(str))
+#else
+#define INSERT_STRING(s, str, match_head) \
+   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+    match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
+    s->head[s->ins_h] = (Pos)(str))
+#endif
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+    s->head[s->hash_size-1] = NIL; \
+    zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int ZEXPORT deflateInit_(strm, level, version, stream_size)
+    z_streamp strm;
+    int level;
+    const char *version;
+    int stream_size;
+{
+    return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+                         Z_DEFAULT_STRATEGY, version, stream_size);
+    /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+                          version, stream_size)
+    z_streamp strm;
+    int level;
+    int method;
+    int windowBits;
+    int memLevel;
+    int strategy;
+    const char *version;
+    int stream_size;
+{
+    deflate_state *s;
+    int wrap = 1;
+    static const char my_version[] = ZLIB_VERSION;
+
+    ushf *overlay;
+    /* We overlay pending_buf and d_buf+l_buf. This works since the average
+     * output size for (length,distance) codes is <= 24 bits.
+     */
+
+    if (version == Z_NULL || version[0] != my_version[0] ||
+        stream_size != sizeof(z_stream)) {
+        return Z_VERSION_ERROR;
+    }
+    if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+    strm->msg = Z_NULL;
+    if (strm->zalloc == (alloc_func)0) {
+        strm->zalloc = zcalloc;
+        strm->opaque = (voidpf)0;
+    }
+    if (strm->zfree == (free_func)0) strm->zfree = zcfree;
+
+#ifdef FASTEST
+    if (level != 0) level = 1;
+#else
+    if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#endif
+
+    if (windowBits < 0) { /* suppress zlib wrapper */
+        wrap = 0;
+        windowBits = -windowBits;
+    }
+#ifdef GZIP
+    else if (windowBits > 15) {
+        wrap = 2;       /* write gzip wrapper instead */
+        windowBits -= 16;
+    }
+#endif
+    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+        windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+        strategy < 0 || strategy > Z_FIXED) {
+        return Z_STREAM_ERROR;
+    }
+    if (windowBits == 8) windowBits = 9;  /* until 256-byte window bug fixed */
+    s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+    if (s == Z_NULL) return Z_MEM_ERROR;
+    strm->state = (struct internal_state FAR *)s;
+    s->strm = strm;
+
+    s->wrap = wrap;
+    s->gzhead = Z_NULL;
+    s->w_bits = windowBits;
+    s->w_size = 1 << s->w_bits;
+    s->w_mask = s->w_size - 1;
+
+    s->hash_bits = memLevel + 7;
+    s->hash_size = 1 << s->hash_bits;
+    s->hash_mask = s->hash_size - 1;
+    s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+    s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+    s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+    s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+    s->pending_buf = (uchf *) overlay;
+    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+    if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+        s->pending_buf == Z_NULL) {
+        s->status = FINISH_STATE;
+        strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
+        deflateEnd (strm);
+        return Z_MEM_ERROR;
+    }
+    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+    s->level = level;
+    s->strategy = strategy;
+    s->method = (Byte)method;
+
+    return deflateReset(strm);
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
+    z_streamp strm;
+    const Bytef *dictionary;
+    uInt dictLength;
+{
+    deflate_state *s;
+    uInt length = dictLength;
+    uInt n;
+    IPos hash_head = 0;
+
+    if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
+        strm->state->wrap == 2 ||
+        (strm->state->wrap == 1 && strm->state->status != INIT_STATE))
+        return Z_STREAM_ERROR;
+
+    s = strm->state;
+    if (s->wrap)
+        strm->adler = adler32(strm->adler, dictionary, dictLength);
+
+    if (length < MIN_MATCH) return Z_OK;
+    if (length > MAX_DIST(s)) {
+        length = MAX_DIST(s);
+        dictionary += dictLength - length; /* use the tail of the dictionary */
+    }
+    zmemcpy(s->window, dictionary, length);
+    s->strstart = length;
+    s->block_start = (long)length;
+
+    /* Insert all strings in the hash table (except for the last two bytes).
+     * s->lookahead stays null, so s->ins_h will be recomputed at the next
+     * call of fill_window.
+     */
+    s->ins_h = s->window[0];
+    UPDATE_HASH(s, s->ins_h, s->window[1]);
+    for (n = 0; n <= length - MIN_MATCH; n++) {
+        INSERT_STRING(s, n, hash_head);
+    }
+    if (hash_head) hash_head = 0;  /* to make compiler happy */
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateReset (strm)
+    z_streamp strm;
+{
+    deflate_state *s;
+
+    if (strm == Z_NULL || strm->state == Z_NULL ||
+        strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
+        return Z_STREAM_ERROR;
+    }
+
+    strm->total_in = strm->total_out = 0;
+    strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+    strm->data_type = Z_UNKNOWN;
+
+    s = (deflate_state *)strm->state;
+    s->pending = 0;
+    s->pending_out = s->pending_buf;
+
+    if (s->wrap < 0) {
+        s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
+    }
+    s->status = s->wrap ? INIT_STATE : BUSY_STATE;
+    strm->adler =
+#ifdef GZIP
+        s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
+#endif
+        adler32(0L, Z_NULL, 0);
+    s->last_flush = Z_NO_FLUSH;
+
+    _tr_init(s);
+    lm_init(s);
+
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetHeader (strm, head)
+    z_streamp strm;
+    gz_headerp head;
+{
+    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+    if (strm->state->wrap != 2) return Z_STREAM_ERROR;
+    strm->state->gzhead = head;
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflatePrime (strm, bits, value)
+    z_streamp strm;
+    int bits;
+    int value;
+{
+    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+    strm->state->bi_valid = bits;
+    strm->state->bi_buf = (ush)(value & ((1 << bits) - 1));
+    return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateParams(strm, level, strategy)
+    z_streamp strm;
+    int level;
+    int strategy;
+{
+    deflate_state *s;
+    compress_func func;
+    int err = Z_OK;
+
+    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+    s = strm->state;
+
+#ifdef FASTEST
+    if (level != 0) level = 1;
+#else
+    if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#endif
+    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
+        return Z_STREAM_ERROR;
+    }
+    func = configuration_table[s->level].func;
+
+    if (func != configuration_table[level].func && strm->total_in != 0) {
+        /* Flush the last buffer: */
+        err = deflate(strm, Z_PARTIAL_FLUSH);
+    }
+    if (s->level != level) {
+        s->level = level;
+        s->max_lazy_match = configuration_table[level].max_lazy;
+        s->good_match = configuration_table[level].good_length;
+        s->nice_match = configuration_table[level].nice_length;
+        s->max_chain_length = configuration_table[level].max_chain;
+    }
+    s->strategy = strategy;
+    return err;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
+    z_streamp strm;
+    int good_length;
+    int max_lazy;
+    int nice_length;
+    int max_chain;
+{
+    deflate_state *s;
+
+    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+    s = strm->state;
+    s->good_match = good_length;
+    s->max_lazy_match = max_lazy;
+    s->nice_match = nice_length;
+    s->max_chain_length = max_chain;
+    return Z_OK;
+}
+
+/* =========================================================================
+ * For the default windowBits of 15 and memLevel of 8, this function returns
+ * a close to exact, as well as small, upper bound on the compressed size.
+ * They are coded as constants here for a reason--if the #define's are
+ * changed, then this function needs to be changed as well. The return
+ * value for 15 and 8 only works for those exact settings.
+ *
+ * For any setting other than those defaults for windowBits and memLevel,
+ * the value returned is a conservative worst case for the maximum expansion
+ * resulting from using fixed blocks instead of stored blocks, which deflate
+ * can emit on compressed data for some combinations of the parameters.
+ *
+ * This function could be more sophisticated to provide closer upper bounds
+ * for every combination of windowBits and memLevel, as well as wrap.
+ * But even the conservative upper bound of about 14% expansion does not
+ * seem onerous for output buffer allocation.
+ */
+uLong ZEXPORT deflateBound(strm, sourceLen)
+    z_streamp strm;
+    uLong sourceLen;
+{
+    deflate_state *s;
+    uLong destLen;
+
+    /* conservative upper bound */
+    destLen = sourceLen +
+              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 11;
+
+    /* if can't get parameters, return conservative bound */
+    if (strm == Z_NULL || strm->state == Z_NULL)
+        return destLen;
+
+    /* if not default parameters, return conservative bound */
+    s = strm->state;
+    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
+        return destLen;
+
+    /* default settings: return tight bound for that case */
+    return compressBound(sourceLen);
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+    deflate_state *s;
+    uInt b;
+{
+    put_byte(s, (Byte)(b >> 8));
+    put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+    z_streamp strm;
+{
+    unsigned len = strm->state->pending;
+
+    if (len > strm->avail_out) len = strm->avail_out;
+    if (len == 0) return;
+
+    zmemcpy(strm->next_out, strm->state->pending_out, len);
+    strm->next_out += len;
+    strm->state->pending_out += len;
+    strm->total_out += len;
+    strm->avail_out -= len;
+    strm->state->pending -= len;
+    if (strm->state->pending == 0) {
+        strm->state->pending_out = strm->state->pending_buf;
+    }
+}
+
+/* ========================================================================= */
+int ZEXPORT deflate (strm, flush)
+    z_streamp strm;
+    int flush;
+{
+    int old_flush; /* value of flush param for previous deflate call */
+    deflate_state *s;
+
+    if (strm == Z_NULL || strm->state == Z_NULL ||
+        flush > Z_FINISH || flush < 0) {
+        return Z_STREAM_ERROR;
+    }
+    s = strm->state;
+
+    if (strm->next_out == Z_NULL ||
+        (strm->next_in == Z_NULL && strm->avail_in != 0) ||
+        (s->status == FINISH_STATE && flush != Z_FINISH)) {
+        ERR_RETURN(strm, Z_STREAM_ERROR);
+    }
+    if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
+
+    s->strm = strm; /* just in case */
+    old_flush = s->last_flush;
+    s->last_flush = flush;
+
+    /* Write the header */
+    if (s->status == INIT_STATE) {
+#ifdef GZIP
+        if (s->wrap == 2) {
+            strm->adler = crc32(0L, Z_NULL, 0);
+            put_byte(s, 31);
+            put_byte(s, 139);
+            put_byte(s, 8);
+            if (s->gzhead == NULL) {
+                put_byte(s, 0);
+                put_byte(s, 0);
+                put_byte(s, 0);
+                put_byte(s, 0);
+                put_byte(s, 0);
+                put_byte(s, s->level == 9 ? 2 :
+                            (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
+                             4 : 0));
+                put_byte(s, OS_CODE);
+                s->status = BUSY_STATE;
+            }
+            else {
+                put_byte(s, (s->gzhead->text ? 1 : 0) +
+                            (s->gzhead->hcrc ? 2 : 0) +
+                            (s->gzhead->extra == Z_NULL ? 0 : 4) +
+                            (s->gzhead->name == Z_NULL ? 0 : 8) +
+                            (s->gzhead->comment == Z_NULL ? 0 : 16)
+                        );
+                put_byte(s, (Byte)(s->gzhead->time & 0xff));
+                put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
+                put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
+                put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
+                put_byte(s, s->level == 9 ? 2 :
+                            (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
+                             4 : 0));
+                put_byte(s, s->gzhead->os & 0xff);
+                if (s->gzhead->extra != NULL) {
+                    put_byte(s, s->gzhead->extra_len & 0xff);
+                    put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
+                }
+                if (s->gzhead->hcrc)
+                    strm->adler = crc32(strm->adler, s->pending_buf,
+                                        s->pending);
+                s->gzindex = 0;
+                s->status = EXTRA_STATE;
+            }
+        }
+        else
+#endif
+        {
+            uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+            uInt level_flags;
+
+            if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
+                level_flags = 0;
+            else if (s->level < 6)
+                level_flags = 1;
+            else if (s->level == 6)
+                level_flags = 2;
+            else
+                level_flags = 3;
+            header |= (level_flags << 6);
+            if (s->strstart != 0) header |= PRESET_DICT;
+            header += 31 - (header % 31);
+
+            s->status = BUSY_STATE;
+            putShortMSB(s, header);
+
+            /* Save the adler32 of the preset dictionary: */
+            if (s->strstart != 0) {
+                putShortMSB(s, (uInt)(strm->adler >> 16));
+                putShortMSB(s, (uInt)(strm->adler & 0xffff));
+            }
+            strm->adler = adler32(0L, Z_NULL, 0);
+        }
+    }
+#ifdef GZIP
+    if (s->status == EXTRA_STATE) {
+        if (s->gzhead->extra != NULL) {
+            uInt beg = s->pending;  /* start of bytes to update crc */
+
+            while (s->gzindex < (s->gzhead->extra_len & 0xffff)) {
+                if (s->pending == s->pending_buf_size) {
+                    if (s->gzhead->hcrc && s->pending > beg)
+                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                            s->pending - beg);
+                    flush_pending(strm);
+                    beg = s->pending;
+                    if (s->pending == s->pending_buf_size)
+                        break;
+                }
+                put_byte(s, s->gzhead->extra[s->gzindex]);
+                s->gzindex++;
+            }
+            if (s->gzhead->hcrc && s->pending > beg)
+                strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                    s->pending - beg);
+            if (s->gzindex == s->gzhead->extra_len) {
+                s->gzindex = 0;
+                s->status = NAME_STATE;
+            }
+        }
+        else
+            s->status = NAME_STATE;
+    }
+    if (s->status == NAME_STATE) {
+        if (s->gzhead->name != NULL) {
+            uInt beg = s->pending;  /* start of bytes to update crc */
+            int val;
+
+            do {
+                if (s->pending == s->pending_buf_size) {
+                    if (s->gzhead->hcrc && s->pending > beg)
+                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                            s->pending - beg);
+                    flush_pending(strm);
+                    beg = s->pending;
+                    if (s->pending == s->pending_buf_size) {
+                        val = 1;
+                        break;
+                    }
+                }
+                val = s->gzhead->name[s->gzindex++];
+                put_byte(s, val);
+            } while (val != 0);
+            if (s->gzhead->hcrc && s->pending > beg)
+                strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                    s->pending - beg);
+            if (val == 0) {
+                s->gzindex = 0;
+                s->status = COMMENT_STATE;
+            }
+        }
+        else
+            s->status = COMMENT_STATE;
+    }
+    if (s->status == COMMENT_STATE) {
+        if (s->gzhead->comment != NULL) {
+            uInt beg = s->pending;  /* start of bytes to update crc */
+            int val;
+
+            do {
+                if (s->pending == s->pending_buf_size) {
+                    if (s->gzhead->hcrc && s->pending > beg)
+                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                            s->pending - beg);
+                    flush_pending(strm);
+                    beg = s->pending;
+                    if (s->pending == s->pending_buf_size) {
+                        val = 1;
+                        break;
+                    }
+                }
+                val = s->gzhead->comment[s->gzindex++];
+                put_byte(s, val);
+            } while (val != 0);
+            if (s->gzhead->hcrc && s->pending > beg)
+                strm->adler = crc32(strm->adler, s->pending_buf + beg,
+                                    s->pending - beg);
+            if (val == 0)
+                s->status = HCRC_STATE;
+        }
+        else
+            s->status = HCRC_STATE;
+    }
+    if (s->status == HCRC_STATE) {
+        if (s->gzhead->hcrc) {
+            if (s->pending + 2 > s->pending_buf_size)
+                flush_pending(strm);
+            if (s->pending + 2 <= s->pending_buf_size) {
+                put_byte(s, (Byte)(strm->adler & 0xff));
+                put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
+                strm->adler = crc32(0L, Z_NULL, 0);
+                s->status = BUSY_STATE;
+            }
+        }
+        else
+            s->status = BUSY_STATE;
+    }
+#endif
+
+    /* Flush as much pending output as possible */
+    if (s->pending != 0) {
+        flush_pending(strm);
+        if (strm->avail_out == 0) {
+            /* Since avail_out is 0, deflate will be called again with
+             * more output space, but possibly with both pending and
+             * avail_in equal to zero. There won't be anything to do,
+             * but this is not an error situation so make sure we
+             * return OK instead of BUF_ERROR at next call of deflate:
+             */
+            s->last_flush = -1;
+            return Z_OK;
+        }
+
+    /* Make sure there is something to do and avoid duplicate consecutive
+     * flushes. For repeated and useless calls with Z_FINISH, we keep
+     * returning Z_STREAM_END instead of Z_BUF_ERROR.
+     */
+    } else if (strm->avail_in == 0 && flush <= old_flush &&
+               flush != Z_FINISH) {
+        ERR_RETURN(strm, Z_BUF_ERROR);
+    }
+
+    /* User must not provide more input after the first FINISH: */
+    if (s->status == FINISH_STATE && strm->avail_in != 0) {
+        ERR_RETURN(strm, Z_BUF_ERROR);
+    }
+
+    /* Start a new block or continue the current one.
+     */
+    if (strm->avail_in != 0 || s->lookahead != 0 ||
+        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+        block_state bstate;
+
+        bstate = (*(configuration_table[s->level].func))(s, flush);
+
+        if (bstate == finish_started || bstate == finish_done) {
+            s->status = FINISH_STATE;
+        }
+        if (bstate == need_more || bstate == finish_started) {
+            if (strm->avail_out == 0) {
+                s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+            }
+            return Z_OK;
+            /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+             * of deflate should use the same flush parameter to make sure
+             * that the flush is complete. So we don't have to output an
+             * empty block here, this will be done at next call. This also
+             * ensures that for a very small output buffer, we emit at most
+             * one empty block.
+             */
+        }
+        if (bstate == block_done) {
+            if (flush == Z_PARTIAL_FLUSH) {
+                _tr_align(s);
+            } else { /* FULL_FLUSH or SYNC_FLUSH */
+                _tr_stored_block(s, (char*)0, 0L, 0);
+                /* For a full flush, this empty block will be recognized
+                 * as a special marker by inflate_sync().
+                 */
+                if (flush == Z_FULL_FLUSH) {
+                    CLEAR_HASH(s); /* forget history */
+                }
+            }
+            flush_pending(strm);
+            if (strm->avail_out == 0) {
+                s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+                return Z_OK;
+            }
+        }
+    }
+    Assert(strm->avail_out > 0, "bug2");
+
+    if (flush != Z_FINISH) return Z_OK;
+    if (s->wrap <= 0) return Z_STREAM_END;
+
+    /* Write the trailer */
+#ifdef GZIP
+    if (s->wrap == 2) {
+        put_byte(s, (Byte)(strm->adler & 0xff));
+        put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
+        put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
+        put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
+        put_byte(s, (Byte)(strm->total_in & 0xff));
+        put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
+        put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
+        put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
+    }
+    else
+#endif
+    {
+        putShortMSB(s, (uInt)(strm->adler >> 16));
+        putShortMSB(s, (uInt)(strm->adler & 0xffff));
+    }
+    flush_pending(strm);
+    /* If avail_out is zero, the application will call deflate again
+     * to flush the rest.
+     */
+    if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
+    return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateEnd (strm)
+    z_streamp strm;
+{
+    int status;
+
+    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+
+    status = strm->state->status;
+    if (status != INIT_STATE &&
+        status != EXTRA_STATE &&
+        status != NAME_STATE &&
+        status != COMMENT_STATE &&
+        status != HCRC_STATE &&
+        status != BUSY_STATE &&
+        status != FINISH_STATE) {
+        return Z_STREAM_ERROR;
+    }
+
+    /* Deallocate in reverse order of allocations: */
+    TRY_FREE(strm, strm->state->pending_buf);
+    TRY_FREE(strm, strm->state->head);
+    TRY_FREE(strm, strm->state->prev);
+    TRY_FREE(strm, strm->state->window);
+
+    ZFREE(strm, strm->state);
+    strm->state = Z_NULL;
+
+    return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ * To simplify the source, this is not supported for 16-bit MSDOS (which
+ * doesn't have enough memory anyway to duplicate compression states).
+ */
+int ZEXPORT deflateCopy (dest, source)
+    z_streamp dest;
+    z_streamp source;
+{
+#ifdef MAXSEG_64K
+    return Z_STREAM_ERROR;
+#else
+    deflate_state *ds;
+    deflate_state *ss;
+    ushf *overlay;
+
+
+    if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
+        return Z_STREAM_ERROR;
+    }
+
+    ss = source->state;
+
+    zmemcpy(dest, source, sizeof(z_stream));
+
+    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
+    if (ds == Z_NULL) return Z_MEM_ERROR;
+    dest->state = (struct internal_state FAR *) ds;
+    zmemcpy(ds, ss, sizeof(deflate_state));
+    ds->strm = dest;
+
+    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+    ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+    ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+    ds->pending_buf = (uchf *) overlay;
+
+    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+        ds->pending_buf == Z_NULL) {
+        deflateEnd (dest);
+        return Z_MEM_ERROR;
+    }
+    /* following zmemcpy do not work for 16-bit MSDOS */
+    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+    zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+    zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+    ds->l_desc.dyn_tree = ds->dyn_ltree;
+    ds->d_desc.dyn_tree = ds->dyn_dtree;
+    ds->bl_desc.dyn_tree = ds->bl_tree;
+
+    return Z_OK;
+#endif /* MAXSEG_64K */
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read.  All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+    z_streamp strm;
+    Bytef *buf;
+    unsigned size;
+{
+    unsigned len = strm->avail_in;
+
+    if (len > size) len = size;
+    if (len == 0) return 0;
+
+    strm->avail_in -= len;
+
+    if (strm->state->wrap == 1) {
+        strm->adler = adler32(strm->adler, strm->next_in, len);
+    }
+#ifdef GZIP
+    else if (strm->state->wrap == 2) {
+        strm->adler = crc32(strm->adler, strm->next_in, len);
+    }
+#endif
+    zmemcpy(buf, strm->next_in, len);
+    strm->next_in += len;
+    strm->total_in += len;
+
+    return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+    deflate_state *s;
+{
+    s->window_size = (ulg)2L*s->w_size;
+
+    CLEAR_HASH(s);
+
+    /* Set the default configuration parameters:
+     */
+    s->max_lazy_match = configuration_table[s->level].max_lazy;
+    s->good_match = configuration_table[s->level].good_length;
+    s->nice_match = configuration_table[s->level].nice_length;
+    s->max_chain_length = configuration_table[s->level].max_chain;
+
+    s->strstart = 0;
+    s->block_start = 0L;
+    s->lookahead = 0;
+    s->match_length = s->prev_length = MIN_MATCH-1;
+    s->match_available = 0;
+    s->ins_h = 0;
+#ifndef FASTEST
+#ifdef ASMV
+    match_init(); /* initialize the asm code */
+#endif
+#endif
+}
+
+#ifndef FASTEST
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+local uInt longest_match(s, cur_match)
+    deflate_state *s;
+    IPos cur_match; /* current match */
+{
+    unsigned chain_length = s->max_chain_length;/* max hash chain length */
+    register Bytef *scan = s->window + s->strstart; /* current string */
+    register Bytef *match; /* matched string */
+    register int len; /* length of current match */
+    int best_len = s->prev_length; /* best match length so far */
+    int nice_match = s->nice_match; /* stop if match long enough */
+    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+        s->strstart - (IPos)MAX_DIST(s) : NIL;
+    /* Stop when cur_match becomes <= limit. To simplify the code,
+     * we prevent matches with the string of window index 0.
+     */
+    Posf *prev = s->prev;
+    uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+    /* Compare two bytes at a time. Note: this is not always beneficial.
+     * Try with and without -DUNALIGNED_OK to check.
+     */
+    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+    register ush scan_start = *(ushf*)scan;
+    register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+    register Byte scan_end1 = scan[best_len-1];
+    register Byte scan_end = scan[best_len];
+#endif
+
+    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+     * It is easy to get rid of this optimization if necessary.
+     */
+    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+    /* Do not waste too much time if we already have a good match: */
+    if (s->prev_length >= s->good_match) {
+        chain_length >>= 2;
+    }
+    /* Do not look for matches beyond the end of the input. This is necessary
+     * to make deflate deterministic.
+     */
+    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+    do {
+        Assert(cur_match < s->strstart, "no future");
+        match = s->window + cur_match;
+
+        /* Skip to next match if the match length cannot increase
+         * or if the match length is less than 2.  Note that the checks below
+         * for insufficient lookahead only occur occasionally for performance
+         * reasons.  Therefore uninitialized memory will be accessed, and
+         * conditional jumps will be made that depend on those values.
+         * However the length of the match is limited to the lookahead, so
+         * the output of deflate is not affected by the uninitialized values.
+         */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+        /* This code assumes sizeof(unsigned short) == 2. Do not use
+         * UNALIGNED_OK if your compiler uses a different size.
+         */
+        if (*(ushf*)(match+best_len-1) != scan_end ||
+            *(ushf*)match != scan_start) continue;
+
+        /* It is not necessary to compare scan[2] and match[2] since they are
+         * always equal when the other bytes match, given that the hash keys
+         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+         * strstart+3, +5, ... up to strstart+257. We check for insufficient
+         * lookahead only every 4th comparison; the 128th check will be made
+         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+         * necessary to put more guard bytes at the end of the window, or
+         * to check more often for insufficient lookahead.
+         */
+        Assert(scan[2] == match[2], "scan[2]?");
+        scan++, match++;
+        do {
+        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+                 scan < strend);
+        /* The funny "do {}" generates better code on most compilers */
+
+        /* Here, scan <= window+strstart+257 */
+        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+        if (*scan == *match) scan++;
+
+        len = (MAX_MATCH - 1) - (int)(strend-scan);
+        scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+        if (match[best_len] != scan_end ||
+            match[best_len-1] != scan_end1 ||
+            *match != *scan ||
+            *++match != scan[1]) continue;
+
+        /* The check at best_len-1 can be removed because it will be made
+         * again later. (This heuristic is not always a win.)
+         * It is not necessary to compare scan[2] and match[2] since they
+         * are always equal when the other bytes match, given that
+         * the hash keys are equal and that HASH_BITS >= 8.
+         */
+        scan += 2, match++;
+        Assert(*scan == *match, "match[2]?");
+
+        /* We check for insufficient lookahead only every 8th comparison;
+         * the 256th check will be made at strstart+258.
+         */
+        do {
+        } while (*++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 *++scan == *++match && *++scan == *++match &&
+                 scan < strend);
+
+        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+        len = MAX_MATCH - (int)(strend - scan);
+        scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+        if (len > best_len) {
+            s->match_start = cur_match;
+            best_len = len;
+            if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+            scan_end = *(ushf*)(scan+best_len-1);
+#else
+            scan_end1 = scan[best_len-1];
+            scan_end = scan[best_len];
+#endif
+        }
+    } while ((cur_match = prev[cur_match & wmask]) > limit
+             && --chain_length != 0);
+
+    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
+    return s->lookahead;
+}
+#endif /* ASMV */
+#endif /* FASTEST */
+
+/* ---------------------------------------------------------------------------
+ * Optimized version for level == 1 or strategy == Z_RLE only
+ */
+local uInt longest_match_fast(s, cur_match)
+    deflate_state *s;
+    IPos cur_match; /* current match */
+{
+    register Bytef *scan = s->window + s->strstart; /* current string */
+    register Bytef *match; /* matched string */
+    register int len; /* length of current match */
+    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+
+    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+     * It is easy to get rid of this optimization if necessary.
+     */
+    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+    Assert(cur_match < s->strstart, "no future");
+
+    match = s->window + cur_match;
+
+    /* Return failure if the match length is less than 2:
+     */
+    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
+
+    /* The check at best_len-1 can be removed because it will be made
+     * again later. (This heuristic is not always a win.)
+     * It is not necessary to compare scan[2] and match[2] since they
+     * are always equal when the other bytes match, given that
+     * the hash keys are equal and that HASH_BITS >= 8.
+     */
+    scan += 2, match += 2;
+    Assert(*scan == *match, "match[2]?");
+
+    /* We check for insufficient lookahead only every 8th comparison;
+     * the 256th check will be made at strstart+258.
+     */
+    do {
+    } while (*++scan == *++match && *++scan == *++match &&
+             *++scan == *++match && *++scan == *++match &&
+             *++scan == *++match && *++scan == *++match &&
+             *++scan == *++match && *++scan == *++match &&
+             scan < strend);
+
+    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+    len = MAX_MATCH - (int)(strend - scan);
+
+    if (len < MIN_MATCH) return MIN_MATCH - 1;
+
+    s->match_start = cur_match;
+    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
+}
+
+#ifdef DEBUG
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+    deflate_state *s;
+    IPos start, match;
+    int length;
+{
+    /* check that the match is indeed a match */
+    if (zmemcmp(s->window + match,
+                s->window + start, length) != EQUAL) {
+        fprintf(stderr, " start %u, match %u, length %d\n",
+                start, match, length);
+        do {
+            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+        } while (--length != 0);
+        z_error("invalid match");
+    }
+    if (z_verbose > 1) {
+        fprintf(stderr,"\\[%d,%d]", start-match, length);
+        do { putc(s->window[start++], stderr); } while (--length != 0);
+    }
+}
+#else
+#  define check_match(s, start, match, length)
+#endif /* DEBUG */
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ *    At least one byte has been read, or avail_in == 0; reads are
+ *    performed for at least two bytes (required for the zip translate_eol
+ *    option -- not supported here).
+ */
+local void fill_window(s)
+    deflate_state *s;
+{
+    register unsigned n, m;
+    register Posf *p;
+    unsigned more; /* Amount of free space at the end of the window. */
+    uInt wsize = s->w_size;
+
+    do {
+        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+        /* Deal with !@#$% 64K limit: */
+        if (sizeof(int) <= 2) {
+            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+                more = wsize;
+
+            } else if (more == (unsigned)(-1)) {
+                /* Very unlikely, but possible on 16 bit machine if
+                 * strstart == 0 && lookahead == 1 (input done a byte at time)
+                 */
+                more--;
+            }
+        }
+
+        /* If the window is almost full and there is insufficient lookahead,
+         * move the upper half to the lower one to make room in the upper half.
+         */
+        if (s->strstart >= wsize+MAX_DIST(s)) {
+
+            zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
+            s->match_start -= wsize;
+            s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+            s->block_start -= (long) wsize;
+
+            /* Slide the hash table (could be avoided with 32 bit values
+               at the expense of memory usage). We slide even when level == 0
+               to keep the hash table consistent if we switch back to level > 0
+               later. (Using level 0 permanently is not an optimal usage of
+               zlib, so we don't care about this pathological case.)
+             */
+            /* %%% avoid this when Z_RLE */
+            n = s->hash_size;
+            p = &s->head[n];
+            do {
+                m = *--p;
+                *p = (Pos)(m >= wsize ? m-wsize : NIL);
+            } while (--n);
+
+            n = wsize;
+#ifndef FASTEST
+            p = &s->prev[n];
+            do {
+                m = *--p;
+                *p = (Pos)(m >= wsize ? m-wsize : NIL);
+                /* If n is not on any hash chain, prev[n] is garbage but
+                 * its value will never be used.
+                 */
+            } while (--n);
+#endif
+            more += wsize;
+        }
+        if (s->strm->avail_in == 0) return;
+
+        /* If there was no sliding:
+         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+         *    more == window_size - lookahead - strstart
+         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+         * => more >= window_size - 2*WSIZE + 2
+         * In the BIG_MEM or MMAP case (not yet supported),
+         *   window_size == input_size + MIN_LOOKAHEAD &&
+         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+         * Otherwise, window_size == 2*WSIZE so more >= 2.
+         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+         */
+        Assert(more >= 2, "more < 2");
+
+        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+        s->lookahead += n;
+
+        /* Initialize the hash value now that we have some input: */
+        if (s->lookahead >= MIN_MATCH) {
+            s->ins_h = s->window[s->strstart];
+            UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+            Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+        }
+        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+         * but this is not important since only literal bytes will be emitted.
+         */
+
+    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+   _tr_flush_block(s, (s->block_start >= 0L ? \
+                   (charf *)&s->window[(unsigned)s->block_start] : \
+                   (charf *)Z_NULL), \
+                (ulg)((long)s->strstart - s->block_start), \
+                (eof)); \
+   s->block_start = s->strstart; \
+   flush_pending(s->strm); \
+   Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+   FLUSH_BLOCK_ONLY(s, eof); \
+   if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * uncompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+    deflate_state *s;
+    int flush;
+{
+    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+     * to pending_buf_size, and each stored block has a 5 byte header:
+     */
+    ulg max_block_size = 0xffff;
+    ulg max_start;
+
+    if (max_block_size > s->pending_buf_size - 5) {
+        max_block_size = s->pending_buf_size - 5;
+    }
+
+    /* Copy as much as possible from input to output: */
+    for (;;) {
+        /* Fill the window as much as possible: */
+        if (s->lookahead <= 1) {
+
+            Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+                   s->block_start >= (long)s->w_size, "slide too late");
+
+            fill_window(s);
+            if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+            if (s->lookahead == 0) break; /* flush the current block */
+        }
+        Assert(s->block_start >= 0L, "block gone");
+
+        s->strstart += s->lookahead;
+        s->lookahead = 0;
+
+        /* Emit a stored block if pending_buf will be full: */
+        max_start = s->block_start + max_block_size;
+        if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+            /* strstart == 0 is possible when wraparound on 16-bit machine */
+            s->lookahead = (uInt)(s->strstart - max_start);
+            s->strstart = (uInt)max_start;
+            FLUSH_BLOCK(s, 0);
+        }
+        /* Flush if we may have to slide, otherwise block_start may become
+         * negative and the data will be gone:
+         */
+        if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+            FLUSH_BLOCK(s, 0);
+        }
+    }
+    FLUSH_BLOCK(s, flush == Z_FINISH);
+    return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+    deflate_state *s;
+    int flush;
+{
+    IPos hash_head = NIL; /* head of the hash chain */
+    int bflush; /* set if current block must be flushed */
+
+    for (;;) {
+        /* Make sure that we always have enough lookahead, except
+         * at the end of the input file. We need MAX_MATCH bytes
+         * for the next match, plus MIN_MATCH bytes to insert the
+         * string following the next match.
+         */
+        if (s->lookahead < MIN_LOOKAHEAD) {
+            fill_window(s);
+            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+                return need_more;
+            }
+            if (s->lookahead == 0) break; /* flush the current block */
+        }
+
+        /* Insert the string window[strstart .. strstart+2] in the
+         * dictionary, and set hash_head to the head of the hash chain:
+         */
+        if (s->lookahead >= MIN_MATCH) {
+            INSERT_STRING(s, s->strstart, hash_head);
+        }
+
+        /* Find the longest match, discarding those <= prev_length.
+         * At this point we have always match_length < MIN_MATCH
+         */
+        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+            /* To simplify the code, we prevent matches with the string
+             * of window index 0 (in particular we have to avoid a match
+             * of the string with itself at the start of the input file).
+             */
+#ifdef FASTEST
+            if ((s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) ||
+                (s->strategy == Z_RLE && s->strstart - hash_head == 1)) {
+                s->match_length = longest_match_fast (s, hash_head);
+            }
+#else
+            if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) {
+                s->match_length = longest_match (s, hash_head);
+            } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
+                s->match_length = longest_match_fast (s, hash_head);
+            }
+#endif
+            /* longest_match() or longest_match_fast() sets match_start */
+        }
+        if (s->match_length >= MIN_MATCH) {
+            check_match(s, s->strstart, s->match_start, s->match_length);
+
+            _tr_tally_dist(s, s->strstart - s->match_start,
+                           s->match_length - MIN_MATCH, bflush);
+
+            s->lookahead -= s->match_length;
+
+            /* Insert new strings in the hash table only if the match length
+             * is not too large. This saves time but degrades compression.
+             */
+#ifndef FASTEST
+            if (s->match_length <= s->max_insert_length &&
+                s->lookahead >= MIN_MATCH) {
+                s->match_length--; /* string at strstart already in table */
+                do {
+                    s->strstart++;
+                    INSERT_STRING(s, s->strstart, hash_head);
+                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+                     * always MIN_MATCH bytes ahead.
+                     */
+                } while (--s->match_length != 0);
+                s->strstart++;
+            } else
+#endif
+            {
+                s->strstart += s->match_length;
+                s->match_length = 0;
+                s->ins_h = s->window[s->strstart];
+                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+                Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+                 * matter since it will be recomputed at next deflate call.
1.1551 +#ifndef FASTEST
1.1552 +/* ===========================================================================
1.1553 + * Same as above, but achieves better compression. We use a lazy
1.1554 + * evaluation for matches: a match is finally adopted only if there is
1.1555 + * no better match at the next window position.
1.1556 + */
1.1557 +local block_state deflate_slow(s, flush)
1.1558 +    deflate_state *s;
1.1559 +    int flush;
1.1560 +{
1.1561 +    IPos hash_head = NIL;    /* head of hash chain */
1.1562 +    int bflush;              /* set if current block must be flushed */
1.1563 +
1.1564 +    /* Process the input block. */
1.1565 +    for (;;) {
1.1566 +        /* Make sure that we always have enough lookahead, except
1.1567 +         * at the end of the input file. We need MAX_MATCH bytes
1.1568 +         * for the next match, plus MIN_MATCH bytes to insert the
1.1569 +         * string following the next match.
1.1570 +         */
1.1571 +        if (s->lookahead < MIN_LOOKAHEAD) {
1.1572 +            fill_window(s);
1.1573 +            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1.1574 +                return need_more;
1.1575 +            }
1.1576 +            if (s->lookahead == 0) break; /* flush the current block */
1.1577 +        }
1.1578 +
1.1579 +        /* Insert the string window[strstart .. strstart+2] in the
1.1580 +         * dictionary, and set hash_head to the head of the hash chain:
1.1581 +         */
1.1582 +        if (s->lookahead >= MIN_MATCH) {
1.1583 +            INSERT_STRING(s, s->strstart, hash_head);
1.1584 +        }
1.1585 +
1.1586 +        /* Find the longest match, discarding those <= prev_length.
1.1587 +         */
1.1588 +        s->prev_length = s->match_length, s->prev_match = s->match_start;
1.1589 +        s->match_length = MIN_MATCH-1;
1.1590 +
1.1591 +        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1.1592 +            s->strstart - hash_head <= MAX_DIST(s)) {
1.1593 +            /* To simplify the code, we prevent matches with the string
1.1594 +             * of window index 0 (in particular we have to avoid a match
1.1595 +             * of the string with itself at the start of the input file).
1.1596 +             */
1.1597 +            if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) {
1.1598 +                s->match_length = longest_match (s, hash_head);
1.1599 +            } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
1.1600 +                s->match_length = longest_match_fast (s, hash_head);
1.1601 +            }
1.1602 +            /* longest_match() or longest_match_fast() sets match_start */
1.1603 +
1.1604 +            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
1.1605 +#if TOO_FAR <= 32767
1.1606 +                || (s->match_length == MIN_MATCH &&
1.1607 +                    s->strstart - s->match_start > TOO_FAR)
1.1608 +#endif
1.1609 +                )) {
1.1610 +
1.1611 +                /* If prev_match is also MIN_MATCH, match_start is garbage
1.1612 +                 * but we will ignore the current match anyway.
1.1613 +                 */
1.1614 +                s->match_length = MIN_MATCH-1;
1.1615 +            }
1.1616 +        }
1.1617 +        /* If there was a match at the previous step and the current
1.1618 +         * match is not better, output the previous match:
1.1619 +         */
1.1620 +        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1.1621 +            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1.1622 +            /* Do not insert strings in hash table beyond this. */
1.1623 +
1.1624 +            check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1.1625 +
1.1626 +            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1.1627 +                           s->prev_length - MIN_MATCH, bflush);
1.1628 +
1.1629 +            /* Insert in hash table all strings up to the end of the match.
1.1630 +             * strstart-1 and strstart are already inserted. If there is not
1.1631 +             * enough lookahead, the last two strings are not inserted in
1.1632 +             * the hash table.
1.1633 +             */
1.1634 +            s->lookahead -= s->prev_length-1;
1.1635 +            s->prev_length -= 2;
1.1636 +            do {
1.1637 +                if (++s->strstart <= max_insert) {
1.1638 +                    INSERT_STRING(s, s->strstart, hash_head);
1.1639 +                }
1.1640 +            } while (--s->prev_length != 0);
1.1641 +            s->match_available = 0;
1.1642 +            s->match_length = MIN_MATCH-1;
1.1643 +            s->strstart++;
1.1644 +
1.1645 +            if (bflush) FLUSH_BLOCK(s, 0);
1.1646 +
1.1647 +        } else if (s->match_available) {
1.1648 +            /* If there was no match at the previous position, output a
1.1649 +             * single literal. If there was a match but the current match
1.1650 +             * is longer, truncate the previous match to a single literal.
1.1651 +             */
1.1652 +            Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1653 +            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1654 +            if (bflush) {
1.1655 +                FLUSH_BLOCK_ONLY(s, 0);
1.1656 +            }
1.1657 +            s->strstart++;
1.1658 +            s->lookahead--;
1.1659 +            if (s->strm->avail_out == 0) return need_more;
1.1660 +        } else {
1.1661 +            /* There is no previous match to compare with, wait for
1.1662 +             * the next step to decide.
1.1663 +             */
1.1664 +            s->match_available = 1;
1.1665 +            s->strstart++;
1.1666 +            s->lookahead--;
1.1667 +        }
1.1668 +    }
1.1669 +    Assert (flush != Z_NO_FLUSH, "no flush?");
1.1670 +    if (s->match_available) {
1.1671 +        Tracevv((stderr,"%c", s->window[s->strstart-1]));
1.1672 +        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1.1673 +        s->match_available = 0;
1.1674 +    }
1.1675 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1676 +    return flush == Z_FINISH ? finish_done : block_done;
1.1677 +}
1.1678 +#endif /* FASTEST */
1.1679 +
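Editor's note: deflate_slow() is the lazy variant: a match found at strstart is only adopted if the match starting at strstart+1 is not longer, which is why prev_length and prev_match are carried from one iteration to the next. Its strategy checks (Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE) are driven by the value passed to zlib's public deflateInit2(). A minimal sketch of selecting Z_FILTERED follows; the buffer sizes and sample data are invented, and level 6 is assumed to map to deflate_slow through the configuration table earlier in this file.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    unsigned char in[1024], out[2048];
    z_stream zs;
    size_t i;

    for (i = 0; i < sizeof(in); i++) in[i] = (unsigned char)(i & 0xff); /* sample data */

    memset(&zs, 0, sizeof(zs));   /* zalloc/zfree/opaque = Z_NULL */
    /* Z_FILTERED keeps lazy matching but drops matches of length 5 or less
     * in favour of literals, as in the s->strategy == Z_FILTERED check above. */
    if (deflateInit2(&zs, 6, Z_DEFLATED, 15, 8, Z_FILTERED) != Z_OK) return 1;

    zs.next_in  = in;   zs.avail_in  = (uInt)sizeof(in);
    zs.next_out = out;  zs.avail_out = (uInt)sizeof(out);
    if (deflate(&zs, Z_FINISH) != Z_STREAM_END) {   /* one-shot: output buffer is large enough */
        deflateEnd(&zs);
        return 1;
    }
    printf("compressed %lu -> %lu bytes\n",
           (unsigned long)sizeof(in), (unsigned long)zs.total_out);
    return deflateEnd(&zs) == Z_OK ? 0 : 1;
}

Z_FILTERED is intended for data with small, mostly random values (e.g. filtered image rows), where short matches rarely pay for themselves.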
1.1680 +#if 0
1.1681 +/* ===========================================================================
1.1682 + * For Z_RLE, simply look for runs of bytes, generate matches only of distance
1.1683 + * one.  Do not maintain a hash table.  (It will be regenerated if this run of
1.1684 + * deflate switches away from Z_RLE.)
1.1685 + */
1.1686 +local block_state deflate_rle(s, flush)
1.1687 +    deflate_state *s;
1.1688 +    int flush;
1.1689 +{
1.1690 +    int bflush;         /* set if current block must be flushed */
1.1691 +    uInt run;           /* length of run */
1.1692 +    uInt max;           /* maximum length of run */
1.1693 +    uInt prev;          /* byte at distance one to match */
1.1694 +    Bytef *scan;        /* scan for end of run */
1.1695 +
1.1696 +    for (;;) {
1.1697 +        /* Make sure that we always have enough lookahead, except
1.1698 +         * at the end of the input file. We need MAX_MATCH bytes
1.1699 +         * for the longest encodable run.
1.1700 +         */
1.1701 +        if (s->lookahead < MAX_MATCH) {
1.1702 +            fill_window(s);
1.1703 +            if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) {
1.1704 +                return need_more;
1.1705 +            }
1.1706 +            if (s->lookahead == 0) break; /* flush the current block */
1.1707 +        }
1.1708 +
1.1709 +        /* See how many times the previous byte repeats */
1.1710 +        run = 0;
1.1711 +        if (s->strstart > 0) {      /* if there is a previous byte, that is */
1.1712 +            max = s->lookahead < MAX_MATCH ? s->lookahead : MAX_MATCH;
1.1713 +            scan = s->window + s->strstart - 1;
1.1714 +            prev = *scan++;
1.1715 +            do {
1.1716 +                if (*scan++ != prev)
1.1717 +                    break;
1.1718 +            } while (++run < max);
1.1719 +        }
1.1720 +
1.1721 +        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
1.1722 +        if (run >= MIN_MATCH) {
1.1723 +            check_match(s, s->strstart, s->strstart - 1, run);
1.1724 +            _tr_tally_dist(s, 1, run - MIN_MATCH, bflush);
1.1725 +            s->lookahead -= run;
1.1726 +            s->strstart += run;
1.1727 +        } else {
1.1728 +            /* No match, output a literal byte */
1.1729 +            Tracevv((stderr,"%c", s->window[s->strstart]));
1.1730 +            _tr_tally_lit (s, s->window[s->strstart], bflush);
1.1731 +            s->lookahead--;
1.1732 +            s->strstart++;
1.1733 +        }
1.1734 +        if (bflush) FLUSH_BLOCK(s, 0);
1.1735 +    }
1.1736 +    FLUSH_BLOCK(s, flush == Z_FINISH);
1.1737 +    return flush == Z_FINISH ? finish_done : block_done;
1.1738 +}
1.1739 +#endif
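Editor's note: deflate_rle() is compiled out (#if 0) in this copy; as the enabled code above shows, the Z_RLE strategy is instead served by the strstart - hash_head == 1 branches that call longest_match_fast(). The core of the function is the run-detection loop: count how many bytes starting at strstart equal the byte at distance one. A standalone illustration of that loop follows; the names and the sample string are invented for the example, not code from this diff.

#include <stdio.h>
#include <string.h>

#define TOY_MAX_MATCH 258   /* longest encodable run, as MAX_MATCH in deflate.h */

/* Count bytes starting at pos that match the byte at distance one. */
static size_t run_at_distance_one(const unsigned char *buf, size_t pos, size_t len)
{
    unsigned char prev;
    size_t run = 0, max;

    if (pos == 0 || pos >= len) return 0;   /* need a previous byte to match against */
    max = len - pos;
    if (max > TOY_MAX_MATCH) max = TOY_MAX_MATCH;
    prev = buf[pos - 1];
    while (run < max && buf[pos + run] == prev) run++;
    return run;
}

int main(void)
{
    const unsigned char data[] = "aaaaaaabbbc";
    size_t run = run_at_distance_one(data, 1, strlen((const char *)data));

    /* Prints 6: positions 1..6 all equal data[0] == 'a'. */
    printf("run of %lu bytes matching distance 1 at position 1\n", (unsigned long)run);
    return 0;
}

A run of length run is then emitted exactly like any other match, just with distance 1, which is what _tr_tally_dist(s, 1, run - MIN_MATCH, bflush) does above.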