/* vi: set sw=4 ts=4: */
/*
 * Small lzma deflate implementation.
 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (C) 1999-2005 Igor Pavlov
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
#include "libbb.h"
#include "bb_archive.h"
#if 0 /* set to 1 to enable debug output */
# define dbg(...) bb_error_msg(__VA_ARGS__)
#else
# define dbg(...) ((void)0)
#endif
#if ENABLE_FEATURE_LZMA_FAST
# define speed_inline ALWAYS_INLINE
# define size_inline
#else
# define speed_inline
# define size_inline ALWAYS_INLINE
#endif
typedef struct {
	int fd;
	uint8_t *ptr;

/* Was keeping rc on stack in unlzma and separately allocating buffer,
 * but with "buffer 'attached to' allocated rc" code is smaller: */
	/* uint8_t *buffer; */
#define RC_BUFFER ((uint8_t*)(rc+1))

	uint8_t *buffer_end;

/* Had provisions for variable buffer, but we don't need it here */
	/* int buffer_size; */
#define RC_BUFFER_SIZE 0x10000

	uint32_t code;
	uint32_t range;
	uint32_t bound;
} rc_t;
#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11
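/* The binary range coder below works on adaptive probabilities: each
 * uint16_t in the model is the probability of a 0-bit, expressed as a
 * fraction of 2048 (1 << RC_MODEL_TOTAL_BITS) and starting at 1024
 * (i.e. 50%).  After decoding a 0-bit the probability is raised by
 * (2048 - p) >> RC_MOVE_BITS, after a 1-bit it is lowered by
 * p >> RC_MOVE_BITS.  'range' is kept normalized to at least
 * 1 << RC_TOP_BITS by shifting in one compressed byte at a time. */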
/* Called once in rc_do_normalize() */
static void rc_read(rc_t *rc)
{
	int buffer_size = safe_read(rc->fd, RC_BUFFER, RC_BUFFER_SIZE);
//TODO: return -1 instead
//This will make unlzma delete broken unpacked file on unpack errors
	if (buffer_size <= 0)
		bb_simple_error_msg_and_die("unexpected EOF");
	rc->buffer_end = RC_BUFFER + buffer_size;
	rc->ptr = RC_BUFFER;
}
/* Called twice, but one callsite is in speed_inline'd rc_is_bit_1() */
static void rc_do_normalize(rc_t *rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}

static ALWAYS_INLINE void rc_normalize(rc_t *rc)
{
	if (rc->range < (1 << RC_TOP_BITS)) {
		rc_do_normalize(rc);
	}
}
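/* rc_init() allocates the rc_t with the input buffer attached right
 * behind it (see RC_BUFFER) and primes 'code' with the first five
 * compressed bytes before setting the full 32-bit range. */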
static ALWAYS_INLINE rc_t* rc_init(int fd) /*, int buffer_size) */
{
	int i;
	rc_t *rc;

	rc = xzalloc(sizeof(*rc) + RC_BUFFER_SIZE);

	rc->fd = fd;
	/* rc->ptr = rc->buffer_end; */

	for (i = 0; i < 5; i++) {
		rc_do_normalize(rc);
	}
	rc->range = 0xffffffff;
	return rc;
}
static ALWAYS_INLINE void rc_free(rc_t *rc)
{
	free(rc);
}
/* rc_is_bit_1 is called 9 times */
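/* Decode one bit whose probability of being 0 is *p / 2048.  'bound'
 * splits the current range in that proportion: a code value below the
 * split decodes as 0 (and *p is nudged up), a value at or above it
 * decodes as 1 (and *p is nudged down).  With *p at its initial 1024
 * the split is exactly half of the range, i.e. a 50/50 guess. */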
static speed_inline int rc_is_bit_1(rc_t *rc, uint16_t *p)
{
	rc_normalize(rc);
	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
	if (rc->code < rc->bound) {
		rc->range = rc->bound;
		*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
		return 0;
	}
	rc->range -= rc->bound;
	rc->code -= rc->bound;
	*p -= *p >> RC_MOVE_BITS;
	return 1;
}
/* Called 4 times in unlzma loop */
static ALWAYS_INLINE int rc_get_bit(rc_t *rc, uint16_t *p, int *symbol)
{
	int ret = rc_is_bit_1(rc, p);
	*symbol = *symbol * 2 + ret;
	return ret;
}
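/* rc_direct_bit() decodes a bit with a fixed 50% probability and no
 * adaptive model; it is used below for the middle bits of large match
 * distances (the low four "align" bits keep an adaptive model). */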
static ALWAYS_INLINE int rc_direct_bit(rc_t *rc)
{
	rc_normalize(rc);
	rc->range >>= 1;
	if (rc->code >= rc->range) {
		rc->code -= rc->range;
		return 1;
	}
	return 0;
}
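/* rc_bit_tree_decode() reads num_levels bits, most significant first.
 * *symbol starts at 1 and doubles as the index of the next probability
 * in the tree; the final subtraction strips that leading sentinel bit,
 * leaving a plain num_levels-bit value for the caller. */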
static speed_inline void
rc_bit_tree_decode(rc_t *rc, uint16_t *p, int num_levels, int *symbol)
{
	int i = num_levels;

	*symbol = 1;
	while (i--)
		rc_get_bit(rc, p + *symbol, symbol);
	*symbol -= 1 << num_levels;
}
typedef struct {
	uint8_t pos;
	uint32_t dict_size;
	uint64_t dst_size;
} PACKED lzma_header_t;
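/* The 13-byte .lzma file header: a properties byte
 * pos = (pb * 5 + lp) * 9 + lc, a little-endian 32-bit dictionary size
 * and a little-endian 64-bit uncompressed size (all ones = unknown). */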
/* #defines will force compiler to compute/optimize each one with each usage.
 * Have heart and use enum instead. */
enum {
	LZMA_BASE_SIZE = 1846,
	LZMA_LIT_SIZE  = 768,

	LZMA_NUM_POS_BITS_MAX = 4,

	LZMA_LEN_NUM_LOW_BITS  = 3,
	LZMA_LEN_NUM_MID_BITS  = 3,
	LZMA_LEN_NUM_HIGH_BITS = 8,

	LZMA_LEN_CHOICE     = 0,
	LZMA_LEN_CHOICE_2   = (LZMA_LEN_CHOICE + 1),
	LZMA_LEN_LOW        = (LZMA_LEN_CHOICE_2 + 1),
	LZMA_LEN_MID        = (LZMA_LEN_LOW \
	                      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))),
	LZMA_LEN_HIGH       = (LZMA_LEN_MID \
	                      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))),
	LZMA_NUM_LEN_PROBS  = (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)),

	LZMA_NUM_STATES     = 12,
	LZMA_NUM_LIT_STATES = 7,

	LZMA_START_POS_MODEL_INDEX = 4,
	LZMA_END_POS_MODEL_INDEX   = 14,
	LZMA_NUM_FULL_DISTANCES    = (1 << (LZMA_END_POS_MODEL_INDEX >> 1)),

	LZMA_NUM_POS_SLOT_BITS = 6,
	LZMA_NUM_LEN_TO_POS_STATES = 4,

	LZMA_NUM_ALIGN_BITS = 4,

	LZMA_MATCH_MIN_LEN = 2,

	LZMA_IS_MATCH       = 0,
	LZMA_IS_REP         = (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)),
	LZMA_IS_REP_G0      = (LZMA_IS_REP + LZMA_NUM_STATES),
	LZMA_IS_REP_G1      = (LZMA_IS_REP_G0 + LZMA_NUM_STATES),
	LZMA_IS_REP_G2      = (LZMA_IS_REP_G1 + LZMA_NUM_STATES),
	LZMA_IS_REP_0_LONG  = (LZMA_IS_REP_G2 + LZMA_NUM_STATES),
	LZMA_POS_SLOT       = (LZMA_IS_REP_0_LONG \
	                      + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)),
	LZMA_SPEC_POS       = (LZMA_POS_SLOT \
	                      + (LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)),
	LZMA_ALIGN          = (LZMA_SPEC_POS \
	                      + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX),
	LZMA_LEN_CODER      = (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)),
	LZMA_REP_LEN_CODER  = (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS),
	LZMA_LITERAL        = (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS),
};
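/* The LZMA_* values above are offsets into one array of adaptive
 * probabilities.  LZMA_LITERAL works out to 1846 == LZMA_BASE_SIZE, so
 * the literal coders sit at the end of the array; the total size
 * allocated below is LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)). */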
IF_DESKTOP(long long) int FAST_FUNC
unpack_lzma_stream(transformer_state_t *xstate)
{
	IF_DESKTOP(long long total_written = 0;)
	lzma_header_t header;
	int lc, pb, lp;
	uint32_t pos_state_mask;
	uint32_t literal_pos_mask;
	uint16_t *p;
	rc_t *rc;
	int i;
	uint8_t *buffer;
	uint32_t buffer_size;
	uint8_t previous_byte = 0;
	size_t buffer_pos = 0, global_pos = 0;
	int len = 0;
	int state = 0;
	uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
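	/* state is the 0..11 LZMA context state; rep0..rep3 hold the four
	 * most recently used match distances, rep0 being the current one. */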
	if (full_read(xstate->src_fd, &header, sizeof(header)) != sizeof(header)
	 || header.pos >= (9 * 5 * 5)
	) {
		bb_simple_error_msg("bad lzma header");
		return -1;
	}

	/* The properties byte packs lc, lp and pb as (pb * 5 + lp) * 9 + lc */
	i = header.pos / 9;
	lc = header.pos % 9;
	pb = i / 5;
	lp = i % 5;
	pos_state_mask = (1 << pb) - 1;
	literal_pos_mask = (1 << lp) - 1;
	/* Example values from linux-3.3.4.tar.lzma:
	 * dict_size: 64M, dst_size: 2^64-1
	 */
	header.dict_size = SWAP_LE32(header.dict_size);
	header.dst_size = SWAP_LE64(header.dst_size);

	if (header.dict_size == 0)
		header.dict_size++;

	buffer_size = MIN(header.dst_size, header.dict_size);
	buffer = xmalloc(buffer_size);
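	/* Output is produced through a dictionary-sized window: whenever
	 * buffer_pos reaches header.dict_size the window is flushed with
	 * transformer_write() and refilled from the start. */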
	{
		int num_probs;

		num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
		p = xmalloc(num_probs * sizeof(*p));
		num_probs += LZMA_LITERAL - LZMA_BASE_SIZE;
		for (i = 0; i < num_probs; i++)
			p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
	}
	rc = rc_init(xstate->src_fd); /*, RC_BUFFER_SIZE); */
	while (global_pos + buffer_pos < header.dst_size) {
		int pos_state = (buffer_pos + global_pos) & pos_state_mask;
		uint16_t *prob = p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
		if (!rc_is_bit_1(rc, prob)) {
			static const char next_state[LZMA_NUM_STATES] =
				{ 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5 };
			int mi = 1;
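			/* IS_MATCH bit was 0: decode a literal.  The literal model
			 * is chosen from the low lp bits of the output position and
			 * the top lc bits of the previous byte; next_state[] drops
			 * the state back toward 0 after a literal. */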
			prob = (p + LZMA_LITERAL
				+ (LZMA_LIT_SIZE * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
					+ (previous_byte >> (8 - lc))
					)
				)
			);
			if (state >= LZMA_NUM_LIT_STATES) {
				int match_byte;
				uint32_t pos;
				pos = buffer_pos - rep0;
				if ((int32_t)pos < 0)
					pos += header.dict_size;
				match_byte = buffer[pos];
				do {
					int bit;

					match_byte <<= 1;
					bit = match_byte & 0x100;
					bit ^= (rc_get_bit(rc, prob + 0x100 + bit + mi, &mi) << 8); /* 0x100 or 0 */
					if (bit)
						break;
				} while (mi < 0x100);
			}
			while (mi < 0x100) {
				rc_get_bit(rc, prob + mi, &mi);
			}
			state = next_state[state];

			previous_byte = (uint8_t) mi;
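			/* previous_byte now holds the decoded literal: mi had the
			 * 0x100 sentinel bit on top, which the cast discards. */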
#if ENABLE_FEATURE_LZMA_FAST
 one_byte1:
			buffer[buffer_pos++] = previous_byte;
			if (buffer_pos == header.dict_size) {
				buffer_pos = 0;
				global_pos += header.dict_size;
				if (transformer_write(xstate, buffer, header.dict_size) != (ssize_t)header.dict_size)
					goto bad;
				IF_DESKTOP(total_written += header.dict_size;)
			}
#else
			len = 1;
			goto one_byte2;
#endif
		} else {
			int num_bits;
			int offset;
			uint16_t *prob2;
#define prob_len prob2
			prob2 = p + LZMA_IS_REP + state;
			if (!rc_is_bit_1(rc, prob2)) {
				rep3 = rep2;
				rep2 = rep1;
				rep1 = rep0;
				state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
				prob2 = p + LZMA_LEN_CODER;
			} else {
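				/* Repeated match: IS_REP_G0/G1/G2 bits select one of the
				 * four most recent distances; a separate IS_REP_0_LONG bit
				 * distinguishes a one-byte "short rep" from a rep0 match
				 * with an explicitly coded length. */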
				prob2 += LZMA_IS_REP_G0 - LZMA_IS_REP;
				if (!rc_is_bit_1(rc, prob2)) {
					prob2 = (p + LZMA_IS_REP_0_LONG
						+ (state << LZMA_NUM_POS_BITS_MAX)
						+ pos_state
					);
					if (!rc_is_bit_1(rc, prob2)) {
#if ENABLE_FEATURE_LZMA_FAST
						uint32_t pos;
						state = state < LZMA_NUM_LIT_STATES ? 9 : 11;

						pos = buffer_pos - rep0;
						if ((int32_t)pos < 0) {
							pos += header.dict_size;
							/* see unzip_bad_lzma_2.zip: */
							if (pos >= buffer_size) {
								dbg("%d pos:%d buffer_size:%d", __LINE__, pos, buffer_size);
								goto bad;
							}
						}
						previous_byte = buffer[pos];
						goto one_byte1;
#else
						state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
						len = 1;
						goto string;
#endif
					}
				} else {
					uint32_t distance;
					prob2 += LZMA_IS_REP_G1 - LZMA_IS_REP_G0;
					distance = rep1;
					if (rc_is_bit_1(rc, prob2)) {
						prob2 += LZMA_IS_REP_G2 - LZMA_IS_REP_G1;
						distance = rep2;
						if (rc_is_bit_1(rc, prob2)) {
							distance = rep3;
							rep3 = rep2;
						}
						rep2 = rep1;
					}
					rep1 = rep0;
					rep0 = distance;
				}
				state = state < LZMA_NUM_LIT_STATES ? 8 : 11;
				prob2 = p + LZMA_REP_LEN_CODER;
			}
			prob_len = prob2 + LZMA_LEN_CHOICE;
			num_bits = LZMA_LEN_NUM_LOW_BITS;
			if (!rc_is_bit_1(rc, prob_len)) {
				prob_len += LZMA_LEN_LOW - LZMA_LEN_CHOICE
					+ (pos_state << LZMA_LEN_NUM_LOW_BITS);
				offset = 0;
			} else {
				prob_len += LZMA_LEN_CHOICE_2 - LZMA_LEN_CHOICE;
				if (!rc_is_bit_1(rc, prob_len)) {
					prob_len += LZMA_LEN_MID - LZMA_LEN_CHOICE_2
						+ (pos_state << LZMA_LEN_NUM_MID_BITS);
					offset = 1 << LZMA_LEN_NUM_LOW_BITS;
					num_bits += LZMA_LEN_NUM_MID_BITS - LZMA_LEN_NUM_LOW_BITS;
				} else {
					prob_len += LZMA_LEN_HIGH - LZMA_LEN_CHOICE_2;
					offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
						+ (1 << LZMA_LEN_NUM_MID_BITS));
					num_bits += LZMA_LEN_NUM_HIGH_BITS - LZMA_LEN_NUM_LOW_BITS;
				}
			}
			rc_bit_tree_decode(rc, prob_len, num_bits, &len);
			len += offset;
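			/* len now holds the match length minus LZMA_MATCH_MIN_LEN:
			 * 0..7 from the low coder, 8..15 from mid, 16..271 from high.
			 * For a new (non-repeated) match the distance is decoded next:
			 * a 6-bit pos_slot first, then extra distance bits using
			 * adaptive models for small distances and direct bits plus a
			 * 4-bit "align" model for large ones. */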
			if (state < 4) {
				int pos_slot;
				uint16_t *prob3;

				state += LZMA_NUM_LIT_STATES;
				prob3 = p + LZMA_POS_SLOT +
					((len < LZMA_NUM_LEN_TO_POS_STATES ? len :
						LZMA_NUM_LEN_TO_POS_STATES - 1)
						<< LZMA_NUM_POS_SLOT_BITS);
				rc_bit_tree_decode(rc, prob3,
					LZMA_NUM_POS_SLOT_BITS, &pos_slot);
				rep0 = pos_slot;
				if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
					int i2, mi2, num_bits2 = (pos_slot >> 1) - 1;
					rep0 = 2 | (pos_slot & 1);
					if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
						rep0 <<= num_bits2;
						prob3 = p + LZMA_SPEC_POS + rep0 - pos_slot - 1;
					} else {
						for (; num_bits2 != LZMA_NUM_ALIGN_BITS; num_bits2--)
							rep0 = (rep0 << 1) | rc_direct_bit(rc);
						rep0 <<= LZMA_NUM_ALIGN_BITS;
						// Note: (int32_t)rep0 may be < 0 here
						// (I have linux-3.3.4.tar.lzma which has it).
						// I moved the check after "++rep0 == 0" check below.
						prob3 = p + LZMA_ALIGN;
					}
					i2 = 1;
					mi2 = 1;
					while (num_bits2--) {
						if (rc_get_bit(rc, prob3 + mi2, &mi2))
							rep0 |= i2;
						i2 <<= 1;
					}
				}
				rep0++;
				if ((int32_t)rep0 <= 0) {
					if (rep0 == 0) /* distance 0xFFFFFFFF: end-of-stream marker */
						break;
					dbg("%d rep0:%d", __LINE__, rep0);
					goto bad;
				}
			}
			len += LZMA_MATCH_MIN_LEN;
			/*
			 * LZMA SDK has this optimized:
			 * it precalculates size and copies many bytes
			 * in a loop with simpler checks, a-la:
			 *	do
			 *		*(dest) = *(dest + ofs);
			 *	while (++dest != lim);
			 * and
			 *	do {
			 *		buffer[buffer_pos++] = buffer[pos];
			 *		if (++pos == header.dict_size)
			 *			pos = 0;
			 *	} while (--cur_len != 0);
			 * Our code is slower (more checks per byte copy):
			 */
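			/* Copy the match: len bytes taken from rep0 positions back,
			 * wrapping around the dictionary window; the pos >= buffer_size
			 * test catches distances that reach before the data actually
			 * written so far (corrupt input). */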
 IF_NOT_FEATURE_LZMA_FAST(string:)
			do {
				uint32_t pos = buffer_pos - rep0;
				if ((int32_t)pos < 0) {
					pos += header.dict_size;
					/* bug 10436 has an example file where this triggers: */
					//if ((int32_t)pos < 0)
					//	goto bad;
					/* more stringent test (see unzip_bad_lzma_1.zip): */
					if (pos >= buffer_size)
						goto bad;
				}
				previous_byte = buffer[pos];
 IF_NOT_FEATURE_LZMA_FAST(one_byte2:)
				buffer[buffer_pos++] = previous_byte;
				if (buffer_pos == header.dict_size) {
					buffer_pos = 0;
					global_pos += header.dict_size;
					if (transformer_write(xstate, buffer, header.dict_size) != (ssize_t)header.dict_size)
						goto bad;
					IF_DESKTOP(total_written += header.dict_size;)
				}
				len--;
			} while (len != 0 && buffer_pos < header.dst_size);
			/* FIXME: ...........^^^^^
			 * shouldn't it be "global_pos + buffer_pos < header.dst_size"?
			 * It probably should, but it is a "do we accidentally
			 * unpack more bytes than expected?" check - which
			 * never happens for well-formed compression data...
			 */
		}
	}

	{
		IF_NOT_DESKTOP(int total_written = 0; /* success */)
		IF_DESKTOP(total_written += buffer_pos;)
		if (transformer_write(xstate, buffer, buffer_pos) != (ssize_t)buffer_pos) {
 bad:
			/* One of our users, bbunpack(), expects _us_ to emit
			 * the error message (since it's the best place to give
			 * potentially more detailed information).
			 * Do not fail silently.
			 */
			bb_simple_error_msg("corrupted data");
			total_written = -1; /* failure */
		}
		rc_free(rc);
		free(p);
		free(buffer);
		return total_written;
	}
}