/*
 * Small range coder implementation for lzma.
 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (c) 1999-2005 Igor Pavlov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
28 # if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >0)
29 # define always_inline __attribute__((always_inline)) inline
31 # define always_inline inline
35 #ifdef CONFIG_FEATURE_LZMA_FAST
36 # define speed_inline always_inline
54 #define RC_TOP_BITS 24
55 #define RC_MOVE_BITS 5
56 #define RC_MODEL_TOTAL_BITS 11
59 /* Called twice: once at startup and once in rc_normalize() */
60 static void rc_read(rc_t * rc)
62 rc->buffer_size = read(rc->fd, rc->buffer, rc->buffer_size);
63 if (rc->buffer_size <= 0)
64 bb_error_msg_and_die("unexpected EOF");
66 rc->buffer_end = rc->buffer + rc->buffer_size;
70 static always_inline void rc_init(rc_t * rc, int fd, int buffer_size)
75 rc->buffer = malloc(buffer_size);
76 rc->buffer_size = buffer_size;
77 rc->buffer_end = rc->buffer + rc->buffer_size;
78 rc->ptr = rc->buffer_end;
81 rc->range = 0xFFFFFFFF;
82 for (i = 0; i < 5; i++) {
83 if (rc->ptr >= rc->buffer_end)
85 rc->code = (rc->code << 8) | *rc->ptr++;
89 /* Called once. TODO: bb_maybe_free() */
90 static always_inline void rc_free(rc_t * rc)
92 if (ENABLE_FEATURE_CLEAN_UP)
96 /* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
97 static void rc_do_normalize(rc_t * rc)
99 if (rc->ptr >= rc->buffer_end)
102 rc->code = (rc->code << 8) | *rc->ptr++;
104 static always_inline void rc_normalize(rc_t * rc)
106 if (rc->range < (1 << RC_TOP_BITS)) {
112 /* Why rc_is_bit_0_helper exists?
113 * Because we want to always expose (rc->code < rc->bound) to optimizer
115 static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
118 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
121 static always_inline int rc_is_bit_0(rc_t * rc, uint16_t * p)
123 uint32_t t = rc_is_bit_0_helper(rc, p);
127 /* Called ~10 times, but very small, thus inlined */
128 static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
130 rc->range = rc->bound;
131 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
133 static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
135 rc->range -= rc->bound;
136 rc->code -= rc->bound;
137 *p -= *p >> RC_MOVE_BITS;
140 /* Called 4 times in unlzma loop */
141 static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
143 if (rc_is_bit_0(rc, p)) {
144 rc_update_bit_0(rc, p);
148 rc_update_bit_1(rc, p);
149 *symbol = *symbol * 2 + 1;
155 static always_inline int rc_direct_bit(rc_t * rc)
159 if (rc->code >= rc->range) {
160 rc->code -= rc->range;
167 static speed_inline void
168 rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
174 rc_get_bit(rc, p + *symbol, symbol);
175 *symbol -= 1 << num_levels;