2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
7 /*-------------------------------------------------------------*/
8 /*--- Compression machinery (not incl block sorting) ---*/
10 /*-------------------------------------------------------------*/
12 /* ------------------------------------------------------------------
13 This file is part of bzip2/libbzip2, a program and library for
14 lossless, block-sorting data compression.
16 bzip2/libbzip2 version 1.0.4 of 20 December 2006
17 Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
19 Please read the WARNING, DISCLAIMER and PATENTS sections in the
22 This program is released under the terms of the license contained
24 ------------------------------------------------------------------ */
27 * 0.9.0 -- original version.
28 * 0.9.0a/b -- no changes in this file.
29 * 0.9.0c -- changed setting of nGroups in sendMTFValues()
30 * so as to do a bit better on small files
33 /* #include "bzlib_private.h" */
35 /*---------------------------------------------------*/
36 /*--- Bit stream I/O ---*/
37 /*---------------------------------------------------*/
39 /*---------------------------------------------------*/
/* Reset the bit-stream writer before emitting a new block.
 * NOTE(review): the body (original lines 42-47) is missing from this
 * excerpt; presumably it clears s->bsBuff and s->bsLive -- TODO confirm
 * against the full source. */
41 void BZ2_bsInitWrite(EState* s)
48 /*---------------------------------------------------*/
/* Flush any bits still pending in the 32-bit accumulator s->bsBuff to the
 * output area via s->posZ.  Buffered bits live in the TOP of bsBuff, so each
 * flushed byte is (bsBuff >> 24).
 * NOTE(review): interior lines (original 54-59, the bsBuff<<=8 / bsLive-=8
 * bookkeeping and closing braces) are missing from this excerpt. */
50 void bsFinishWrite(EState* s)
52 while (s->bsLive > 0) {
53 *s->posZ++ = (uint8_t)(s->bsBuff >> 24);
60 /*---------------------------------------------------*/
62 /* Helps only on level 5, on other levels hurts. ? */
63 #if CONFIG_BZIP2_FAST >= 5
/* Write the low n bits of v to the bit stream, MSB-first.  Full bytes are
 * drained from the top of s->bsBuff to the output (s->posZ) until fewer
 * than 8 bits remain, then the new bits are ORed in below the existing
 * ones: shift amount (32 - bsLive - n) places v's MSB right after the
 * currently buffered bits.
 * NOTE(review): lines 64-65, 67 and 70-75 of the original (the ALWAYS_INLINE
 * gating, bsLive updates and braces) are absent from this excerpt. */
66 void bsW(EState* s, int32_t n, uint32_t v)
68 while (s->bsLive >= 8) {
69 *s->posZ++ = (uint8_t)(s->bsBuff >> 24);
73 s->bsBuff |= (v << (32 - s->bsLive - n));
76 /* Same with n == 16: */
78 #if CONFIG_BZIP2_FAST >= 5
/* Specialization of bsW for exactly 16 bits: same drain loop as bsW, but
 * the shift (16 - bsLive) is precomputed for n == 16 (i.e. 32 - bsLive - 16).
 * Presumably v's high 16 bits must be zero on entry -- TODO confirm callers.
 * NOTE(review): lines 79-80, 82 and 85-92 of the original are absent from
 * this excerpt. */
81 void bsW16(EState* s, uint32_t v)
83 while (s->bsLive >= 8) {
84 *s->posZ++ = (uint8_t)(s->bsBuff >> 24);
88 s->bsBuff |= (v << (16 - s->bsLive));
93 /*---------------------------------------------------*/
/* Emit a 16-bit value to the bit stream.
 * NOTE(review): the body (original lines 96-100) is missing from this
 * excerpt; presumably it forwards to bsW16 -- TODO confirm. */
95 void bsPutU16(EState* s, unsigned u)
101 /*---------------------------------------------------*/
/* Emit a 32-bit value as two 16-bit halves, high half first (big-endian
 * bit order, as the bzip2 format requires).  A single bsW(s, 32, u) is
 * avoided because it could compute a negative/oversized shift (UB). */
103 void bsPutU32(EState* s, unsigned u)
105 //bsW(s, 32, u); // can't use: may try "uint32 << -n"
106 bsW16(s, (u >> 16) & 0xffff);
107 bsW16(s, u & 0xffff);
111 /*---------------------------------------------------*/
112 /*--- The back end proper ---*/
113 /*---------------------------------------------------*/
115 /*---------------------------------------------------*/
/* Build the byte -> compact-symbol map: for each of the 256 byte values,
 * record its index among the values actually used in this block
 * (s->unseqToSeq), counting used values in s->nInUse.
 * NOTE(review): lines 118-120 and 122-129 of the original (the inUse[] test
 * and nInUse increment) are absent from this excerpt. */
117 void makeMaps_e(EState* s)
121 for (i = 0; i < 256; i++) {
123 s->unseqToSeq[i] = s->nInUse;
130 /*---------------------------------------------------*/
/* Move-to-front transform with run-length encoding of zero runs.
 * Converts the sorted block (ptr order over block data) into MTF symbol
 * values written to mtfv[], counting symbol frequencies in s->mtfFreq[].
 * Zero runs are encoded as a bijective base-2 sequence of RUNA/RUNB
 * symbols (the "zPend" machinery below).
 * NOTE(review): this numbered excerpt is missing many interior lines
 * (e.g. originals 133-140, 164-167, 184-191, 201-204, 208-211, 214-219,
 * 221-233, 235-241, 244-255), including the yy[] MTF list setup, the
 * run-length state machine's branches, and the EOB emission. */
132 void generateMTFValues(EState* s)
141 * After sorting (eg, here),
142 * s->arr1[0 .. s->nblock-1] holds sorted order,
144 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
145 * holds the original block data.
147 * The first thing to do is generate the MTF values,
148 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
150 * Because there are strictly fewer or equal MTF values
151 * than block values, ptr values in this area are overwritten
152 * with MTF values only when they are no longer needed.
154 * The final compressed bitstream is generated into the
155 * area starting at &((uint8_t*)s->arr2)[s->nblock]
157 * These storage aliases are set up in bzCompressInit(),
158 * except for the last one, which is arranged in
/* Local aliases into the shared arr1/arr2 storage described above. */
161 uint32_t* ptr = s->ptr;
162 uint8_t* block = s->block;
163 uint16_t* mtfv = s->mtfv;
168 for (i = 0; i <= EOB; i++)
173 for (i = 0; i < s->nInUse; i++)
/* Main pass over the block in sorted order. */
176 for (i = 0; i < s->nblock; i++) {
178 AssertD(wr <= i, "generateMTFValues(1)");
/* block[j] is the byte preceding position ptr[i]; map it to its
 * compact symbol index. */
182 ll_i = s->unseqToSeq[block[j]];
183 AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
/* Flush a pending zero run as RUNA/RUNB symbols (bijective base 2):
 * low bit of zPend selects RUNB (1) or RUNA (0), then zPend is
 * reduced by (zPend - 2) / 2 each step until fewer than 2 remain. */
192 mtfv[wr] = BZ_RUNB; wr++;
193 s->mtfFreq[BZ_RUNB]++;
195 mtfv[wr] = BZ_RUNA; wr++;
196 s->mtfFreq[BZ_RUNA]++;
198 if (zPend < 2) break;
199 zPend = (uint32_t)(zPend - 2) / 2;
200 /* bbox: unsigned div is easier */
/* Non-zero MTF position: move ll_i to the front of the yy[] list by
 * shifting intervening entries up one slot. */
205 register uint8_t rtmp;
206 register uint8_t* ryy_j;
207 register uint8_t rll_i;
212 while (rll_i != rtmp) {
213 register uint8_t rtmp2;
/* Distance moved == MTF value for this symbol. */
220 j = ryy_j - &(yy[0]);
/* Second copy of the zero-run flush, presumably for the final pending
 * run after the main loop -- TODO confirm against the full source. */
234 s->mtfFreq[BZ_RUNB]++;
238 s->mtfFreq[BZ_RUNA]++;
242 zPend = (uint32_t)(zPend - 2) / 2;
243 /* bbox: unsigned div is easier */
256 /*---------------------------------------------------*/
257 #define BZ_LESSER_ICOST 0
258 #define BZ_GREATER_ICOST 15
/* Huffman-coding back end: choose 2..6 coding tables for the MTF symbol
 * stream, iteratively refine them, then emit the mapping table, selectors,
 * code lengths, and the coded block data to the bit stream.
 * NOTE(review): this numbered excerpt omits many interior lines (gaps in
 * the embedded numbering, e.g. 296-297, 301-305, 307-308, 310, 312-314,
 * 317, 319-321, 325, 327-334, 339-340, 343-344, 407-409, 412-416, 418-423,
 * 425-427, 445-449, 453-454, 456-459, 466-467, and others), so loop bodies
 * and closing braces are incomplete as shown. */
261 void sendMTFValues(EState* s)
263 int32_t v, t, i, j, gs, ge, bt, bc, iter;
264 int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
268 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
269 * is a global since the decoder also needs it.
271 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
272 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
273 * are also globals only used in this proc.
274 * Made global to keep stack frame size small.
276 #define code sendMTFValues__code
277 #define rfreq sendMTFValues__rfreq
278 #define len_pack sendMTFValues__len_pack
280 uint16_t cost[BZ_N_GROUPS];
281 int32_t fave[BZ_N_GROUPS];
283 uint16_t* mtfv = s->mtfv;
/* Alphabet = in-use symbols + RUNA/RUNB? No: + MTFV slot and EOB; start
 * every length at the "expensive" sentinel. -- NOTE(review): the "+ 2"
 * accounts for the two extra symbols beyond nInUse; exact identities not
 * visible here. */
285 alphaSize = s->nInUse + 2;
286 for (t = 0; t < BZ_N_GROUPS; t++)
287 for (v = 0; v < alphaSize; v++)
288 s->len[t][v] = BZ_GREATER_ICOST;
290 /*--- Decide how many coding tables to use ---*/
291 AssertH(s->nMTF > 0, 3001);
292 if (s->nMTF < 200) nGroups = 2; else
293 if (s->nMTF < 600) nGroups = 3; else
294 if (s->nMTF < 1200) nGroups = 4; else
295 if (s->nMTF < 2400) nGroups = 5; else
298 /*--- Generate an initial set of coding tables ---*/
/* Partition the symbol frequency mass into nGroups roughly equal-weight
 * contiguous ranges; each initial table is "cheap" inside its range and
 * "expensive" outside it. */
300 int32_t nPart, remF, tFreq, aFreq;
306 tFreq = remF / nPart;
309 while (aFreq < tFreq && ge < alphaSize-1) {
311 aFreq += s->mtfFreq[ge];
315 && nPart != nGroups && nPart != 1
316 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
318 aFreq -= s->mtfFreq[ge];
322 for (v = 0; v < alphaSize; v++)
323 if (v >= gs && v <= ge)
324 s->len[nPart-1][v] = BZ_LESSER_ICOST;
326 s->len[nPart-1][v] = BZ_GREATER_ICOST;
335 * Iterate up to BZ_N_ITERS times to improve the tables.
337 for (iter = 0; iter < BZ_N_ITERS; iter++) {
338 for (t = 0; t < nGroups; t++)
341 for (t = 0; t < nGroups; t++)
342 for (v = 0; v < alphaSize; v++)
345 #if CONFIG_BZIP2_FAST >= 5
347 * Set up an auxiliary length table which is used to fast-track
348 * the common case (nGroups == 6).
/* len_pack[v][k] packs table (2k+1)'s length in the high 16 bits and
 * table 2k's in the low 16, so one 32-bit add accumulates two costs. */
351 for (v = 0; v < alphaSize; v++) {
352 s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
353 s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
354 s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
361 /*--- Set group start & end marks. --*/
364 ge = gs + BZ_G_SIZE - 1;
369 * Calculate the cost of this group as coded
370 * by each of the coding tables.
372 for (t = 0; t < nGroups; t++)
374 #if CONFIG_BZIP2_FAST >= 5
375 if (nGroups == 6 && 50 == ge-gs+1) {
376 /*--- fast track the common case ---*/
377 register uint32_t cost01, cost23, cost45;
378 register uint16_t icv;
379 cost01 = cost23 = cost45 = 0;
380 #define BZ_ITER(nn) \
381 icv = mtfv[gs+(nn)]; \
382 cost01 += s->len_pack[icv][0]; \
383 cost23 += s->len_pack[icv][1]; \
384 cost45 += s->len_pack[icv][2];
385 BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
386 BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
387 BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
388 BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
389 BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
390 BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
391 BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
392 BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
393 BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
394 BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
/* Unpack the paired accumulators into the per-table cost[] array. */
396 cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
397 cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
398 cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
402 /*--- slow version which correctly handles all situations ---*/
403 for (i = gs; i <= ge; i++) {
404 uint16_t icv = mtfv[i];
405 for (t = 0; t < nGroups; t++)
406 cost[t] += s->len[t][icv];
410 * Find the coding table which is best for this group,
411 * and record its identity in the selector table.
417 for (t = 1 /*0*/; t < nGroups; t++) {
424 s->selector[nSelectors] = bt;
428 * Increment the symbol frequencies for the selected table.
430 /* 1% faster compress. +800 bytes */
431 #if CONFIG_BZIP2_FAST >= 4
432 if (nGroups == 6 && 50 == ge-gs+1) {
433 /*--- fast track the common case ---*/
434 #define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
435 BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
436 BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
437 BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
438 BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
439 BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
440 BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
441 BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
442 BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
443 BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
444 BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
450 /*--- slow version which correctly handles all situations ---*/
452 s->rfreq[bt][mtfv[gs]]++;
455 /* already is: gs = ge + 1; */
460 * Recompute the tables based on the accumulated frequencies.
462 /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
463 * comment in huffman.c for details. */
464 for (t = 0; t < nGroups; t++)
465 BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
468 AssertH(nGroups < 8, 3002);
469 AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);
471 /*--- Compute MTF values for the selectors. ---*/
473 uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
475 for (i = 0; i < nGroups; i++)
477 for (i = 0; i < nSelectors; i++) {
478 ll_i = s->selector[i];
481 while (ll_i != tmp) {
488 s->selectorMtf[i] = j;
492 /*--- Assign actual codes for the tables. --*/
493 for (t = 0; t < nGroups; t++) {
496 for (i = 0; i < alphaSize; i++) {
497 if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
498 if (s->len[t][i] < minLen) minLen = s->len[t][i];
500 AssertH(!(maxLen > 17 /*20*/), 3004);
501 AssertH(!(minLen < 1), 3005);
502 BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
505 /*--- Transmit the mapping table. ---*/
507 /* bbox: optimized a bit more than in bzip2 */
/* inUse16 bit i = "any of the 16 bytes in chunk i is in use"; the OR of
 * four 32-bit (or two 64-bit) aliased loads tests 16 bool bytes at once.
 * NOTE(review): relies on the bb__aliased_* typedefs for safe aliasing. */
509 for (i = 0; i < 16; i++) {
510 if (sizeof(long) <= 4) {
511 inUse16 = inUse16*2 +
512 ((*(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 0])
513 | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 4])
514 | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 8])
515 | *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
516 } else { /* Our CPU can do better */
517 inUse16 = inUse16*2 +
518 ((*(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 0])
519 | *(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
525 inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
526 for (i = 0; i < 16; i++) {
529 for (j = 0; j < 16; j++)
530 v16 = v16*2 + s->inUse[i * 16 + j];
537 /*--- Now the selectors. ---*/
539 bsW(s, 15, nSelectors);
540 for (i = 0; i < nSelectors; i++) {
541 for (j = 0; j < s->selectorMtf[i]; j++)
546 /*--- Now the coding tables. ---*/
/* Code lengths are sent as deltas: "10" = +1, "11" = -1, from the
 * previous length, per the bzip2 stream format. */
547 for (t = 0; t < nGroups; t++) {
548 int32_t curr = s->len[t][0];
550 for (i = 0; i < alphaSize; i++) {
551 while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
552 while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
557 /*--- And finally, the block data proper ---*/
563 ge = gs + BZ_G_SIZE - 1;
566 AssertH(s->selector[selCtr] < nGroups, 3006);
568 /* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
570 if (nGroups == 6 && 50 == ge-gs+1) {
571 /*--- fast track the common case ---*/
573 uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
574 int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
575 #define BZ_ITAH(nn) \
576 mtfv_i = mtfv[gs+(nn)]; \
577 bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
578 BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
579 BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
580 BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
581 BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
582 BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
583 BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
584 BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
585 BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
586 BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
587 BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
593 /*--- slow version which correctly handles all situations ---*/
594 /* code is bit bigger, but moves multiply out of the loop */
595 uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
596 int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
599 s_len_sel_selCtr[mtfv[gs]],
600 s_code_sel_selCtr[mtfv[gs]]
604 /* already is: gs = ge+1; */
608 AssertH(selCtr == nSelectors, 3007);
615 /*---------------------------------------------------*/
/* Compress one block: finalize/combine CRCs, (re)aim the output pointers
 * at the scratch area after the block data, emit stream header (first
 * block only), block header magic, CRC, randomisation bit, origPtr,
 * MTF+Huffman stages, and -- on the last block -- the stream trailer and
 * combined CRC.
 * NOTE(review): this excerpt omits interior lines (e.g. originals 618-619,
 * 623, 625-628, 630, 632, 635, 641-643, 651-652, 655-656, 664-666,
 * 669-671, 673, 681, 683-687), including the nblock>0 guard, the
 * blockSort() call, sendMTFValues() call and bsFinishWrite() call. */
617 void BZ2_compressBlock(EState* s, int is_last_block)
/* Fold this block's CRC into the stream-wide CRC: rotate-left-1 then XOR. */
620 BZ_FINALISE_CRC(s->blockCRC);
621 s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
622 s->combinedCRC ^= s->blockCRC;
624 s->posZ = s->zbits; // was: s->numZ = 0;
/* Compressed output is built in arr2 just past the nblock data bytes. */
629 s->zbits = &((uint8_t*)s->arr2)[s->nblock];
631 s->state_out_pos = s->zbits;
633 /*-- If this is the first block, create the stream header. --*/
634 if (s->blockNo == 1) {
636 /*bsPutU8(s, BZ_HDR_B);*/
637 /*bsPutU8(s, BZ_HDR_Z);*/
638 /*bsPutU8(s, BZ_HDR_h);*/
639 /*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
640 bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k)
644 /*bsPutU8(s, 0x31);*/
645 /*bsPutU8(s, 0x41);*/
646 /*bsPutU8(s, 0x59);*/
647 /*bsPutU8(s, 0x26);*/
/* Block header magic: 6 bytes 0x314159265359 (pi), sent as 32+16 bits. */
648 bsPutU32(s, 0x31415926);
649 /*bsPutU8(s, 0x53);*/
650 /*bsPutU8(s, 0x59);*/
653 /*-- Now the block's CRC, so it is in a known place. --*/
654 bsPutU32(s, s->blockCRC);
657 * Now a single bit indicating (non-)randomisation.
658 * As of version 0.9.5, we use a better sorting algorithm
659 * which makes randomisation unnecessary. So always set
660 * the randomised bit to 'no'. Of course, the decoder
661 * still needs to be able to handle randomised blocks
662 * so as to maintain backwards compatibility with
663 * older versions of bzip2.
667 bsW(s, 24, s->origPtr);
668 generateMTFValues(s);
672 /*-- If this is the last block, add the stream trailer. --*/
674 /*bsPutU8(s, 0x17);*/
675 /*bsPutU8(s, 0x72);*/
676 /*bsPutU8(s, 0x45);*/
677 /*bsPutU8(s, 0x38);*/
/* Stream trailer magic: 6 bytes 0x177245385090 (sqrt(pi)). */
678 bsPutU32(s, 0x17724538);
679 /*bsPutU8(s, 0x50);*/
680 /*bsPutU8(s, 0x90);*/
682 bsPutU32(s, s->combinedCRC);
688 /*-------------------------------------------------------------*/
689 /*--- end compress.c ---*/
690 /*-------------------------------------------------------------*/