source: MondoRescue/branches/2.2.9/mindi-busybox/archival/libarchive/bz/compress.c@2725

Last change on this file since 2725 was 2725, checked in by Bruno Cornec, 13 years ago
  • Update mindi-busybox to 1.18.3 to avoid problems with the tar command which is now failing on recent versions with busybox 1.7.3
  • Property svn:eol-style set to native
File size: 17.6 KB
/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}


/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}


/*---------------------------------------------------*/
static
/* Helps only on level 5; on other levels it hurts(?) */
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
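
/*
 * Example: with an empty buffer (bsLive == 0), bsW(s, 3, 5) ORs
 * 5 << 29 into bsBuff, so the bits 101 land in the top three bits
 * and bsLive becomes 3.  The flush loop keeps bsLive <= 7 before
 * the OR, so any n <= 25 is safe; the largest single write in this
 * file is 24 bits (s->origPtr).
 */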


/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}


/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}


/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}


/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 *
	 * Because there are never more MTF values than block values,
	 * ptr values in this area are overwritten with MTF values
	 * only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
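	/*
	 * Small example: with nInUse == 3 (mapped symbols 0,1,2) and
	 * the post-BWT sequence 1,1,0, the MTF list starts as [0,1,2].
	 * The first 1 is found at position 1 and emitted as 2 (position
	 * plus one, because output values 0 and 1 are reserved for
	 * RUNA/RUNB); the second 1 hits position 0 and only bumps zPend;
	 * the 0 then flushes that run as a single RUNA before being
	 * emitted as 2 itself.  EOB is nInUse + 1 == 4.
	 */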
	uint32_t* ptr   = s->ptr;
	uint8_t*  block = s->block;
	uint16_t* mtfv  = s->mtfv;

	makeMaps_e(s);
	EOB = s->nInUse + 1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
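			/*
			 * Zero runs are coded in bijective base 2, least
			 * significant digit first, with RUNA = 1 and RUNB = 2:
			 * a run of 3 becomes RUNA RUNA (1 + 1*2), a run of 4
			 * becomes RUNB RUNA (2 + 1*2), a run of 5 becomes
			 * RUNA RUNB (1 + 2*2), and so on.
			 */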
			{
				register uint8_t  rtmp;
				register uint8_t* ryy_j;
				register uint8_t  rll_i;
				rtmp  = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2  = rtmp;
					rtmp   = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j + 1;
				wr++;
				s->mtfFreq[j + 1]++;
			}
		}
	}

	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}


/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups, nBytes;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t  fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	                    nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF  = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}

			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
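	/*
	 * Example: with nMTF == 1000 the ladder above picks nGroups == 4,
	 * and each pass of the loop hands one table a contiguous band of
	 * symbols whose frequencies sum to roughly remF / nPart, marking
	 * the band cheap (BZ_LESSER_ICOST) and everything else expensive
	 * (BZ_GREATER_ICOST).
	 */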

	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FEATURE_SPEED >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
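		/*
		 * len_pack[v] holds the six 16-bit lengths for symbol v in
		 * three 32-bit words, so the fast path below updates two
		 * table costs per 32-bit add.  No carry can cross the
		 * halves: a group has 50 symbols and each length is at
		 * most 17, so either half stays below 50*17 < 65536.
		 */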
		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FEATURE_SPEED >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FEATURE_SPEED >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}

	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(uint32_t*)&(s->inUse[i * 16 + 0])
					| *(uint32_t*)&(s->inUse[i * 16 + 4])
					| *(uint32_t*)&(s->inUse[i * 16 + 8])
					| *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(uint64_t*)&(s->inUse[i * 16 + 0])
					| *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}

		nBytes = s->numZ;
		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
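	/*
	 * Example: a block containing only ASCII text (bytes 32..126)
	 * touches buckets 2..7 of the 16x16 map, so inUse16 has six bits
	 * set and only six of the sixteen detail bitmaps follow.
	 */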

	/*--- Now the selectors. ---*/
	nBytes = s->numZ;
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
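	/* Each selector MTF value j is sent in unary: j 1-bits, then a 0. */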

	/*--- Now the coding tables. ---*/
	nBytes = s->numZ;

	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
			bsW(s, 1, 0);
		}
	}
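	/*
	 * Lengths are delta coded per table: a 5-bit starting length,
	 * then for each symbol "10" bumps curr up, "11" bumps it down,
	 * and a lone 0 bit says the current length is final.  E.g. going
	 * from curr == 5 to a length of 3 costs "11 11 0".
	 */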

	/*--- And finally, the block data proper ---*/
	nBytes = s->numZ;
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves the multiply out of the loop */
			uint8_t* s_len_sel_selCtr  = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}


/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
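		/* The six magic bytes 31 41 59 26 53 59 are the decimal
		 * digits of pi (3.14159265359, rounded), packed BCD-style,
		 * marking the start of a compressed block. */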

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
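		/* 17 72 45 38 50 90: the stream-end magic, the decimal
		 * digits of sqrt(pi) = 1.77245385090..., packed the same
		 * way. */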
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}


/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/