author | Leon Woestenberg <leon.woestenberg@gmail.com> | 2006-10-12 15:26:49 +0000
committer | Marcin Juszkiewicz <hrw@openembedded.org> | 2006-10-12 15:26:49 +0000
commit | 9faf013f17822b391692cabb5a99de267beaa912 (patch)
tree | 05d4f0c8a52ee436f05782609d99691e2596db0e /packages/squashfs-tools/files
parent | 7328b25edee6451c77616afe8e7c27bf3990d8d4 (diff)
squashfs-tools: added 3.1r2 (normal and lzma) - closes #1466
Diffstat (limited to 'packages/squashfs-tools/files')
5 files changed, 2776 insertions, 0 deletions
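The kernel patch in this commit swaps squashfs's zlib inflate workspace for a statically sized LZMA workspace. As a quick sanity check of that sizing, here is a minimal sketch that reproduces the LZMA_WORKSPACE_SIZE arithmetic using the constants from inode.c and LzmaDecode.h in the diff below; the standalone program and its printout are my own illustration, not part of the patch.

```c
/* Worked example of the LZMA_WORKSPACE_SIZE computed in the patched inode.c.
 * LZMA_BASE_SIZE and LZMA_LIT_SIZE come from LzmaDecode.h; LZMA_LC/LP/PB are
 * the "default LZMA settings" the patch hard-codes (they must stay in sync
 * with mksquashfs). This helper is only illustrative.
 */
#include <stdio.h>

#define LZMA_BASE_SIZE 1846
#define LZMA_LIT_SIZE  768
#define LZMA_LC 3
#define LZMA_LP 0
#define LZMA_PB 2

typedef unsigned short CProb;   /* default; UInt32 only if _LZMA_PROB32 is defined */

int main(void)
{
    /* (1846 + (768 << 3)) * sizeof(unsigned short) = (1846 + 6144) * 2 */
    size_t workspace = (LZMA_BASE_SIZE +
                        ((size_t)LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb);
    printf("LZMA_WORKSPACE_SIZE = %zu bytes\n", workspace);
    return 0;
}
```

On a platform where unsigned short is 2 bytes this comes to 15980 bytes, which is the size of the static lzma_workspace[] array the patched inode.c reserves in place of the vmalloc'd zlib workspace.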
diff --git a/packages/squashfs-tools/files/002-squashfs_lzma.patch b/packages/squashfs-tools/files/002-squashfs_lzma.patch new file mode 100644 index 0000000000..a166e84971 --- /dev/null +++ b/packages/squashfs-tools/files/002-squashfs_lzma.patch @@ -0,0 +1,886 @@ +diff -Nur linux-2.6.16/fs/squashfs/inode.c linux-2.6.16-owrt/fs/squashfs/inode.c +--- linux-2.6.16/fs/squashfs/inode.c 2006-03-21 10:55:59.000000000 +0100 ++++ linux-2.6.16-owrt/fs/squashfs/inode.c 2006-03-21 12:24:37.000000000 +0100 +@@ -4,6 +4,9 @@ + * Copyright (c) 2002, 2003, 2004, 2005, 2006 + * Phillip Lougher <phillip@lougher.org.uk> + * ++ * LZMA decompressor support added by Oleg I. Vdovikin ++ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su> ++ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2, +@@ -21,6 +24,7 @@ + * inode.c + */ + ++#define SQUASHFS_LZMA + #include <linux/types.h> + #include <linux/squashfs_fs.h> + #include <linux/module.h> +@@ -44,6 +48,19 @@ + + #include "squashfs.h" + ++#ifdef SQUASHFS_LZMA ++#include "LzmaDecode.h" ++ ++/* default LZMA settings, should be in sync with mksquashfs */ ++#define LZMA_LC 3 ++#define LZMA_LP 0 ++#define LZMA_PB 2 ++ ++#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \ ++ (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb)) ++ ++#endif ++ + static void squashfs_put_super(struct super_block *); + static int squashfs_statfs(struct super_block *, struct kstatfs *); + static int squashfs_symlink_readpage(struct file *file, struct page *page); +@@ -64,7 +81,11 @@ + const char *, void *); + + ++#ifdef SQUASHFS_LZMA ++static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE]; ++#else + static z_stream stream; ++#endif + + static struct file_system_type squashfs_fs_type = { + .owner = THIS_MODULE, +@@ -249,6 +270,15 @@ + if (compressed) { + int zlib_err; + ++#ifdef SQUASHFS_LZMA ++ if ((zlib_err = LzmaDecode(lzma_workspace, ++ LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB, ++ c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK) ++ { ++ ERROR("lzma returned unexpected result 0x%x\n", zlib_err); ++ bytes = 0; ++ } ++#else + stream.next_in = c_buffer; + stream.avail_in = c_byte; + stream.next_out = buffer; +@@ -263,6 +293,7 @@ + bytes = 0; + } else + bytes = stream.total_out; ++#endif + + up(&msblk->read_data_mutex); + } +@@ -2046,15 +2077,19 @@ + printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) " + "Phillip Lougher\n"); + ++#ifndef SQUASHFS_LZMA + if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) { + ERROR("Failed to allocate zlib workspace\n"); + destroy_inodecache(); + err = -ENOMEM; + goto out; + } ++#endif + + if ((err = register_filesystem(&squashfs_fs_type))) { ++#ifndef SQUASHFS_LZMA + vfree(stream.workspace); ++#endif + destroy_inodecache(); + } + +@@ -2065,7 +2100,9 @@ + + static void __exit exit_squashfs_fs(void) + { ++#ifndef SQUASHFS_LZMA + vfree(stream.workspace); ++#endif + unregister_filesystem(&squashfs_fs_type); + destroy_inodecache(); + } +diff -Nur linux-2.6.16/fs/squashfs/LzmaDecode.c linux-2.6.16-owrt/fs/squashfs/LzmaDecode.c +--- linux-2.6.16/fs/squashfs/LzmaDecode.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.16-owrt/fs/squashfs/LzmaDecode.c 2006-03-21 10:56:57.000000000 +0100 +@@ -0,0 +1,663 @@ ++/* ++ LzmaDecode.c ++ LZMA Decoder ++ ++ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25) ++ http://www.7-zip.org/ ++ ++ LZMA SDK is licensed under two 
licenses: ++ 1) GNU Lesser General Public License (GNU LGPL) ++ 2) Common Public License (CPL) ++ It means that you can select one of these two licenses and ++ follow rules of that license. ++ ++ SPECIAL EXCEPTION: ++ Igor Pavlov, as the author of this code, expressly permits you to ++ statically or dynamically link your code (or bind by name) to the ++ interfaces of this file without subjecting your linked code to the ++ terms of the CPL or GNU LGPL. Any modifications or additions ++ to this file, however, are subject to the LGPL or CPL terms. ++*/ ++ ++#include "LzmaDecode.h" ++ ++#ifndef Byte ++#define Byte unsigned char ++#endif ++ ++#define kNumTopBits 24 ++#define kTopValue ((UInt32)1 << kNumTopBits) ++ ++#define kNumBitModelTotalBits 11 ++#define kBitModelTotal (1 << kNumBitModelTotalBits) ++#define kNumMoveBits 5 ++ ++typedef struct _CRangeDecoder ++{ ++ Byte *Buffer; ++ Byte *BufferLim; ++ UInt32 Range; ++ UInt32 Code; ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *InCallback; ++ int Result; ++ #endif ++ int ExtraBytes; ++} CRangeDecoder; ++ ++Byte RangeDecoderReadByte(CRangeDecoder *rd) ++{ ++ if (rd->Buffer == rd->BufferLim) ++ { ++ #ifdef _LZMA_IN_CB ++ UInt32 size; ++ rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size); ++ rd->BufferLim = rd->Buffer + size; ++ if (size == 0) ++ #endif ++ { ++ rd->ExtraBytes = 1; ++ return 0xFF; ++ } ++ } ++ return (*rd->Buffer++); ++} ++ ++/* #define ReadByte (*rd->Buffer++) */ ++#define ReadByte (RangeDecoderReadByte(rd)) ++ ++void RangeDecoderInit(CRangeDecoder *rd, ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *inCallback ++ #else ++ Byte *stream, UInt32 bufferSize ++ #endif ++ ) ++{ ++ int i; ++ #ifdef _LZMA_IN_CB ++ rd->InCallback = inCallback; ++ rd->Buffer = rd->BufferLim = 0; ++ #else ++ rd->Buffer = stream; ++ rd->BufferLim = stream + bufferSize; ++ #endif ++ rd->ExtraBytes = 0; ++ rd->Code = 0; ++ rd->Range = (0xFFFFFFFF); ++ for(i = 0; i < 5; i++) ++ rd->Code = (rd->Code << 8) | ReadByte; ++} ++ ++#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code; ++#define RC_FLUSH_VAR rd->Range = range; rd->Code = code; ++#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; } ++ ++UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits) ++{ ++ RC_INIT_VAR ++ UInt32 result = 0; ++ int i; ++ for (i = numTotalBits; i > 0; i--) ++ { ++ /* UInt32 t; */ ++ range >>= 1; ++ ++ result <<= 1; ++ if (code >= range) ++ { ++ code -= range; ++ result |= 1; ++ } ++ /* ++ t = (code - range) >> 31; ++ t &= 1; ++ code -= range & (t - 1); ++ result = (result + result) | (1 - t); ++ */ ++ RC_NORMALIZE ++ } ++ RC_FLUSH_VAR ++ return result; ++} ++ ++int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd) ++{ ++ UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob; ++ if (rd->Code < bound) ++ { ++ rd->Range = bound; ++ *prob += (kBitModelTotal - *prob) >> kNumMoveBits; ++ if (rd->Range < kTopValue) ++ { ++ rd->Code = (rd->Code << 8) | ReadByte; ++ rd->Range <<= 8; ++ } ++ return 0; ++ } ++ else ++ { ++ rd->Range -= bound; ++ rd->Code -= bound; ++ *prob -= (*prob) >> kNumMoveBits; ++ if (rd->Range < kTopValue) ++ { ++ rd->Code = (rd->Code << 8) | ReadByte; ++ rd->Range <<= 8; ++ } ++ return 1; ++ } ++} ++ ++#define RC_GET_BIT2(prob, mi, A0, A1) \ ++ UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \ ++ if (code < bound) \ ++ { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \ ++ else \ ++ { A1; range -= bound; code -= bound; *prob -= (*prob) 
>> kNumMoveBits; mi = (mi + mi) + 1; } \ ++ RC_NORMALIZE ++ ++#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;) ++ ++int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd) ++{ ++ int mi = 1; ++ int i; ++ #ifdef _LZMA_LOC_OPT ++ RC_INIT_VAR ++ #endif ++ for(i = numLevels; i > 0; i--) ++ { ++ #ifdef _LZMA_LOC_OPT ++ CProb *prob = probs + mi; ++ RC_GET_BIT(prob, mi) ++ #else ++ mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd); ++ #endif ++ } ++ #ifdef _LZMA_LOC_OPT ++ RC_FLUSH_VAR ++ #endif ++ return mi - (1 << numLevels); ++} ++ ++int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd) ++{ ++ int mi = 1; ++ int i; ++ int symbol = 0; ++ #ifdef _LZMA_LOC_OPT ++ RC_INIT_VAR ++ #endif ++ for(i = 0; i < numLevels; i++) ++ { ++ #ifdef _LZMA_LOC_OPT ++ CProb *prob = probs + mi; ++ RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i)) ++ #else ++ int bit = RangeDecoderBitDecode(probs + mi, rd); ++ mi = mi + mi + bit; ++ symbol |= (bit << i); ++ #endif ++ } ++ #ifdef _LZMA_LOC_OPT ++ RC_FLUSH_VAR ++ #endif ++ return symbol; ++} ++ ++Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd) ++{ ++ int symbol = 1; ++ #ifdef _LZMA_LOC_OPT ++ RC_INIT_VAR ++ #endif ++ do ++ { ++ #ifdef _LZMA_LOC_OPT ++ CProb *prob = probs + symbol; ++ RC_GET_BIT(prob, symbol) ++ #else ++ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd); ++ #endif ++ } ++ while (symbol < 0x100); ++ #ifdef _LZMA_LOC_OPT ++ RC_FLUSH_VAR ++ #endif ++ return symbol; ++} ++ ++Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte) ++{ ++ int symbol = 1; ++ #ifdef _LZMA_LOC_OPT ++ RC_INIT_VAR ++ #endif ++ do ++ { ++ int bit; ++ int matchBit = (matchByte >> 7) & 1; ++ matchByte <<= 1; ++ #ifdef _LZMA_LOC_OPT ++ { ++ CProb *prob = probs + ((1 + matchBit) << 8) + symbol; ++ RC_GET_BIT2(prob, symbol, bit = 0, bit = 1) ++ } ++ #else ++ bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd); ++ symbol = (symbol << 1) | bit; ++ #endif ++ if (matchBit != bit) ++ { ++ while (symbol < 0x100) ++ { ++ #ifdef _LZMA_LOC_OPT ++ CProb *prob = probs + symbol; ++ RC_GET_BIT(prob, symbol) ++ #else ++ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd); ++ #endif ++ } ++ break; ++ } ++ } ++ while (symbol < 0x100); ++ #ifdef _LZMA_LOC_OPT ++ RC_FLUSH_VAR ++ #endif ++ return symbol; ++} ++ ++#define kNumPosBitsMax 4 ++#define kNumPosStatesMax (1 << kNumPosBitsMax) ++ ++#define kLenNumLowBits 3 ++#define kLenNumLowSymbols (1 << kLenNumLowBits) ++#define kLenNumMidBits 3 ++#define kLenNumMidSymbols (1 << kLenNumMidBits) ++#define kLenNumHighBits 8 ++#define kLenNumHighSymbols (1 << kLenNumHighBits) ++ ++#define LenChoice 0 ++#define LenChoice2 (LenChoice + 1) ++#define LenLow (LenChoice2 + 1) ++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits)) ++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits)) ++#define kNumLenProbs (LenHigh + kLenNumHighSymbols) ++ ++int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState) ++{ ++ if(RangeDecoderBitDecode(p + LenChoice, rd) == 0) ++ return RangeDecoderBitTreeDecode(p + LenLow + ++ (posState << kLenNumLowBits), kLenNumLowBits, rd); ++ if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0) ++ return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid + ++ (posState << kLenNumMidBits), kLenNumMidBits, rd); ++ return kLenNumLowSymbols + kLenNumMidSymbols + ++ RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd); ++} ++ ++#define kNumStates 12 ++ ++#define 
kStartPosModelIndex 4 ++#define kEndPosModelIndex 14 ++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) ++ ++#define kNumPosSlotBits 6 ++#define kNumLenToPosStates 4 ++ ++#define kNumAlignBits 4 ++#define kAlignTableSize (1 << kNumAlignBits) ++ ++#define kMatchMinLen 2 ++ ++#define IsMatch 0 ++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax)) ++#define IsRepG0 (IsRep + kNumStates) ++#define IsRepG1 (IsRepG0 + kNumStates) ++#define IsRepG2 (IsRepG1 + kNumStates) ++#define IsRep0Long (IsRepG2 + kNumStates) ++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax)) ++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits)) ++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex) ++#define LenCoder (Align + kAlignTableSize) ++#define RepLenCoder (LenCoder + kNumLenProbs) ++#define Literal (RepLenCoder + kNumLenProbs) ++ ++#if Literal != LZMA_BASE_SIZE ++StopCompilingDueBUG ++#endif ++ ++#ifdef _LZMA_OUT_READ ++ ++typedef struct _LzmaVarState ++{ ++ CRangeDecoder RangeDecoder; ++ Byte *Dictionary; ++ UInt32 DictionarySize; ++ UInt32 DictionaryPos; ++ UInt32 GlobalPos; ++ UInt32 Reps[4]; ++ int lc; ++ int lp; ++ int pb; ++ int State; ++ int PreviousIsMatch; ++ int RemainLen; ++} LzmaVarState; ++ ++int LzmaDecoderInit( ++ unsigned char *buffer, UInt32 bufferSize, ++ int lc, int lp, int pb, ++ unsigned char *dictionary, UInt32 dictionarySize, ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *inCallback ++ #else ++ unsigned char *inStream, UInt32 inSize ++ #endif ++ ) ++{ ++ LzmaVarState *vs = (LzmaVarState *)buffer; ++ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState)); ++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp)); ++ UInt32 i; ++ if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState)) ++ return LZMA_RESULT_NOT_ENOUGH_MEM; ++ vs->Dictionary = dictionary; ++ vs->DictionarySize = dictionarySize; ++ vs->DictionaryPos = 0; ++ vs->GlobalPos = 0; ++ vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1; ++ vs->lc = lc; ++ vs->lp = lp; ++ vs->pb = pb; ++ vs->State = 0; ++ vs->PreviousIsMatch = 0; ++ vs->RemainLen = 0; ++ dictionary[dictionarySize - 1] = 0; ++ for (i = 0; i < numProbs; i++) ++ p[i] = kBitModelTotal >> 1; ++ RangeDecoderInit(&vs->RangeDecoder, ++ #ifdef _LZMA_IN_CB ++ inCallback ++ #else ++ inStream, inSize ++ #endif ++ ); ++ return LZMA_RESULT_OK; ++} ++ ++int LzmaDecode(unsigned char *buffer, ++ unsigned char *outStream, UInt32 outSize, ++ UInt32 *outSizeProcessed) ++{ ++ LzmaVarState *vs = (LzmaVarState *)buffer; ++ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState)); ++ CRangeDecoder rd = vs->RangeDecoder; ++ int state = vs->State; ++ int previousIsMatch = vs->PreviousIsMatch; ++ Byte previousByte; ++ UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3]; ++ UInt32 nowPos = 0; ++ UInt32 posStateMask = (1 << (vs->pb)) - 1; ++ UInt32 literalPosMask = (1 << (vs->lp)) - 1; ++ int lc = vs->lc; ++ int len = vs->RemainLen; ++ UInt32 globalPos = vs->GlobalPos; ++ ++ Byte *dictionary = vs->Dictionary; ++ UInt32 dictionarySize = vs->DictionarySize; ++ UInt32 dictionaryPos = vs->DictionaryPos; ++ ++ if (len == -1) ++ { ++ *outSizeProcessed = 0; ++ return LZMA_RESULT_OK; ++ } ++ ++ while(len > 0 && nowPos < outSize) ++ { ++ UInt32 pos = dictionaryPos - rep0; ++ if (pos >= dictionarySize) ++ pos += dictionarySize; ++ outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos]; ++ if (++dictionaryPos == dictionarySize) ++ dictionaryPos = 0; ++ len--; ++ } ++ if (dictionaryPos == 0) ++ 
previousByte = dictionary[dictionarySize - 1]; ++ else ++ previousByte = dictionary[dictionaryPos - 1]; ++#else ++ ++int LzmaDecode( ++ Byte *buffer, UInt32 bufferSize, ++ int lc, int lp, int pb, ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *inCallback, ++ #else ++ unsigned char *inStream, UInt32 inSize, ++ #endif ++ unsigned char *outStream, UInt32 outSize, ++ UInt32 *outSizeProcessed) ++{ ++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp)); ++ CProb *p = (CProb *)buffer; ++ CRangeDecoder rd; ++ UInt32 i; ++ int state = 0; ++ int previousIsMatch = 0; ++ Byte previousByte = 0; ++ UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1; ++ UInt32 nowPos = 0; ++ UInt32 posStateMask = (1 << pb) - 1; ++ UInt32 literalPosMask = (1 << lp) - 1; ++ int len = 0; ++ if (bufferSize < numProbs * sizeof(CProb)) ++ return LZMA_RESULT_NOT_ENOUGH_MEM; ++ for (i = 0; i < numProbs; i++) ++ p[i] = kBitModelTotal >> 1; ++ RangeDecoderInit(&rd, ++ #ifdef _LZMA_IN_CB ++ inCallback ++ #else ++ inStream, inSize ++ #endif ++ ); ++#endif ++ ++ *outSizeProcessed = 0; ++ while(nowPos < outSize) ++ { ++ int posState = (int)( ++ (nowPos ++ #ifdef _LZMA_OUT_READ ++ + globalPos ++ #endif ++ ) ++ & posStateMask); ++ #ifdef _LZMA_IN_CB ++ if (rd.Result != LZMA_RESULT_OK) ++ return rd.Result; ++ #endif ++ if (rd.ExtraBytes != 0) ++ return LZMA_RESULT_DATA_ERROR; ++ if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0) ++ { ++ CProb *probs = p + Literal + (LZMA_LIT_SIZE * ++ ((( ++ (nowPos ++ #ifdef _LZMA_OUT_READ ++ + globalPos ++ #endif ++ ) ++ & literalPosMask) << lc) + (previousByte >> (8 - lc)))); ++ ++ if (state < 4) state = 0; ++ else if (state < 10) state -= 3; ++ else state -= 6; ++ if (previousIsMatch) ++ { ++ Byte matchByte; ++ #ifdef _LZMA_OUT_READ ++ UInt32 pos = dictionaryPos - rep0; ++ if (pos >= dictionarySize) ++ pos += dictionarySize; ++ matchByte = dictionary[pos]; ++ #else ++ matchByte = outStream[nowPos - rep0]; ++ #endif ++ previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte); ++ previousIsMatch = 0; ++ } ++ else ++ previousByte = LzmaLiteralDecode(probs, &rd); ++ outStream[nowPos++] = previousByte; ++ #ifdef _LZMA_OUT_READ ++ dictionary[dictionaryPos] = previousByte; ++ if (++dictionaryPos == dictionarySize) ++ dictionaryPos = 0; ++ #endif ++ } ++ else ++ { ++ previousIsMatch = 1; ++ if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1) ++ { ++ if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0) ++ { ++ if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0) ++ { ++ #ifdef _LZMA_OUT_READ ++ UInt32 pos; ++ #endif ++ if ( ++ (nowPos ++ #ifdef _LZMA_OUT_READ ++ + globalPos ++ #endif ++ ) ++ == 0) ++ return LZMA_RESULT_DATA_ERROR; ++ state = state < 7 ? 
9 : 11; ++ #ifdef _LZMA_OUT_READ ++ pos = dictionaryPos - rep0; ++ if (pos >= dictionarySize) ++ pos += dictionarySize; ++ previousByte = dictionary[pos]; ++ dictionary[dictionaryPos] = previousByte; ++ if (++dictionaryPos == dictionarySize) ++ dictionaryPos = 0; ++ #else ++ previousByte = outStream[nowPos - rep0]; ++ #endif ++ outStream[nowPos++] = previousByte; ++ continue; ++ } ++ } ++ else ++ { ++ UInt32 distance; ++ if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0) ++ distance = rep1; ++ else ++ { ++ if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0) ++ distance = rep2; ++ else ++ { ++ distance = rep3; ++ rep3 = rep2; ++ } ++ rep2 = rep1; ++ } ++ rep1 = rep0; ++ rep0 = distance; ++ } ++ len = LzmaLenDecode(p + RepLenCoder, &rd, posState); ++ state = state < 7 ? 8 : 11; ++ } ++ else ++ { ++ int posSlot; ++ rep3 = rep2; ++ rep2 = rep1; ++ rep1 = rep0; ++ state = state < 7 ? 7 : 10; ++ len = LzmaLenDecode(p + LenCoder, &rd, posState); ++ posSlot = RangeDecoderBitTreeDecode(p + PosSlot + ++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << ++ kNumPosSlotBits), kNumPosSlotBits, &rd); ++ if (posSlot >= kStartPosModelIndex) ++ { ++ int numDirectBits = ((posSlot >> 1) - 1); ++ rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits); ++ if (posSlot < kEndPosModelIndex) ++ { ++ rep0 += RangeDecoderReverseBitTreeDecode( ++ p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd); ++ } ++ else ++ { ++ rep0 += RangeDecoderDecodeDirectBits(&rd, ++ numDirectBits - kNumAlignBits) << kNumAlignBits; ++ rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd); ++ } ++ } ++ else ++ rep0 = posSlot; ++ rep0++; ++ } ++ if (rep0 == (UInt32)(0)) ++ { ++ /* it's for stream version */ ++ len = -1; ++ break; ++ } ++ if (rep0 > nowPos ++ #ifdef _LZMA_OUT_READ ++ + globalPos ++ #endif ++ ) ++ { ++ return LZMA_RESULT_DATA_ERROR; ++ } ++ len += kMatchMinLen; ++ do ++ { ++ #ifdef _LZMA_OUT_READ ++ UInt32 pos = dictionaryPos - rep0; ++ if (pos >= dictionarySize) ++ pos += dictionarySize; ++ previousByte = dictionary[pos]; ++ dictionary[dictionaryPos] = previousByte; ++ if (++dictionaryPos == dictionarySize) ++ dictionaryPos = 0; ++ #else ++ previousByte = outStream[nowPos - rep0]; ++ #endif ++ outStream[nowPos++] = previousByte; ++ len--; ++ } ++ while(len > 0 && nowPos < outSize); ++ } ++ } ++ ++ #ifdef _LZMA_OUT_READ ++ vs->RangeDecoder = rd; ++ vs->DictionaryPos = dictionaryPos; ++ vs->GlobalPos = globalPos + nowPos; ++ vs->Reps[0] = rep0; ++ vs->Reps[1] = rep1; ++ vs->Reps[2] = rep2; ++ vs->Reps[3] = rep3; ++ vs->State = state; ++ vs->PreviousIsMatch = previousIsMatch; ++ vs->RemainLen = len; ++ #endif ++ ++ *outSizeProcessed = nowPos; ++ return LZMA_RESULT_OK; ++} +diff -Nur linux-2.6.16/fs/squashfs/LzmaDecode.h linux-2.6.16-owrt/fs/squashfs/LzmaDecode.h +--- linux-2.6.16/fs/squashfs/LzmaDecode.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.16-owrt/fs/squashfs/LzmaDecode.h 2006-03-21 10:56:57.000000000 +0100 +@@ -0,0 +1,100 @@ ++/* ++ LzmaDecode.h ++ LZMA Decoder interface ++ ++ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25) ++ http://www.7-zip.org/ ++ ++ LZMA SDK is licensed under two licenses: ++ 1) GNU Lesser General Public License (GNU LGPL) ++ 2) Common Public License (CPL) ++ It means that you can select one of these two licenses and ++ follow rules of that license. 
++ ++ SPECIAL EXCEPTION: ++ Igor Pavlov, as the author of this code, expressly permits you to ++ statically or dynamically link your code (or bind by name) to the ++ interfaces of this file without subjecting your linked code to the ++ terms of the CPL or GNU LGPL. Any modifications or additions ++ to this file, however, are subject to the LGPL or CPL terms. ++*/ ++ ++#ifndef __LZMADECODE_H ++#define __LZMADECODE_H ++ ++/* #define _LZMA_IN_CB */ ++/* Use callback for input data */ ++ ++/* #define _LZMA_OUT_READ */ ++/* Use read function for output data */ ++ ++/* #define _LZMA_PROB32 */ ++/* It can increase speed on some 32-bit CPUs, ++ but memory usage will be doubled in that case */ ++ ++/* #define _LZMA_LOC_OPT */ ++/* Enable local speed optimizations inside code */ ++ ++#ifndef UInt32 ++#ifdef _LZMA_UINT32_IS_ULONG ++#define UInt32 unsigned long ++#else ++#define UInt32 unsigned int ++#endif ++#endif ++ ++#ifdef _LZMA_PROB32 ++#define CProb UInt32 ++#else ++#define CProb unsigned short ++#endif ++ ++#define LZMA_RESULT_OK 0 ++#define LZMA_RESULT_DATA_ERROR 1 ++#define LZMA_RESULT_NOT_ENOUGH_MEM 2 ++ ++#ifdef _LZMA_IN_CB ++typedef struct _ILzmaInCallback ++{ ++ int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize); ++} ILzmaInCallback; ++#endif ++ ++#define LZMA_BASE_SIZE 1846 ++#define LZMA_LIT_SIZE 768 ++ ++/* ++bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb) ++bufferSize += 100 in case of _LZMA_OUT_READ ++by default CProb is unsigned short, ++but if specify _LZMA_PROB_32, CProb will be UInt32(unsigned int) ++*/ ++ ++#ifdef _LZMA_OUT_READ ++int LzmaDecoderInit( ++ unsigned char *buffer, UInt32 bufferSize, ++ int lc, int lp, int pb, ++ unsigned char *dictionary, UInt32 dictionarySize, ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *inCallback ++ #else ++ unsigned char *inStream, UInt32 inSize ++ #endif ++); ++#endif ++ ++int LzmaDecode( ++ unsigned char *buffer, ++ #ifndef _LZMA_OUT_READ ++ UInt32 bufferSize, ++ int lc, int lp, int pb, ++ #ifdef _LZMA_IN_CB ++ ILzmaInCallback *inCallback, ++ #else ++ unsigned char *inStream, UInt32 inSize, ++ #endif ++ #endif ++ unsigned char *outStream, UInt32 outSize, ++ UInt32 *outSizeProcessed); ++ ++#endif +diff -Nur linux-2.6.16/fs/squashfs/Makefile linux-2.6.16-owrt/fs/squashfs/Makefile +--- linux-2.6.16/fs/squashfs/Makefile 2006-03-21 10:55:59.000000000 +0100 ++++ linux-2.6.16-owrt/fs/squashfs/Makefile 2006-03-21 10:57:08.000000000 +0100 +@@ -5,3 +5,4 @@ + obj-$(CONFIG_SQUASHFS) += squashfs.o + squashfs-y += inode.o + squashfs-y += squashfs2_0.o ++squashfs-y += LzmaDecode.o diff --git a/packages/squashfs-tools/files/Makefile-3.1.patch b/packages/squashfs-tools/files/Makefile-3.1.patch new file mode 100644 index 0000000000..1b7c49878f --- /dev/null +++ b/packages/squashfs-tools/files/Makefile-3.1.patch @@ -0,0 +1,34 @@ +Modifies the Makefile to work with OpenEmbedded. + +I copied the CFLAGS_R approach from the existing 2.0 squashfs-tools, +without actually knowing why this approach was taken. It works though. + +Leon Woestenberg <leonw@mailcan.com> + +--- squashfs-tools/Makefile 2006-10-10 13:59:05.000000000 +0200 ++++ squashfs-tools/Makefile.oe 2006-10-10 14:06:01.000000000 +0200 +@@ -1,17 +1,22 @@ + INCLUDEDIR = . 
++CFLAGS_R = -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE ++LIBS = -lz -lpthread + + CFLAGS := -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -O2 + + all: mksquashfs unsquashfs + + mksquashfs: mksquashfs.o read_fs.o sort.o +- $(CC) mksquashfs.o read_fs.o sort.o -lz -lpthread -o $@ ++ $(CC) $(LDFLAGS) mksquashfs.o read_fs.o sort.o $(LIBS) -o $@ + + mksquashfs.o: mksquashfs.c squashfs_fs.h mksquashfs.h global.h sort.h ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o mksquashfs.o mksquashfs.c + + read_fs.o: read_fs.c squashfs_fs.h read_fs.h global.h +- ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o read_fs.o read_fs.c ++ + sort.o: sort.c squashfs_fs.h global.h sort.h ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o sort.o sort.c + + unsquashfs: unsquashfs.o + $(CC) unsquashfs.o -lz -o $@ diff --git a/packages/squashfs-tools/files/Makefile.patch b/packages/squashfs-tools/files/Makefile.patch new file mode 100644 index 0000000000..beabea88f9 --- /dev/null +++ b/packages/squashfs-tools/files/Makefile.patch @@ -0,0 +1,31 @@ +Index: squashfs-tools/Makefile +=================================================================== +--- squashfs-tools.orig/Makefile 2006-08-21 01:42:45.000000000 +0200 ++++ squashfs-tools/Makefile 2006-09-21 11:42:10.000000000 +0200 +@@ -1,19 +1,24 @@ + INCLUDEDIR = . + + CFLAGS := -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -O2 ++CFLAGS_R = -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE ++LIBS = -lz + + all: mksquashfs unsquashfs + + mksquashfs: mksquashfs.o read_fs.o sort.o +- $(CC) mksquashfs.o read_fs.o sort.o -lz -lpthread -o $@ ++ $(CC) $(LDFLAGS) mksquashfs.o read_fs.o sort.o -lz -lpthread -o $@ + + mksquashfs.o: mksquashfs.c squashfs_fs.h mksquashfs.h global.h sort.h ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o mksquashfs.o mksquashfs.c + + read_fs.o: read_fs.c squashfs_fs.h read_fs.h global.h ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o read_fs.o read_fs.c + + sort.o: sort.c squashfs_fs.h global.h sort.h ++ $(CC) $(CFLAGS_R) $(CFLAGS) -c -o sort.o sort.c + + unsquashfs: unsquashfs.o +- $(CC) unsquashfs.o -lz -o $@ ++ $(CC) $(LDFLAGS) unsquashfs.o -lz -o $@ + + unsquashfs.o: unsquashfs.c squashfs_fs.h read_fs.h global.h diff --git a/packages/squashfs-tools/files/squashfs3.1r2-tools-lzma_Makefile.patch b/packages/squashfs-tools/files/squashfs3.1r2-tools-lzma_Makefile.patch new file mode 100644 index 0000000000..eeb578ab12 --- /dev/null +++ b/packages/squashfs-tools/files/squashfs3.1r2-tools-lzma_Makefile.patch @@ -0,0 +1,40 @@ +Modifies the Makefile so that it builds {mk,un}squashfs-lzma instead +of {mk,un}squashfs. This requires the common (non-lzma) patch called +Makefile-3.1 to be applied first. + +Leon Woestenberg <leonw@mailcan.com> + +--- squashfs-tools/Makefile 2006-10-10 16:07:43.000000000 +0200 ++++ squashfs-lzma-tools/Makefile 2006-10-10 15:44:57.000000000 +0200 +@@ -1,13 +1,15 @@ + INCLUDEDIR = . 
+ CFLAGS_R = -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE +-LIBS = -lz -lpthread ++#LZMAPATH = ./lzma/C/7zip/Compress/LZMA_Lib ++LIBS = -llzma -lz -lpthread + + CFLAGS := -I$(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -O2 + + all: mksquashfs unsquashfs + +-mksquashfs: mksquashfs.o read_fs.o sort.o +- $(CC) $(LDFLAGS) mksquashfs.o read_fs.o sort.o $(LIBS) -o $@ ++mksquashfs-lzma: mksquashfs.o read_fs.o sort.o ++# make -C $(LZMAPATH) ++ $(CXX) $(LDFLAGS) mksquashfs.o read_fs.o sort.o $(LIBS) -o $@ + + mksquashfs.o: mksquashfs.c squashfs_fs.h mksquashfs.h global.h sort.h + $(CC) $(CFLAGS_R) $(CFLAGS) -c -o mksquashfs.o mksquashfs.c +@@ -18,7 +20,10 @@ + sort.o: sort.c squashfs_fs.h global.h sort.h + $(CC) $(CFLAGS_R) $(CFLAGS) -c -o sort.o sort.c + +-unsquashfs: unsquashfs.o +- $(CC) unsquashfs.o -lz -o $@ ++unsquashfs-lzma: unsquashfs.o ++# make -C $(LZMAPATH) ++ $(CXX) unsquashfs.o $(LIBS) -o $@ + + unsquashfs.o: unsquashfs.c squashfs_fs.h read_fs.h global.h ++ ++clean: diff --git a/packages/squashfs-tools/files/squashfs3.1r2-tools-replace_zlib_with_lzma.patch b/packages/squashfs-tools/files/squashfs3.1r2-tools-replace_zlib_with_lzma.patch new file mode 100644 index 0000000000..2947de0858 --- /dev/null +++ b/packages/squashfs-tools/files/squashfs3.1r2-tools-replace_zlib_with_lzma.patch @@ -0,0 +1,1785 @@ +Modify the source code to use the lzma {,de}compression routines. + +Leon Woestenberg <leonw@mailcan.com> + +diff -Nur squashfs-tools/mksquashfs.c squashfs-lzma-tools/mksquashfs.c +--- squashfs-tools/mksquashfs.c 2006-08-31 00:07:37.000000000 +0200 ++++ squashfs-lzma-tools/mksquashfs.c 2006-08-22 09:35:14.000000000 +0200 +@@ -40,16 +40,13 @@ + #include <signal.h> + #include <setjmp.h> + #include <sys/mman.h> +-#include <pthread.h> + + #ifndef linux + #define __BYTE_ORDER BYTE_ORDER + #define __BIG_ENDIAN BIG_ENDIAN + #define __LITTLE_ENDIAN LITTLE_ENDIAN +-#include <sys/sysctl.h> + #else + #include <endian.h> +-#include <sys/sysinfo.h> + #endif + + #include <squashfs_fs.h> +@@ -149,7 +146,7 @@ + struct inode_info *inode_info[INODE_HASH_SIZE]; + + /* hash tables used to do fast duplicate searches in duplicate check */ +-struct file_info *dupl[65536]; ++struct file_info *dupl[65536], *frag_dups[65536]; + int dup_files = 0; + + /* list of exclude dirs/files */ +@@ -165,7 +162,7 @@ + + /* fragment block data structures */ + int fragments = 0; +-struct file_buffer *fragment_data = NULL; ++char fragment_data[SQUASHFS_FILE_SIZE]; + int fragment_size = 0; + struct fragment { + unsigned int index; +@@ -174,7 +171,6 @@ + }; + #define FRAG_SIZE 32768 + squashfs_fragment_entry *fragment_table = NULL; +-int fragments_outstanding = 0; + + + /* current inode number for directories and non directories */ +@@ -198,15 +194,13 @@ + + /* in memory file info */ + struct file_info { +- long long file_size; + long long bytes; + unsigned short checksum; +- unsigned short fragment_checksum; + long long start; + unsigned int *block_list; + struct file_info *next; + struct fragment *fragment; +- char checksum_flag; ++ unsigned short fragment_checksum; + }; + + /* count of how many times SIGINT or SIGQUIT has been sent */ +@@ -225,7 +219,6 @@ + sdir_count, sfifo_count, ssock_count, sdup_files; + int sfragments; + int restore = 0; +-int threads; + + /* flag whether destination file is a block device */ + int block_device = 0; +@@ -236,67 +229,14 @@ + /* save destination file name for deleting on error */ + char *destination_file = NULL; + +-/* data allocator status struct. 
Allocators are used to keep +- track of memory buffers passed between different threads */ +-struct allocator { +- int max_buffers; +- int count; +- int buffer_size; +- pthread_mutex_t mutex; +- pthread_cond_t wait; +-}; +- +-/* struct describing a memory buffer passed between threads */ +-struct file_buffer { +- struct allocator *allocator; +- void (*release)(int); +- int release_data; +- long long block; +- int size; +- int c_byte; +- unsigned int block_order; +- int fragment; +- int error; +- struct file_buffer *next; +- char data[0]; +-}; +- +-/* struct describing queues used to pass data between threads */ +-struct queue { +- int size; +- int readp; +- int writep; +- pthread_mutex_t mutex; +- pthread_cond_t empty; +- pthread_cond_t full; +- void **data; +-}; +- +-/* describes the list of blocks in a file which is a possible +- duplicate. For each block, it indicates whether the block is +- in memory or on disk */ +-struct buffer_list { +- long long start; +- int size; +- struct file_buffer *read_buffer; ++/* structure to used to pass in a pointer or an integer ++ * to duplicate buffer read helper functions. ++ */ ++struct duplicate_buffer_handle { ++ char *ptr; ++ long long start; + }; + +-struct allocator *reader_buffer, *writer_buffer, *fragment_buffer; +-struct queue *to_reader, *from_reader, *to_writer, *from_writer, *from_deflate, *to_frag; +-pthread_t *thread, *deflator_thread, *frag_deflator_thread; +-pthread_mutex_t fragment_mutex; +-pthread_cond_t fragment_waiting; +-pthread_mutex_t pos_mutex; +- +-/* user options that control parallelisation */ +-int processors = -1; +-/* default size of output buffer in Mbytes */ +-#define WRITER_BUFFER_DEFAULT 512 +-/* default size of input buffer in Mbytes */ +-#define READER_BUFFER_DEFAULT 64 +-int writer_buffer_size; +-int reader_buffer_size; +- + void add_old_root_entry(char *name, squashfs_inode inode, int inode_number, int type); + extern int read_super(int fd, squashfs_super_block *sBlk, int *be, char *source); + extern long long read_filesystem(char *root_name, int fd, squashfs_super_block *sBlk, char **cinode_table, +@@ -309,166 +249,18 @@ + unsigned int *inode_dir_inode_number, unsigned int *inode_dir_parent_inode, + void (push_directory_entry)(char *, squashfs_inode, int, int), + squashfs_fragment_entry **fragment_table); +-extern int read_sort_file(char *filename, int source, char *source_path[]); +-extern void sort_files_and_write(struct dir_info *dir); +-struct file_info *duplicate(long long file_size, long long bytes, unsigned int **block_list, long long *start, struct fragment **fragment, struct file_buffer *file_buffer, struct buffer_list *buffer_list, int blocks, unsigned short checksum, unsigned short fragment_checksum, int checksum_flag); ++int get_sorted_inode(squashfs_inode *inode, struct stat *buf); ++int read_sort_file(char *filename, int source, char *source_path[]); ++void sort_files_and_write(struct dir_info *dir); ++struct file_info *duplicate(char *(get_next_file_block)(struct duplicate_buffer_handle *, unsigned int), struct duplicate_buffer_handle *file_start, long long bytes, unsigned int **block_list, long long *start, int blocks, struct fragment **fragment, char *frag_data, int frag_bytes); + struct dir_info *dir_scan1(char *, int (_readdir)(char *, char *, struct dir_info *)); +-struct file_info *add_non_dup(long long file_size, long long bytes, unsigned int *block_list, long long start, struct fragment *fragment, unsigned short checksum, unsigned short fragment_checksum, int checksum_flag); +-extern void 
generate_file_priorities(struct dir_info *dir, int priority, struct stat *buf); +-extern struct priority_entry *priority_list[65536]; +- +- +-struct allocator *alloc_init(int buffer_size, int max_buffers) +-{ +- struct allocator *allocator = malloc(sizeof(struct allocator)); +- +- if(allocator == NULL) +- return NULL; +- +- allocator->max_buffers = max_buffers; +- allocator->buffer_size = buffer_size; +- allocator->count = 0; +- pthread_mutex_init(&allocator->mutex, NULL); +- pthread_cond_init(&allocator->wait, NULL); +- +- return allocator; +-} +- +- +-struct file_buffer *alloc_get(struct allocator *allocator) +-{ +- struct file_buffer *file_buffer; +- +- pthread_mutex_lock(&allocator->mutex); +- +- while(allocator->count == allocator->max_buffers) +- pthread_cond_wait(&allocator->wait, &allocator->mutex); +- +- if((file_buffer = malloc(sizeof(struct file_buffer) + allocator->buffer_size)) == NULL) +- goto failed; +- +- file_buffer->release = NULL; +- file_buffer->allocator = allocator; +- allocator->count ++; +- +-failed: +- pthread_mutex_unlock(&allocator->mutex); +- return file_buffer; +-} +- +- +-struct file_buffer *alloc_get_2(struct allocator *allocator, void (*release)(int), int release_data) +-{ +- struct file_buffer *file_buffer = alloc_get(allocator); +- +- if(file_buffer) { +- file_buffer->release = release; +- file_buffer->release_data = release_data; +- } +- return file_buffer; +-} +- +- +-void alloc_free(struct file_buffer *file_buffer) +-{ +- struct allocator *allocator; +- +- if(file_buffer == NULL) +- return; +- +- allocator = file_buffer->allocator; +- +- if(file_buffer->release) +- file_buffer->release(file_buffer->release_data); +- +- pthread_mutex_lock(&allocator->mutex); +- free(file_buffer); +- if(allocator->count == 0) +- ERROR("alloc_free: freeing buffer for empty allocator!\n"); +- else +- allocator->count --; +- pthread_cond_signal(&allocator->wait); +- pthread_mutex_unlock(&allocator->mutex); +-} +- +- +-struct queue *queue_init(int size) +-{ +- struct queue *queue = malloc(sizeof(struct queue)); +- +- if(queue == NULL) +- return NULL; +- +- if((queue->data = malloc(sizeof(void *) * (size + 1))) == NULL) { +- free(queue); +- return NULL; +- } +- +- queue->size = size + 1; +- queue->readp = queue->writep = 0; +- pthread_mutex_init(&queue->mutex, NULL); +- pthread_cond_init(&queue->empty, NULL); +- pthread_cond_init(&queue->full, NULL); +- +- return queue; +-} +- +- +-void queue_put(struct queue *queue, void *data) +-{ +- int nextp; +- +- pthread_mutex_lock(&queue->mutex); +- +- while((nextp = (queue->writep + 1) % queue->size) == queue->readp) +- pthread_cond_wait(&queue->full, &queue->mutex); +- +- queue->data[queue->writep] = data; +- queue->writep = nextp; +- pthread_cond_signal(&queue->empty); +- pthread_mutex_unlock(&queue->mutex); +-} +- +- +-void *queue_get(struct queue *queue) +-{ +- void *data; +- pthread_mutex_lock(&queue->mutex); +- +- while(queue->readp == queue->writep) +- pthread_cond_wait(&queue->empty, &queue->mutex); +- +- data = queue->data[queue->readp]; +- queue->readp = (queue->readp + 1) % queue->size; +- pthread_cond_signal(&queue->full); +- pthread_mutex_unlock(&queue->mutex); +- +- return data; +-} +- + + #define MKINODE(A) ((squashfs_inode)(((squashfs_inode) inode_bytes << 16) + (((char *)A) - data_cache))) + + +-inline void waitforthread(int i) +-{ +- TRACE("Waiting for thread %d\n", i); +- while(thread[i] != 0) +- sched_yield(); +-} +- +- + void restorefs() + { +- int i; +- + ERROR("Exiting - restoring original filesystem!\n\n"); +- 
+- for(i = 0; i < 2 + processors * 2; i++) +- pthread_kill(thread[i], SIGUSR1); +- for(i = 0; i < 2 + processors * 2; i++) +- waitforthread(i); +- TRACE("All threads in signal handler\n"); + bytes = sbytes; + memcpy(data_cache, sdata_cache, cache_bytes = scache_bytes); + memcpy(directory_data_cache, sdirectory_data_cache, directory_cache_bytes = sdirectory_cache_bytes); +@@ -495,13 +287,12 @@ + + void sighandler() + { +- if(++interrupted > 2) +- return; +- if(interrupted == 2) ++ if(interrupted == 1) + restorefs(); + else { + ERROR("Interrupting will restore original filesystem!\n"); + ERROR("Interrupt again to quit\n"); ++ interrupted ++; + } + } + +@@ -512,81 +303,22 @@ + } + + +-void sigusr1_handler() +-{ +- int i; +- sigset_t sigmask; +- pthread_t thread_id = pthread_self(); +- +- for(i = 0; i < (2 + processors * 2) && thread[i] != thread_id; i++); +- thread[i] = (pthread_t) 0; +- +- TRACE("Thread %d(%p) in sigusr1_handler\n", i, thread_id); +- +- sigemptyset(&sigmask); +- sigaddset(&sigmask, SIGINT); +- sigaddset(&sigmask, SIGQUIT); +- sigaddset(&sigmask, SIGUSR1); +- while(1) { +- sigsuspend(&sigmask); +- TRACE("After wait in sigusr1_handler :(\n"); +- } +-} +- +- +-unsigned int mangle2(z_stream **strm, char *d, char *s, int size, int block_size, int uncompressed, int data_block) ++unsigned int mangle(char *d, char *s, int size, int block_size, int uncompressed, int data_block) + { +- unsigned long c_byte; ++ unsigned long c_byte = block_size << 1; + unsigned int res; +- z_stream *stream = *strm; +- +- if(uncompressed) +- goto notcompressed; +- +- if(stream == NULL) { +- if((stream = *strm = malloc(sizeof(z_stream))) == NULL) +- BAD_ERROR("mangle::compress failed, not enough memory\n"); +- +- stream->zalloc = Z_NULL; +- stream->zfree = Z_NULL; +- stream->opaque = 0; + +- if((res = deflateInit(stream, 9)) != Z_OK) { +- if(res == Z_MEM_ERROR) +- BAD_ERROR("zlib::compress failed, not enough memory\n"); +- else if(res == Z_STREAM_ERROR) +- BAD_ERROR("zlib::compress failed, not a valid compression level\n"); +- else if(res == Z_VERSION_ERROR) +- BAD_ERROR("zlib::compress failed, incorrect zlib version\n"); +- else +- BAD_ERROR("zlib::compress failed, unknown error %d\n", res); +- } +- } else if((res = deflateReset(stream)) != Z_OK) { +- if(res == Z_STREAM_ERROR) +- BAD_ERROR("zlib::compress failed, stream state inconsistent\n"); +- else +- BAD_ERROR("zlib::compress failed, unknown error %d\n", res); +- } +- +- stream->next_in = (unsigned char *) s; +- stream->avail_in = size; +- stream->next_out = (unsigned char *) d; +- stream->avail_out = block_size; +- +- res = deflate(stream, Z_FINISH); +- if(res != Z_STREAM_END && res != Z_OK) { +- if(res == Z_STREAM_ERROR) +- BAD_ERROR("zlib::compress failed, stream state inconsistent\n"); ++ if(!uncompressed && (res = compress2((unsigned char *) d, &c_byte, (unsigned char *) s, size, 9)) != Z_OK) { ++ if(res == Z_MEM_ERROR) ++ BAD_ERROR("zlib::compress failed, not enough memory\n"); + else if(res == Z_BUF_ERROR) +- BAD_ERROR("zlib::compress failed, no progress possible\n"); ++ BAD_ERROR("zlib::compress failed, not enough room in output buffer\n"); + else + BAD_ERROR("zlib::compress failed, unknown error %d\n", res); ++ return 0; + } + +- c_byte = stream->total_out; +- +- if(res != Z_STREAM_END || c_byte >= size) { +-notcompressed: ++ if(uncompressed || c_byte >= size) { + memcpy(d, s, size); + return size | (data_block ? 
SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT); + } +@@ -595,14 +327,6 @@ + } + + +-unsigned int mangle(char *d, char *s, int size, int block_size, int uncompressed, int data_block) +-{ +- static z_stream *stream = NULL; +- +- return mangle2(&stream, d, s, size, block_size, uncompressed, data_block); +-} +- +- + squashfs_base_inode_header *get_inode(int req_size) + { + int data_space; +@@ -655,7 +379,6 @@ + { + off_t off = byte; + +- pthread_mutex_lock(&pos_mutex); + if(lseek(fd, off, SEEK_SET) == -1) { + perror("Lseek on destination failed"); + EXIT_MKSQUASHFS(); +@@ -665,7 +388,6 @@ + perror("Read on destination failed"); + EXIT_MKSQUASHFS(); + } +- pthread_mutex_unlock(&pos_mutex); + } + + +@@ -673,9 +395,6 @@ + { + off_t off = byte; + +- if(interrupted < 2) +- pthread_mutex_lock(&pos_mutex); +- + if(lseek(fd, off, SEEK_SET) == -1) { + perror("Lseek on destination failed"); + EXIT_MKSQUASHFS(); +@@ -685,9 +404,6 @@ + perror("Write on destination failed"); + EXIT_MKSQUASHFS(); + } +- +- if(interrupted < 2) +- pthread_mutex_unlock(&pos_mutex); + } + + +@@ -1173,23 +889,10 @@ + } + + +-char *get_fragment(char *buffer, struct fragment *fragment, int *cached_fragment) ++char *get_fragment(char *buffer, struct fragment *fragment) + { +- squashfs_fragment_entry *disk_fragment; +- int size; +- +- if(fragment->index == *cached_fragment || fragment->index == SQUASHFS_INVALID_FRAG) +- return buffer + fragment->offset; +- +- if(fragment_data && fragment->index == fragments) +- return fragment_data->data + fragment->offset; +- +- pthread_mutex_lock(&fragment_mutex); +- while(fragment_table[fragment->index].pending) +- pthread_cond_wait(&fragment_waiting, &fragment_mutex); +- pthread_mutex_unlock(&fragment_mutex); +- disk_fragment = &fragment_table[fragment->index]; +- size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size); ++ squashfs_fragment_entry *disk_fragment = &fragment_table[fragment->index]; ++ int size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size); + + if(SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size)) { + int res; +@@ -1209,74 +912,53 @@ + } else + read_bytes(fd, disk_fragment->start_block, size, buffer); + +- *cached_fragment = fragment->index; + return buffer + fragment->offset; + } + + +-void ensure_fragments_flushed() +-{ +- pthread_mutex_lock(&fragment_mutex); +- while(fragments_outstanding) +- pthread_cond_wait(&fragment_waiting, &fragment_mutex); +- pthread_mutex_unlock(&fragment_mutex); +-} +- +- + void write_fragment() + { ++ int compressed_size; ++ char buffer[block_size << 1]; ++ + if(fragment_size == 0) + return; + +- pthread_mutex_lock(&fragment_mutex); +- if(fragments % FRAG_SIZE == 0) { +- if((fragment_table = (squashfs_fragment_entry *) realloc(fragment_table, (fragments + FRAG_SIZE) * sizeof(squashfs_fragment_entry))) == NULL) { +- pthread_mutex_unlock(&fragment_mutex); ++ if(fragments % FRAG_SIZE == 0) ++ if((fragment_table = (squashfs_fragment_entry *) realloc(fragment_table, (fragments + FRAG_SIZE) * sizeof(squashfs_fragment_entry))) == NULL) + BAD_ERROR("Out of memory in fragment table\n"); +- } +- } +- fragment_data->size = fragment_size; +- fragment_data->block = fragments; +- fragment_table[fragments].pending = TRUE; +- fragments_outstanding ++; +- queue_put(to_frag, fragment_data); ++ fragment_table[fragments].size = mangle(buffer, fragment_data, fragment_size, block_size, noF, 1); ++ fragment_table[fragments].start_block = bytes; ++ compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(fragment_table[fragments].size); ++ write_bytes(fd, bytes, 
compressed_size, buffer); ++ bytes += compressed_size; ++ total_uncompressed += fragment_size; ++ total_compressed += compressed_size; ++ TRACE("Writing fragment %d, uncompressed size %d, compressed size %d\n",fragments, fragment_size, compressed_size); + fragments ++; + fragment_size = 0; +- pthread_mutex_unlock(&fragment_mutex); +-} +- +-void frag_release(int block) +-{ +- pthread_mutex_lock(&fragment_mutex); +- fragment_table[block].pending = FALSE; +- pthread_cond_signal(&fragment_waiting); +- pthread_mutex_unlock(&fragment_mutex); + } + + + static struct fragment empty_fragment = {SQUASHFS_INVALID_FRAG, 0, 0}; +-struct fragment *get_and_fill_fragment(struct file_buffer *file_buffer) ++struct fragment *get_and_fill_fragment(char *buff, int size) + { + struct fragment *ffrg; +- + +- if(file_buffer == NULL || file_buffer->size == 0) ++ if(size == 0) + return &empty_fragment; + +- if(fragment_size + file_buffer->size > block_size) ++ if(fragment_size + size > block_size) + write_fragment(); + + if((ffrg = (struct fragment *) malloc(sizeof(struct fragment))) == NULL) + BAD_ERROR("Out of memory in fragment block allocation!\n"); + +- if(fragment_size == 0) +- fragment_data = alloc_get(fragment_buffer); +- + ffrg->index = fragments; + ffrg->offset = fragment_size; +- ffrg->size = file_buffer->size; +- memcpy(fragment_data->data + fragment_size, file_buffer->data, file_buffer->size); +- fragment_size += file_buffer->size; ++ ffrg->size = size; ++ memcpy(fragment_data + fragment_size, buff, size); ++ fragment_size += size; + + return ffrg; + } +@@ -1333,10 +1015,19 @@ + } + + ++char *read_from_buffer(struct duplicate_buffer_handle *handle, unsigned int avail_bytes) ++{ ++ char *v = handle->ptr; ++ handle->ptr += avail_bytes; ++ return v; ++} ++ ++ + char read_from_file_buffer[SQUASHFS_FILE_MAX_SIZE]; +-char *read_from_disk(long long start, unsigned int avail_bytes) ++char *read_from_file(struct duplicate_buffer_handle *handle, unsigned int avail_bytes) + { +- read_bytes(fd, start, avail_bytes, read_from_file_buffer); ++ read_bytes(fd, handle->start, avail_bytes, read_from_file_buffer); ++ handle->start += avail_bytes; + return read_from_file_buffer; + } + +@@ -1344,205 +1035,99 @@ + /* + * Compute 16 bit BSD checksum over the data + */ +-unsigned short get_checksum(char *buff, int bytes, unsigned short chksum) +-{ +- unsigned char *b = (unsigned char *) buff; +- +- while(bytes --) { +- chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1; +- chksum += *b++; +- } +- +- return chksum; +-} +- +- +-unsigned short get_checksum_disk(long long start, long long l) ++unsigned short get_checksum(char *(get_next_file_block)(struct duplicate_buffer_handle *, unsigned int), struct duplicate_buffer_handle *handle, long long l) + { + unsigned short chksum = 0; +- unsigned int bytes; ++ unsigned int bytes = 0; ++ unsigned char *b; ++ struct duplicate_buffer_handle position = *handle; + + while(l) { + bytes = l > SQUASHFS_FILE_MAX_SIZE ? 
SQUASHFS_FILE_MAX_SIZE : l; + l -= bytes; +- chksum = get_checksum(read_from_disk(start, bytes), bytes, chksum); +- start += bytes; +- } +- +- return chksum; +-} +- +- +-unsigned short get_checksum_buffer(struct buffer_list *buffer_list, unsigned int blocks) +-{ +- unsigned short chksum = 0; +- int block; +- +- for(block = 0; block < blocks; block ++) { +- struct buffer_list *b = &buffer_list[block]; +- +- if(b->read_buffer) +- chksum = get_checksum(b->read_buffer->data, b->read_buffer->size, chksum); +- else +- chksum = get_checksum(read_from_disk(b->start, b->size), b->size, chksum); ++ b = (unsigned char *) get_next_file_block(&position, bytes); ++ while(bytes--) { ++ chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1; ++ chksum += *b++; ++ } + } + + return chksum; + } + + +-unsigned short get_checksum_mem(char *buff, int bytes) +-{ +- return get_checksum(buff, bytes, 0); +-} +- +- +-unsigned short get_checksum_mem_buffer(struct file_buffer *file_buffer) +-{ +- if(file_buffer == NULL) +- return 0; +- else +- return get_checksum(file_buffer->data, file_buffer->size, 0); +-} +- +- + int cached_frag = -1; +-char fragdata[SQUASHFS_FILE_MAX_SIZE]; +-#define DUP_HASH(a) (a & 0xffff) +-void add_file(long long start, long long file_size, long long file_bytes, unsigned int *block_listp, int blocks, unsigned int fragment, int offset, int bytes) ++void add_file(long long start, long long file_bytes, unsigned int *block_listp, int blocks, unsigned int fragment, int offset, int bytes) + { + struct fragment *frg; ++ struct file_info *dupl_ptr; + char *datap; ++ struct duplicate_buffer_handle handle; + unsigned int *block_list = block_listp; +- struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; + +- if(!duplicate_checking || file_size == 0) ++ if(!duplicate_checking) + return; + +- for(; dupl_ptr; dupl_ptr = dupl_ptr->next) { +- if(file_size != dupl_ptr->file_size) +- continue; +- if(blocks != 0 && start != dupl_ptr->start) +- continue; +- if(fragment != dupl_ptr->fragment->index) +- continue; +- if(fragment != SQUASHFS_INVALID_FRAG && (offset != dupl_ptr->fragment->offset || bytes != dupl_ptr->fragment->size)) +- continue; +- return; +- } +- + if((frg = (struct fragment *) malloc(sizeof(struct fragment))) == NULL) + BAD_ERROR("Out of memory in fragment block allocation!\n"); + + frg->index = fragment; + frg->offset = offset; + frg->size = bytes; +- +- add_non_dup(file_size, file_bytes, block_list, start, frg, 0, 0, FALSE); ++ if(fragment == cached_frag || fragment == SQUASHFS_INVALID_FRAG) ++ datap = fragment_data + offset; ++ else ++ datap = get_fragment(fragment_data, frg); ++ handle.start = start; ++ if((dupl_ptr = duplicate(read_from_file, &handle, file_bytes, &block_listp, &start, blocks, &frg, datap, bytes)) != NULL) ++ dupl_ptr->fragment = frg; ++ else ++ free(block_list); ++ cached_frag = fragment; + } + ++ + char cached_fragment[SQUASHFS_FILE_SIZE]; + int cached_frag1 = -1; + +-int pre_duplicate(long long file_size) +-{ +- struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; +- +- for(; dupl_ptr; dupl_ptr = dupl_ptr->next) +- if(dupl_ptr->file_size == file_size) +- return TRUE; +- +- return FALSE; +-} +- +- +-int pre_duplicate_frag(long long file_size, unsigned short checksum) ++struct file_info *duplicate(char *(get_next_file_block)(struct duplicate_buffer_handle *, unsigned int), struct duplicate_buffer_handle *file_start, long long bytes, unsigned int **block_list, long long *start, int blocks, struct fragment **fragment, char *frag_data, int frag_bytes) + { +- struct 
file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; +- +- for(; dupl_ptr; dupl_ptr = dupl_ptr->next) +- if(dupl_ptr->file_size == file_size) { +- if(dupl_ptr->checksum_flag == FALSE) { +- dupl_ptr->checksum = get_checksum_disk(dupl_ptr->start, dupl_ptr->bytes); +- dupl_ptr->fragment_checksum = get_checksum_mem(get_fragment(cached_fragment, dupl_ptr->fragment, &cached_frag1), file_size); +- dupl_ptr->checksum_flag = TRUE; +- } +- if(dupl_ptr->fragment_checksum == checksum) +- return TRUE; +- } +- +- return FALSE; +-} +- +- +-struct file_info *add_non_dup(long long file_size, long long bytes, unsigned int *block_list, long long start, struct fragment *fragment, unsigned short checksum, unsigned short fragment_checksum, int checksum_flag) +-{ +- struct file_info *dupl_ptr; ++ unsigned short checksum = get_checksum(get_next_file_block, file_start, bytes); ++ struct duplicate_buffer_handle handle = { frag_data, 0 }; ++ unsigned short fragment_checksum = get_checksum(read_from_buffer, &handle, frag_bytes); ++ struct file_info *dupl_ptr = bytes ? dupl[checksum] : frag_dups[fragment_checksum]; + +- if((dupl_ptr = (struct file_info *) malloc(sizeof(struct file_info))) == NULL) { +- BAD_ERROR("Out of memory in dup_files allocation!\n"); +- } +- +- dupl_ptr->file_size = file_size; +- dupl_ptr->bytes = bytes; +- dupl_ptr->block_list = block_list; +- dupl_ptr->start = start; +- dupl_ptr->fragment = fragment; +- dupl_ptr->checksum = checksum; +- dupl_ptr->fragment_checksum = fragment_checksum; +- dupl_ptr->checksum_flag = checksum_flag; +- dupl_ptr->next = dupl[DUP_HASH(file_size)]; +- dupl[DUP_HASH(file_size)] = dupl_ptr; +- dup_files ++; +- +- return dupl_ptr; +-} +- +- +-struct file_info *duplicate(long long file_size, long long bytes, unsigned int **block_list, long long *start, struct fragment **fragment, struct file_buffer *file_buffer, struct buffer_list *buffer_list, int blocks, unsigned short checksum, unsigned short fragment_checksum, int checksum_flag) +-{ +- struct file_info *dupl_ptr = dupl[DUP_HASH(file_size)]; +- int frag_bytes = file_buffer ? file_buffer->size : 0; + + for(; dupl_ptr; dupl_ptr = dupl_ptr->next) +- if(file_size == dupl_ptr->file_size && bytes == dupl_ptr->bytes && frag_bytes == dupl_ptr->fragment->size) { +- char buffer2[SQUASHFS_FILE_MAX_SIZE]; ++ if(bytes == dupl_ptr->bytes && frag_bytes == dupl_ptr->fragment->size && fragment_checksum == dupl_ptr->fragment_checksum) { ++ char buffer1[SQUASHFS_FILE_MAX_SIZE]; ++ long long dup_bytes = dupl_ptr->bytes; + long long dup_start = dupl_ptr->start; ++ struct duplicate_buffer_handle position = *file_start; + char *buffer; +- int block; ++ while(dup_bytes) { ++ int avail_bytes = dup_bytes > SQUASHFS_FILE_MAX_SIZE ? 
SQUASHFS_FILE_MAX_SIZE : dup_bytes; + +- if(checksum_flag == FALSE) { +- checksum = get_checksum_buffer(buffer_list, blocks); +- fragment_checksum = get_checksum_mem_buffer(file_buffer); +- checksum_flag = TRUE; +- } +- +- if(dupl_ptr->checksum_flag == FALSE) { +- dupl_ptr->checksum = get_checksum_disk(dupl_ptr->start, dupl_ptr->bytes); +- dupl_ptr->fragment_checksum = get_checksum_mem(get_fragment(cached_fragment, dupl_ptr->fragment, &cached_frag1), frag_bytes); +- dupl_ptr->checksum_flag = TRUE; +- } +- +- if(checksum != dupl_ptr->checksum || fragment_checksum != dupl_ptr->fragment_checksum) +- continue; +- +- for(block = 0; block < blocks; block ++) { +- struct buffer_list *b = &buffer_list[block]; +- +- if(b->read_buffer) +- buffer = b->read_buffer->data; +- else +- buffer = read_from_disk(b->start, b->size); +- +- read_bytes(fd, dup_start, b->size, buffer2); +- if(memcmp(buffer, buffer2, b->size) != 0) ++ buffer = get_next_file_block(&position, avail_bytes); ++ read_bytes(fd, dup_start, avail_bytes, buffer1); ++ if(memcmp(buffer, buffer1, avail_bytes) != 0) + break; +- dup_start += b->size; ++ dup_bytes -= avail_bytes; ++ dup_start += avail_bytes; + } +- if(block == blocks) { +- char *fragment_buffer1 = get_fragment(cached_fragment, dupl_ptr->fragment, &cached_frag1); ++ if(dup_bytes == 0) { ++ char *fragment_buffer1; ++ ++ if(dupl_ptr->fragment->index == fragments || dupl_ptr->fragment->index == SQUASHFS_INVALID_FRAG) ++ fragment_buffer1 = fragment_data + dupl_ptr->fragment->offset; ++ else if(dupl_ptr->fragment->index == cached_frag1) ++ fragment_buffer1 = cached_fragment + dupl_ptr->fragment->offset; ++ else { ++ fragment_buffer1 = get_fragment(cached_fragment, dupl_ptr->fragment); ++ cached_frag1 = dupl_ptr->fragment->index; ++ } + +- if(frag_bytes == 0 || memcmp(file_buffer->data, fragment_buffer1, frag_bytes) == 0) { ++ if(frag_bytes == 0 || memcmp(frag_data, fragment_buffer1, frag_bytes) == 0) { + TRACE("Found duplicate file, start 0x%llx, size %lld, checksum 0x%x, fragment %d, size %d, offset %d, checksum 0x%x\n", dupl_ptr->start, + dupl_ptr->bytes, dupl_ptr->checksum, dupl_ptr->fragment->index, frag_bytes, dupl_ptr->fragment->offset, fragment_checksum); + *block_list = dupl_ptr->block_list; +@@ -1554,520 +1139,135 @@ + } + + +- return add_non_dup(file_size, bytes, *block_list, *start, *fragment, checksum, fragment_checksum, checksum_flag); +-} +- +- +-void reader_read_file(struct dir_ent *dir_ent) +-{ +- struct stat *buf = &dir_ent->inode->buf; +- int count; +- int blocks = (buf->st_size + block_size - 1) >> block_log; +- int frag_block = !no_fragments && (always_use_fragments || +- (buf->st_size < block_size)) ? 
buf->st_size >> block_log : -1; +- int file; +- static int block_order = 0; +- struct file_buffer *file_buffer; +- +- if(buf->st_size == 0 || dir_ent->inode->read) +- return; +- +- if((file = open(dir_ent->pathname, O_RDONLY)) == -1) +- goto read_err; +- +- for(count = 0; count < blocks; count ++) { +- file_buffer = alloc_get(reader_buffer); +- +- if((file_buffer->size = read(file, file_buffer->data, block_size)) == -1) { +- close(file); +- goto read_err2; +- } +- file_buffer->block = count; +- file_buffer->block_order = block_order ++; +- file_buffer->error = FALSE; +- if(file_buffer->fragment = count == frag_block) +- queue_put(from_deflate, file_buffer); +- else +- queue_put(from_reader, file_buffer); +- } +- +- close(file); +- dir_ent->inode->read = TRUE; +- +- return; +- +-read_err: +- file_buffer = alloc_get(reader_buffer); +-read_err2: +- file_buffer->block_order = block_order ++; +- file_buffer->error = TRUE; +- queue_put(from_deflate, file_buffer); +-} +- +- +-void reader_scan(struct dir_info *dir) { +- int i; +- +- for(i = 0; i < dir->count; i++) { +- struct dir_ent *dir_ent = dir->list[i]; +- struct stat *buf = &dir_ent->inode->buf; +- if(dir_ent->data) +- continue; +- +- switch(buf->st_mode & S_IFMT) { +- case S_IFREG: +- reader_read_file(dir_ent); +- break; +- case S_IFDIR: +- reader_scan(dir_ent->dir); +- break; +- } +- } +-} +- +- +-void *reader(void *arg) +-{ +- int oldstate; +- +- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate); +- pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate); +- +- if(!sorted) +- reader_scan(queue_get(to_reader)); +- else { +- int i; +- struct priority_entry *entry; +- +- queue_get(to_reader); +- for(i = 65535; i >= 0; i--) +- for(entry = priority_list[i]; entry; entry = entry->next) +- reader_read_file(entry->dir); +- } +-} +- +- +-void *writer(void *arg) +-{ +- int write_error = FALSE; +- int oldstate; +- +- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate); +- pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate); +- +- while(1) { +- struct file_buffer *file_buffer = queue_get(to_writer); +- off_t off; +- +- if(file_buffer == NULL) { +- queue_put(from_writer, write_error ? 
(void *) &write_error : NULL); +- continue; +- } +- +- off = file_buffer->block; +- +- pthread_mutex_lock(&pos_mutex); +- +- if(!write_error && lseek(fd, off, SEEK_SET) == -1) { +- perror("Lseek on destination failed"); +- write_error = TRUE; +- } +- +- if(!write_error && write(fd, file_buffer->data, file_buffer->size) == -1) { +- perror("Write on destination failed"); +- write_error = TRUE; +- } +- pthread_mutex_unlock(&pos_mutex); +- +- alloc_free(file_buffer); +- } +-} +- +- +-void *deflator(void *arg) +-{ +- z_stream *stream = NULL; +- int oldstate; +- +- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate); +- pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate); +- +- while(1) { +- struct file_buffer *file_buffer = queue_get(from_reader); +- struct file_buffer *write_buffer = alloc_get(writer_buffer); +- +- write_buffer->c_byte = mangle2(&stream, write_buffer->data, file_buffer->data, file_buffer->size, block_size, noD, 1); +- write_buffer->block = file_buffer->block; +- write_buffer->block_order = file_buffer->block_order; +- write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK(write_buffer->c_byte); +- write_buffer->fragment = FALSE; +- write_buffer->error = FALSE; +- alloc_free(file_buffer); +- queue_put(from_deflate, write_buffer); +- } +-} +- +- +-void *frag_deflator(void *arg) +-{ +- z_stream *stream = NULL; +- int oldstate; +- +- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate); +- pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldstate); +- +- while(1) { +- int c_byte, compressed_size; +- struct file_buffer *file_buffer = queue_get(to_frag); +- struct file_buffer *write_buffer = alloc_get_2(writer_buffer, frag_release, file_buffer->block); +- +- c_byte = mangle2(&stream, write_buffer->data, file_buffer->data, file_buffer->size, block_size, noF, 1); +- compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte); +- pthread_mutex_lock(&fragment_mutex); +- fragment_table[file_buffer->block].size = c_byte; +- fragment_table[file_buffer->block].start_block = bytes; +- write_buffer->size = compressed_size; +- write_buffer->block = bytes; +- queue_put(to_writer, write_buffer); +- bytes += compressed_size; +- total_uncompressed += file_buffer->size; +- total_compressed += compressed_size; +- TRACE("Writing fragment %d, uncompressed size %d, compressed size %d\n", file_buffer->block, file_buffer->size, compressed_size); +- fragments_outstanding --; +- pthread_cond_signal(&fragment_waiting); +- pthread_mutex_unlock(&fragment_mutex); +- alloc_free(file_buffer); ++ if((dupl_ptr = (struct file_info *) malloc(sizeof(struct file_info))) == NULL) { ++ BAD_ERROR("Out of memory in dup_files allocation!\n"); + } +-} +- +- +-#define HASH_ENTRIES 256 +-#define BLOCK_HASH(a) (a % HASH_ENTRIES) +-struct file_buffer *block_hash[HASH_ENTRIES]; + +-void push_buffer(struct file_buffer *file_buffer) +-{ +- int hash = BLOCK_HASH(file_buffer->block_order); +- +- file_buffer->next = block_hash[hash]; +- block_hash[hash] = file_buffer; +-} +- +- +-struct file_buffer *get_file_buffer(struct queue *queue) +-{ +- static unsigned int block_order = 0; +- int hash = BLOCK_HASH(block_order); +- struct file_buffer *file_buffer = block_hash[hash], *prev = NULL; +- +- for(;file_buffer; prev = file_buffer, file_buffer = file_buffer->next) +- if(file_buffer->block_order == block_order) +- break; ++ dupl_ptr->bytes = bytes; ++ dupl_ptr->checksum = checksum; ++ dupl_ptr->start = *start; ++ dupl_ptr->fragment_checksum = fragment_checksum; ++ dupl_ptr->block_list = *block_list; + +- if(file_buffer) { 
+- if(prev) +- prev->next = file_buffer->next; +- else +- block_hash[hash] = file_buffer->next; ++ dup_files ++; ++ if(bytes) { ++ dupl_ptr->next = dupl[checksum]; ++ dupl[checksum] = dupl_ptr; + } else { +- while(1) { +- file_buffer = queue_get(queue); +- if(file_buffer->block_order == block_order) +- break; +- push_buffer(file_buffer); +- } ++ dupl_ptr->next = frag_dups[fragment_checksum]; ++ frag_dups[fragment_checksum] = dupl_ptr; + } + +- block_order ++; +- +- return file_buffer; +-} +- +- +-int write_file_empty(squashfs_inode *inode, struct dir_ent *dir_ent, int *duplicate_file) +-{ +- file_count ++; +- *duplicate_file = FALSE; +- return dir_ent->inode->nlink == 1 ? +- create_inode(inode, dir_ent, SQUASHFS_FILE_TYPE, 0, 0, 0, NULL, &empty_fragment, NULL) : +- create_inode(inode, dir_ent, SQUASHFS_LREG_TYPE, 0, 0, 0, NULL, &empty_fragment, NULL); +-} +- +- +-int write_file_frag_dup(squashfs_inode *inode, struct dir_ent *dir_ent, int size, int *duplicate_file, struct file_buffer *file_buffer, unsigned short checksum) +-{ +- int file; +- struct file_info *dupl_ptr; +- struct fragment *fragment; +- unsigned int *block_listp = NULL; +- long long start = 0; +- +- dupl_ptr = duplicate(size, 0, &block_listp, &start, &fragment, file_buffer, NULL, 0, 0, checksum, TRUE); +- +- if(dupl_ptr) { +- *duplicate_file = FALSE; +- fragment = get_and_fill_fragment(file_buffer); +- dupl_ptr->fragment = fragment; +- } else +- *duplicate_file = TRUE; +- +- alloc_free(file_buffer); +- +- total_bytes += size; +- file_count ++; +- +- return dir_ent->inode->nlink == 1 ? +- create_inode(inode, dir_ent, SQUASHFS_FILE_TYPE, size, 0, 0, NULL, fragment, NULL) : +- create_inode(inode, dir_ent, SQUASHFS_LREG_TYPE, size, 0, 0, NULL, fragment, NULL); +-} +- +- +-int write_file_frag(squashfs_inode *inode, struct dir_ent *dir_ent, int size, int *duplicate_file) +-{ +- struct fragment *fragment; +- unsigned short checksum; +- struct file_buffer *file_buffer = get_file_buffer(from_deflate); +- +- if(file_buffer->size != size) +- printf("bug\n"); +- +- if(file_buffer->error) { +- alloc_free(file_buffer); +- return FALSE; +- } +- +- checksum = get_checksum_mem_buffer(file_buffer); +- +- if(pre_duplicate_frag(size, checksum)) +- return write_file_frag_dup(inode, dir_ent, size, duplicate_file, file_buffer, checksum); +- +- fragment = get_and_fill_fragment(file_buffer); +- +- alloc_free(file_buffer); +- +- if(duplicate_checking) +- add_non_dup(size, 0, NULL, 0, fragment, 0, checksum, TRUE); +- +- total_bytes += size; +- file_count ++; +- +- *duplicate_file = FALSE; +- +- return dir_ent->inode->nlink == 1 ? 
+- create_inode(inode, dir_ent, SQUASHFS_FILE_TYPE, size, 0, 0, NULL, fragment, NULL) : +- create_inode(inode, dir_ent, SQUASHFS_LREG_TYPE, size, 0, 0, NULL, fragment, NULL); ++ return dupl_ptr; + } + + +-int write_file_blocks(squashfs_inode *inode, struct dir_ent *dir_ent, long long read_size) ++#define MINALLOCBYTES (1024 * 1024) ++int write_file(squashfs_inode *inode, struct dir_ent *dir_ent, long long size, int *duplicate_file) + { +- int block, status; ++ int block = 0, i, file, whole_file = 1, status; + unsigned int c_byte, frag_bytes; +- long long bbytes, file_bytes, start; ++ long long bbytes, file_bytes = 0, start; ++ char buff[block_size], *c_buffer = NULL, *filename = dir_ent->pathname; + struct fragment *fragment; +- struct file_info *dupl_ptr; +- int blocks = (read_size + block_size - 1) >> block_log; +- unsigned int *block_list; +- struct file_buffer *read_buffer; +- +- if(!no_fragments && always_use_fragments) { +- blocks = read_size >> block_log; +- frag_bytes = read_size % block_size; +- } else +- frag_bytes = 0; ++ struct file_info *dupl_ptr = NULL; ++ struct duplicate_buffer_handle handle; ++ long long read_size = (size > SQUASHFS_MAX_FILE_SIZE) ? SQUASHFS_MAX_FILE_SIZE : size; ++ int blocks = (read_size + block_size - 1) >> block_log, allocated_blocks = blocks; ++ unsigned int *block_list, *block_listp; + + if((block_list = malloc(blocks * sizeof(unsigned int))) == NULL) + BAD_ERROR("Out of memory allocating block_list\n"); ++ block_listp = block_list; + +- ensure_fragments_flushed(); +- +- file_bytes = 0; +- start = bytes; +- for(block = 0; block < blocks; block ++) { +- read_buffer = get_file_buffer(from_deflate); +- if(read_buffer->error) +- goto read_err; +- +- block_list[block] = read_buffer->c_byte; +- read_buffer->block = bytes; +- bytes += read_buffer->size; +- file_bytes += read_buffer->size; +- queue_put(to_writer, read_buffer); +- } +- +- if(frag_bytes != 0) { +- read_buffer = get_file_buffer(from_deflate); +- if(read_buffer->size != frag_bytes) +- printf("bug\n"); +- if(read_buffer->error) +- goto read_err; +- } else +- read_buffer = NULL; +- +- fragment = get_and_fill_fragment(read_buffer); +- alloc_free(read_buffer); +- +- if(duplicate_checking) +- add_non_dup(read_size, file_bytes, block_list, start, fragment, 0, 0, FALSE); +- file_count ++; +- total_bytes += read_size; +- if(dir_ent->inode->nlink == 1 && read_size < ((long long) (1<<30) - 1)) +- status = create_inode(inode, dir_ent, SQUASHFS_FILE_TYPE, read_size, start, blocks, block_list, fragment, NULL); +- else +- status = create_inode(inode, dir_ent, SQUASHFS_LREG_TYPE, read_size, start, blocks, block_list, fragment, NULL); +- if(duplicate_checking == FALSE) +- free(block_list); +- return status; +- +-read_err: +- perror("Error in reading file, skipping..."); +- if(block) { +- queue_put(to_writer, NULL); +- if(queue_get(from_writer) != 0) +- EXIT_MKSQUASHFS(); +- bytes = start; +- if(!block_device) +- ftruncate(fd, bytes); +- } +- free(block_list); +- alloc_free(read_buffer); +- return FALSE; +-} +- +- +-int write_file_blocks_dup(squashfs_inode *inode, struct dir_ent *dir_ent, long long read_size, int *duplicate_file) +-{ +- int block, status, thresh; +- unsigned int c_byte, frag_bytes; +- long long bbytes, file_bytes, start; +- struct fragment *fragment; +- struct file_info *dupl_ptr; +- int blocks = (read_size + block_size - 1) >> block_log; +- unsigned int *block_list, *block_listp; +- struct file_buffer *read_buffer; +- struct file_data *file_data; +- struct buffer_list *buffer_list; +- +- 
if(!no_fragments && always_use_fragments) { +- blocks = read_size >> block_log; ++ if(!no_fragments && (read_size < block_size || always_use_fragments)) { ++ allocated_blocks = blocks = read_size >> block_log; + frag_bytes = read_size % block_size; + } else + frag_bytes = 0; + +- if((block_list = malloc(blocks * sizeof(unsigned int))) == NULL) +- BAD_ERROR("Out of memory allocating block_list\n"); +- block_listp = block_list; +- +- if((buffer_list = malloc(blocks * sizeof(struct buffer_list))) == NULL) +- BAD_ERROR("Out of memory allocating file block list\n"); +- +- ensure_fragments_flushed(); ++ if(size > read_size) ++ ERROR("file %s truncated to %lld bytes\n", filename, SQUASHFS_MAX_FILE_SIZE); + +- file_bytes = 0; +- start = bytes; +- thresh = blocks > (writer_buffer_size - processors) ? blocks - (writer_buffer_size - processors): 0; +- for(block = 0; block < blocks; block ++) { +- read_buffer = get_file_buffer(from_deflate); +- if(read_buffer->error) +- goto read_err; ++ total_bytes += read_size; ++ if((file = open(filename, O_RDONLY)) == -1) ++ goto read_err; + +- block_list[block] = read_buffer->c_byte; +- read_buffer->block = bytes; +- bytes += read_buffer->size; +- file_bytes += read_buffer->size; +- +- if(block < thresh) { +- buffer_list[block].read_buffer = NULL; +- queue_put(to_writer, read_buffer); +- } else +- buffer_list[block].read_buffer = read_buffer; +- buffer_list[block].start = read_buffer->block; +- buffer_list[block].size = read_buffer->size; ++ do { ++ long long bytes = (((long long) allocated_blocks) + 1) << block_log; ++ if(bytes != ((size_t) bytes) || (c_buffer = (char *) malloc(bytes)) == NULL) { ++ TRACE("Out of memory allocating write_file buffer, allocated_blocks %ld, blocks %d\n", allocated_blocks, blocks); ++ whole_file = 0; ++ if(bytes < MINALLOCBYTES) ++ BAD_ERROR("Out of memory allocating write_file buffer, could not allocate %ld blocks (%d Kbytes)\n", allocated_blocks, allocated_blocks << (block_log - 10)); ++ allocated_blocks >>= 1; ++ } ++ } while(!c_buffer); ++ ++ for(start = bytes; block < blocks; file_bytes += bbytes) { ++ for(i = 0, bbytes = 0; (i < allocated_blocks) && (block < blocks); i++) { ++ int available_bytes = read_size - (block * block_size) > block_size ? 
block_size : read_size - (block * block_size); ++ if(read(file, buff, available_bytes) == -1) ++ goto read_err; ++ c_byte = mangle(c_buffer + bbytes, buff, available_bytes, block_size, noD, 1); ++ block_list[block ++] = c_byte; ++ bbytes += SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte); ++ } ++ if(!whole_file) { ++ write_bytes(fd, bytes, bbytes, c_buffer); ++ bytes += bbytes; ++ } + } + +- if(frag_bytes != 0) { +- read_buffer = get_file_buffer(from_deflate); +- if(read_buffer->size != frag_bytes) +- printf("bug\n"); +- if(read_buffer->error) ++ if(frag_bytes != 0) ++ if(read(file, buff, frag_bytes) == -1) + goto read_err; +- } else +- read_buffer = NULL; +- +- queue_put(to_writer, NULL); +- if(queue_get(from_writer) != 0) +- EXIT_MKSQUASHFS(); +- +- dupl_ptr = duplicate(read_size, file_bytes, &block_listp, &start, &fragment, read_buffer, buffer_list, blocks, 0, 0, FALSE); + +- if(dupl_ptr) { +- *duplicate_file = FALSE; +- for(block = thresh; block < blocks; block ++) +- queue_put(to_writer, buffer_list[block].read_buffer); +- fragment = get_and_fill_fragment(read_buffer); +- dupl_ptr->fragment = fragment; ++ close(file); ++ if(whole_file) { ++ handle.ptr = c_buffer; ++ if(duplicate_checking && (dupl_ptr = duplicate(read_from_buffer, &handle, file_bytes, &block_listp, &start, blocks, &fragment, buff, frag_bytes)) == NULL) { ++ *duplicate_file = TRUE; ++ goto wr_inode; ++ } ++ write_bytes(fd, bytes, file_bytes, c_buffer); ++ bytes += file_bytes; + } else { +- *duplicate_file = TRUE; +- for(block = thresh; block < blocks; block ++) +- alloc_free(buffer_list[block].read_buffer); +- bytes = buffer_list[0].start; +- if(thresh && !block_device) +- ftruncate(fd, bytes); ++ handle.start = start; ++ if(duplicate_checking && (dupl_ptr = duplicate(read_from_file, &handle, file_bytes, &block_listp, &start, blocks, &fragment, buff, frag_bytes)) == NULL) { ++ bytes = start; ++ if(!block_device) ++ ftruncate(fd, bytes); ++ *duplicate_file = TRUE; ++ goto wr_inode; ++ } + } + +- alloc_free(read_buffer); +- free(buffer_list); +- file_count ++; +- total_bytes += read_size; ++ fragment = get_and_fill_fragment(buff, frag_bytes); ++ if(duplicate_checking) ++ dupl_ptr->fragment = fragment; ++ ++ *duplicate_file = FALSE; + ++wr_inode: ++ free(c_buffer); ++ file_count ++; + if(dir_ent->inode->nlink == 1 && read_size < ((long long) (1<<30) - 1)) + status = create_inode(inode, dir_ent, SQUASHFS_FILE_TYPE, read_size, start, blocks, block_listp, fragment, NULL); + else + status = create_inode(inode, dir_ent, SQUASHFS_LREG_TYPE, read_size, start, blocks, block_listp, fragment, NULL); +- if(*duplicate_file == TRUE) ++ if(duplicate_checking == FALSE || *duplicate_file == TRUE) + free(block_list); + return status; + + read_err: + perror("Error in reading file, skipping..."); +- if(block && thresh) { +- queue_put(to_writer, NULL); +- if(queue_get(from_writer) != 0) +- EXIT_MKSQUASHFS(); +- bytes = start; +- if(!block_device) +- ftruncate(fd, bytes); +- } +- for(blocks = thresh; blocks < block; blocks ++) +- alloc_free(buffer_list[blocks].read_buffer); +- free(buffer_list); ++ free(c_buffer); + free(block_list); +- alloc_free(read_buffer); + return FALSE; + } + + +-int write_file(squashfs_inode *inode, struct dir_ent *dir_ent, long long size, int *duplicate_file) +-{ +- long long read_size = (size > SQUASHFS_MAX_FILE_SIZE) ? 
SQUASHFS_MAX_FILE_SIZE : size;
+-
+- if(size > read_size)
+- ERROR("file %s truncated to %lld bytes\n", dir_ent->pathname, SQUASHFS_MAX_FILE_SIZE);
+-
+- if(read_size == 0)
+- return write_file_empty(inode, dir_ent, duplicate_file);
+-
+- if(!no_fragments && (read_size < block_size))
+- return write_file_frag(inode, dir_ent, read_size, duplicate_file);
+-
+- if(pre_duplicate(read_size))
+- return write_file_blocks_dup(inode, dir_ent, read_size, duplicate_file);
+-
+- *duplicate_file = FALSE;
+- return write_file_blocks(inode, dir_ent, read_size);
+-}
+-
+-
+ char b_buffer[8192];
+ char *name;
+ char *basename_r();
+@@ -2134,7 +1334,6 @@
+ BAD_ERROR("Out of memory in inode hash table entry allocation\n");
+
+ memcpy(&inode->buf, buf, sizeof(struct stat));
+- inode->read = FALSE;
+ inode->inode = SQUASHFS_INVALID_BLK;
+ inode->nlink = 1;
+ if((buf->st_mode & S_IFMT) == S_IFDIR)
+@@ -2357,9 +1556,6 @@
+ return;
+ }
+ if(sorted)
+- generate_file_priorities(dir_info, 0, &dir_info->dir_ent->inode->buf);
+- queue_put(to_reader, dir_info);
+- if(sorted)
+ sort_files_and_write(dir_info);
+ dir_scan2(inode, dir_info);
+ }
+@@ -2495,7 +1691,7 @@
+ case SQUASHFS_CHRDEV_TYPE:
+ INFO("character device %s inode 0x%llx LINK\n", dir_name, *inode);
+ break;
+- case SQUASHFS_BLKDEV_TYPE:
++ case SQUASHFS_BLKDEV_TYPE:
+ INFO("block device %s inode 0x%llx LINK\n", dir_name, *inode);
+ break;
+ case SQUASHFS_FIFO_TYPE:
+@@ -2595,76 +1791,8 @@
+ }
+
+
+-void initialise_threads()
+-{
+- int i;
+- sigset_t sigmask, old_mask;
+-
+- sigemptyset(&sigmask);
+- sigaddset(&sigmask, SIGINT);
+- sigaddset(&sigmask, SIGQUIT);
+- if(sigprocmask(SIG_BLOCK, &sigmask, &old_mask) == -1)
+- BAD_ERROR("Failed to set signal mask in intialise_threads\n");
+-
+- signal(SIGUSR1, sigusr1_handler);
+-
+- if(processors == -1) {
+-#ifndef linux
+- int mib[2];
+- size_t len = sizeof(processors);
+-
+- mib[0] = CTL_HW;
+-#ifdef HW_AVAILCPU
+- mib[1] = HW_AVAILCPU;
+-#else
+- mib[1] = HW_NCPU;
+-#endif
+-
+- if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
+- ERROR("Failed to get number of available processors. Defaulting to 1\n");
+- processors = 1;
+- }
+-#else
+- processors = get_nprocs();
+-#endif
+- }
+-
+- if((thread = malloc((2 + processors * 2) * sizeof(pthread_t))) == NULL)
+- BAD_ERROR("Out of memory allocating thread descriptors\n");
+- deflator_thread = &thread[2];
+- frag_deflator_thread = &deflator_thread[processors];
+-
+- to_reader = queue_init(1);
+- from_reader = queue_init(reader_buffer_size);
+- to_writer = queue_init(writer_buffer_size);
+- from_writer = queue_init(1);
+- from_deflate = queue_init(reader_buffer_size);
+- to_frag = queue_init(processors * 2);
+- reader_buffer = alloc_init(SQUASHFS_FILE_MAX_SIZE, reader_buffer_size);
+- writer_buffer = alloc_init(SQUASHFS_FILE_MAX_SIZE, writer_buffer_size);
+- fragment_buffer = alloc_init(SQUASHFS_FILE_MAX_SIZE, processors * 2);
+- pthread_create(&thread[0], NULL, reader, NULL);
+- pthread_create(&thread[1], NULL, writer, NULL);
+- pthread_mutex_init(&fragment_mutex, NULL);
+- pthread_cond_init(&fragment_waiting, NULL);
+-
+- for(i = 0; i < processors; i++) {
+- if(pthread_create(&deflator_thread[i], NULL, deflator, NULL) != 0 )
+- BAD_ERROR("Failed to create thread\n");
+- if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator, NULL) != 0)
+- BAD_ERROR("Failed to create thread\n");
+- }
+-
+- printf("Parallel mksquashfs: Using %d processor%s\n", processors,
+- processors == 1 ? 
"" : "s"); +- +- if(sigprocmask(SIG_SETMASK, &old_mask, NULL) == -1) +- BAD_ERROR("Failed to set signal mask in intialise_threads\n"); +-} +- +- + #define VERSION() \ +- printf("mksquashfs version 3.1-r2 (2006/08/30)\n");\ ++ printf("mksquashfs version 3.0 (2006/03/15)\n");\ + printf("copyright (C) 2006 Phillip Lougher <phillip@lougher.org.uk>\n\n"); \ + printf("This program is free software; you can redistribute it and/or\n");\ + printf("modify it under the terms of the GNU General Public License\n");\ +@@ -2682,7 +1810,6 @@ + char *b, *root_name = NULL; + int be, nopad = FALSE, keep_as_directory = FALSE, orig_be; + squashfs_inode inode; +- int readb_mbytes = READER_BUFFER_DEFAULT, writeb_mbytes = WRITER_BUFFER_DEFAULT; + + #if __BYTE_ORDER == __BIG_ENDIAN + be = TRUE; +@@ -2701,34 +1828,7 @@ + source_path = argv + 1; + source = i - 2; + for(; i < argc; i++) { +- if(strcmp(argv[i], "-processors") == 0) { +- if((++i == argc) || (processors = strtol(argv[i], &b, 10), *b != '\0')) { +- ERROR("%s: -processors missing or invalid processor number\n", argv[0]); +- exit(1); +- } +- if(processors < 1) { +- ERROR("%s: -processors should be 1 or larger\n", argv[0]); +- exit(1); +- } +- } else if(strcmp(argv[i], "-read_queue") == 0) { +- if((++i == argc) || (readb_mbytes = strtol(argv[i], &b, 10), *b != '\0')) { +- ERROR("%s: -read_queue missing or invalid queue size\n", argv[0]); +- exit(1); +- } +- if(readb_mbytes < 1) { +- ERROR("%s: -read_queue should be 1 megabyte or larger\n", argv[0]); +- exit(1); +- } +- } else if(strcmp(argv[i], "-write_queue") == 0) { +- if((++i == argc) || (writeb_mbytes = strtol(argv[i], &b, 10), *b != '\0')) { +- ERROR("%s: -write_queue missing or invalid queue size\n", argv[0]); +- exit(1); +- } +- if(writeb_mbytes < 1) { +- ERROR("%s: -write_queue should be 1 megabyte or larger\n", argv[0]); +- exit(1); +- } +- } else if(strcmp(argv[i], "-b") == 0) { ++ if(strcmp(argv[i], "-b") == 0) { + if((++i == argc) || (block_size = strtol(argv[i], &b, 10), *b !='\0')) { + ERROR("%s: -b missing or invalid block size\n", argv[0]); + exit(1); +@@ -2851,9 +1951,6 @@ + ERROR("-version\t\tprint version, licence and copyright message\n"); + ERROR("-info\t\t\tprint files written to filesystem\n"); + ERROR("-b <block_size>\t\tset data block to <block_size>. Default %d bytes\n", SQUASHFS_FILE_SIZE); +- ERROR("-processors <number>\tUse <number> processors. By default will use number of\n\t\t\tprocessors available\n"); +- ERROR("-read-queue <size>\tSet input queue to <size> Mbytes. Default %d Mbytes\n", READER_BUFFER_DEFAULT); +- ERROR("-write-queue <size>\tSet output queue to <size> Mbytes. Default %d Mbytes\n", WRITER_BUFFER_DEFAULT); + ERROR("-noI\t\t\tdo not compress inode table\n"); + ERROR("-noD\t\t\tdo not compress data blocks\n"); + ERROR("-noF\t\t\tdo not compress fragment blocks\n"); +@@ -2887,9 +1984,6 @@ + } + } + +- reader_buffer_size = readb_mbytes << (20 - block_log); +- writer_buffer_size = writeb_mbytes << (20 - block_log); +- + for(i = 0; i < source; i++) + if(stat(source_path[i], &source_buf) == -1) { + fprintf(stderr, "Cannot stat source directory \"%s\" because %s\n", source_path[i], strerror(errno)); +@@ -2975,8 +2069,6 @@ + else if(strcmp(argv[i], "-b") == 0 || strcmp(argv[i], "-root-becomes") == 0 || strcmp(argv[i], "-ef") == 0) + i++; + +- initialise_threads(); +- + if(delete) { + printf("Creating %s %d.%d filesystem on %s, block size %d.\n", + be ? 
"big endian" : "little endian", SQUASHFS_MAJOR, SQUASHFS_MINOR, argv[source + 1], block_size); +@@ -3104,13 +2196,6 @@ + restore_filesystem: + write_fragment(); + sBlk.fragments = fragments; +- if(interrupted < 2) { +- ensure_fragments_flushed(); +- queue_put(to_writer, NULL); +- if(queue_get(from_writer) != 0) +- EXIT_MKSQUASHFS(); +- } +- + sBlk.inode_table_start = write_inodes(); + sBlk.directory_table_start = write_directories(); + sBlk.fragment_table_start = write_fragment_table(); +diff -Nur squashfs-tools/read_fs.c squashfs-lzma-tools/read_fs.c +--- squashfs-tools/read_fs.c 2006-08-21 01:40:35.000000000 +0200 ++++ squashfs-lzma-tools/read_fs.c 2006-08-22 09:39:31.000000000 +0200 +@@ -22,7 +22,7 @@ + */ + + extern void read_bytes(int, long long, int, char *); +-extern int add_file(long long, long long, long long, unsigned int *, int, unsigned int, int, int); ++extern int add_file(long long, long long, unsigned int *, int, unsigned int, int, int); + + #define TRUE 1 + #define FALSE 0 +@@ -186,8 +186,7 @@ + + sBlk->block_size - 1) >> sBlk->block_log : inode.file_size >> + sBlk->block_log; + long long file_bytes = 0; +- int i; +- long long start = inode.start_block; ++ int i, start = inode.start_block; + unsigned int *block_list; + + TRACE("scan_inode_table: regular file, file_size %lld, blocks %d\n", inode.file_size, blocks); +@@ -211,7 +210,7 @@ + for(i = 0; i < blocks; i++) + file_bytes += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]); + +- add_file(start, inode.file_size, file_bytes, block_list, blocks, inode.fragment, inode.offset, frag_bytes); ++ add_file(start, file_bytes, block_list, blocks, inode.fragment, inode.offset, frag_bytes); + cur_ptr += blocks * sizeof(unsigned int); + break; + } +@@ -220,8 +219,7 @@ + int frag_bytes; + int blocks; + long long file_bytes = 0; +- int i; +- long long start; ++ int i, start; + unsigned int *block_list; + + if(swap) { +@@ -258,7 +256,7 @@ + for(i = 0; i < blocks; i++) + file_bytes += SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]); + +- add_file(start, inode.file_size, file_bytes, block_list, blocks, inode.fragment, inode.offset, frag_bytes); ++ add_file(start, file_bytes, block_list, blocks, inode.fragment, inode.offset, frag_bytes); + cur_ptr += blocks * sizeof(unsigned int); + break; + } +diff -Nur squashfs-tools/README squashfs-lzma-tools/README +--- squashfs-tools/README 1970-01-01 01:00:00.000000000 +0100 ++++ squashfs-lzma-tools/README 2006-08-22 09:36:18.000000000 +0200 +@@ -0,0 +1,2 @@ ++This is mksquashfs patched with LZMA support ++and with the patch for 'fragment_table rounding bug' +\ No newline at end of file +diff -Nur squashfs-tools/sort.c squashfs-lzma-tools/sort.c +--- squashfs-tools/sort.c 2006-08-21 01:40:35.000000000 +0200 ++++ squashfs-lzma-tools/sort.c 2006-06-25 05:17:43.000000000 +0200 +@@ -71,6 +71,11 @@ + + struct sort_info *sort_info_list[65536]; + ++struct priority_entry { ++ struct dir_ent *dir; ++ struct priority_entry *next; ++}; ++ + struct priority_entry *priority_list[65536]; + + extern int silent; +@@ -230,6 +235,8 @@ + squashfs_inode inode; + int duplicate_file; + ++ generate_file_priorities(dir, 0, &dir->dir_ent->inode->buf); ++ + for(i = 65535; i >= 0; i--) + for(entry = priority_list[i]; entry; entry = entry->next) { + TRACE("%d: %s\n", i - 32768, entry->dir->pathname); +diff -Nur squashfs-tools/sort.h squashfs-lzma-tools/sort.h +--- squashfs-tools/sort.h 2006-08-21 01:40:35.000000000 +0200 ++++ squashfs-lzma-tools/sort.h 2006-06-25 05:17:43.000000000 +0200 +@@ -51,12 +51,6 @@ + squashfs_inode inode; + 
unsigned int type;
+ unsigned int inode_number;
+- char read;
+ struct inode_info *next;
+ };
+-
+-struct priority_entry {
+- struct dir_ent *dir;
+- struct priority_entry *next;
+-};
+ #endif
+diff -Nur squashfs-tools/squashfs_fs.h squashfs-lzma-tools/squashfs_fs.h
+--- squashfs-tools/squashfs_fs.h 2006-08-21 02:00:22.000000000 +0200
++++ squashfs-lzma-tools/squashfs_fs.h 2006-06-25 05:17:43.000000000 +0200
+@@ -308,7 +308,7 @@
+ struct squashfs_fragment_entry {
+ long long start_block;
+ unsigned int size;
+- unsigned int pending;
++ unsigned int unused;
+ } __attribute__ ((packed));
+
+ extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);