diff --git a/filesystem/squashfs/lru.go b/filesystem/squashfs/lru.go new file mode 100644 index 00000000..9185d44c --- /dev/null +++ b/filesystem/squashfs/lru.go @@ -0,0 +1,138 @@ +package squashfs + +import ( + "sync" +) + +// A simple least recently used cache +type lru struct { + mu sync.Mutex + cache map[int64]*lruBlock // cache keyed on block position in file + maxBlocks int // max number of blocks in cache + root lruBlock // root block in LRU circular list +} + +// A data block to store in the lru cache +type lruBlock struct { + mu sync.Mutex // lock while fetching + data []byte // data block - nil while being fetched + prev *lruBlock // prev block in LRU list + next *lruBlock // next block in LRU list + pos int64 // position it was read off disk + size uint16 // compressed size on disk +} + +// Create a new LRU cache holding a maximum of maxBlocks blocks +func newLRU(maxBlocks int) *lru { + l := &lru{ + cache: make(map[int64]*lruBlock), + maxBlocks: maxBlocks, + root: lruBlock{ + pos: -1, + }, + } + l.root.prev = &l.root // circularly link the root node + l.root.next = &l.root + return l +} + +// Unlink the block from the list +func (l *lru) unlink(block *lruBlock) { + block.prev.next = block.next + block.next.prev = block.prev + block.prev = nil + block.next = nil +} + +// Pop a block from the end of the list +func (l *lru) pop() *lruBlock { + block := l.root.prev + if block == &l.root { + panic("internal error: list empty") + } + l.unlink(block) + return block +} + +// Add a block to the start of the list +func (l *lru) push(block *lruBlock) { + oldHead := l.root.next + l.root.next = block + block.prev = &l.root + block.next = oldHead + oldHead.prev = block +} + +// ensure there are no more than maxBlocks blocks in the cache +func (l *lru) trim(maxBlocks int) { + for len(l.cache) > maxBlocks && len(l.cache) > 0 { + // Remove a block from the cache + block := l.pop() + delete(l.cache, block.pos) + } +} + +// add block to the cache, pruning the cache as 
appropriate +func (l *lru) add(block *lruBlock) { + l.trim(l.maxBlocks - 1) + l.cache[block.pos] = block + l.push(block) +} + +// Fetch data returning size used from input and error +// +// The returned data is retained in the cache, so it must not be modified by the caller +type fetchFn func() (data []byte, size uint16, err error) + +// Get the block at pos from the cache. +// +// If it isn't found in the cache then fetch() is called to get it. +// +// This does read through caching and takes care not to block parallel +// calls to the fetch() function. +func (l *lru) get(pos int64, fetch fetchFn) (data []byte, size uint16, err error) { + if l == nil { + return fetch() + } + l.mu.Lock() + block, found := l.cache[pos] + if !found { + // Add an empty block with data == nil + block = &lruBlock{ + pos: pos, + } + // Add it to the cache and the tail of the list + l.add(block) + } else { + // Remove the block from the list + l.unlink(block) + // Add it back to the start + l.push(block) + } + block.mu.Lock() // transfer the lock to the block + l.mu.Unlock() + defer block.mu.Unlock() + + if block.data != nil { + return block.data, block.size, nil + } + + // Fetch the block + data, size, err = fetch() + if err != nil { + return nil, 0, err + } + block.data = data + block.size = size + return data, size, nil +} + +// Sets the number of blocks to be used in the cache +// +// It makes sure that there are no more than maxBlocks in the cache. +func (l *lru) setMaxBlocks(maxBlocks int) { + l.mu.Lock() + defer l.mu.Unlock() + l.maxBlocks = maxBlocks + l.trim(l.maxBlocks) +} diff --git a/filesystem/squashfs/lru_test.go b/filesystem/squashfs/lru_test.go new file mode 100644 index 00000000..1b62e021 --- /dev/null +++ b/filesystem/squashfs/lru_test.go @@ -0,0 +1,236 @@ +package squashfs + +import ( + "errors" + "strings" + "testing" +) + +//nolint:gocyclo // we really do not care about the cyclomatic complexity of a test function. Maybe someday we will improve it. 
+func TestLRU(t *testing.T) { + const maxBlocks = 10 + l := newLRU(maxBlocks) + + assertEmpty := func(want bool) { + t.Helper() + got := l.root.prev == &l.root && l.root.next == &l.root + if want != got { + t.Errorf("Wanted empty %v but got %v", want, got) + } + } + + assertClear := func(block *lruBlock, want bool) { + t.Helper() + got := block.next == nil && block.prev == nil + if want != got { + t.Errorf("Wanted block clear %v but block clear %v", want, got) + } + } + + assertNoError := func(err error) { + t.Helper() + if err != nil { + t.Errorf("Expected no error but got: %v", err) + } + } + + assertCacheBlocks := func(want int) { + if len(l.cache) != want { + t.Errorf("Expected len(l.cache)=%d but got %d", want, len(l.cache)) + } + } + + t.Run("Simple", func(t *testing.T) { + assertEmpty(true) + block := &lruBlock{ + pos: 1, + } + assertClear(block, true) + l.push(block) + assertClear(block, false) + assertEmpty(false) + block2 := l.pop() + if block.pos != block2.pos { + t.Errorf("Wanted block %d but got %d", block.pos, block2.pos) + } + assertClear(block, true) + assertClear(block2, true) + assertEmpty(true) + }) + + t.Run("Unlink", func(t *testing.T) { + assertEmpty(true) + block := &lruBlock{ + pos: 1, + } + assertClear(block, true) + l.push(block) + assertClear(block, false) + assertEmpty(false) + l.unlink(block) + assertEmpty(true) + assertClear(block, true) + }) + + // Check that we push blocks on and off in FIFO order + t.Run("FIFO", func(t *testing.T) { + assertEmpty(true) + for i := int64(1); i <= 10; i++ { + block := &lruBlock{ + pos: i, + } + l.push(block) + } + assertEmpty(false) + for i := int64(1); i <= 10; i++ { + block := l.pop() + if block.pos != i { + t.Errorf("Wanted block %d but got %d", i, block.pos) + } + } + assertEmpty(true) + }) + + t.Run("Empty", func(t *testing.T) { + defer func() { + r, ok := recover().(string) + if !ok || !strings.Contains(r, "list empty") { + t.Errorf("Panic string doesn't contain list empty: %q", r) + } + }() + 
assertEmpty(true) + l.pop() + t.Errorf("Expected exception to be thrown") + }) + + t.Run("Add", func(t *testing.T) { + assertEmpty(true) + for i := 1; i <= 2*maxBlocks; i++ { + block := &lruBlock{ + pos: int64(i), + } + l.add(block) + wantItems := i + if i >= maxBlocks { + wantItems = maxBlocks + } + gotItems := len(l.cache) + if wantItems != gotItems { + t.Errorf("Expected %d items but got %d", wantItems, gotItems) + } + } + assertEmpty(false) + // Check the blocks are correct in the cache + for i := maxBlocks + 1; i <= 2*maxBlocks; i++ { + block, found := l.cache[int64(i)] + if !found { + t.Errorf("Didn't find block at %d", i) + } else if block.pos != int64(i) { + t.Errorf("Expected block.pos=%d but got %d", i, block.pos) + } + } + // Check the blocks are correct in the list + block := l.root.prev + for i := maxBlocks + 1; i <= 2*maxBlocks; i++ { + if block.pos != int64(i) { + t.Errorf("Expected block.pos=%d but got %d", i, block.pos) + } + block = block.prev + } + + t.Run("Trim", func(t *testing.T) { + assertCacheBlocks(maxBlocks) + l.trim(maxBlocks - 1) + assertCacheBlocks(maxBlocks - 1) + l.trim(maxBlocks - 1) + assertCacheBlocks(maxBlocks - 1) + + t.Run("SetMaxBlocks", func(t *testing.T) { + assertCacheBlocks(maxBlocks - 1) + l.setMaxBlocks(maxBlocks - 2) + assertCacheBlocks(maxBlocks - 2) + if l.maxBlocks != maxBlocks-2 { + t.Errorf("Expected maxBlocks %d but got %d", maxBlocks-2, l.maxBlocks) + } + l.setMaxBlocks(maxBlocks) + assertCacheBlocks(maxBlocks - 2) + if l.maxBlocks != maxBlocks { + t.Errorf("Expected maxBlocks %d but got %d", maxBlocks, l.maxBlocks) + } + }) + }) + }) + + // Check blocks are as expected in the cache and LRU list + checkCache := func(expectedPos ...int64) { + t.Helper() + // Check the blocks are correct in the cache + for _, pos := range expectedPos { + block, found := l.cache[pos] + if !found { + t.Errorf("Didn't find block at %d", pos) + } else if block.pos != pos { + t.Errorf("Expected block.pos=%d but got %d", pos, block.pos) + 
} + } + // Check the blocks are correct in the list + block := l.root.next + for _, pos := range expectedPos { + if block.pos != pos { + t.Errorf("Expected block.pos=%d but got %d", pos, block.pos) + } + block = block.next + } + } + + l = newLRU(10) + t.Run("Get", func(t *testing.T) { + // Fill the cache + for i := 1; i <= 2*maxBlocks; i++ { + pos := int64(i) + _, _, err := l.get(pos, func() (data []byte, size uint16, err error) { + buf := []byte{byte(pos)} + return buf, uint16(i), nil + }) + assertNoError(err) + } + checkCache(20, 19, 18, 17, 16, 15, 14, 13, 12, 11) + + // Test cache HIT + data, size, err := l.get(int64(14), func() (data []byte, size uint16, err error) { + return nil, 0, errors.New("cached block not found") + }) + assertNoError(err) + if data[0] != 14 { + t.Errorf("Expected magic %d but got %d", 14, data[0]) + } + if size != 14 { + t.Errorf("Expected size %d but got %d", 14, size) + } + checkCache(14, 20, 19, 18, 17, 16, 15, 13, 12, 11) + + // Test cache MISS + data, size, err = l.get(int64(1), func() (data []byte, size uint16, err error) { + buf := []byte{1} + return buf, uint16(1), nil + }) + assertNoError(err) + if data[0] != byte(1) { + t.Errorf("Expected magic %d but got %d", byte(1), data[0]) + } + if size != uint16(1) { + t.Errorf("Expected size %d but got %d", 1, size) + } + checkCache(1, 14, 20, 19, 18, 17, 16, 15, 13, 12) + + // Test cache fetch ERROR + testErr := errors.New("test error") + _, _, err = l.get(int64(2), func() (data []byte, size uint16, err error) { + return nil, 0, testErr + }) + if err != testErr { + t.Errorf("Want error %q but got %q", testErr, err) + } + checkCache(2, 1, 14, 20, 19, 18, 17, 16, 15, 13) + }) +} diff --git a/filesystem/squashfs/metadatablock.go b/filesystem/squashfs/metadatablock.go index 6711f4ef..8ef65098 100644 --- a/filesystem/squashfs/metadatablock.go +++ b/filesystem/squashfs/metadatablock.go @@ -26,6 +26,7 @@ func getMetadataSize(b []byte) (size uint16, compressed bool, err error) { return size, 
compressed, nil } +// FIXME this isn't used anywhere except in the test code func parseMetadata(b []byte, c Compressor) (block *metadatablock, err error) { if len(b) < minMetadataBlockSize { return nil, fmt.Errorf("metadata block was of len %d, less than minimum %d", len(b), minMetadataBlockSize) @@ -71,33 +72,35 @@ func (m *metadatablock) toBytes(c Compressor) ([]byte, error) { return b, nil } -func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) { - // read bytes off the reader to determine how big it is and if compressed - b := make([]byte, 2) - _, _ = r.ReadAt(b, location) - size, compressed, err := getMetadataSize(b) - if err != nil { - return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err) - } - b = make([]byte, size) - read, err := r.ReadAt(b, location+2) - if err != nil && err != io.EOF { - return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err) - } - if read != len(b) { - return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location) - } - data = b - if compressed { - if c == nil { - return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location) - } - data, err = c.decompress(b) +func (fs *FileSystem) readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) { + return fs.cache.get(location, func() (data []byte, size uint16, err error) { + // read bytes off the reader to determine how big it is and if compressed + b := make([]byte, 2) + _, _ = r.ReadAt(b, location) + size, compressed, err := getMetadataSize(b) if err != nil { - return nil, 0, fmt.Errorf("decompress error: %v", err) + return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err) } - } - return data, size + 2, nil + b = make([]byte, size) + read, err := 
r.ReadAt(b, location+2) + if err != nil && err != io.EOF { + return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err) + } + if read != len(b) { + return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location) + } + data = b + if compressed { + if c == nil { + return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location) + } + data, err = c.decompress(b) + if err != nil { + return nil, 0, fmt.Errorf("decompress error: %v", err) + } + } + return data, size + 2, nil + }) } // readMetadata read as many bytes of metadata as required for the given size, with the byteOffset provided as a starting @@ -105,13 +108,13 @@ func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, si // requests to read 500 bytes beginning at offset 8000 into the first block. // it always returns to the end of the block, even if that is greater than the given size. This makes it easy to use more // data than expected on first read. The consumer is expected to cut it down, if needed -func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) { +func (fs *FileSystem) readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) { var ( b []byte blockOffset = int(initialBlockOffset) ) // we know how many blocks, so read them all in - m, read, err := readMetaBlock(r, c, firstBlock+int64(blockOffset)) + m, read, err := fs.readMetaBlock(r, c, firstBlock+int64(blockOffset)) if err != nil { return nil, err } @@ -119,7 +122,7 @@ func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOff // do we have any more to read? 
for len(b) < size { blockOffset += int(read) - m, read, err = readMetaBlock(r, c, firstBlock+int64(blockOffset)) + m, read, err = fs.readMetaBlock(r, c, firstBlock+int64(blockOffset)) if err != nil { return nil, err } diff --git a/filesystem/squashfs/metadatablock_internal_test.go b/filesystem/squashfs/metadatablock_internal_test.go index 3bf6882b..72bc67b8 100644 --- a/filesystem/squashfs/metadatablock_internal_test.go +++ b/filesystem/squashfs/metadatablock_internal_test.go @@ -122,8 +122,9 @@ func TestReadMetaBlock(t *testing.T) { {[]byte{0x5, 0x00, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 0, &testCompressorAddBytes{err: fmt.Errorf("unknown")}, 0x0, fmt.Errorf("decompress error: unknown"), nil}, } + var fs = &FileSystem{} for i, tt := range tests { - b, size, err := readMetaBlock(bytes.NewReader(tt.b), tt.c, tt.location) + b, size, err := fs.readMetaBlock(bytes.NewReader(tt.b), tt.c, tt.location) switch { case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): t.Errorf("%d: mismatched error, actual then expected", i) diff --git a/filesystem/squashfs/squashfs.go b/filesystem/squashfs/squashfs.go index 11753dad..bdd8d5a7 100644 --- a/filesystem/squashfs/squashfs.go +++ b/filesystem/squashfs/squashfs.go @@ -17,6 +17,7 @@ const ( metadataBlockSize = 8 * KB minBlocksize = 4 * KB maxBlocksize = 1 * MB + defaultCacheSize = 128 * MB ) // FileSystem implements the FileSystem interface @@ -32,6 +33,7 @@ type FileSystem struct { uidsGids []uint32 xattrs *xAttrTable rootDir inode + cache *lru } // Equal compare if two filesystems are equal @@ -111,7 +113,38 @@ func Create(f util.File, size, start, blocksize int64) (*FileSystem, error) { // which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) // where a partition starts and ends. 
// -// If the provided blocksize is 0, it will use the default of 2K bytes +// If the provided blocksize is 0, it will use the default of 2K bytes. +// +// This will use a cache for the decompressed blocks of 128 MB by +// default. (You can set this with the SetCacheSize method and read +// its size with the GetCacheSize method). A block cache is essential +// for performance when reading. This implements a cache for the +// fragments (tail ends of files) and the metadata (directory +// listings) which otherwise would be read, decompressed and discarded +// many times. +// +// Unpacking a 3 GB squashfs made from the tensorflow docker image like this: +// +// docker export $(docker create tensorflow/tensorflow:latest-gpu-jupyter) -o tensorflow.tar.gz +// mkdir -p tensorflow && tar xf tensorflow.tar.gz -C tensorflow +// [ -f tensorflow.sqfs ] && rm tensorflow.sqfs +// mksquashfs tensorflow tensorflow.sqfs -comp zstd -Xcompression-level 3 -b 1M -no-xattrs -all-root +// +// Gives these timings with and without cache: +// +// - no caching: 206s +// - 256 MB cache: 16.7s +// - 128 MB cache: 17.5s (the default) +// - 64 MB cache: 23.4s +// - 32 MB cache: 54.0s +// +// The cached versions compare favourably to the C program unsquashfs +// which takes 12.0s to unpack the same archive. 
+// +// These tests were done using rclone and the archive backend which +// uses this library like this: +// +// rclone -P --transfers 16 --checkers 16 copy :archive:/path/to/tensorflow.sqfs /tmp/tensorflow func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { var ( read int @@ -185,6 +218,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { compressor: compress, fragments: fragments, uidsGids: uidsgids, + cache: newLRU(int(defaultCacheSize) / int(s.blocksize)), } // for efficiency, read in the root inode right now rootInode, err := fs.getInode(s.rootInode.block, s.rootInode.offset, inodeBasicDirectory) @@ -200,6 +234,30 @@ func (fs *FileSystem) Type() filesystem.Type { return filesystem.TypeSquashfs } +// SetCacheSize set the maximum memory used by the block cache to cacheSize bytes. +// +// The default is 128 MB. +// +// If this is <= 0 then the cache will be disabled. +func (fs *FileSystem) SetCacheSize(cacheSize int) { + if fs.cache == nil { + return + } + blocks := cacheSize / int(fs.blocksize) + if blocks <= 0 { + blocks = 0 + } + fs.cache.setMaxBlocks(blocks) +} + +// GetCacheSize get the maximum memory used by the block cache in bytes. +func (fs *FileSystem) GetCacheSize() int { + if fs.cache == nil { + return 0 + } + return fs.cache.maxBlocks * int(fs.blocksize) +} + // Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that: // // * It will make the entire tree path if it does not exist @@ -442,7 +500,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // get the block // start by getting the minimum for the proposed type. It very well might be wrong. 
size := inodeTypeToSize(iType) - uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -456,7 +514,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod size = inodeTypeToSize(iType) // Read more data if necessary (quite rare) if size > len(uncompressed) { - uncompressed, err = readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -470,7 +528,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // if it returns extra > 0, then it needs that many more bytes to be read, and to be reparsed if extra > 0 { size += extra - uncompressed, err = readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -490,7 +548,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // block when uncompressed. 
func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size int) (*directory, error) { // get the block - uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size) + uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -533,25 +591,32 @@ func (fs *FileSystem) readFragment(index, offset uint32, fragmentSize int64) ([] return nil, fmt.Errorf("cannot find fragment block with index %d", index) } fragmentInfo := fs.fragments[index] - // figure out the size of the compressed block and if it is compressed - b := make([]byte, fragmentInfo.size) - read, err := fs.file.ReadAt(b, int64(fragmentInfo.start)) - if err != nil && err != io.EOF { - return nil, fmt.Errorf("unable to read fragment block %d: %v", index, err) - } - if read != len(b) { - return nil, fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index) - } - - data := b - if fragmentInfo.compressed { - if fs.compressor == nil { - return nil, fmt.Errorf("fragment compressed but do not have valid compressor") + pos := int64(fragmentInfo.start) + data, _, err := fs.cache.get(pos, func() (data []byte, size uint16, err error) { + // figure out the size of the compressed block and if it is compressed + b := make([]byte, fragmentInfo.size) + read, err := fs.file.ReadAt(b, pos) + if err != nil && err != io.EOF { + return nil, 0, fmt.Errorf("unable to read fragment block %d: %v", index, err) } - data, err = fs.compressor.decompress(b) - if err != nil { - return nil, fmt.Errorf("decompress error: %v", err) + if read != len(b) { + return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index) } + + data = b + if fragmentInfo.compressed { + if fs.compressor == nil { + return 
nil, 0, fmt.Errorf("fragment compressed but do not have valid compressor") + } + data, err = fs.compressor.decompress(b) + if err != nil { + return nil, 0, fmt.Errorf("decompress error: %v", err) + } + } + return data, 0, nil + }) + if err != nil { + return nil, err } // now get the data from the offset return data[offset : int64(offset)+fragmentSize], nil @@ -594,8 +659,9 @@ func readFragmentTable(s *superblock, file util.File, c Compressor) ([]*fragment // load in the actual fragment entries // read each block and uncompress it var fragmentTable []*fragmentEntry + var fs = &FileSystem{} for i, offset := range offsets { - uncompressed, _, err := readMetaBlock(file, c, offset) + uncompressed, _, err := fs.readMetaBlock(file, c, offset) if err != nil { return nil, fmt.Errorf("error reading meta block %d at position %d: %v", i, offset, err) } @@ -664,13 +730,14 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable, var ( uncompressed []byte size uint16 + fs = &FileSystem{} ) bIndex := make([]byte, 0) // convert those into indexes for i := 0; i+8-1 < len(b); i += 8 { locn := binary.LittleEndian.Uint64(b[i : i+8]) - uncompressed, _, err = readMetaBlock(file, c, int64(locn)) + uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn)) if err != nil { return nil, fmt.Errorf("error reading xattr index meta block %d at position %d: %v", i, locn, err) } @@ -681,7 +748,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable, xAttrEnd := binary.LittleEndian.Uint64(b[:8]) xAttrData := make([]byte, 0) for i := xAttrStart; i < xAttrEnd; { - uncompressed, size, err = readMetaBlock(file, c, int64(i)) + uncompressed, size, err = fs.readMetaBlock(file, c, int64(i)) if err != nil { return nil, fmt.Errorf("error reading xattr data meta block at position %d: %v", i, err) } @@ -755,13 +822,14 @@ func readUidsGids(s *superblock, file util.File, c Compressor) ([]uint32, error) var ( uncompressed []byte + fs = &FileSystem{} ) data 
:= make([]byte, 0) // convert those into indexes for i := 0; i+8-1 < len(b); i += 8 { locn := binary.LittleEndian.Uint64(b[i : i+8]) - uncompressed, _, err = readMetaBlock(file, c, int64(locn)) + uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn)) if err != nil { return nil, fmt.Errorf("error reading uidgid index meta block %d at position %d: %v", i, locn, err) } diff --git a/filesystem/squashfs/squashfs_test.go b/filesystem/squashfs/squashfs_test.go index a0ca729d..236d98e6 100644 --- a/filesystem/squashfs/squashfs_test.go +++ b/filesystem/squashfs/squashfs_test.go @@ -68,6 +68,29 @@ func TestSquashfsType(t *testing.T) { } } +func TestSquashfsSetCacheSize(t *testing.T) { + fs, err := getValidSquashfsFSReadOnly() + if err != nil { + t.Fatalf("Failed to get read-only squashfs filesystem: %v", err) + } + assertCacheSize := func(want int) { + got := fs.GetCacheSize() + if want != got { + t.Errorf("Want cache size %d but got %d", want, got) + } + } + // Check we can set the Cache size for a Read FileSystem + assertCacheSize(128 * 1024 * 1024) + fs.SetCacheSize(1024 * 1024) + assertCacheSize(1024 * 1024) + fs.SetCacheSize(0) + fs.SetCacheSize(-1) + assertCacheSize(0) + // Check we can set the Cache size for a Write FileSystem + fs = &squashfs.FileSystem{} + assertCacheSize(0) +} + func TestSquashfsMkdir(t *testing.T) { t.Run("read-only", func(t *testing.T) { fs, err := getValidSquashfsFSReadOnly()