squashfs: make readMetadata a method on FileSystem #205
ncw committed Dec 31, 2023
1 parent b20cf01 commit 5bdd6ea
Showing 3 changed files with 17 additions and 12 deletions.
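The change itself is mechanical: readMetaBlock and readMetadata gain a *FileSystem receiver, and every caller goes through a FileSystem value (callers that do not have one yet construct an empty &FileSystem{}). A minimal sketch of the pattern, using hypothetical stand-in types rather than the package's real FileSystem and Compressor:

package main

import (
	"fmt"
	"io"
	"strings"
)

// Stand-ins for illustration only; the real types live in the squashfs package.
type Compressor interface{}
type FileSystem struct{}

// Before: a package-level function that takes everything as parameters.
func readMetaBlockFree(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
	b := make([]byte, 2)
	_, err = r.ReadAt(b, location)
	return b, 2, err
}

// After: the same signature hung off *FileSystem, so later changes can reach
// receiver fields without widening the parameter list further.
func (fs *FileSystem) readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
	return readMetaBlockFree(r, c, location)
}

func main() {
	fs := &FileSystem{} // an empty receiver is enough, as the diff's callers show
	data, size, err := fs.readMetaBlock(strings.NewReader("squashfs"), nil, 0)
	fmt.Println(data, size, err)
}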
9 changes: 5 additions & 4 deletions filesystem/squashfs/metadatablock.go
@@ -26,6 +26,7 @@ func getMetadataSize(b []byte) (size uint16, compressed bool, err error) {
return size, compressed, nil
}

+// FIXME this isn't used anywhere except in the test code
func parseMetadata(b []byte, c Compressor) (block *metadatablock, err error) {
if len(b) < minMetadataBlockSize {
return nil, fmt.Errorf("metadata block was of len %d, less than minimum %d", len(b), minMetadataBlockSize)
@@ -71,7 +72,7 @@ func (m *metadatablock) toBytes(c Compressor) ([]byte, error) {
return b, nil
}

-func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
+func (fs *FileSystem) readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
// read bytes off the reader to determine how big it is and if compressed
b := make([]byte, 2)
_, _ = r.ReadAt(b, location)
@@ -105,21 +106,21 @@ func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
// requests to read 500 bytes beginning at offset 8000 into the first block.
// it always returns to the end of the block, even if that is greater than the given size. This makes it easy to use more
// data than expected on first read. The consumer is expected to cut it down, if needed
-func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) {
+func (fs *FileSystem) readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) {
var (
b []byte
blockOffset = int(initialBlockOffset)
)
// we know how many blocks, so read them all in
-m, read, err := readMetaBlock(r, c, firstBlock+int64(blockOffset))
+m, read, err := fs.readMetaBlock(r, c, firstBlock+int64(blockOffset))
if err != nil {
return nil, err
}
b = append(b, m[byteOffset:]...)
// do we have any more to read?
for len(b) < size {
blockOffset += int(read)
-m, read, err = readMetaBlock(r, c, firstBlock+int64(blockOffset))
+m, read, err = fs.readMetaBlock(r, c, firstBlock+int64(blockOffset))
if err != nil {
return nil, err
}
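The doc comment on readMetadata above spells out its contract: it reads whole metadata blocks starting at firstBlock and returns everything up to the end of the last block it read, which may be more than the size requested, and "the consumer is expected to cut it down". A small hypothetical helper (not in the package) showing that consumer-side trim:

package main

import "fmt"

// trimToRequested slices an over-read metadata buffer down to the size the
// caller actually asked for, and reports a short read otherwise. Hypothetical
// helper for illustration, mirroring the "cut it down" note in the comment.
func trimToRequested(uncompressed []byte, size int) ([]byte, error) {
	if len(uncompressed) < size {
		return nil, fmt.Errorf("short read: got %d bytes, wanted %d", len(uncompressed), size)
	}
	return uncompressed[:size], nil
}

func main() {
	// pretend readMetadata handed back a full 8 KiB metadata block when only 10 bytes were wanted
	block := make([]byte, 8192)
	got, err := trimToRequested(block, 10)
	fmt.Println(len(got), err)
}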
3 changes: 2 additions & 1 deletion filesystem/squashfs/metadatablock_internal_test.go
@@ -122,8 +122,9 @@ func TestReadMetaBlock(t *testing.T) {
{[]byte{0x5, 0x00, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 0, &testCompressorAddBytes{err: fmt.Errorf("unknown")}, 0x0, fmt.Errorf("decompress error: unknown"), nil},
}

+var fs = &FileSystem{}
for i, tt := range tests {
-b, size, err := readMetaBlock(bytes.NewReader(tt.b), tt.c, tt.location)
+b, size, err := fs.readMetaBlock(bytes.NewReader(tt.b), tt.c, tt.location)
switch {
case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())):
t.Errorf("%d: mismatched error, actual then expected", i)
17 changes: 10 additions & 7 deletions filesystem/squashfs/squashfs.go
@@ -442,7 +442,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// get the block
// start by getting the minimum for the proposed type. It very well might be wrong.
size := inodeTypeToSize(iType)
-uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
+uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -470,7 +470,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// if it returns extra > 0, then it needs that many more bytes to be read, and to be reparsed
if extra > 0 {
size += extra
-uncompressed, err = readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
+uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -490,7 +490,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// block when uncompressed.
func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size int) (*directory, error) {
// get the block
-uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size)
+uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -594,8 +594,9 @@ func readFragmentTable(s *superblock, file util.File, c Compressor) ([]*fragment
// load in the actual fragment entries
// read each block and uncompress it
var fragmentTable []*fragmentEntry
+var fs = &FileSystem{}
for i, offset := range offsets {
-uncompressed, _, err := readMetaBlock(file, c, offset)
+uncompressed, _, err := fs.readMetaBlock(file, c, offset)
if err != nil {
return nil, fmt.Errorf("error reading meta block %d at position %d: %v", i, offset, err)
}
@@ -664,13 +665,14 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable,
var (
uncompressed []byte
size uint16
+fs = &FileSystem{}
)

bIndex := make([]byte, 0)
// convert those into indexes
for i := 0; i+8-1 < len(b); i += 8 {
locn := binary.LittleEndian.Uint64(b[i : i+8])
-uncompressed, _, err = readMetaBlock(file, c, int64(locn))
+uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn))
if err != nil {
return nil, fmt.Errorf("error reading xattr index meta block %d at position %d: %v", i, locn, err)
}
@@ -681,7 +683,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable,
xAttrEnd := binary.LittleEndian.Uint64(b[:8])
xAttrData := make([]byte, 0)
for i := xAttrStart; i < xAttrEnd; {
-uncompressed, size, err = readMetaBlock(file, c, int64(i))
+uncompressed, size, err = fs.readMetaBlock(file, c, int64(i))
if err != nil {
return nil, fmt.Errorf("error reading xattr data meta block at position %d: %v", i, err)
}
@@ -755,13 +757,14 @@ func readUidsGids(s *superblock, file util.File, c Compressor) ([]uint32, error)

var (
uncompressed []byte
+fs = &FileSystem{}
)

data := make([]byte, 0)
// convert those into indexes
for i := 0; i+8-1 < len(b); i += 8 {
locn := binary.LittleEndian.Uint64(b[i : i+8])
-uncompressed, _, err = readMetaBlock(file, c, int64(locn))
+uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn))
if err != nil {
return nil, fmt.Errorf("error reading uidgid index meta block %d at position %d: %v", i, locn, err)
}
