read.go
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ice

import (
	"encoding/binary"

	"github.com/blugelabs/ice/compress"
)

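// initDecompressedStoredFieldChunks pre-populates the per-chunk cache with one
// empty segmentCacheData entry per stored-field chunk, so later readers only
// lock the individual chunk they touch.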
func (s *Segment) initDecompressedStoredFieldChunks(n int) {
	s.m.Lock()
	s.decompressedStoredFieldChunks = make(map[uint32]*segmentCacheData, n)
	for i := uint32(0); i < uint32(n); i++ {
		s.decompressedStoredFieldChunks[i] = &segmentCacheData{}
	}
	s.m.Unlock()
}

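// getDocStoredMetaAndUnCompressed locates the stored-field chunk containing
// docNum, decompresses it (caching the result on the segment), and returns the
// document's meta and data slices. Within a decompressed chunk, each document
// entry is laid out as a uvarint meta length, a uvarint data length, then the
// meta bytes followed by the data bytes.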
func (s *Segment) getDocStoredMetaAndUnCompressed(docNum uint64) (meta, data []byte, err error) {
	_, storedOffset, err := s.getDocStoredOffsetsOnly(docNum)
	if err != nil {
		return nil, nil, err
	}

	// document chunk coder
	var uncompressed []byte

	chunkI := uint32(docNum) / defaultDocumentChunkSize
	storedFieldDecompressed := s.decompressedStoredFieldChunks[chunkI]
	storedFieldDecompressed.m.Lock()
	if storedFieldDecompressed.data == nil {
		// we haven't already loaded and decompressed this chunk
		chunkOffsetStart := s.storedFieldChunkOffsets[int(chunkI)]
		chunkOffsetEnd := s.storedFieldChunkOffsets[int(chunkI)+1]
		compressed, err := s.data.Read(int(chunkOffsetStart), int(chunkOffsetEnd))
		if err != nil {
			// unlock before returning so later readers of this chunk aren't blocked
			storedFieldDecompressed.m.Unlock()
			return nil, nil, err
		}
		// decompress it
		storedFieldDecompressed.data, err = compress.Decompress(nil, compressed)
		if err != nil {
			storedFieldDecompressed.m.Unlock()
			return nil, nil, err
		}
	}
	// once initialized, the chunk data never changes, so we can unlock the
	// mutex before slicing into it
	uncompressed = storedFieldDecompressed.data
	storedFieldDecompressed.m.Unlock()

	metaDataLenEnd := storedOffset + binary.MaxVarintLen64
	if metaDataLenEnd > uint64(len(uncompressed)) {
		metaDataLenEnd = uint64(len(uncompressed))
	}
	metaLenData := uncompressed[storedOffset:metaDataLenEnd]
	var n uint64
	metaLen, read := binary.Uvarint(metaLenData)
	n += uint64(read)

	dataLenEnd := storedOffset + n + binary.MaxVarintLen64
	if dataLenEnd > uint64(len(uncompressed)) {
		dataLenEnd = uint64(len(uncompressed))
	}
	dataLenData := uncompressed[int(storedOffset+n):dataLenEnd]
	dataLen, read := binary.Uvarint(dataLenData)
	n += uint64(read)

	meta = uncompressed[int(storedOffset+n):int(storedOffset+n+metaLen)]
	data = uncompressed[int(storedOffset+n+metaLen):int(storedOffset+n+metaLen+dataLen)]
	return meta, data, nil
}

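// getDocStoredOffsetsOnly reads the fixed-width (fileAddrWidth bytes,
// big-endian) entry for docNum from the stored-field index and returns both
// the position of that index entry and the stored offset it points to.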
func (s *Segment) getDocStoredOffsetsOnly(docNum uint64) (indexOffset, storedOffset uint64, err error) {
	indexOffset = s.footer.storedIndexOffset + (fileAddrWidth * docNum)

	storedOffsetData, err := s.data.Read(int(indexOffset), int(indexOffset+fileAddrWidth))
	if err != nil {
		return 0, 0, err
	}

	storedOffset = binary.BigEndian.Uint64(storedOffsetData)
	return indexOffset, storedOffset, nil
}
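
// The sketch below is illustrative only and not part of the original file:
// it shows how a single stored-document entry, in the layout that
// getDocStoredMetaAndUnCompressed above parses, could be appended to an
// uncompressed chunk buffer: a uvarint meta length, a uvarint data length,
// then the meta bytes followed by the data bytes. The function name is
// hypothetical and only encoding/binary from the imports above is used.
func appendStoredEntrySketch(buf, meta, data []byte) []byte {
	var lenBuf [binary.MaxVarintLen64]byte
	// meta length as a uvarint
	n := binary.PutUvarint(lenBuf[:], uint64(len(meta)))
	buf = append(buf, lenBuf[:n]...)
	// data length as a uvarint
	n = binary.PutUvarint(lenBuf[:], uint64(len(data)))
	buf = append(buf, lenBuf[:n]...)
	// meta bytes, then data bytes
	buf = append(buf, meta...)
	buf = append(buf, data...)
	return buf
}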