This repository has been archived by the owner on May 14, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
compaction.js
435 lines (401 loc) · 12.9 KB
/
compaction.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
// SPDX-FileCopyrightText: 2022 Anders Rune Jensen
//
// SPDX-License-Identifier: LGPL-3.0-only
const RAF = require('polyraf')
const fs = require('fs')
const Obv = require('obz')
const push = require('push-stream')
const mutexify = require('mutexify')
const debug = require('debug')('async-append-only-log')
const Record = require('./record')
// Derive the name of the compaction-state sidecar file from the log's filename.
function getStateFilename(logFilename) {
  return `${logFilename}.compaction`
}
// True when a compaction state file exists for this log, i.e. a previous
// compaction was started (and possibly interrupted before completing).
function stateFileExists(logFilename) {
  const stateFilename = getStateFilename(logFilename)
  return fs.existsSync(stateFilename)
}
const NO_TRUNCATE = 0xffffffff
/**
 * This file holds the state needed to resume the compaction algorithm after
 * an interruption.
*
* - bytes 0..3: UInt32LE for the version of this file format
* smallest version is 1.
* - bytes 4..7: UInt32LE for the startOffset, usually the start of some block
* - bytes 8..11: UInt32LE for block index where to perform truncation
* where 0xFFFFFFFF means no truncation to-be-done yet
* - bytes 12..15: UInt32LE for the blockIndex to-be-compacted
* - bytes 16..19: UInt32LE for the 1st unshifted record's offset
* - bytes 20..(20+blockSize-1): blockBuf containing the 1st unshifted record
*/
/**
 * Persistence of the compaction algorithm's state, so that a compaction
 * interrupted by a crash can be resumed on the next startup.
 *
 * The state lives in a sidecar file next to the log (layout described in the
 * comment above) and is deleted once compaction completes.
 *
 * @param {string} logFilename path of the log file being compacted
 * @param {number} blockSize size in bytes of one log block
 */
function PersistentState(logFilename, blockSize) {
  const raf = RAF(getStateFilename(logFilename))
  const writeLock = mutexify()
  // Five UInt32LE header fields plus one whole block buffer (see layout above)
  const stateFileSize = 4 + 4 + 4 + 4 + 4 + blockSize

  /**
   * Read the state from disk. If the state file is missing or empty, deliver
   * a fresh default state flagged with `initial: true`.
   * @param {Function} cb called with (err, state)
   */
  function load(cb) {
    raf.stat(function onRAFStatDone(err, stat) {
      const fileSize = !err && stat ? stat.size : -1
      if (fileSize <= 0) {
        const state = {
          version: 1,
          startOffset: 0,
          truncateBlockIndex: NO_TRUNCATE,
          compactedBlockIndex: 0,
          unshiftedOffset: 0,
          // FIX: field was misspelled `unshiftedBlockBuffer`, which left
          // `state.unshiftedBlockBuf` undefined (instead of null) on a
          // fresh state
          unshiftedBlockBuf: null,
          initial: true,
        }
        cb(null, state)
      } else {
        raf.read(0, stateFileSize, function onFirstRAFReadDone(err, buf) {
          if (err) return cb(err)
          const state = {
            version: buf.readUInt32LE(0),
            startOffset: buf.readUInt32LE(4),
            truncateBlockIndex: buf.readUInt32LE(8),
            compactedBlockIndex: buf.readUInt32LE(12),
            unshiftedOffset: buf.readUInt32LE(16),
            unshiftedBlockBuf: buf.slice(20),
            initial: false,
          }
          cb(null, state)
        })
      }
    })
  }

  /**
   * Serialize `state` and write it to disk, fsyncing when a file descriptor
   * is available so the state survives a crash. Writes are serialized
   * through a mutex so concurrent saves cannot interleave.
   * @param {Object} state the compaction state to persist
   * @param {Function} cb called with (err, state)
   */
  function save(state, cb) {
    const buf = Buffer.alloc(stateFileSize)
    buf.writeUInt32LE(state.version, 0)
    // FIX: was `writeUint32LE` (lowercase "i"), an alias that only exists in
    // recent Node versions; use the canonical spelling like the other calls
    buf.writeUInt32LE(state.startOffset, 4)
    buf.writeUInt32LE(state.truncateBlockIndex, 8)
    buf.writeUInt32LE(state.compactedBlockIndex, 12)
    buf.writeUInt32LE(state.unshiftedOffset, 16)
    state.unshiftedBlockBuf.copy(buf, 20)
    writeLock((unlock) => {
      raf.write(0, buf, function onRafWriteDone(err) {
        if (err) return unlock(cb, err)
        if (raf.fd) {
          fs.fsync(raf.fd, function onFsyncDone(err) {
            if (err) unlock(cb, err)
            else unlock(cb, null, state)
          })
        } else unlock(cb, null, state)
      })
    })
  }

  /**
   * Close and delete the state file, signalling that compaction is done.
   * @param {Function} cb called with (err)
   */
  function destroy(cb) {
    if (stateFileExists(logFilename)) {
      raf.close(function onRAFClosed(err) {
        if (err) return cb(err)
        fs.unlink(raf.filename, cb)
      })
    } else {
      cb()
    }
  }

  return {
    load,
    save,
    destroy,
  }
}
/**
* Compaction is the process of removing deleted records from the log by
* rewriting blocks in the log *in situ*, moving ("shifting") subsequent records
* to earlier slots, to fill the spaces left by the deleted records.
*
* The compaction algorithm is, at a high level:
* - Keep track of some state, comprised of:
* - compactedBlockIndex: blockIndex of the current block being compacted.
* all blocks before this have already been compacted. This state always
* increases, never decreases.
* - unshiftedOffset: offset of the first unshifted record in the log, that
* is, the first record that has not been shifted to earlier slots. This
* offset is greater or equal to the compacted block's start offset, and
* may be either in the same block as the compacted block, or even in a much
* later block. This state always increases, never decreases.
* - unshiftedBlockBuf: the block containing the first unshifted record
* - Save the state to disk
* - Compact one block at a time, in increasing order of blockIndex
* - When a block is compacted, the state file is updated
* - Once all blocks have been compacted, delete the state file
*/
/**
 * Run compaction on `log`, calling `onDone(err, { sizeDiff })` when finished.
 *
 * Resumes from a persisted state file if one exists (i.e. a previous
 * compaction was interrupted), otherwise scans the log for the first deleted
 * record ("hole") to determine where compaction must begin.
 *
 * @param {Object} log the append-only log instance being compacted
 * @param {Function} onDone called with (err, { sizeDiff })
 * @returns {{ progress: Obv }} observable reporting compaction progress
 */
function Compaction(log, onDone) {
  const persistentState = PersistentState(log.filename, log.blockSize)
  const progress = Obv() // for the unshifted offset

  let startOffset = 0
  let version = 0
  let holesFound = true // assume true

  // State for the block currently being rewritten ("compacted")
  let compactedBlockIndex = -1
  let compactedBlockBuf = null
  let compactedOffset = 0
  let compactedBlockIdenticalToUnshifted = true

  // State for the first record not yet shifted to an earlier slot
  let unshiftedBlockIndex = 0
  let unshiftedBlockBuf = null
  let unshiftedOffset = 0

  let truncateBlockIndex = NO_TRUNCATE

  loadPersistentState(function onCompactionStateLoaded2(err) {
    if (err) return onDone(err)
    if (truncateBlockIndex !== NO_TRUNCATE) {
      // A previous run finished shifting records but was interrupted before
      // truncating the log; only the truncation step remains.
      truncateAndBeDone()
    } else {
      compactedBlockIndex -= 1 // because it'll be incremented very soon
      compactNextBlock()
    }
  })

  /**
   * Load the persisted compaction state into this closure's variables. On an
   * `initial` (fresh) state, scan the log to discover where compaction must
   * begin, then persist that discovered state before continuing.
   */
  function loadPersistentState(cb) {
    persistentState.load(function onCompactionStateLoaded(err, state) {
      if (err) return cb(err)
      if (state.version !== 1) return cb(new Error('unsupported state version'))
      version = state.version
      startOffset = state.startOffset
      truncateBlockIndex = state.truncateBlockIndex
      compactedBlockIndex = state.compactedBlockIndex
      unshiftedOffset = state.unshiftedOffset
      unshiftedBlockBuf = state.unshiftedBlockBuf
      unshiftedBlockIndex = Math.floor(state.unshiftedOffset / log.blockSize)
      if (state.initial) {
        findStateFromLog(function foundStateFromLog(err, state) {
          if (err) return cb(err)
          compactedBlockIndex = state.compactedBlockIndex
          startOffset = compactedBlockIndex * log.blockSize
          unshiftedOffset = state.unshiftedOffset
          unshiftedBlockBuf = state.unshiftedBlockBuf
          unshiftedBlockIndex = Math.floor(unshiftedOffset / log.blockSize)
          savePersistentState(cb)
        })
      } else {
        cb()
      }
    })
  }

  /**
   * Persist the current closure state, loading the unshifted block from the
   * log first if we don't have it in memory (the state file stores the block
   * itself, not just its index).
   */
  function savePersistentState(cb) {
    if (!unshiftedBlockBuf) {
      loadUnshiftedBlock(saveIt)
    } else {
      saveIt()
    }
    function saveIt() {
      persistentState.save(
        {
          version,
          startOffset,
          truncateBlockIndex,
          compactedBlockIndex,
          unshiftedOffset,
          unshiftedBlockBuf,
        },
        cb
      )
    }
  }

  /**
   * Derive the initial compaction state by scanning the log: find the first
   * deleted record (the first "hole") and the first non-deleted record at or
   * after that hole's block. If there are no holes, or no live records after
   * the first hole, compaction can skip straight to truncation via stop().
   */
  function findStateFromLog(cb) {
    findFirstDeletedOffset(function gotFirstDeleted(err, holeOffset) {
      if (err) return cb(err)
      if (holeOffset === -1) {
        // No holes at all: nothing to shift; just truncate at the last block
        compactedBlockIndex = Math.floor(log.since.value / log.blockSize)
        holesFound = false
        stop()
        return
      }
      const blockStart = holeOffset - (holeOffset % log.blockSize)
      const blockIndex = Math.floor(holeOffset / log.blockSize)
      findNonDeletedOffsetGTE(blockStart, function gotNonDeleted(err, offset) {
        if (err) return cb(err)
        if (offset === -1) {
          // Everything from the hole onwards is deleted: truncate there
          compactedBlockIndex = Math.floor((holeOffset - 1) / log.blockSize)
          stop()
          return
        }
        const state = {
          compactedBlockIndex: blockIndex,
          unshiftedOffset: offset,
          unshiftedBlockBuf: null,
        }
        cb(null, state)
      })
    })
  }

  /**
   * Stream the log and call cb with the offset of the first deleted record
   * (value === null), or -1 if the log contains no deleted records.
   */
  function findFirstDeletedOffset(cb) {
    let once = false
    log.stream({ offsets: true, values: true }).pipe(
      push.drain(
        function sinkToFindFirstDeleted(record) {
          if (record.value === null && !once) {
            once = true
            cb(null, record.offset)
            return false // abort the stream; we only need the first match
          }
        },
        function sinkEndedLookingForDeleted() {
          cb(null, -1)
        }
      )
    )
  }

  /**
   * Stream the log from offset `gte` and call cb with the offset of the
   * first NON-deleted record, or -1 if none exists from there onwards.
   */
  function findNonDeletedOffsetGTE(gte, cb) {
    let once = false
    log.stream({ gte, offsets: true, values: true }).pipe(
      push.drain(
        function sinkToFindNonDeleted(record) {
          if (record.value !== null && !once) {
            once = true
            cb(null, record.offset)
            return false // abort the stream; we only need the first match
          }
        },
        function sinkEndedLookingForNonDeleted() {
          cb(null, -1)
        }
      )
    )
  }

  /**
   * Shift records one at a time into the current compacted block until the
   * block is full (then move on to the next block) or the log is exhausted
   * (then stop). Re-entered asynchronously whenever the next unshifted block
   * has to be fetched from disk.
   */
  function continueCompactingBlock() {
    while (true) {
      // Fetch the unshifted block, if necessary
      if (!unshiftedBlockBuf) {
        loadUnshiftedBlock(continueCompactingBlock)
        return
      }
      // When all records have been shifted (thus end of log), stop compacting
      if (unshiftedBlockIndex === -1) {
        saveCompactedBlock(function onCompactedBlockSaved(err) {
          if (err) return onDone(err)
          stop()
        })
        return
      }
      const [unshiftedDataBuf, unshiftedRecSize] = getUnshiftedRecord()
      // Get a non-deleted unshifted record, if necessary
      if (!unshiftedDataBuf) {
        goToNextUnshifted()
        continue
      }
      const compactedBlockStart = compactedBlockIndex * log.blockSize
      const offsetInCompactedBlock = compactedOffset - compactedBlockStart
      // Proceed to compact the next block if this block is full
      if (log.hasNoSpaceFor(unshiftedDataBuf, offsetInCompactedBlock)) {
        saveCompactedBlock()
        setImmediate(compactNextBlock)
        return
      }
      if (
        compactedBlockIndex !== unshiftedBlockIndex ||
        compactedOffset !== unshiftedOffset
      ) {
        // The compacted block now differs from the on-disk original, so it
        // will actually need to be written back
        compactedBlockIdenticalToUnshifted = false
      }
      // Copy record to new compacted block
      Record.write(compactedBlockBuf, offsetInCompactedBlock, unshiftedDataBuf)
      goToNextUnshifted()
      compactedOffset += unshiftedRecSize
    }
  }

  /**
   * Write the compacted block back to the log, unless it is byte-identical
   * to what is already on disk (in which case the write is skipped).
   * `cb` is optional; without it, errors are routed to onDone.
   */
  function saveCompactedBlock(cb) {
    if (compactedBlockIdenticalToUnshifted) {
      if (cb) cb()
    } else {
      const blockIndex = compactedBlockIndex
      log.overwrite(blockIndex, compactedBlockBuf, function onOverwritten(err) {
        if (err && cb) cb(err)
        else if (err) return onDone(err)
        else {
          debug('compacted block %d', blockIndex)
          if (cb) cb()
        }
      })
    }
  }

  /** Load the block containing the first unshifted record from the log. */
  function loadUnshiftedBlock(cb) {
    const blockStart = unshiftedBlockIndex * log.blockSize
    log.getBlock(blockStart, function onBlockLoaded(err, blockBuf) {
      if (err) return onDone(err)
      unshiftedBlockBuf = blockBuf
      cb()
    })
  }

  /**
   * Read the current unshifted record's data and size from its block.
   * Returns [null, size] for a deleted record (dataBuf is null).
   */
  function getUnshiftedRecord() {
    const [, dataBuf, recSize] = log.getDataNextOffset(
      unshiftedBlockBuf,
      unshiftedOffset,
      true
    )
    return [dataBuf, recSize]
  }

  /**
   * Advance to the next unshifted record. getDataNextOffset's next-offset
   * convention: -1 means end of log, 0 means "continue in the next block",
   * any other value is the next record's offset within the same block.
   */
  function goToNextUnshifted() {
    let [nextOffset] = log.getDataNextOffset(
      unshiftedBlockBuf,
      unshiftedOffset,
      true
    )
    if (nextOffset === -1) {
      unshiftedBlockIndex = -1
    } else if (nextOffset === 0) {
      unshiftedBlockIndex += 1
      unshiftedBlockBuf = null
      unshiftedOffset = unshiftedBlockIndex * log.blockSize
    } else {
      unshiftedOffset = nextOffset
    }
  }

  /**
   * Begin compacting the next block: allocate a fresh block buffer, report
   * progress, persist the state (so a crash can resume here), then start
   * shifting records into the new block.
   */
  function compactNextBlock() {
    compactedBlockIndex += 1
    compactedBlockBuf = Buffer.alloc(log.blockSize)
    compactedOffset = compactedBlockIndex * log.blockSize
    compactedBlockIdenticalToUnshifted = true
    progress.set(calculateProgressStats())
    savePersistentState(function onCompactionStateSaved(err) {
      if (err) return onDone(err)
      continueCompactingBlock()
    })
  }

  /** Progress stats for the `progress` observable. */
  function calculateProgressStats() {
    const percent =
      (unshiftedOffset - startOffset) / (log.since.value - startOffset)
    return {
      startOffset,
      compactedOffset,
      unshiftedOffset,
      percent,
    }
  }

  /**
   * All blocks have been compacted: record the truncation point in the state
   * file (so a crash between here and truncation can resume), then truncate.
   */
  function stop() {
    compactedBlockBuf = null
    unshiftedBlockBuf = null
    truncateBlockIndex = compactedBlockIndex
    const state = {
      version,
      startOffset,
      truncateBlockIndex,
      compactedBlockIndex: 0,
      unshiftedOffset: 0,
      unshiftedBlockBuf: Buffer.alloc(0),
    }
    persistentState.save(state, function onTruncateStateSaved(err) {
      if (err) return onDone(err)
      truncateAndBeDone()
    })
  }

  /**
   * Truncate the log at the recorded block index, destroy the state file,
   * and report the reclaimed size to onDone.
   */
  function truncateAndBeDone() {
    // FIX: the original compared the *function* `truncateAndBeDone` against
    // NO_TRUNCATE (always false), so this guard could never fire; the state
    // variable `truncateBlockIndex` is what must be checked.
    if (truncateBlockIndex === NO_TRUNCATE) {
      return onDone(new Error('Cannot truncate log yet'))
    }
    log.truncate(truncateBlockIndex, function onTruncatedLog(err, sizeDiff) {
      if (err) return onDone(err)
      persistentState.destroy(function onStateDestroyed(err) {
        if (err) return onDone(err)
        if (sizeDiff === 0 && holesFound) {
          // Truncation did not make the log smaller but it did rewrite the log.
          // So report 1 byte as a way of saying that compaction filled holes.
          onDone(null, { sizeDiff: 1 })
        } else {
          onDone(null, { sizeDiff })
        }
      })
    })
  }

  return {
    progress,
  }
}
// Expose a static helper so callers can detect an interrupted compaction
// (a leftover state file) without constructing a Compaction instance.
Compaction.stateFileExists = stateFileExists
module.exports = Compaction