Merge pull request #2061 from OffchainLabs/db-conversion
Add tool for database conversion
tsahee authored Aug 17, 2024
2 parents 5d600df + 93aaaef commit 0639404
Showing 18 changed files with 1,089 additions and 76 deletions.
2 changes: 2 additions & 0 deletions Dockerfile
@@ -264,6 +264,8 @@ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/prover /usr/local/bin/
+COPY --from=node-builder /workspace/target/bin/dbconv /usr/local/bin/
+COPY ./scripts/convert-databases.bash /usr/local/bin/
COPY --from=machine-versions /workspace/machines /home/user/target/machines
COPY ./scripts/validate-wasm-module-root.sh .
RUN ./validate-wasm-module-root.sh /home/user/target/machines /usr/local/bin/prover
5 changes: 4 additions & 1 deletion Makefile
@@ -157,7 +157,7 @@ all: build build-replay-env test-gen-proofs
@touch .make/all

.PHONY: build
-build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager)
+build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager dbconv)
@printf $(done)

.PHONY: build-node-deps
@@ -310,6 +310,9 @@ $(output_root)/bin/nitro-val: $(DEP_PREDICATE) build-node-deps
$(output_root)/bin/seq-coordinator-manager: $(DEP_PREDICATE) build-node-deps
go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/seq-coordinator-manager"

+$(output_root)/bin/dbconv: $(DEP_PREDICATE) build-node-deps
+go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/dbconv"

# recompile wasm, but don't change timestamp unless files differ
$(replay_wasm): $(DEP_PREDICATE) $(go_source) .make/solgen
mkdir -p `dirname $(replay_wasm)`
52 changes: 26 additions & 26 deletions cmd/conf/database.go
@@ -43,7 +43,7 @@ func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Int(prefix+".handles", PersistentConfigDefault.Handles, "number of file descriptor handles to use for the database")
f.String(prefix+".ancient", PersistentConfigDefault.Ancient, "directory of ancient where the chain freezer can be opened")
f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use. If set to empty string the database type will be autodetected and if no pre-existing database is found it will default to creating new pebble database ('leveldb', 'pebble' or '' = auto-detect)")
-PebbleConfigAddOptions(prefix+".pebble", f)
+PebbleConfigAddOptions(prefix+".pebble", f, &PersistentConfigDefault.Pebble)
}

func (c *PersistentConfig) ResolveDirectoryNames() error {
@@ -120,9 +120,9 @@ var PebbleConfigDefault = PebbleConfig{
Experimental: PebbleExperimentalConfigDefault,
}

-func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) {
-f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions")
-PebbleExperimentalConfigAddOptions(prefix+".experimental", f)
+func PebbleConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleConfig) {
+f.Int(prefix+".max-concurrent-compactions", defaultConfig.MaxConcurrentCompactions, "maximum number of concurrent compactions")
+PebbleExperimentalConfigAddOptions(prefix+".experimental", f, &defaultConfig.Experimental)
}

func (c *PebbleConfig) Validate() error {
@@ -189,29 +189,29 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{
ForceWriterParallelism: false,
}

-func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) {
-f.Int(prefix+".bytes-per-sync", PebbleExperimentalConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background")
-f.Int(prefix+".l0-compaction-file-threshold", PebbleExperimentalConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction")
-f.Int(prefix+".l0-compaction-threshold", PebbleExperimentalConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction")
-f.Int(prefix+".l0-stop-writes-threshold", PebbleExperimentalConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached")
-f.Int64(prefix+".l-base-max-bytes", PebbleExperimentalConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.")
-f.Int(prefix+".mem-table-stop-writes-threshold", PebbleExperimentalConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued MemTables")
-f.Bool(prefix+".disable-automatic-compactions", PebbleExperimentalConfigDefault.DisableAutomaticCompactions, "disables automatic compactions")
-f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background")
-f.String(prefix+".wal-dir", PebbleExperimentalConfigDefault.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables")
-f.Int(prefix+".wal-min-sync-interval", PebbleExperimentalConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.")
-f.Int(prefix+".target-byte-deletion-rate", PebbleExperimentalConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).")
-f.Int(prefix+".block-size", PebbleExperimentalConfigDefault.BlockSize, "target uncompressed size in bytes of each table block")
-f.Int(prefix+".index-block-size", PebbleExperimentalConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32))
-f.Int64(prefix+".target-file-size", PebbleExperimentalConfigDefault.TargetFileSize, "target file size for the level 0")
-f.Bool(prefix+".target-file-size-equal-levels", PebbleExperimentalConfigDefault.TargetFileSizeEqualLevels, "if true same target-file-size will be used for all levels, otherwise target size for layer n = 2 * target size for layer n - 1")
+func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleExperimentalConfig) {
+f.Int(prefix+".bytes-per-sync", defaultConfig.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background")
+f.Int(prefix+".l0-compaction-file-threshold", defaultConfig.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction")
+f.Int(prefix+".l0-compaction-threshold", defaultConfig.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction")
+f.Int(prefix+".l0-stop-writes-threshold", defaultConfig.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached")
+f.Int64(prefix+".l-base-max-bytes", defaultConfig.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.")
+f.Int(prefix+".mem-table-stop-writes-threshold", defaultConfig.MemTableStopWritesThreshold, "hard limit on the number of queued MemTables")
+f.Bool(prefix+".disable-automatic-compactions", defaultConfig.DisableAutomaticCompactions, "disables automatic compactions")
+f.Int(prefix+".wal-bytes-per-sync", defaultConfig.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background")
+f.String(prefix+".wal-dir", defaultConfig.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables")
+f.Int(prefix+".wal-min-sync-interval", defaultConfig.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.")
+f.Int(prefix+".target-byte-deletion-rate", defaultConfig.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).")
+f.Int(prefix+".block-size", defaultConfig.BlockSize, "target uncompressed size in bytes of each table block")
+f.Int(prefix+".index-block-size", defaultConfig.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32))
+f.Int64(prefix+".target-file-size", defaultConfig.TargetFileSize, "target file size for the level 0")
+f.Bool(prefix+".target-file-size-equal-levels", defaultConfig.TargetFileSizeEqualLevels, "if true same target-file-size will be used for all levels, otherwise target size for layer n = 2 * target size for layer n - 1")

f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.")
f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.")
f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate")
f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).")
f.Int(prefix+".max-writer-concurrency", PebbleExperimentalConfigDefault.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.")
f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.")
f.Int(prefix+".l0-compaction-concurrency", defaultConfig.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.")
f.Uint64(prefix+".compaction-debt-concurrency", defaultConfig.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.")
f.Int64(prefix+".read-compaction-rate", defaultConfig.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate")
f.Int64(prefix+".read-sampling-multiplier", defaultConfig.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).")
f.Int(prefix+".max-writer-concurrency", defaultConfig.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.")
f.Bool(prefix+".force-writer-parallelism", defaultConfig.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.")
}

func (c *PebbleExperimentalConfig) Validate() error {
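
The refactor above threads a caller-supplied defaults struct through the *AddOptions helpers instead of reading the package-level defaults, so different binaries can register the same flags with different default values. A minimal sketch of the pattern, using a hypothetical ExampleConfig in place of conf.PebbleConfig:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

// ExampleConfig is a hypothetical stand-in for conf.PebbleConfig.
type ExampleConfig struct {
	MaxConcurrentCompactions int
}

// AddOptions registers flags from caller-supplied defaults rather than a
// package-level default struct, mirroring the signature change in this diff.
func AddOptions(prefix string, f *flag.FlagSet, defaults *ExampleConfig) {
	f.Int(prefix+".max-concurrent-compactions", defaults.MaxConcurrentCompactions, "maximum number of concurrent compactions")
}

func main() {
	// Two callers register the same option name with different defaults.
	nodeFlags := flag.NewFlagSet("nitro", flag.ContinueOnError)
	AddOptions("persistent.pebble", nodeFlags, &ExampleConfig{MaxConcurrentCompactions: 10})

	convFlags := flag.NewFlagSet("dbconv", flag.ContinueOnError)
	AddOptions("dst.pebble", convFlags, &ExampleConfig{MaxConcurrentCompactions: 4})

	fmt.Println(nodeFlags.Lookup("persistent.pebble.max-concurrent-compactions").DefValue) // 10
	fmt.Println(convFlags.Lookup("dst.pebble.max-concurrent-compactions").DefValue)        // 4
}
```

This is what lets the new dbconv tool below reuse conf.PebbleConfigAddOptions under both the src.pebble and dst.pebble prefixes while supplying its own defaults.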
3 changes: 1 addition & 2 deletions cmd/conf/init.go
@@ -7,7 +7,6 @@ import (
"time"

"github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/spf13/pflag"
)

@@ -55,7 +54,7 @@ var InitConfigDefault = InitConfig{
Prune: "",
PruneBloomSize: 2048,
PruneThreads: runtime.NumCPU(),
-PruneTrieCleanCache: gethexec.DefaultCachingConfig.TrieCleanCache,
+PruneTrieCleanCache: 600,
RecreateMissingStateFrom: 0, // 0 = disabled
RebuildLocalWasm: true,
ReorgToBatch: -1,
95 changes: 95 additions & 0 deletions cmd/dbconv/dbconv/config.go
@@ -0,0 +1,95 @@
package dbconv

import (
"errors"
"fmt"

"github.com/offchainlabs/nitro/cmd/conf"
"github.com/offchainlabs/nitro/cmd/genericconf"
flag "github.com/spf13/pflag"
)

type DBConfig struct {
Data string `koanf:"data"`
DBEngine string `koanf:"db-engine"`
Handles int `koanf:"handles"`
Cache int `koanf:"cache"`
Namespace string `koanf:"namespace"`
Pebble conf.PebbleConfig `koanf:"pebble"`
}

var DBConfigDefaultDst = DBConfig{
DBEngine: "pebble",
Handles: conf.PersistentConfigDefault.Handles,
Cache: 2048, // 2048 MB
Namespace: "dstdb/",
Pebble: conf.PebbleConfigDefault,
}

var DBConfigDefaultSrc = DBConfig{
DBEngine: "leveldb",
Handles: conf.PersistentConfigDefault.Handles,
Cache: 2048, // 2048 MB
Namespace: "srcdb/",
}

func DBConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *DBConfig) {
f.String(prefix+".data", defaultConfig.Data, "directory of stored chain state")
f.String(prefix+".db-engine", defaultConfig.DBEngine, "backing database implementation to use ('leveldb' or 'pebble')")
f.Int(prefix+".handles", defaultConfig.Handles, "number of files to be open simultaneously")
f.Int(prefix+".cache", defaultConfig.Cache, "the capacity(in megabytes) of the data caching")
f.String(prefix+".namespace", defaultConfig.Namespace, "metrics namespace")
conf.PebbleConfigAddOptions(prefix+".pebble", f, &defaultConfig.Pebble)
}

type DBConvConfig struct {
Src DBConfig `koanf:"src"`
Dst DBConfig `koanf:"dst"`
IdealBatchSize int `koanf:"ideal-batch-size"`
Convert bool `koanf:"convert"`
Compact bool `koanf:"compact"`
Verify string `koanf:"verify"`
LogLevel string `koanf:"log-level"`
LogType string `koanf:"log-type"`
Metrics bool `koanf:"metrics"`
MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"`
}

var DefaultDBConvConfig = DBConvConfig{
Src: DBConfigDefaultSrc,
Dst: DBConfigDefaultDst,
IdealBatchSize: 100 * 1024 * 1024, // 100 MB
Convert: false,
Compact: false,
Verify: "",
LogLevel: "INFO",
LogType: "plaintext",
Metrics: false,
MetricsServer: genericconf.MetricsServerConfigDefault,
}

func DBConvConfigAddOptions(f *flag.FlagSet) {
DBConfigAddOptions("src", f, &DefaultDBConvConfig.Src)
DBConfigAddOptions("dst", f, &DefaultDBConvConfig.Dst)
f.Int("ideal-batch-size", DefaultDBConvConfig.IdealBatchSize, "ideal write batch size")
f.Bool("convert", DefaultDBConvConfig.Convert, "enables conversion step")
f.Bool("compact", DefaultDBConvConfig.Compact, "enables compaction step")
f.String("verify", DefaultDBConvConfig.Verify, "enables verification step (\"\" = disabled, \"keys\" = only keys, \"full\" = keys and values)")
f.String("log-level", DefaultDBConvConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE")
f.String("log-type", DefaultDBConvConfig.LogType, "log type (plaintext or json)")
f.Bool("metrics", DefaultDBConvConfig.Metrics, "enable metrics")
genericconf.MetricsServerAddOptions("metrics-server", f)
}

func (c *DBConvConfig) Validate() error {
if c.Verify != "keys" && c.Verify != "full" && c.Verify != "" {
return fmt.Errorf("Invalid verify mode: %v", c.Verify)
}
if !c.Convert && c.Verify == "" && !c.Compact {
return errors.New("nothing to be done, conversion, verification and compaction disabled")
}
if c.IdealBatchSize <= 0 {
return fmt.Errorf("Invalid ideal batch size: %d, has to be greater then 0", c.IdealBatchSize)
}
return nil
}
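
Per the Validate rules above, a configuration must enable at least one of the convert, compact, or verify steps. A minimal usage sketch, assuming only the types and defaults defined in this file; the data directories are placeholder paths:

```go
package main

import (
	"fmt"

	"github.com/offchainlabs/nitro/cmd/dbconv/dbconv"
)

func main() {
	// Start from the package defaults (leveldb source, pebble destination)
	// and enable only the conversion step; the paths are placeholders.
	config := dbconv.DefaultDBConvConfig
	config.Convert = true
	config.Src.Data = "/data/nitro/leveldb"
	config.Dst.Data = "/data/nitro/pebble"

	if err := config.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Printf("converting %s (%s) -> %s (%s)\n",
		config.Src.Data, config.Src.DBEngine, config.Dst.Data, config.Dst.DBEngine)
}
```

Note that DBConfigDefaultSrc defaults to leveldb and DBConfigDefaultDst to pebble, matching the tool's purpose of migrating a node's database from one engine to the other.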