diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go
index cfb9dd16944a..99c9d95156d1 100644
--- a/vms/platformvm/state/mock_state.go
+++ b/vms/platformvm/state/mock_state.go
@@ -12,13 +12,11 @@ package state
 import (
 	context "context"
 	reflect "reflect"
-	sync "sync"
 	time "time"
 
 	database "github.com/ava-labs/avalanchego/database"
 	ids "github.com/ava-labs/avalanchego/ids"
 	validators "github.com/ava-labs/avalanchego/snow/validators"
-	logging "github.com/ava-labs/avalanchego/utils/logging"
 	avax "github.com/ava-labs/avalanchego/vms/components/avax"
 	block "github.com/ava-labs/avalanchego/vms/platformvm/block"
 	fx "github.com/ava-labs/avalanchego/vms/platformvm/fx"
@@ -1516,20 +1514,6 @@ func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 any) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1)
 }
 
-// PruneAndIndex mocks base method.
-func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// PruneAndIndex indicates an expected call of PruneAndIndex.
-func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1)
-}
-
 // PutCurrentDelegator mocks base method.
 func (m *MockState) PutCurrentDelegator(arg0 *Staker) {
 	m.ctrl.T.Helper()
@@ -1666,21 +1650,6 @@ func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 any) *gomock.C
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3)
 }
 
-// ShouldPrune mocks base method.
-func (m *MockState) ShouldPrune() (bool, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ShouldPrune")
-	ret0, _ := ret[0].(bool)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// ShouldPrune indicates an expected call of ShouldPrune.
-func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune))
-}
-
 // UTXOIDs mocks base method.
 func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) {
 	m.ctrl.T.Helper()
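With ShouldPrune and PruneAndIndex dropped from the State interface, the regenerated mock above no longer exposes those methods. Below is a minimal sketch of how a test in this package might exercise the remaining mock, assuming the generated NewMockState constructor and the standard gomock Controller API; the test name, block ID, and the gomock import path are illustrative assumptions, not part of this change.

```go
package state

import (
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock" // assumed gomock module path

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
)

// Hypothetical example: only the methods still declared on State can be
// stubbed, since ShouldPrune and PruneAndIndex no longer exist on the mock.
func TestMockStateWithoutPruneMethods(t *testing.T) {
	ctrl := gomock.NewController(t)

	mockState := NewMockState(ctrl)
	blkID := ids.GenerateTestID()

	// Expect a single block lookup and report the block as missing.
	mockState.EXPECT().GetStatelessBlock(blkID).Return(nil, database.ErrNotFound)

	_, err := mockState.GetStatelessBlock(blkID)
	require.ErrorIs(t, err, database.ErrNotFound)
}
```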
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go
index 1707882fd291..32007b4fc928 100644
--- a/vms/platformvm/state/state.go
+++ b/vms/platformvm/state/state.go
@@ -7,8 +7,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math"
-	"sync"
 	"time"
 
 	"github.com/google/btree"
@@ -23,15 +21,12 @@ import (
 	"github.com/ava-labs/avalanchego/database/versiondb"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/snow"
-	"github.com/ava-labs/avalanchego/snow/choices"
 	"github.com/ava-labs/avalanchego/snow/uptime"
 	"github.com/ava-labs/avalanchego/snow/validators"
 	"github.com/ava-labs/avalanchego/utils"
 	"github.com/ava-labs/avalanchego/utils/constants"
 	"github.com/ava-labs/avalanchego/utils/crypto/bls"
 	"github.com/ava-labs/avalanchego/utils/hashing"
-	"github.com/ava-labs/avalanchego/utils/logging"
-	"github.com/ava-labs/avalanchego/utils/timer"
 	"github.com/ava-labs/avalanchego/utils/wrappers"
 	"github.com/ava-labs/avalanchego/vms/components/avax"
 	"github.com/ava-labs/avalanchego/vms/platformvm/block"
@@ -46,13 +41,6 @@ import (
 	safemath "github.com/ava-labs/avalanchego/utils/math"
 )
 
-const (
-	pruneCommitLimit           = 1024
-	pruneCommitSleepMultiplier = 5
-	pruneCommitSleepCap        = 10 * time.Second
-	pruneUpdateFrequency       = 30 * time.Second
-)
-
 var (
 	_ State = (*state)(nil)
 
@@ -87,7 +75,6 @@
 	LastAcceptedKey   = []byte("last accepted")
 	HeightsIndexedKey = []byte("heights indexed")
 	InitializedKey    = []byte("initialized")
-	PrunedKey         = []byte("pruned")
 )
 
 // Chain collects all methods to manage the state of the chain for block
@@ -181,18 +168,6 @@ type State interface {
 	// Discard uncommitted changes to the database.
 	Abort()
 
-	// Returns if the state should be pruned and indexed to remove rejected
-	// blocks and generate the block height index.
-	//
-	// TODO: Remove after v1.11.x is activated
-	ShouldPrune() (bool, error)
-
-	// Removes rejected blocks from disk and indexes accepted blocks by height. This
-	// function supports being (and is recommended to be) called asynchronously.
-	//
-	// TODO: Remove after v1.11.x is activated
-	PruneAndIndex(sync.Locker, logging.Logger) error
-
 	// Commit changes to the base database.
 	Commit() error
 
@@ -205,13 +180,6 @@ type State interface {
 	Close() error
 }
 
-// TODO: Remove after v1.11.x is activated
-type stateBlk struct {
-	Blk    block.Block
-	Bytes  []byte         `serialize:"true"`
-	Status choices.Status `serialize:"true"`
-}
-
 /*
  * VMDB
  * |-. validators
@@ -276,7 +244,6 @@ type stateBlk struct {
  * | '-- txID -> nil
  * '-. singletons
  *   |-- initializedKey -> nil
- *   |-- prunedKey -> nil
 *   |-- timestampKey -> timestamp
 *   |-- currentSupplyKey -> currentSupply
 *   |-- lastAcceptedKey -> lastAccepted
@@ -477,27 +444,6 @@ func New(
 		return nil, err
 	}
 
-	// Before we start accepting new blocks, we check if the pruning process needs
-	// to be run.
-	//
-	// TODO: Cleanup after v1.11.x is activated
-	shouldPrune, err := s.ShouldPrune()
-	if err != nil {
-		return nil, err
-	}
-	if shouldPrune {
-		// If the pruned key is on disk, we must delete it to ensure our disk
-		// can't get into a partially pruned state if the node restarts mid-way
-		// through pruning.
-		if err := s.singletonDB.Delete(PrunedKey); err != nil {
-			return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err)
-		}
-
-		if err := s.Commit(); err != nil {
-			return nil, fmt.Errorf("failed to commit to baseDB: %w", err)
-		}
-	}
-
 	return s, nil
 }
 
@@ -770,37 +716,6 @@ func (s *state) doneInit() error {
 	return s.singletonDB.Put(InitializedKey, nil)
 }
 
-func (s *state) ShouldPrune() (bool, error) {
-	has, err := s.singletonDB.Has(PrunedKey)
-	if err != nil {
-		return true, err
-	}
-
-	// If [prunedKey] is not in [singletonDB], [PruneAndIndex()] did not finish
-	// execution.
-	if !has {
-		return true, nil
-	}
-
-	// To ensure the db was not modified since we last ran [PruneAndIndex()], we
-	// must verify that [s.lastAccepted] is height indexed.
-	blk, err := s.GetStatelessBlock(s.lastAccepted)
-	if err != nil {
-		return true, err
-	}
-
-	_, err = s.GetBlockIDAtHeight(blk.Height())
-	if err == database.ErrNotFound {
-		return true, nil
-	}
-
-	return false, err
-}
-
-func (s *state) donePrune() error {
-	return s.singletonDB.Put(PrunedKey, nil)
-}
-
 func (s *state) GetSubnets() ([]*txs.Tx, error) {
 	if s.cachedSubnets != nil {
 		return s.cachedSubnets, nil
@@ -1939,16 +1854,11 @@ func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) {
 		return nil, err
 	}
 
-	blk, status, _, err := parseStoredBlock(blkBytes)
+	blk, err := block.Parse(block.GenesisCodec, blkBytes)
 	if err != nil {
 		return nil, err
 	}
 
-	if status != choices.Accepted {
-		s.blockCache.Put(blockID, nil)
-		return nil, database.ErrNotFound
-	}
-
 	s.blockCache.Put(blockID, blk)
 	return blk, nil
 }
@@ -2450,187 +2360,3 @@ func (s *state) writeMetadata() error {
 
 	return nil
 }
-
-// Returns the block, status of the block, and whether it is a [stateBlk].
-// Invariant: blkBytes is safe to parse with blocks.GenesisCodec
-//
-// TODO: Remove after v1.11.x is activated
-func parseStoredBlock(blkBytes []byte) (block.Block, choices.Status, bool, error) {
-	// Attempt to parse as blocks.Block
-	blk, err := block.Parse(block.GenesisCodec, blkBytes)
-	if err == nil {
-		return blk, choices.Accepted, false, nil
-	}
-
-	// Fallback to [stateBlk]
-	blkState := stateBlk{}
-	if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil {
-		return nil, choices.Processing, false, err
-	}
-
-	blkState.Blk, err = block.Parse(block.GenesisCodec, blkState.Bytes)
-	if err != nil {
-		return nil, choices.Processing, false, err
-	}
-
-	return blkState.Blk, blkState.Status, true, nil
-}
-
-func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error {
-	lock.Lock()
-	// It is possible that new blocks are added after grabbing this iterator. New
-	// blocks are guaranteed to be accepted and height-indexed, so we don't need to
-	// check them.
-	blockIterator := s.blockDB.NewIterator()
-	// Releasing is done using a closure to ensure that updating blockIterator will
-	// result in having the most recent iterator released when executing the
-	// deferred function.
-	defer func() {
-		blockIterator.Release()
-	}()
-
-	// While we are pruning the disk, we disable caching of the data we are
-	// modifying. Caching is re-enabled when pruning finishes.
-	//
-	// Note: If an unexpected error occurs the caches are never re-enabled.
-	// That's fine as the node is going to be in an unhealthy state regardless.
-	oldBlockIDCache := s.blockIDCache
-	s.blockIDCache = &cache.Empty[uint64, ids.ID]{}
-	lock.Unlock()
-
-	log.Info("starting state pruning and indexing")
-
-	var (
-		startTime  = time.Now()
-		lastCommit = startTime
-		lastUpdate = startTime
-		numPruned  = 0
-		numIndexed = 0
-	)
-
-	for blockIterator.Next() {
-		blkBytes := blockIterator.Value()
-
-		blk, status, isStateBlk, err := parseStoredBlock(blkBytes)
-		if err != nil {
-			return err
-		}
-
-		if status != choices.Accepted {
-			// Remove non-accepted blocks from disk.
-			if err := s.blockDB.Delete(blockIterator.Key()); err != nil {
-				return fmt.Errorf("failed to delete block: %w", err)
-			}
-
-			numPruned++
-
-			// We don't index the height of non-accepted blocks.
-			continue
-		}
-
-		blkHeight := blk.Height()
-		blkID := blk.ID()
-
-		// Populate the map of height -> blockID.
-		heightKey := database.PackUInt64(blkHeight)
-		if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil {
-			return fmt.Errorf("failed to add blockID: %w", err)
-		}
-
-		// Since we only store accepted blocks on disk, we only need to store a map of
-		// ids.ID to Block.
-		if isStateBlk {
-			if err := s.blockDB.Put(blkID[:], blkBytes); err != nil {
-				return fmt.Errorf("failed to write block: %w", err)
-			}
-		}
-
-		numIndexed++
-
-		if numIndexed%pruneCommitLimit == 0 {
-			// We must hold the lock during committing to make sure we don't
-			// attempt to commit to disk while a block is concurrently being
-			// accepted.
-			lock.Lock()
-			err := utils.Err(
-				s.Commit(),
-				blockIterator.Error(),
-			)
-			lock.Unlock()
-			if err != nil {
-				return err
-			}
-
-			// We release the iterator here to allow the underlying database to
-			// clean up deleted state.
-			blockIterator.Release()
-
-			now := time.Now()
-			if now.Sub(lastUpdate) > pruneUpdateFrequency {
-				lastUpdate = now
-
-				progress := timer.ProgressFromHash(blkID[:])
-				eta := timer.EstimateETA(
-					startTime,
-					progress,
-					math.MaxUint64,
-				)
-
-				log.Info("committing state pruning and indexing",
-					zap.Int("numPruned", numPruned),
-					zap.Int("numIndexed", numIndexed),
-					zap.Duration("eta", eta),
-				)
-			}
-
-			// We take the minimum here because it's possible that the node is
-			// currently bootstrapping. This would mean that grabbing the lock
-			// could take an extremely long period of time; which we should not
-			// delay processing for.
-			pruneDuration := now.Sub(lastCommit)
-			sleepDuration := min(
-				pruneCommitSleepMultiplier*pruneDuration,
-				pruneCommitSleepCap,
-			)
-			time.Sleep(sleepDuration)
-
-			// Make sure not to include the sleep duration into the next prune
-			// duration.
-			lastCommit = time.Now()
-
-			blockIterator = s.blockDB.NewIteratorWithStart(blkID[:])
-		}
-	}
-
-	// Ensure we fully iterated over all blocks before writing that pruning has
-	// finished.
-	//
-	// Note: This is needed because a transient read error could cause the
-	// iterator to stop early.
-	if err := blockIterator.Error(); err != nil {
-		return err
-	}
-
-	if err := s.donePrune(); err != nil {
-		return err
-	}
-
-	// We must hold the lock during committing to make sure we don't
-	// attempt to commit to disk while a block is concurrently being
-	// accepted.
-	lock.Lock()
-	defer lock.Unlock()
-
-	// Make sure we flush the original cache before re-enabling it to prevent
-	// surfacing any stale data.
-	oldBlockIDCache.Flush()
-	s.blockIDCache = oldBlockIDCache
-
-	log.Info("finished state pruning and indexing",
-		zap.Int("numPruned", numPruned),
-		zap.Int("numIndexed", numIndexed),
-		zap.Duration("duration", time.Since(startTime)),
-	)
-
-	return s.Commit()
-}
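The height index that the removed PruneAndIndex loop used to backfill is keyed by database.PackUInt64(height) and stores block IDs with database.PutID, as the deleted code above shows. The following standalone sketch illustrates that read/write pattern against an in-memory database; the helper names and the use of memdb are illustrative assumptions, not code from state.go.

```go
// Standalone sketch of the height-index key/value layout: heights are packed
// with database.PackUInt64 and block IDs are stored with database.PutID /
// database.GetID. The blockIDDB argument stands in for the state's internal
// height-index database; it is not the real state type.
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
)

func putBlockIDAtHeight(blockIDDB database.Database, height uint64, blkID ids.ID) error {
	heightKey := database.PackUInt64(height)
	return database.PutID(blockIDDB, heightKey, blkID)
}

func getBlockIDAtHeight(blockIDDB database.Database, height uint64) (ids.ID, error) {
	heightKey := database.PackUInt64(height)
	return database.GetID(blockIDDB, heightKey)
}

func main() {
	db := memdb.New()
	blkID := ids.GenerateTestID()

	if err := putBlockIDAtHeight(db, 42, blkID); err != nil {
		panic(err)
	}

	got, err := getBlockIDAtHeight(db, 42)
	if err != nil || got != blkID {
		panic("height index round trip failed")
	}
	fmt.Println("blockID at height 42:", got)
}
```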
diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go
index f4e29ccceda6..e5eda2256f12 100644
--- a/vms/platformvm/state/state_test.go
+++ b/vms/platformvm/state/state_test.go
@@ -18,7 +18,6 @@ import (
 	"github.com/ava-labs/avalanchego/database/memdb"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/snow"
-	"github.com/ava-labs/avalanchego/snow/choices"
 	"github.com/ava-labs/avalanchego/snow/validators"
 	"github.com/ava-labs/avalanchego/utils/constants"
 	"github.com/ava-labs/avalanchego/utils/crypto/bls"
@@ -1294,117 +1293,6 @@ func requireEqualPublicKeysValidatorSet(
 	}
 }
 
-func TestParsedStateBlock(t *testing.T) {
-	require := require.New(t)
-
-	var blks []block.Block
-
-	{
-		blk, err := block.NewApricotAbortBlock(ids.GenerateTestID(), 1000)
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		blk, err := block.NewApricotAtomicBlock(ids.GenerateTestID(), 1000, &txs.Tx{
-			Unsigned: &txs.AdvanceTimeTx{
-				Time: 1000,
-			},
-		})
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		blk, err := block.NewApricotCommitBlock(ids.GenerateTestID(), 1000)
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		tx := &txs.Tx{
-			Unsigned: &txs.RewardValidatorTx{
-				TxID: ids.GenerateTestID(),
-			},
-		}
-		require.NoError(tx.Initialize(txs.Codec))
-		blk, err := block.NewApricotProposalBlock(ids.GenerateTestID(), 1000, tx)
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		tx := &txs.Tx{
-			Unsigned: &txs.RewardValidatorTx{
-				TxID: ids.GenerateTestID(),
-			},
-		}
-		require.NoError(tx.Initialize(txs.Codec))
-		blk, err := block.NewApricotStandardBlock(ids.GenerateTestID(), 1000, []*txs.Tx{tx})
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		blk, err := block.NewBanffAbortBlock(time.Now(), ids.GenerateTestID(), 1000)
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		blk, err := block.NewBanffCommitBlock(time.Now(), ids.GenerateTestID(), 1000)
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		tx := &txs.Tx{
-			Unsigned: &txs.RewardValidatorTx{
-				TxID: ids.GenerateTestID(),
-			},
-		}
-		require.NoError(tx.Initialize(txs.Codec))
-
-		blk, err := block.NewBanffProposalBlock(time.Now(), ids.GenerateTestID(), 1000, tx, []*txs.Tx{})
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	{
-		tx := &txs.Tx{
-			Unsigned: &txs.RewardValidatorTx{
-				TxID: ids.GenerateTestID(),
-			},
-		}
-		require.NoError(tx.Initialize(txs.Codec))
-
-		blk, err := block.NewBanffStandardBlock(time.Now(), ids.GenerateTestID(), 1000, []*txs.Tx{tx})
-		require.NoError(err)
-		blks = append(blks, blk)
-	}
-
-	for _, blk := range blks {
-		stBlk := stateBlk{
-			Blk:    blk,
-			Bytes:  blk.Bytes(),
-			Status: choices.Accepted,
-		}
-
-		stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk)
-		require.NoError(err)
-
-		gotBlk, _, isStateBlk, err := parseStoredBlock(stBlkBytes)
-		require.NoError(err)
-		require.True(isStateBlk)
-		require.Equal(blk.ID(), gotBlk.ID())
-
-		gotBlk, _, isStateBlk, err = parseStoredBlock(blk.Bytes())
-		require.NoError(err)
-		require.False(isStateBlk)
-		require.Equal(blk.ID(), gotBlk.ID())
-	}
-}
-
 func TestStateSubnetOwner(t *testing.T) {
 	require := require.New(t)
 
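The removed TestParsedStateBlock covered the legacy stateBlk wrapper; with that format gone, stored block bytes are expected to round-trip directly through block.Parse with the genesis codec. A hypothetical replacement-style check (not part of this diff) could look like the sketch below, which only uses constructors and helpers already shown above.

```go
package state

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/platformvm/block"
)

// Hypothetical check, not part of this diff: stored block bytes are now just
// blk.Bytes(), so block.Parse with the genesis codec must round-trip them.
func TestBlockBytesRoundTrip(t *testing.T) {
	require := require.New(t)

	blk, err := block.NewApricotCommitBlock(ids.GenerateTestID(), 1000)
	require.NoError(err)

	parsed, err := block.Parse(block.GenesisCodec, blk.Bytes())
	require.NoError(err)
	require.Equal(blk.ID(), parsed.ID())
	require.Equal(blk.Bytes(), parsed.Bytes())
}
```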
diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go
index 30e1b8c3d63d..4dd1b2d6af38 100644
--- a/vms/platformvm/vm.go
+++ b/vms/platformvm/vm.go
@@ -92,9 +92,6 @@ type VM struct {
 	onShutdownCtx context.Context
 	// Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown()
 	onShutdownCtxCancel context.CancelFunc
-
-	// TODO: Remove after v1.11.x is activated
-	pruned utils.Atomic[bool]
 }
 
 // Initialize this blockchain.
@@ -248,30 +245,6 @@ func (vm *VM) Initialize(
 	// [periodicallyPruneMempool] grabs the context lock.
 	go vm.periodicallyPruneMempool(execConfig.MempoolPruneFrequency)
 
-	shouldPrune, err := vm.state.ShouldPrune()
-	if err != nil {
-		return fmt.Errorf(
-			"failed to check if the database should be pruned: %w",
-			err,
-		)
-	}
-	if !shouldPrune {
-		chainCtx.Log.Info("state already pruned and indexed")
-		vm.pruned.Set(true)
-		return nil
-	}
-
-	go func() {
-		err := vm.state.PruneAndIndex(&vm.ctx.Lock, vm.ctx.Log)
-		if err != nil {
-			vm.ctx.Log.Error("state pruning and height indexing failed",
-				zap.Error(err),
-			)
-		}
-
-		vm.pruned.Set(true)
-	}()
-
 	return nil
 }
 
@@ -530,12 +503,8 @@ func (vm *VM) Logger() logging.Logger {
 	return vm.ctx.Log
 }
 
-func (vm *VM) VerifyHeightIndex(_ context.Context) error {
-	if vm.pruned.Get() {
-		return nil
-	}
-
-	return snowmanblock.ErrIndexIncomplete
+func (*VM) VerifyHeightIndex(_ context.Context) error {
+	return nil
 }
 
 func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) {
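With VerifyHeightIndex hard-coded to return nil, callers no longer need to handle snowmanblock.ErrIndexIncomplete while a background PruneAndIndex run completes. The caller-side sketch below uses a local stand-in interface rather than the actual consensus engine types; the package, interface, and function names are assumptions for illustration only.

```go
package example

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
)

// heightIndexer is a local stand-in for the two VM methods shown above; it is
// not an upstream interface.
type heightIndexer interface {
	VerifyHeightIndex(context.Context) error
	GetBlockIDAtHeight(context.Context, uint64) (ids.ID, error)
}

// lookupBlockID sketches the caller-side flow: before this change the
// VerifyHeightIndex call could fail until the background PruneAndIndex
// goroutine finished; now the index is always reported as complete.
func lookupBlockID(ctx context.Context, vm heightIndexer, height uint64) (ids.ID, error) {
	if err := vm.VerifyHeightIndex(ctx); err != nil {
		return ids.Empty, fmt.Errorf("height index unavailable: %w", err)
	}
	return vm.GetBlockIDAtHeight(ctx, height)
}
```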